1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * Copyright (c) 2011-2015 LSI Corp. 4 * Copyright (c) 2013-2016 Avago Technologies 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD 29 * 30 * $FreeBSD: head/sys/dev/mpr/mpr_sas.c 331422 2018-03-23 13:52:26Z ken $ 31 */ 32 33 /* Communications core for Avago Technologies (LSI) MPT3 */ 34 35 /* TODO Move headers to mprvar */ 36 #include <sys/types.h> 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/kernel.h> 40 #include <sys/module.h> 41 #include <sys/bus.h> 42 #include <sys/conf.h> 43 #include <sys/bio.h> 44 #include <sys/malloc.h> 45 #include <sys/uio.h> 46 #include <sys/sysctl.h> 47 #include <sys/endian.h> 48 #include <sys/queue.h> 49 #include <sys/kthread.h> 50 #include <sys/taskqueue.h> 51 #include <sys/sbuf.h> 52 #include <sys/eventhandler.h> 53 54 #include <sys/rman.h> 55 56 #include <machine/stdarg.h> 57 58 #include <bus/cam/cam.h> 59 #include <bus/cam/cam_ccb.h> 60 #include <bus/cam/cam_debug.h> 61 #include <bus/cam/cam_sim.h> 62 #include <bus/cam/cam_xpt_sim.h> 63 #include <bus/cam/cam_xpt_periph.h> 64 #include <bus/cam/cam_periph.h> 65 #include <bus/cam/scsi/scsi_all.h> 66 #include <bus/cam/scsi/scsi_message.h> 67 #if 0 /* __FreeBSD_version >= 900026 */ 68 #include <bus/cam/scsi/smp_all.h> 69 #endif 70 71 #if 0 /* XXX swildner NVMe support */ 72 #include <dev/nvme/nvme.h> 73 #endif 74 75 #include <dev/raid/mpr/mpi/mpi2_type.h> 76 #include <dev/raid/mpr/mpi/mpi2.h> 77 #include <dev/raid/mpr/mpi/mpi2_ioc.h> 78 #include <dev/raid/mpr/mpi/mpi2_sas.h> 79 #include <dev/raid/mpr/mpi/mpi2_pci.h> 80 #include <dev/raid/mpr/mpi/mpi2_cnfg.h> 81 #include <dev/raid/mpr/mpi/mpi2_init.h> 82 #include <dev/raid/mpr/mpi/mpi2_tool.h> 83 #include <dev/raid/mpr/mpr_ioctl.h> 84 #include <dev/raid/mpr/mprvar.h> 85 #include <dev/raid/mpr/mpr_table.h> 86 #include <dev/raid/mpr/mpr_sas.h> 87 88 #define MPRSAS_DISCOVERY_TIMEOUT 20 89 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */ 90 91 /* 92 * static array to check SCSI OpCode for EEDP protection bits 93 */ 94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP 95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP 96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP 97 static uint8_t op_code_prot[256] = { 98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 114 }; 115 116 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *); 117 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *); 118 static void mprsas_action(struct cam_sim *sim, union ccb *ccb); 119 static void mprsas_poll(struct cam_sim *sim); 120 static void mprsas_scsiio_timeout(void *data); 121 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm); 122 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *); 123 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *); 124 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *); 125 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *); 126 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, 127 struct mpr_command *cm); 128 void mprsas_rescan_callback(struct cam_periph *, union ccb *); 129 static void mprsas_async(void *callback_arg, uint32_t code, 130 struct cam_path *path, void *arg); 131 #if 1 /* (__FreeBSD_version < 901503) || \ 132 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 133 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path, 134 struct ccb_getdev *cgd); 135 static void mprsas_read_cap_done(struct cam_periph *periph, 136 union ccb *done_ccb); 137 #endif 138 static int mprsas_send_portenable(struct mpr_softc *sc); 139 static void mprsas_portenable_complete(struct mpr_softc *sc, 140 struct mpr_command *cm); 141 142 #if 0 /* __FreeBSD_version >= 900026 */ 143 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm); 144 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, 145 uint64_t sasaddr); 146 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb); 147 #endif //FreeBSD_version >= 900026 148 149 struct mprsas_target * 150 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start, 151 uint16_t handle) 152 { 153 struct mprsas_target *target; 154 int i; 155 156 for (i = start; i < sassc->maxtargets; i++) { 157 target = &sassc->targets[i]; 158 if (target->handle == handle) 159 return (target); 160 } 161 162 return (NULL); 163 } 164 165 /* we need to freeze the simq during attach and diag reset, to avoid failing 166 * commands before device handles have been found by discovery. Since 167 * discovery involves reading config pages and possibly sending commands, 168 * discovery actions may continue even after we receive the end of discovery 169 * event, so refcount discovery actions instead of assuming we can unfreeze 170 * the simq when we get the event. 171 */ 172 void 173 mprsas_startup_increment(struct mprsas_softc *sassc) 174 { 175 MPR_FUNCTRACE(sassc->sc); 176 177 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { 178 if (sassc->startup_refcount++ == 0) { 179 /* just starting, freeze the simq */ 180 mpr_dprint(sassc->sc, MPR_INIT, 181 "%s freezing simq\n", __func__); 182 #if 0 /* (__FreeBSD_version >= 1000039) || \ 183 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) */ 184 xpt_hold_boot(); 185 #endif 186 xpt_freeze_simq(sassc->sim, 1); 187 } 188 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, 189 sassc->startup_refcount); 190 } 191 } 192 193 void 194 mprsas_release_simq_reinit(struct mprsas_softc *sassc) 195 { 196 if (sassc->flags & MPRSAS_QUEUE_FROZEN) { 197 sassc->flags &= ~MPRSAS_QUEUE_FROZEN; 198 xpt_release_simq(sassc->sim, 1); 199 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n"); 200 } 201 } 202 203 void 204 mprsas_startup_decrement(struct mprsas_softc *sassc) 205 { 206 MPR_FUNCTRACE(sassc->sc); 207 208 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { 209 if (--sassc->startup_refcount == 0) { 210 /* finished all discovery-related actions, release 211 * the simq and rescan for the latest topology. 212 */ 213 mpr_dprint(sassc->sc, MPR_INIT, 214 "%s releasing simq\n", __func__); 215 sassc->flags &= ~MPRSAS_IN_STARTUP; 216 xpt_release_simq(sassc->sim, 1); 217 #if 0 /* (__FreeBSD_version >= 1000039) || \ 218 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) */ 219 xpt_release_boot(); 220 #else 221 mprsas_rescan_target(sassc->sc, NULL); 222 #endif 223 } 224 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, 225 sassc->startup_refcount); 226 } 227 } 228 229 /* The firmware requires us to stop sending commands when we're doing task 230 * management, so refcount the TMs and keep the simq frozen when any are in 231 * use. 232 */ 233 struct mpr_command * 234 mprsas_alloc_tm(struct mpr_softc *sc) 235 { 236 struct mpr_command *tm; 237 238 MPR_FUNCTRACE(sc); 239 tm = mpr_alloc_high_priority_command(sc); 240 return tm; 241 } 242 243 void 244 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm) 245 { 246 int target_id = 0xFFFFFFFF; 247 248 MPR_FUNCTRACE(sc); 249 if (tm == NULL) 250 return; 251 252 /* 253 * For TM's the devq is frozen for the device. Unfreeze it here and 254 * free the resources used for freezing the devq. Must clear the 255 * INRESET flag as well or scsi I/O will not work. 256 */ 257 if (tm->cm_targ != NULL) { 258 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET; 259 target_id = tm->cm_targ->tid; 260 } 261 if (tm->cm_ccb) { 262 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n", 263 target_id); 264 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE); 265 xpt_free_path(tm->cm_ccb->ccb_h.path); 266 xpt_free_ccb(&tm->cm_ccb->ccb_h); 267 } 268 269 mpr_free_high_priority_command(sc, tm); 270 } 271 272 void 273 mprsas_rescan_callback(struct cam_periph *periph, union ccb *ccb) 274 { 275 if (ccb->ccb_h.status != CAM_REQ_CMP) 276 kprintf("cam_scan_callback: failure status = %x\n", 277 ccb->ccb_h.status); 278 279 xpt_free_path(ccb->ccb_h.path); 280 xpt_free_ccb(&ccb->ccb_h); 281 } 282 283 void 284 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ) 285 { 286 struct mprsas_softc *sassc = sc->sassc; 287 path_id_t pathid; 288 target_id_t targetid; 289 union ccb *ccb; 290 291 MPR_FUNCTRACE(sc); 292 pathid = cam_sim_path(sassc->sim); 293 if (targ == NULL) 294 targetid = CAM_TARGET_WILDCARD; 295 else 296 targetid = targ - sassc->targets; 297 298 /* 299 * Allocate a CCB and schedule a rescan. 300 */ 301 ccb = xpt_alloc_ccb(); 302 if (ccb == NULL) { 303 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n"); 304 return; 305 } 306 307 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, targetid, 308 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 309 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n"); 310 xpt_free_ccb(&ccb->ccb_h); 311 return; 312 } 313 314 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5); /* 5 = low priority */ 315 316 /* XXX Hardwired to scan the bus for now */ 317 ccb->ccb_h.func_code = XPT_SCAN_BUS; 318 ccb->ccb_h.cbfcnp = mprsas_rescan_callback; 319 ccb->crcn.flags = CAM_FLAG_NONE; 320 321 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid); 322 xpt_action(ccb); 323 } 324 325 static void __printflike(3, 4) 326 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...) 327 { 328 struct sbuf sb; 329 __va_list ap; 330 char str[192]; 331 char path_str[64]; 332 333 if (cm == NULL) 334 return; 335 336 /* No need to be in here if debugging isn't enabled */ 337 if ((cm->cm_sc->mpr_debug & level) == 0) 338 return; 339 340 sbuf_new(&sb, str, sizeof(str), 0); 341 342 __va_start(ap, fmt); 343 344 if (cm->cm_ccb != NULL) { 345 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str, 346 sizeof(path_str)); 347 sbuf_cat(&sb, path_str); 348 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) { 349 scsi_command_string(&cm->cm_ccb->csio, &sb); 350 sbuf_printf(&sb, "length %d ", 351 cm->cm_ccb->csio.dxfer_len); 352 } 353 } else { 354 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ", 355 cam_sim_name(cm->cm_sc->sassc->sim), 356 cam_sim_unit(cm->cm_sc->sassc->sim), 357 cam_sim_bus(cm->cm_sc->sassc->sim), 358 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF, 359 cm->cm_lun); 360 } 361 362 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); 363 sbuf_vprintf(&sb, fmt, ap); 364 sbuf_finish(&sb); 365 mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb)); 366 367 __va_end(ap); 368 } 369 370 static void 371 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm) 372 { 373 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 374 struct mprsas_target *targ; 375 uint16_t handle; 376 377 MPR_FUNCTRACE(sc); 378 379 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 380 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 381 targ = tm->cm_targ; 382 383 if (reply == NULL) { 384 /* XXX retry the remove after the diag reset completes? */ 385 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " 386 "0x%04x\n", __func__, handle); 387 mprsas_free_tm(sc, tm); 388 return; 389 } 390 391 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != 392 MPI2_IOCSTATUS_SUCCESS) { 393 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting " 394 "device 0x%x\n", le16toh(reply->IOCStatus), handle); 395 } 396 397 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", 398 le32toh(reply->TerminationCount)); 399 mpr_free_reply(sc, tm->cm_reply_data); 400 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 401 402 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n", 403 targ->tid, handle); 404 405 /* 406 * Don't clear target if remove fails because things will get confusing. 407 * Leave the devname and sasaddr intact so that we know to avoid reusing 408 * this target id if possible, and so we can assign the same target id 409 * to this device if it comes back in the future. 410 */ 411 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) == 412 MPI2_IOCSTATUS_SUCCESS) { 413 targ = tm->cm_targ; 414 targ->handle = 0x0; 415 targ->encl_handle = 0x0; 416 targ->encl_level_valid = 0x0; 417 targ->encl_level = 0x0; 418 targ->connector_name[0] = ' '; 419 targ->connector_name[1] = ' '; 420 targ->connector_name[2] = ' '; 421 targ->connector_name[3] = ' '; 422 targ->encl_slot = 0x0; 423 targ->exp_dev_handle = 0x0; 424 targ->phy_num = 0x0; 425 targ->linkrate = 0x0; 426 targ->devinfo = 0x0; 427 targ->flags = 0x0; 428 targ->scsi_req_desc_type = 0; 429 } 430 431 mprsas_free_tm(sc, tm); 432 } 433 434 435 /* 436 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. 437 * Otherwise Volume Delete is same as Bare Drive Removal. 438 */ 439 void 440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle) 441 { 442 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 443 struct mpr_softc *sc; 444 struct mpr_command *cm; 445 struct mprsas_target *targ = NULL; 446 447 MPR_FUNCTRACE(sassc->sc); 448 sc = sassc->sc; 449 450 targ = mprsas_find_target_by_handle(sassc, 0, handle); 451 if (targ == NULL) { 452 /* FIXME: what is the action? */ 453 /* We don't know about this device? */ 454 mpr_dprint(sc, MPR_ERROR, 455 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); 456 return; 457 } 458 459 targ->flags |= MPRSAS_TARGET_INREMOVAL; 460 461 cm = mprsas_alloc_tm(sc); 462 if (cm == NULL) { 463 mpr_dprint(sc, MPR_ERROR, 464 "%s: command alloc failure\n", __func__); 465 return; 466 } 467 468 mprsas_rescan_target(sc, targ); 469 470 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 471 req->DevHandle = targ->handle; 472 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 473 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 474 475 /* SAS Hard Link Reset / SATA Link Reset */ 476 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 477 478 cm->cm_targ = targ; 479 cm->cm_data = NULL; 480 cm->cm_desc.HighPriority.RequestFlags = 481 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 482 cm->cm_complete = mprsas_remove_volume; 483 cm->cm_complete_data = (void *)(uintptr_t)handle; 484 485 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", 486 __func__, targ->tid); 487 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); 488 489 mpr_map_command(sc, cm); 490 } 491 492 /* 493 * The firmware performs debounce on the link to avoid transient link errors 494 * and false removals. When it does decide that link has been lost and a 495 * device needs to go away, it expects that the host will perform a target reset 496 * and then an op remove. The reset has the side-effect of aborting any 497 * outstanding requests for the device, which is required for the op-remove to 498 * succeed. It's not clear if the host should check for the device coming back 499 * alive after the reset. 500 */ 501 void 502 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle) 503 { 504 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 505 struct mpr_softc *sc; 506 struct mpr_command *cm; 507 struct mprsas_target *targ = NULL; 508 509 MPR_FUNCTRACE(sassc->sc); 510 511 sc = sassc->sc; 512 513 targ = mprsas_find_target_by_handle(sassc, 0, handle); 514 if (targ == NULL) { 515 /* FIXME: what is the action? */ 516 /* We don't know about this device? */ 517 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n", 518 __func__, handle); 519 return; 520 } 521 522 targ->flags |= MPRSAS_TARGET_INREMOVAL; 523 524 cm = mprsas_alloc_tm(sc); 525 if (cm == NULL) { 526 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n", 527 __func__); 528 return; 529 } 530 531 mprsas_rescan_target(sc, targ); 532 533 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 534 memset(req, 0, sizeof(*req)); 535 req->DevHandle = htole16(targ->handle); 536 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 537 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 538 539 /* SAS Hard Link Reset / SATA Link Reset */ 540 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 541 542 cm->cm_targ = targ; 543 cm->cm_data = NULL; 544 cm->cm_desc.HighPriority.RequestFlags = 545 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 546 cm->cm_complete = mprsas_remove_device; 547 cm->cm_complete_data = (void *)(uintptr_t)handle; 548 549 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", 550 __func__, targ->tid); 551 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); 552 553 mpr_map_command(sc, cm); 554 } 555 556 static void 557 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm) 558 { 559 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 560 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req; 561 struct mprsas_target *targ; 562 struct mpr_command *next_cm; 563 uint16_t handle; 564 565 MPR_FUNCTRACE(sc); 566 567 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 568 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 569 targ = tm->cm_targ; 570 571 /* 572 * Currently there should be no way we can hit this case. It only 573 * happens when we have a failure to allocate chain frames, and 574 * task management commands don't have S/G lists. 575 */ 576 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 577 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of " 578 "handle %#04x! This should not happen!\n", __func__, 579 tm->cm_flags, handle); 580 } 581 582 if (reply == NULL) { 583 /* XXX retry the remove after the diag reset completes? */ 584 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " 585 "0x%04x\n", __func__, handle); 586 mprsas_free_tm(sc, tm); 587 return; 588 } 589 590 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != 591 MPI2_IOCSTATUS_SUCCESS) { 592 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting " 593 "device 0x%x\n", le16toh(reply->IOCStatus), handle); 594 } 595 596 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", 597 le32toh(reply->TerminationCount)); 598 mpr_free_reply(sc, tm->cm_reply_data); 599 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 600 601 /* Reuse the existing command */ 602 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req; 603 memset(req, 0, sizeof(*req)); 604 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 605 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 606 req->DevHandle = htole16(handle); 607 tm->cm_data = NULL; 608 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 609 tm->cm_complete = mprsas_remove_complete; 610 tm->cm_complete_data = (void *)(uintptr_t)handle; 611 612 mpr_map_command(sc, tm); 613 614 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n", 615 targ->tid, handle); 616 if (targ->encl_level_valid) { 617 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, " 618 "connector name (%4s)\n", targ->encl_level, targ->encl_slot, 619 targ->connector_name); 620 } 621 TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) { 622 union ccb *ccb; 623 624 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm); 625 ccb = tm->cm_complete_data; 626 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 627 mprsas_scsiio_complete(sc, tm); 628 } 629 } 630 631 static void 632 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm) 633 { 634 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply; 635 uint16_t handle; 636 struct mprsas_target *targ; 637 struct mprsas_lun *lun; 638 639 MPR_FUNCTRACE(sc); 640 641 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply; 642 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 643 644 /* 645 * Currently there should be no way we can hit this case. It only 646 * happens when we have a failure to allocate chain frames, and 647 * task management commands don't have S/G lists. 648 */ 649 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 650 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of " 651 "handle %#04x! This should not happen!\n", __func__, 652 tm->cm_flags, handle); 653 mprsas_free_tm(sc, tm); 654 return; 655 } 656 657 if (reply == NULL) { 658 /* most likely a chip reset */ 659 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device " 660 "0x%04x\n", __func__, handle); 661 mprsas_free_tm(sc, tm); 662 return; 663 } 664 665 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n", 666 __func__, handle, le16toh(reply->IOCStatus)); 667 668 /* 669 * Don't clear target if remove fails because things will get confusing. 670 * Leave the devname and sasaddr intact so that we know to avoid reusing 671 * this target id if possible, and so we can assign the same target id 672 * to this device if it comes back in the future. 673 */ 674 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) == 675 MPI2_IOCSTATUS_SUCCESS) { 676 targ = tm->cm_targ; 677 targ->handle = 0x0; 678 targ->encl_handle = 0x0; 679 targ->encl_level_valid = 0x0; 680 targ->encl_level = 0x0; 681 targ->connector_name[0] = ' '; 682 targ->connector_name[1] = ' '; 683 targ->connector_name[2] = ' '; 684 targ->connector_name[3] = ' '; 685 targ->encl_slot = 0x0; 686 targ->exp_dev_handle = 0x0; 687 targ->phy_num = 0x0; 688 targ->linkrate = 0x0; 689 targ->devinfo = 0x0; 690 targ->flags = 0x0; 691 targ->scsi_req_desc_type = 0; 692 693 while (!SLIST_EMPTY(&targ->luns)) { 694 lun = SLIST_FIRST(&targ->luns); 695 SLIST_REMOVE_HEAD(&targ->luns, lun_link); 696 kfree(lun, M_MPR); 697 } 698 } 699 700 mprsas_free_tm(sc, tm); 701 } 702 703 static int 704 mprsas_register_events(struct mpr_softc *sc) 705 { 706 uint8_t events[16]; 707 708 bzero(events, 16); 709 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); 710 setbit(events, MPI2_EVENT_SAS_DISCOVERY); 711 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); 712 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE); 713 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW); 714 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 715 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); 716 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); 717 setbit(events, MPI2_EVENT_IR_VOLUME); 718 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK); 719 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS); 720 setbit(events, MPI2_EVENT_TEMP_THRESHOLD); 721 setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR); 722 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) { 723 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); 724 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) { 725 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE); 726 setbit(events, MPI2_EVENT_PCIE_ENUMERATION); 727 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST); 728 } 729 } 730 731 mpr_register_events(sc, events, mprsas_evt_handler, NULL, 732 &sc->sassc->mprsas_eh); 733 734 return (0); 735 } 736 737 int 738 mpr_attach_sas(struct mpr_softc *sc) 739 { 740 struct mprsas_softc *sassc; 741 cam_status status; 742 int unit, error = 0, reqs; 743 744 MPR_FUNCTRACE(sc); 745 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 746 747 sassc = kmalloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO); 748 if (!sassc) { 749 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 750 "Cannot allocate SAS subsystem memory\n"); 751 return (ENOMEM); 752 } 753 754 /* 755 * XXX MaxTargets could change during a reinit. Since we don't 756 * resize the targets[] array during such an event, cache the value 757 * of MaxTargets here so that we don't get into trouble later. This 758 * should move into the reinit logic. 759 */ 760 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes; 761 sassc->targets = kmalloc(sizeof(struct mprsas_target) * 762 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO); 763 if (!sassc->targets) { 764 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 765 "Cannot allocate SAS target memory\n"); 766 kfree(sassc, M_MPR); 767 return (ENOMEM); 768 } 769 sc->sassc = sassc; 770 sassc->sc = sc; 771 772 reqs = sc->num_reqs - sc->num_prireqs - 1; 773 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) { 774 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n"); 775 error = ENOMEM; 776 goto out; 777 } 778 779 unit = device_get_unit(sc->mpr_dev); 780 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc, 781 unit, &sc->mpr_lock, reqs, reqs, sassc->devq); 782 cam_simq_release(sassc->devq); 783 if (sassc->sim == NULL) { 784 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n"); 785 error = EINVAL; 786 goto out; 787 } 788 789 TAILQ_INIT(&sassc->ev_queue); 790 791 /* Initialize taskqueue for Event Handling */ 792 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc); 793 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO, 794 taskqueue_thread_enqueue, &sassc->ev_tq); 795 taskqueue_start_threads(&sassc->ev_tq, 1, TDPRI_KERN_DAEMON, 796 -1, "%s taskq", 797 device_get_nameunit(sc->mpr_dev)); 798 799 mpr_lock(sc); 800 801 /* 802 * XXX There should be a bus for every port on the adapter, but since 803 * we're just going to fake the topology for now, we'll pretend that 804 * everything is just a target on a single bus. 805 */ 806 if ((error = xpt_bus_register(sassc->sim, 0)) != 0) { 807 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 808 "Error %d registering SCSI bus\n", error); 809 mpr_unlock(sc); 810 goto out; 811 } 812 813 /* 814 * Assume that discovery events will start right away. 815 * 816 * Hold off boot until discovery is complete. 817 */ 818 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY; 819 sc->sassc->startup_refcount = 0; 820 mprsas_startup_increment(sassc); 821 822 callout_init_mp(&sassc->discovery_callout); 823 824 /* 825 * Register for async events so we can determine the EEDP 826 * capabilities of devices. 827 */ 828 status = xpt_create_path(&sassc->path, /*periph*/NULL, 829 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD, 830 CAM_LUN_WILDCARD); 831 if (status != CAM_REQ_CMP) { 832 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 833 "Error %#x creating sim path\n", status); 834 sassc->path = NULL; 835 } else { 836 int event; 837 838 #if 0 /* (__FreeBSD_version >= 1000006) || \ 839 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000)) */ 840 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE; 841 #else 842 event = AC_FOUND_DEVICE; 843 #endif 844 845 /* 846 * Prior to the CAM locking improvements, we can't call 847 * xpt_register_async() with a particular path specified. 848 * 849 * If a path isn't specified, xpt_register_async() will 850 * generate a wildcard path and acquire the XPT lock while 851 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB. 852 * It will then drop the XPT lock once that is done. 853 * 854 * If a path is specified for xpt_register_async(), it will 855 * not acquire and drop the XPT lock around the call to 856 * xpt_action(). xpt_action() asserts that the caller 857 * holds the SIM lock, so the SIM lock has to be held when 858 * calling xpt_register_async() when the path is specified. 859 * 860 * But xpt_register_async calls xpt_for_all_devices(), 861 * which calls xptbustraverse(), which will acquire each 862 * SIM lock. When it traverses our particular bus, it will 863 * necessarily acquire the SIM lock, which will lead to a 864 * recursive lock acquisition. 865 * 866 * The CAM locking changes fix this problem by acquiring 867 * the XPT topology lock around bus traversal in 868 * xptbustraverse(), so the caller can hold the SIM lock 869 * and it does not cause a recursive lock acquisition. 870 * 871 * These __FreeBSD_version values are approximate, especially 872 * for stable/10, which is two months later than the actual 873 * change. 874 */ 875 876 #if 1 /* (__FreeBSD_version < 1000703) || \ 877 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) */ 878 mpr_unlock(sc); 879 status = xpt_register_async(event, mprsas_async, sc, 880 NULL); 881 mpr_lock(sc); 882 #else 883 status = xpt_register_async(event, mprsas_async, sc, 884 sassc->path); 885 #endif 886 887 if (status != CAM_REQ_CMP) { 888 mpr_dprint(sc, MPR_ERROR, 889 "Error %#x registering async handler for " 890 "AC_ADVINFO_CHANGED events\n", status); 891 xpt_free_path(sassc->path); 892 sassc->path = NULL; 893 } 894 } 895 if (status != CAM_REQ_CMP) { 896 /* 897 * EEDP use is the exception, not the rule. 898 * Warn the user, but do not fail to attach. 899 */ 900 mpr_printf(sc, "EEDP capabilities disabled.\n"); 901 } 902 903 mpr_unlock(sc); 904 905 mprsas_register_events(sc); 906 out: 907 if (error) 908 mpr_detach_sas(sc); 909 910 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error); 911 return (error); 912 } 913 914 int 915 mpr_detach_sas(struct mpr_softc *sc) 916 { 917 struct mprsas_softc *sassc; 918 struct mprsas_lun *lun, *lun_tmp; 919 struct mprsas_target *targ; 920 int i; 921 922 MPR_FUNCTRACE(sc); 923 924 if (sc->sassc == NULL) 925 return (0); 926 927 sassc = sc->sassc; 928 mpr_deregister_events(sc, sassc->mprsas_eh); 929 930 /* 931 * Drain and free the event handling taskqueue with the lock 932 * unheld so that any parallel processing tasks drain properly 933 * without deadlocking. 934 */ 935 if (sassc->ev_tq != NULL) 936 taskqueue_free(sassc->ev_tq); 937 938 /* Make sure CAM doesn't wedge if we had to bail out early. */ 939 mpr_lock(sc); 940 941 while (sassc->startup_refcount != 0) 942 mprsas_startup_decrement(sassc); 943 944 /* Deregister our async handler */ 945 if (sassc->path != NULL) { 946 xpt_register_async(0, mprsas_async, sc, sassc->path); 947 xpt_free_path(sassc->path); 948 sassc->path = NULL; 949 } 950 951 if (sassc->flags & MPRSAS_IN_STARTUP) 952 xpt_release_simq(sassc->sim, 1); 953 954 if (sassc->sim != NULL) { 955 xpt_bus_deregister(cam_sim_path(sassc->sim)); 956 cam_sim_free(sassc->sim); 957 } 958 959 mpr_unlock(sc); 960 961 for (i = 0; i < sassc->maxtargets; i++) { 962 targ = &sassc->targets[i]; 963 SLIST_FOREACH_MUTABLE(lun, &targ->luns, lun_link, lun_tmp) { 964 kfree(lun, M_MPR); 965 } 966 } 967 kfree(sassc->targets, M_MPR); 968 kfree(sassc, M_MPR); 969 sc->sassc = NULL; 970 971 return (0); 972 } 973 974 void 975 mprsas_discovery_end(struct mprsas_softc *sassc) 976 { 977 struct mpr_softc *sc = sassc->sc; 978 979 MPR_FUNCTRACE(sc); 980 981 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING) 982 callout_stop(&sassc->discovery_callout); 983 984 /* 985 * After discovery has completed, check the mapping table for any 986 * missing devices and update their missing counts. Only do this once 987 * whenever the driver is initialized so that missing counts aren't 988 * updated unnecessarily. Note that just because discovery has 989 * completed doesn't mean that events have been processed yet. The 990 * check_devices function is a callout timer that checks if ALL devices 991 * are missing. If so, it will wait a little longer for events to 992 * complete and keep resetting itself until some device in the mapping 993 * table is not missing, meaning that event processing has started. 994 */ 995 if (sc->track_mapping_events) { 996 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has " 997 "completed. Check for missing devices in the mapping " 998 "table.\n"); 999 callout_reset(&sc->device_check_callout, 1000 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices, 1001 sc); 1002 } 1003 } 1004 1005 static void 1006 mprsas_action(struct cam_sim *sim, union ccb *ccb) 1007 { 1008 struct mprsas_softc *sassc; 1009 1010 sassc = cam_sim_softc(sim); 1011 1012 MPR_FUNCTRACE(sassc->sc); 1013 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n", 1014 ccb->ccb_h.func_code); 1015 KKASSERT(lockowned(&sassc->sc->mpr_lock)); 1016 1017 switch (ccb->ccb_h.func_code) { 1018 case XPT_PATH_INQ: 1019 { 1020 struct ccb_pathinq *cpi = &ccb->cpi; 1021 struct mpr_softc *sc = sassc->sc; 1022 1023 cpi->version_num = 1; 1024 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 1025 cpi->target_sprt = 0; 1026 #if 0 /* (__FreeBSD_version >= 1000039) || \ 1027 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) */ 1028 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN; 1029 #elif defined(__DragonFly__) 1030 cpi->hba_misc = PIM_NOBUSRESET; 1031 #else 1032 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; 1033 #endif 1034 cpi->hba_eng_cnt = 0; 1035 cpi->max_target = sassc->maxtargets - 1; 1036 cpi->max_lun = 255; 1037 1038 /* 1039 * initiator_id is set here to an ID outside the set of valid 1040 * target IDs (including volumes). 1041 */ 1042 cpi->initiator_id = sassc->maxtargets; 1043 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 1044 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN); 1045 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 1046 cpi->unit_number = cam_sim_unit(sim); 1047 cpi->bus_id = cam_sim_bus(sim); 1048 /* 1049 * XXXSLM-I think this needs to change based on config page or 1050 * something instead of hardcoded to 150000. 1051 */ 1052 cpi->base_transfer_speed = 150000; 1053 cpi->transport = XPORT_SAS; 1054 cpi->transport_version = 0; 1055 cpi->protocol = PROTO_SCSI; 1056 cpi->protocol_version = SCSI_REV_SPC; 1057 cpi->maxio = sc->maxio; 1058 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 1059 break; 1060 } 1061 case XPT_GET_TRAN_SETTINGS: 1062 { 1063 struct ccb_trans_settings *cts; 1064 struct ccb_trans_settings_sas *sas; 1065 struct ccb_trans_settings_scsi *scsi; 1066 struct mprsas_target *targ; 1067 1068 cts = &ccb->cts; 1069 sas = &cts->xport_specific.sas; 1070 scsi = &cts->proto_specific.scsi; 1071 1072 KASSERT(cts->ccb_h.target_id < sassc->maxtargets, 1073 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n", 1074 cts->ccb_h.target_id)); 1075 targ = &sassc->targets[cts->ccb_h.target_id]; 1076 if (targ->handle == 0x0) { 1077 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1078 break; 1079 } 1080 1081 cts->protocol_version = SCSI_REV_SPC2; 1082 cts->transport = XPORT_SAS; 1083 cts->transport_version = 0; 1084 1085 sas->valid = CTS_SAS_VALID_SPEED; 1086 switch (targ->linkrate) { 1087 case 0x08: 1088 sas->bitrate = 150000; 1089 break; 1090 case 0x09: 1091 sas->bitrate = 300000; 1092 break; 1093 case 0x0a: 1094 sas->bitrate = 600000; 1095 break; 1096 case 0x0b: 1097 sas->bitrate = 1200000; 1098 break; 1099 default: 1100 sas->valid = 0; 1101 } 1102 1103 cts->protocol = PROTO_SCSI; 1104 scsi->valid = CTS_SCSI_VALID_TQ; 1105 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 1106 1107 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 1108 break; 1109 } 1110 case XPT_CALC_GEOMETRY: 1111 cam_calc_geometry(&ccb->ccg, /*extended*/1); 1112 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 1113 break; 1114 case XPT_RESET_DEV: 1115 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action " 1116 "XPT_RESET_DEV\n"); 1117 mprsas_action_resetdev(sassc, ccb); 1118 return; 1119 case XPT_RESET_BUS: 1120 case XPT_ABORT: 1121 case XPT_TERM_IO: 1122 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success " 1123 "for abort or reset\n"); 1124 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 1125 break; 1126 case XPT_SCSI_IO: 1127 mprsas_action_scsiio(sassc, ccb); 1128 return; 1129 #if 0 /* __FreeBSD_version >= 900026 */ 1130 case XPT_SMP_IO: 1131 mprsas_action_smpio(sassc, ccb); 1132 return; 1133 #endif 1134 default: 1135 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL); 1136 break; 1137 } 1138 xpt_done(ccb); 1139 1140 } 1141 1142 static void 1143 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code, 1144 target_id_t target_id, lun_id_t lun_id) 1145 { 1146 path_id_t path_id = cam_sim_path(sc->sassc->sim); 1147 struct cam_path *path; 1148 1149 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__, 1150 ac_code, target_id, (uintmax_t)lun_id); 1151 1152 if (xpt_create_path(&path, NULL, 1153 path_id, target_id, lun_id) != CAM_REQ_CMP) { 1154 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset " 1155 "notification\n"); 1156 return; 1157 } 1158 1159 xpt_async(ac_code, path, NULL); 1160 xpt_free_path(path); 1161 } 1162 1163 static void 1164 mprsas_complete_all_commands(struct mpr_softc *sc) 1165 { 1166 struct mpr_command *cm; 1167 int i; 1168 int completed; 1169 1170 MPR_FUNCTRACE(sc); 1171 KKASSERT(lockowned(&sc->mpr_lock)); 1172 1173 /* complete all commands with a NULL reply */ 1174 for (i = 1; i < sc->num_reqs; i++) { 1175 cm = &sc->commands[i]; 1176 if (cm->cm_state == MPR_CM_STATE_FREE) 1177 continue; 1178 1179 cm->cm_state = MPR_CM_STATE_BUSY; 1180 cm->cm_reply = NULL; 1181 completed = 0; 1182 1183 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) 1184 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; 1185 1186 if (cm->cm_complete != NULL) { 1187 mprsas_log_command(cm, MPR_RECOVERY, 1188 "completing cm %p state %x ccb %p for diag reset\n", 1189 cm, cm->cm_state, cm->cm_ccb); 1190 cm->cm_complete(sc, cm); 1191 completed = 1; 1192 } else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { 1193 mprsas_log_command(cm, MPR_RECOVERY, 1194 "waking up cm %p state %x ccb %p for diag reset\n", 1195 cm, cm->cm_state, cm->cm_ccb); 1196 wakeup(cm); 1197 completed = 1; 1198 } 1199 1200 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) { 1201 /* this should never happen, but if it does, log */ 1202 mprsas_log_command(cm, MPR_RECOVERY, 1203 "cm %p state %x flags 0x%x ccb %p during diag " 1204 "reset\n", cm, cm->cm_state, cm->cm_flags, 1205 cm->cm_ccb); 1206 } 1207 } 1208 1209 sc->io_cmds_active = 0; 1210 } 1211 1212 void 1213 mprsas_handle_reinit(struct mpr_softc *sc) 1214 { 1215 int i; 1216 1217 /* Go back into startup mode and freeze the simq, so that CAM 1218 * doesn't send any commands until after we've rediscovered all 1219 * targets and found the proper device handles for them. 1220 * 1221 * After the reset, portenable will trigger discovery, and after all 1222 * discovery-related activities have finished, the simq will be 1223 * released. 1224 */ 1225 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__); 1226 sc->sassc->flags |= MPRSAS_IN_STARTUP; 1227 sc->sassc->flags |= MPRSAS_IN_DISCOVERY; 1228 mprsas_startup_increment(sc->sassc); 1229 1230 /* notify CAM of a bus reset */ 1231 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 1232 CAM_LUN_WILDCARD); 1233 1234 /* complete and cleanup after all outstanding commands */ 1235 mprsas_complete_all_commands(sc); 1236 1237 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n", 1238 __func__, sc->sassc->startup_refcount); 1239 1240 /* zero all the target handles, since they may change after the 1241 * reset, and we have to rediscover all the targets and use the new 1242 * handles. 1243 */ 1244 for (i = 0; i < sc->sassc->maxtargets; i++) { 1245 if (sc->sassc->targets[i].outstanding != 0) 1246 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n", 1247 i, sc->sassc->targets[i].outstanding); 1248 sc->sassc->targets[i].handle = 0x0; 1249 sc->sassc->targets[i].exp_dev_handle = 0x0; 1250 sc->sassc->targets[i].outstanding = 0; 1251 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET; 1252 } 1253 } 1254 static void 1255 mprsas_tm_timeout(void *data) 1256 { 1257 struct mpr_command *tm = data; 1258 struct mpr_softc *sc = tm->cm_sc; 1259 1260 KKASSERT(lockowned(&sc->mpr_lock)); 1261 1262 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed " 1263 "out\n", tm); 1264 1265 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE, 1266 ("command not inqueue\n")); 1267 1268 tm->cm_state = MPR_CM_STATE_BUSY; 1269 mpr_reinit(sc); 1270 } 1271 1272 static void 1273 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm) 1274 { 1275 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1276 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1277 unsigned int cm_count = 0; 1278 struct mpr_command *cm; 1279 struct mprsas_target *targ; 1280 1281 callout_stop(&tm->cm_callout); 1282 1283 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1284 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1285 targ = tm->cm_targ; 1286 1287 /* 1288 * Currently there should be no way we can hit this case. It only 1289 * happens when we have a failure to allocate chain frames, and 1290 * task management commands don't have S/G lists. 1291 */ 1292 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 1293 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR, 1294 "%s: cm_flags = %#x for LUN reset! " 1295 "This should not happen!\n", __func__, tm->cm_flags); 1296 mprsas_free_tm(sc, tm); 1297 return; 1298 } 1299 1300 if (reply == NULL) { 1301 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n", 1302 tm); 1303 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { 1304 /* this completion was due to a reset, just cleanup */ 1305 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing " 1306 "reset, ignoring NULL LUN reset reply\n"); 1307 targ->tm = NULL; 1308 mprsas_free_tm(sc, tm); 1309 } 1310 else { 1311 /* we should have gotten a reply. */ 1312 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on " 1313 "LUN reset attempt, resetting controller\n"); 1314 mpr_reinit(sc); 1315 } 1316 return; 1317 } 1318 1319 mpr_dprint(sc, MPR_RECOVERY, 1320 "logical unit reset status 0x%x code 0x%x count %u\n", 1321 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1322 le32toh(reply->TerminationCount)); 1323 1324 /* 1325 * See if there are any outstanding commands for this LUN. 1326 * This could be made more efficient by using a per-LU data 1327 * structure of some sort. 1328 */ 1329 TAILQ_FOREACH(cm, &targ->commands, cm_link) { 1330 if (cm->cm_lun == tm->cm_lun) 1331 cm_count++; 1332 } 1333 1334 if (cm_count == 0) { 1335 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, 1336 "Finished recovery after LUN reset for target %u\n", 1337 targ->tid); 1338 1339 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid, 1340 tm->cm_lun); 1341 1342 /* 1343 * We've finished recovery for this logical unit. check and 1344 * see if some other logical unit has a timedout command 1345 * that needs to be processed. 1346 */ 1347 cm = TAILQ_FIRST(&targ->timedout_commands); 1348 if (cm) { 1349 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, 1350 "More commands to abort for target %u\n", targ->tid); 1351 mprsas_send_abort(sc, tm, cm); 1352 } else { 1353 targ->tm = NULL; 1354 mprsas_free_tm(sc, tm); 1355 } 1356 } else { 1357 /* if we still have commands for this LUN, the reset 1358 * effectively failed, regardless of the status reported. 1359 * Escalate to a target reset. 1360 */ 1361 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, 1362 "logical unit reset complete for target %u, but still " 1363 "have %u command(s), sending target reset\n", targ->tid, 1364 cm_count); 1365 mprsas_send_reset(sc, tm, 1366 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); 1367 } 1368 } 1369 1370 static void 1371 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm) 1372 { 1373 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1374 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1375 struct mprsas_target *targ; 1376 1377 callout_stop(&tm->cm_callout); 1378 1379 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1380 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1381 targ = tm->cm_targ; 1382 1383 /* 1384 * Currently there should be no way we can hit this case. It only 1385 * happens when we have a failure to allocate chain frames, and 1386 * task management commands don't have S/G lists. 1387 */ 1388 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 1389 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target " 1390 "reset! This should not happen!\n", __func__, tm->cm_flags); 1391 mprsas_free_tm(sc, tm); 1392 return; 1393 } 1394 1395 if (reply == NULL) { 1396 mpr_dprint(sc, MPR_RECOVERY, 1397 "NULL target reset reply for tm %p TaskMID %u\n", 1398 tm, le16toh(req->TaskMID)); 1399 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { 1400 /* this completion was due to a reset, just cleanup */ 1401 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing " 1402 "reset, ignoring NULL target reset reply\n"); 1403 targ->tm = NULL; 1404 mprsas_free_tm(sc, tm); 1405 } 1406 else { 1407 /* we should have gotten a reply. */ 1408 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on " 1409 "target reset attempt, resetting controller\n"); 1410 mpr_reinit(sc); 1411 } 1412 return; 1413 } 1414 1415 mpr_dprint(sc, MPR_RECOVERY, 1416 "target reset status 0x%x code 0x%x count %u\n", 1417 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1418 le32toh(reply->TerminationCount)); 1419 1420 if (targ->outstanding == 0) { 1421 /* 1422 * We've finished recovery for this target and all 1423 * of its logical units. 1424 */ 1425 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, 1426 "Finished reset recovery for target %u\n", targ->tid); 1427 1428 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 1429 CAM_LUN_WILDCARD); 1430 1431 targ->tm = NULL; 1432 mprsas_free_tm(sc, tm); 1433 } else { 1434 /* 1435 * After a target reset, if this target still has 1436 * outstanding commands, the reset effectively failed, 1437 * regardless of the status reported. escalate. 1438 */ 1439 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, 1440 "Target reset complete for target %u, but still have %u " 1441 "command(s), resetting controller\n", targ->tid, 1442 targ->outstanding); 1443 mpr_reinit(sc); 1444 } 1445 } 1446 1447 #define MPR_RESET_TIMEOUT 30 1448 1449 int 1450 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type) 1451 { 1452 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1453 struct mprsas_target *target; 1454 int err; 1455 1456 target = tm->cm_targ; 1457 if (target->handle == 0) { 1458 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id " 1459 "%d\n", __func__, target->tid); 1460 return -1; 1461 } 1462 1463 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1464 req->DevHandle = htole16(target->handle); 1465 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1466 req->TaskType = type; 1467 1468 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) { 1469 /* XXX Need to handle invalid LUNs */ 1470 MPR_SET_LUN(req->LUN, tm->cm_lun); 1471 tm->cm_targ->logical_unit_resets++; 1472 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, 1473 "Sending logical unit reset to target %u lun %d\n", 1474 target->tid, tm->cm_lun); 1475 tm->cm_complete = mprsas_logical_unit_reset_complete; 1476 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun); 1477 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { 1478 /* 1479 * Target reset method = 1480 * SAS Hard Link Reset / SATA Link Reset 1481 */ 1482 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 1483 tm->cm_targ->target_resets++; 1484 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, 1485 "Sending target reset to target %u\n", target->tid); 1486 tm->cm_complete = mprsas_target_reset_complete; 1487 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD); 1488 } 1489 else { 1490 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type); 1491 return -1; 1492 } 1493 1494 if (target->encl_level_valid) { 1495 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, 1496 "At enclosure level %d, slot %d, connector name (%4s)\n", 1497 target->encl_level, target->encl_slot, 1498 target->connector_name); 1499 } 1500 1501 tm->cm_data = NULL; 1502 tm->cm_desc.HighPriority.RequestFlags = 1503 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1504 tm->cm_complete_data = (void *)tm; 1505 1506 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz, 1507 mprsas_tm_timeout, tm); 1508 1509 err = mpr_map_command(sc, tm); 1510 if (err) 1511 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY, 1512 "error %d sending reset type %u\n", err, type); 1513 1514 return err; 1515 } 1516 1517 1518 static void 1519 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm) 1520 { 1521 struct mpr_command *cm; 1522 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1523 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1524 struct mprsas_target *targ; 1525 1526 callout_stop(&tm->cm_callout); 1527 1528 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1529 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1530 targ = tm->cm_targ; 1531 1532 /* 1533 * Currently there should be no way we can hit this case. It only 1534 * happens when we have a failure to allocate chain frames, and 1535 * task management commands don't have S/G lists. 1536 */ 1537 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 1538 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR, 1539 "cm_flags = %#x for abort %p TaskMID %u!\n", 1540 tm->cm_flags, tm, le16toh(req->TaskMID)); 1541 mprsas_free_tm(sc, tm); 1542 return; 1543 } 1544 1545 if (reply == NULL) { 1546 mpr_dprint(sc, MPR_RECOVERY, 1547 "NULL abort reply for tm %p TaskMID %u\n", 1548 tm, le16toh(req->TaskMID)); 1549 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { 1550 /* this completion was due to a reset, just cleanup */ 1551 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing " 1552 "reset, ignoring NULL abort reply\n"); 1553 targ->tm = NULL; 1554 mprsas_free_tm(sc, tm); 1555 } else { 1556 /* we should have gotten a reply. */ 1557 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on " 1558 "abort attempt, resetting controller\n"); 1559 mpr_reinit(sc); 1560 } 1561 return; 1562 } 1563 1564 mpr_dprint(sc, MPR_RECOVERY, 1565 "abort TaskMID %u status 0x%x code 0x%x count %u\n", 1566 le16toh(req->TaskMID), 1567 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1568 le32toh(reply->TerminationCount)); 1569 1570 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands); 1571 if (cm == NULL) { 1572 /* 1573 * if there are no more timedout commands, we're done with 1574 * error recovery for this target. 1575 */ 1576 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, 1577 "Finished abort recovery for target %u\n", targ->tid); 1578 targ->tm = NULL; 1579 mprsas_free_tm(sc, tm); 1580 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) { 1581 /* abort success, but we have more timedout commands to abort */ 1582 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, 1583 "Continuing abort recovery for target %u\n", targ->tid); 1584 mprsas_send_abort(sc, tm, cm); 1585 } else { 1586 /* 1587 * we didn't get a command completion, so the abort 1588 * failed as far as we're concerned. escalate. 1589 */ 1590 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, 1591 "Abort failed for target %u, sending logical unit reset\n", 1592 targ->tid); 1593 1594 mprsas_send_reset(sc, tm, 1595 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); 1596 } 1597 } 1598 1599 #define MPR_ABORT_TIMEOUT 5 1600 1601 static int 1602 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, 1603 struct mpr_command *cm) 1604 { 1605 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1606 struct mprsas_target *targ; 1607 int err; 1608 1609 targ = cm->cm_targ; 1610 if (targ->handle == 0) { 1611 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY, 1612 "%s null devhandle for target_id %d\n", 1613 __func__, cm->cm_ccb->ccb_h.target_id); 1614 return -1; 1615 } 1616 1617 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO, 1618 "Aborting command %p\n", cm); 1619 1620 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1621 req->DevHandle = htole16(targ->handle); 1622 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1623 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 1624 1625 /* XXX Need to handle invalid LUNs */ 1626 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); 1627 1628 req->TaskMID = htole16(cm->cm_desc.Default.SMID); 1629 1630 tm->cm_data = NULL; 1631 tm->cm_desc.HighPriority.RequestFlags = 1632 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1633 tm->cm_complete = mprsas_abort_complete; 1634 tm->cm_complete_data = (void *)tm; 1635 tm->cm_targ = cm->cm_targ; 1636 tm->cm_lun = cm->cm_lun; 1637 1638 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz, 1639 mprsas_tm_timeout, tm); 1640 1641 targ->aborts++; 1642 1643 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun); 1644 1645 err = mpr_map_command(sc, tm); 1646 if (err) 1647 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY, 1648 "error %d sending abort for cm %p SMID %u\n", 1649 err, cm, req->TaskMID); 1650 return err; 1651 } 1652 1653 static void 1654 mprsas_scsiio_timeout(void *data) 1655 { 1656 #if 0 /* XXX swildner: sbintime */ 1657 sbintime_t elapsed, now; 1658 #endif 1659 union ccb *ccb; 1660 struct mpr_softc *sc; 1661 struct mpr_command *cm; 1662 struct mprsas_target *targ; 1663 1664 cm = (struct mpr_command *)data; 1665 sc = cm->cm_sc; 1666 ccb = cm->cm_ccb; 1667 #if 0 /* XXX swildner: sbintime */ 1668 now = sbinuptime(); 1669 #endif 1670 1671 MPR_FUNCTRACE(sc); 1672 KKASSERT(lockowned(&sc->mpr_lock)); 1673 1674 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm); 1675 1676 /* 1677 * Run the interrupt handler to make sure it's not pending. This 1678 * isn't perfect because the command could have already completed 1679 * and been re-used, though this is unlikely. 1680 */ 1681 mpr_intr_locked(sc); 1682 if (cm->cm_state != MPR_CM_STATE_INQUEUE) { 1683 mprsas_log_command(cm, MPR_XINFO, 1684 "SCSI command %p almost timed out\n", cm); 1685 return; 1686 } 1687 1688 if (cm->cm_ccb == NULL) { 1689 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n"); 1690 return; 1691 } 1692 1693 targ = cm->cm_targ; 1694 targ->timeouts++; 1695 1696 #if 0 /* XXX swildner: sbintime */ 1697 elapsed = now - ccb->ccb_h.qos.sim_data; 1698 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY, 1699 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n", 1700 targ->tid, targ->handle, ccb->ccb_h.timeout, 1701 sbintime_getsec(elapsed), elapsed & 0xffffffff); 1702 #endif 1703 if (targ->encl_level_valid) { 1704 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, 1705 "At enclosure level %d, slot %d, connector name (%4s)\n", 1706 targ->encl_level, targ->encl_slot, targ->connector_name); 1707 } 1708 1709 /* XXX first, check the firmware state, to see if it's still 1710 * operational. if not, do a diag reset. 1711 */ 1712 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT); 1713 cm->cm_state = MPR_CM_STATE_TIMEDOUT; 1714 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); 1715 1716 if (targ->tm != NULL) { 1717 /* target already in recovery, just queue up another 1718 * timedout command to be processed later. 1719 */ 1720 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for " 1721 "processing by tm %p\n", cm, targ->tm); 1722 } 1723 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) { 1724 1725 /* start recovery by aborting the first timedout command */ 1726 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO, 1727 "Sending abort to target %u for SMID %d\n", targ->tid, 1728 cm->cm_desc.Default.SMID); 1729 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n", 1730 cm, targ->tm); 1731 mprsas_send_abort(sc, targ->tm, cm); 1732 } 1733 else { 1734 /* XXX queue this target up for recovery once a TM becomes 1735 * available. The firmware only has a limited number of 1736 * HighPriority credits for the high priority requests used 1737 * for task management, and we ran out. 1738 * 1739 * Isilon: don't worry about this for now, since we have 1740 * more credits than disks in an enclosure, and limit 1741 * ourselves to one TM per target for recovery. 1742 */ 1743 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY, 1744 "timedout cm %p failed to allocate a tm\n", cm); 1745 } 1746 } 1747 1748 #if 0 /* XXX swildner: NVMe support */ 1749 /** 1750 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent 1751 * to SCSI Unmap. 1752 * Return 0 - for success, 1753 * 1 - to immediately return back the command with success status to CAM 1754 * negative value - to fallback to firmware path i.e. issue scsi unmap 1755 * to FW without any translation. 1756 */ 1757 static int 1758 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm, 1759 union ccb *ccb, struct mprsas_target *targ) 1760 { 1761 Mpi26NVMeEncapsulatedRequest_t *req = NULL; 1762 struct ccb_scsiio *csio; 1763 struct unmap_parm_list *plist; 1764 struct nvme_dsm_range *nvme_dsm_ranges = NULL; 1765 struct nvme_command *c; 1766 int i, res; 1767 uint16_t ndesc, list_len, data_length; 1768 struct mpr_prp_page *prp_page_info; 1769 uint64_t nvme_dsm_ranges_dma_handle; 1770 1771 csio = &ccb->csio; 1772 #if 0 /* __FreeBSD_version >= 1100103 */ 1773 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]); 1774 #else 1775 if (csio->ccb_h.flags & CAM_CDB_POINTER) { 1776 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 | 1777 ccb->csio.cdb_io.cdb_ptr[8]); 1778 } else { 1779 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 | 1780 ccb->csio.cdb_io.cdb_bytes[8]); 1781 } 1782 #endif 1783 if (!list_len) { 1784 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n"); 1785 return -EINVAL; 1786 } 1787 1788 plist = kmalloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT); 1789 if (!plist) { 1790 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to " 1791 "save UNMAP data\n"); 1792 return -ENOMEM; 1793 } 1794 1795 /* Copy SCSI unmap data to a local buffer */ 1796 bcopy(csio->data_ptr, plist, csio->dxfer_len); 1797 1798 /* return back the unmap command to CAM with success status, 1799 * if number of descripts is zero. 1800 */ 1801 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4; 1802 if (!ndesc) { 1803 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in " 1804 "UNMAP cmd is Zero\n"); 1805 res = 1; 1806 goto out; 1807 } 1808 1809 data_length = ndesc * sizeof(struct nvme_dsm_range); 1810 if (data_length > targ->MDTS) { 1811 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than " 1812 "Device's MDTS: %d\n", data_length, targ->MDTS); 1813 res = -EINVAL; 1814 goto out; 1815 } 1816 1817 prp_page_info = mpr_alloc_prp_page(sc); 1818 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for " 1819 "UNMAP command.\n", __func__)); 1820 1821 /* 1822 * Insert the allocated PRP page into the command's PRP page list. This 1823 * will be freed when the command is freed. 1824 */ 1825 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 1826 1827 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page; 1828 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr; 1829 1830 bzero(nvme_dsm_ranges, data_length); 1831 1832 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data 1833 * for each descriptors contained in SCSI UNMAP data. 1834 */ 1835 for (i = 0; i < ndesc; i++) { 1836 nvme_dsm_ranges[i].length = 1837 htole32(be32toh(plist->desc[i].nlb)); 1838 nvme_dsm_ranges[i].starting_lba = 1839 htole64(be64toh(plist->desc[i].slba)); 1840 nvme_dsm_ranges[i].attributes = 0; 1841 } 1842 1843 /* Build MPI2.6's NVMe Encapsulated Request Message */ 1844 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req; 1845 bzero(req, sizeof(*req)); 1846 req->DevHandle = htole16(targ->handle); 1847 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED; 1848 req->Flags = MPI26_NVME_FLAGS_WRITE; 1849 req->ErrorResponseBaseAddress.High = 1850 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32)); 1851 req->ErrorResponseBaseAddress.Low = 1852 htole32(cm->cm_sense_busaddr); 1853 req->ErrorResponseAllocationLength = 1854 htole16(sizeof(struct nvme_completion)); 1855 req->EncapsulatedCommandLength = 1856 htole16(sizeof(struct nvme_command)); 1857 req->DataLength = htole32(data_length); 1858 1859 /* Build NVMe DSM command */ 1860 c = (struct nvme_command *) req->NVMe_Command; 1861 c->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_DATASET_MANAGEMENT); 1862 c->nsid = htole32(csio->ccb_h.target_lun + 1); 1863 c->cdw10 = htole32(ndesc - 1); 1864 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE); 1865 1866 cm->cm_length = data_length; 1867 cm->cm_data = NULL; 1868 1869 cm->cm_complete = mprsas_scsiio_complete; 1870 cm->cm_complete_data = ccb; 1871 cm->cm_targ = targ; 1872 cm->cm_lun = csio->ccb_h.target_lun; 1873 cm->cm_ccb = ccb; 1874 1875 cm->cm_desc.Default.RequestFlags = 1876 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED; 1877 1878 csio->ccb_h.qos.sim_data = sbinuptime(); 1879 #if 0 /* __FreeBSD_version >= 1000029 */ 1880 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0, 1881 mprsas_scsiio_timeout, cm, 0); 1882 #else //__FreeBSD_version < 1000029 1883 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000, 1884 mprsas_scsiio_timeout, cm); 1885 #endif //__FreeBSD_version >= 1000029 1886 1887 targ->issued++; 1888 targ->outstanding++; 1889 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); 1890 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1891 1892 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n", 1893 __func__, cm, ccb, targ->outstanding); 1894 1895 mpr_build_nvme_prp(sc, cm, req, 1896 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length); 1897 mpr_map_command(sc, cm); 1898 1899 out: 1900 kfree(plist, M_MPR); 1901 return 0; 1902 } 1903 #endif 1904 1905 static void 1906 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb) 1907 { 1908 MPI2_SCSI_IO_REQUEST *req; 1909 struct ccb_scsiio *csio; 1910 struct mpr_softc *sc; 1911 struct mprsas_target *targ; 1912 struct mprsas_lun *lun; 1913 struct mpr_command *cm; 1914 uint8_t i, lba_byte, *ref_tag_addr; 1915 #if 0 /* XXX swildner: NVMe support */ 1916 uint8_t scsi_opcode; 1917 #endif 1918 uint16_t eedp_flags; 1919 uint32_t mpi_control; 1920 #if 0 /* XXX swildner: NVMe support */ 1921 int rc; 1922 #endif 1923 1924 sc = sassc->sc; 1925 MPR_FUNCTRACE(sc); 1926 KKASSERT(lockowned(&sc->mpr_lock)); 1927 1928 csio = &ccb->csio; 1929 KASSERT(csio->ccb_h.target_id < sassc->maxtargets, 1930 ("Target %d out of bounds in XPT_SCSI_IO\n", 1931 csio->ccb_h.target_id)); 1932 targ = &sassc->targets[csio->ccb_h.target_id]; 1933 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags); 1934 if (targ->handle == 0x0) { 1935 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n", 1936 __func__, csio->ccb_h.target_id); 1937 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1938 xpt_done(ccb); 1939 return; 1940 } 1941 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) { 1942 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO " 1943 "supported %u\n", __func__, csio->ccb_h.target_id); 1944 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1945 xpt_done(ccb); 1946 return; 1947 } 1948 /* 1949 * Sometimes, it is possible to get a command that is not "In 1950 * Progress" and was actually aborted by the upper layer. Check for 1951 * this here and complete the command without error. 1952 */ 1953 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) { 1954 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for " 1955 "target %u\n", __func__, csio->ccb_h.target_id); 1956 xpt_done(ccb); 1957 return; 1958 } 1959 /* 1960 * If devinfo is 0 this will be a volume. In that case don't tell CAM 1961 * that the volume has timed out. We want volumes to be enumerated 1962 * until they are deleted/removed, not just failed. 1963 */ 1964 if (targ->flags & MPRSAS_TARGET_INREMOVAL) { 1965 if (targ->devinfo == 0) 1966 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 1967 else 1968 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); 1969 xpt_done(ccb); 1970 return; 1971 } 1972 1973 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) { 1974 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__); 1975 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1976 xpt_done(ccb); 1977 return; 1978 } 1979 1980 /* 1981 * If target has a reset in progress, freeze the devq and return. The 1982 * devq will be released when the TM reset is finished. 1983 */ 1984 if (targ->flags & MPRSAS_TARGET_INRESET) { 1985 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; 1986 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n", 1987 __func__, targ->tid); 1988 xpt_freeze_devq(ccb->ccb_h.path, 1); 1989 xpt_done(ccb); 1990 return; 1991 } 1992 1993 cm = mpr_alloc_command(sc); 1994 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) { 1995 if (cm != NULL) { 1996 mpr_free_command(sc, cm); 1997 } 1998 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) { 1999 xpt_freeze_simq(sassc->sim, 1); 2000 sassc->flags |= MPRSAS_QUEUE_FROZEN; 2001 } 2002 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2003 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2004 xpt_done(ccb); 2005 return; 2006 } 2007 2008 #if 0 /* XXX swildner: NVMe support */ 2009 /* For NVME device's issue UNMAP command directly to NVME drives by 2010 * constructing equivalent native NVMe DataSetManagement command. 2011 */ 2012 #if 0 /* __FreeBSD_version >= 1100103 */ 2013 scsi_opcode = scsiio_cdb_ptr(csio)[0]; 2014 #else 2015 if (csio->ccb_h.flags & CAM_CDB_POINTER) 2016 scsi_opcode = csio->cdb_io.cdb_ptr[0]; 2017 else 2018 scsi_opcode = csio->cdb_io.cdb_bytes[0]; 2019 #endif 2020 if (scsi_opcode == UNMAP && 2021 targ->is_nvme && 2022 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { 2023 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ); 2024 if (rc == 1) { /* return command to CAM with success status */ 2025 mpr_free_command(sc, cm); 2026 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 2027 xpt_done(ccb); 2028 return; 2029 } else if (!rc) /* Issued NVMe Encapsulated Request Message */ 2030 return; 2031 } 2032 #endif 2033 2034 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; 2035 bzero(req, sizeof(*req)); 2036 req->DevHandle = htole16(targ->handle); 2037 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2038 req->MsgFlags = 0; 2039 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); 2040 req->SenseBufferLength = MPR_SENSE_LEN; 2041 req->SGLFlags = 0; 2042 req->ChainOffset = 0; 2043 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */ 2044 req->SGLOffset1= 0; 2045 req->SGLOffset2= 0; 2046 req->SGLOffset3= 0; 2047 req->SkipCount = 0; 2048 req->DataLength = htole32(csio->dxfer_len); 2049 req->BidirectionalDataLength = 0; 2050 req->IoFlags = htole16(csio->cdb_len); 2051 req->EEDPFlags = 0; 2052 2053 /* Note: BiDirectional transfers are not supported */ 2054 switch (csio->ccb_h.flags & CAM_DIR_MASK) { 2055 case CAM_DIR_IN: 2056 mpi_control = MPI2_SCSIIO_CONTROL_READ; 2057 cm->cm_flags |= MPR_CM_FLAGS_DATAIN; 2058 break; 2059 case CAM_DIR_OUT: 2060 mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 2061 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT; 2062 break; 2063 case CAM_DIR_NONE: 2064 default: 2065 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 2066 break; 2067 } 2068 2069 if (csio->cdb_len == 32) 2070 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; 2071 /* 2072 * It looks like the hardware doesn't require an explicit tag 2073 * number for each transaction. SAM Task Management not supported 2074 * at the moment. 2075 */ 2076 switch (csio->tag_action) { 2077 case MSG_HEAD_OF_Q_TAG: 2078 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ; 2079 break; 2080 case MSG_ORDERED_Q_TAG: 2081 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; 2082 break; 2083 case MSG_ACA_TASK: 2084 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ; 2085 break; 2086 case CAM_TAG_ACTION_NONE: 2087 case MSG_SIMPLE_Q_TAG: 2088 default: 2089 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 2090 break; 2091 } 2092 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits; 2093 req->Control = htole32(mpi_control); 2094 2095 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) { 2096 mpr_free_command(sc, cm); 2097 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID); 2098 xpt_done(ccb); 2099 return; 2100 } 2101 2102 if (csio->ccb_h.flags & CAM_CDB_POINTER) 2103 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); 2104 else { 2105 KASSERT(csio->cdb_len <= IOCDBLEN, 2106 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER " 2107 "is not set", csio->cdb_len)); 2108 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); 2109 } 2110 req->IoFlags = htole16(csio->cdb_len); 2111 2112 /* 2113 * Check if EEDP is supported and enabled. If it is then check if the 2114 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and 2115 * is formatted for EEDP support. If all of this is true, set CDB up 2116 * for EEDP transfer. 2117 */ 2118 eedp_flags = op_code_prot[req->CDB.CDB32[0]]; 2119 if (sc->eedp_enabled && eedp_flags) { 2120 SLIST_FOREACH(lun, &targ->luns, lun_link) { 2121 if (lun->lun_id == csio->ccb_h.target_lun) { 2122 break; 2123 } 2124 } 2125 2126 if ((lun != NULL) && (lun->eedp_formatted)) { 2127 req->EEDPBlockSize = htole16(lun->eedp_block_size); 2128 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 2129 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 2130 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 2131 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) { 2132 eedp_flags |= 2133 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; 2134 } 2135 req->EEDPFlags = htole16(eedp_flags); 2136 2137 /* 2138 * If CDB less than 32, fill in Primary Ref Tag with 2139 * low 4 bytes of LBA. If CDB is 32, tag stuff is 2140 * already there. Also, set protection bit. FreeBSD 2141 * currently does not support CDBs bigger than 16, but 2142 * the code doesn't hurt, and will be here for the 2143 * future. 2144 */ 2145 if (csio->cdb_len != 32) { 2146 lba_byte = (csio->cdb_len == 16) ? 6 : 2; 2147 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32. 2148 PrimaryReferenceTag; 2149 for (i = 0; i < 4; i++) { 2150 *ref_tag_addr = 2151 req->CDB.CDB32[lba_byte + i]; 2152 ref_tag_addr++; 2153 } 2154 req->CDB.EEDP32.PrimaryReferenceTag = 2155 htole32(req-> 2156 CDB.EEDP32.PrimaryReferenceTag); 2157 req->CDB.EEDP32.PrimaryApplicationTagMask = 2158 0xFFFF; 2159 req->CDB.CDB32[1] = 2160 (req->CDB.CDB32[1] & 0x1F) | 0x20; 2161 } else { 2162 eedp_flags |= 2163 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG; 2164 req->EEDPFlags = htole16(eedp_flags); 2165 req->CDB.CDB32[10] = (req->CDB.CDB32[10] & 2166 0x1F) | 0x20; 2167 } 2168 } 2169 } 2170 2171 cm->cm_length = csio->dxfer_len; 2172 if (cm->cm_length != 0) { 2173 cm->cm_data = ccb; 2174 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB; 2175 } else { 2176 cm->cm_data = NULL; 2177 } 2178 cm->cm_sge = &req->SGL; 2179 cm->cm_sglsize = (32 - 24) * 4; 2180 cm->cm_complete = mprsas_scsiio_complete; 2181 cm->cm_complete_data = ccb; 2182 cm->cm_targ = targ; 2183 cm->cm_lun = csio->ccb_h.target_lun; 2184 cm->cm_ccb = ccb; 2185 /* 2186 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0) 2187 * and set descriptor type. 2188 */ 2189 if (targ->scsi_req_desc_type == 2190 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) { 2191 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH; 2192 cm->cm_desc.FastPathSCSIIO.RequestFlags = 2193 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 2194 if (!sc->atomic_desc_capable) { 2195 cm->cm_desc.FastPathSCSIIO.DevHandle = 2196 htole16(targ->handle); 2197 } 2198 } else { 2199 cm->cm_desc.SCSIIO.RequestFlags = 2200 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 2201 if (!sc->atomic_desc_capable) 2202 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle); 2203 } 2204 2205 #if 0 /* XXX swildner sbintime */ 2206 csio->ccb_h.qos.sim_data = sbinuptime(); 2207 #endif 2208 #if 0 /* __FreeBSD_version >= 1000029 */ 2209 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0, 2210 mprsas_scsiio_timeout, cm, 0); 2211 #else //__FreeBSD_version < 1000029 2212 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000, 2213 mprsas_scsiio_timeout, cm); 2214 #endif //__FreeBSD_version >= 1000029 2215 2216 targ->issued++; 2217 targ->outstanding++; 2218 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); 2219 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2220 2221 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n", 2222 __func__, cm, ccb, targ->outstanding); 2223 2224 mpr_map_command(sc, cm); 2225 return; 2226 } 2227 2228 /** 2229 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request 2230 */ 2231 static void 2232 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio, 2233 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ) 2234 { 2235 u32 response_info; 2236 u8 *response_bytes; 2237 u16 ioc_status = le16toh(mpi_reply->IOCStatus) & 2238 MPI2_IOCSTATUS_MASK; 2239 u8 scsi_state = mpi_reply->SCSIState; 2240 u8 scsi_status = mpi_reply->SCSIStatus; 2241 char *desc_ioc_state = NULL; 2242 char *desc_scsi_status = NULL; 2243 u32 log_info = le32toh(mpi_reply->IOCLogInfo); 2244 2245 if (log_info == 0x31170000) 2246 return; 2247 2248 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string, 2249 ioc_status); 2250 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string, 2251 scsi_status); 2252 2253 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n", 2254 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status); 2255 if (targ->encl_level_valid) { 2256 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, " 2257 "connector name (%4s)\n", targ->encl_level, targ->encl_slot, 2258 targ->connector_name); 2259 } 2260 2261 /* 2262 * We can add more detail about underflow data here 2263 * TO-DO 2264 */ 2265 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), " 2266 "scsi_state %pb%i\n", desc_scsi_status, scsi_status, 2267 "\20" "\1AutosenseValid" "\2AutosenseFailed" 2268 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid", 2269 scsi_state); 2270 2271 if (sc->mpr_debug & MPR_XINFO && 2272 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { 2273 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n"); 2274 scsi_sense_print(csio); 2275 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n"); 2276 } 2277 2278 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { 2279 response_info = le32toh(mpi_reply->ResponseInfo); 2280 response_bytes = (u8 *)&response_info; 2281 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n", 2282 response_bytes[0], 2283 mpr_describe_table(mpr_scsi_taskmgmt_string, 2284 response_bytes[0])); 2285 } 2286 } 2287 2288 #if 0 /* XXX swildner: NVMe support */ 2289 /** mprsas_nvme_trans_status_code 2290 * 2291 * Convert Native NVMe command error status to 2292 * equivalent SCSI error status. 2293 * 2294 * Returns appropriate scsi_status 2295 */ 2296 static u8 2297 mprsas_nvme_trans_status_code(uint16_t nvme_status, 2298 struct mpr_command *cm) 2299 { 2300 u8 status = MPI2_SCSI_STATUS_GOOD; 2301 int skey, asc, ascq; 2302 union ccb *ccb = cm->cm_complete_data; 2303 int returned_sense_len; 2304 uint8_t sct, sc; 2305 2306 sct = NVME_STATUS_GET_SCT(nvme_status); 2307 sc = NVME_STATUS_GET_SC(nvme_status); 2308 2309 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2310 skey = SSD_KEY_ILLEGAL_REQUEST; 2311 asc = SCSI_ASC_NO_SENSE; 2312 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2313 2314 switch (sct) { 2315 case NVME_SCT_GENERIC: 2316 switch (sc) { 2317 case NVME_SC_SUCCESS: 2318 status = MPI2_SCSI_STATUS_GOOD; 2319 skey = SSD_KEY_NO_SENSE; 2320 asc = SCSI_ASC_NO_SENSE; 2321 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2322 break; 2323 case NVME_SC_INVALID_OPCODE: 2324 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2325 skey = SSD_KEY_ILLEGAL_REQUEST; 2326 asc = SCSI_ASC_ILLEGAL_COMMAND; 2327 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2328 break; 2329 case NVME_SC_INVALID_FIELD: 2330 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2331 skey = SSD_KEY_ILLEGAL_REQUEST; 2332 asc = SCSI_ASC_INVALID_CDB; 2333 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2334 break; 2335 case NVME_SC_DATA_TRANSFER_ERROR: 2336 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2337 skey = SSD_KEY_MEDIUM_ERROR; 2338 asc = SCSI_ASC_NO_SENSE; 2339 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2340 break; 2341 case NVME_SC_ABORTED_POWER_LOSS: 2342 status = MPI2_SCSI_STATUS_TASK_ABORTED; 2343 skey = SSD_KEY_ABORTED_COMMAND; 2344 asc = SCSI_ASC_WARNING; 2345 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED; 2346 break; 2347 case NVME_SC_INTERNAL_DEVICE_ERROR: 2348 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2349 skey = SSD_KEY_HARDWARE_ERROR; 2350 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE; 2351 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2352 break; 2353 case NVME_SC_ABORTED_BY_REQUEST: 2354 case NVME_SC_ABORTED_SQ_DELETION: 2355 case NVME_SC_ABORTED_FAILED_FUSED: 2356 case NVME_SC_ABORTED_MISSING_FUSED: 2357 status = MPI2_SCSI_STATUS_TASK_ABORTED; 2358 skey = SSD_KEY_ABORTED_COMMAND; 2359 asc = SCSI_ASC_NO_SENSE; 2360 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2361 break; 2362 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT: 2363 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2364 skey = SSD_KEY_ILLEGAL_REQUEST; 2365 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; 2366 ascq = SCSI_ASCQ_INVALID_LUN_ID; 2367 break; 2368 case NVME_SC_LBA_OUT_OF_RANGE: 2369 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2370 skey = SSD_KEY_ILLEGAL_REQUEST; 2371 asc = SCSI_ASC_ILLEGAL_BLOCK; 2372 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2373 break; 2374 case NVME_SC_CAPACITY_EXCEEDED: 2375 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2376 skey = SSD_KEY_MEDIUM_ERROR; 2377 asc = SCSI_ASC_NO_SENSE; 2378 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2379 break; 2380 case NVME_SC_NAMESPACE_NOT_READY: 2381 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2382 skey = SSD_KEY_NOT_READY; 2383 asc = SCSI_ASC_LUN_NOT_READY; 2384 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2385 break; 2386 } 2387 break; 2388 case NVME_SCT_COMMAND_SPECIFIC: 2389 switch (sc) { 2390 case NVME_SC_INVALID_FORMAT: 2391 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2392 skey = SSD_KEY_ILLEGAL_REQUEST; 2393 asc = SCSI_ASC_FORMAT_COMMAND_FAILED; 2394 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED; 2395 break; 2396 case NVME_SC_CONFLICTING_ATTRIBUTES: 2397 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2398 skey = SSD_KEY_ILLEGAL_REQUEST; 2399 asc = SCSI_ASC_INVALID_CDB; 2400 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2401 break; 2402 } 2403 break; 2404 case NVME_SCT_MEDIA_ERROR: 2405 switch (sc) { 2406 case NVME_SC_WRITE_FAULTS: 2407 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2408 skey = SSD_KEY_MEDIUM_ERROR; 2409 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT; 2410 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2411 break; 2412 case NVME_SC_UNRECOVERED_READ_ERROR: 2413 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2414 skey = SSD_KEY_MEDIUM_ERROR; 2415 asc = SCSI_ASC_UNRECOVERED_READ_ERROR; 2416 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2417 break; 2418 case NVME_SC_GUARD_CHECK_ERROR: 2419 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2420 skey = SSD_KEY_MEDIUM_ERROR; 2421 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED; 2422 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED; 2423 break; 2424 case NVME_SC_APPLICATION_TAG_CHECK_ERROR: 2425 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2426 skey = SSD_KEY_MEDIUM_ERROR; 2427 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED; 2428 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED; 2429 break; 2430 case NVME_SC_REFERENCE_TAG_CHECK_ERROR: 2431 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2432 skey = SSD_KEY_MEDIUM_ERROR; 2433 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED; 2434 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED; 2435 break; 2436 case NVME_SC_COMPARE_FAILURE: 2437 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2438 skey = SSD_KEY_MISCOMPARE; 2439 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY; 2440 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; 2441 break; 2442 case NVME_SC_ACCESS_DENIED: 2443 status = MPI2_SCSI_STATUS_CHECK_CONDITION; 2444 skey = SSD_KEY_ILLEGAL_REQUEST; 2445 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; 2446 ascq = SCSI_ASCQ_INVALID_LUN_ID; 2447 break; 2448 } 2449 break; 2450 } 2451 2452 returned_sense_len = sizeof(struct scsi_sense_data); 2453 if (returned_sense_len < ccb->csio.sense_len) 2454 ccb->csio.sense_resid = ccb->csio.sense_len - 2455 returned_sense_len; 2456 else 2457 ccb->csio.sense_resid = 0; 2458 2459 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED, 2460 1, skey, asc, ascq, SSD_ELEM_NONE); 2461 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 2462 2463 return status; 2464 } 2465 2466 /** mprsas_complete_nvme_unmap 2467 * 2468 * Complete native NVMe command issued using NVMe Encapsulated 2469 * Request Message. 2470 */ 2471 static u8 2472 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm) 2473 { 2474 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply; 2475 struct nvme_completion *nvme_completion = NULL; 2476 u8 scsi_status = MPI2_SCSI_STATUS_GOOD; 2477 2478 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply; 2479 if (le16toh(mpi_reply->ErrorResponseCount)){ 2480 nvme_completion = (struct nvme_completion *)cm->cm_sense; 2481 scsi_status = mprsas_nvme_trans_status_code( 2482 nvme_completion->status, cm); 2483 } 2484 return scsi_status; 2485 } 2486 #endif 2487 2488 static void 2489 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm) 2490 { 2491 MPI2_SCSI_IO_REPLY *rep; 2492 union ccb *ccb; 2493 struct ccb_scsiio *csio; 2494 struct mprsas_softc *sassc; 2495 struct scsi_vpd_supported_page_list *vpd_list = NULL; 2496 u8 *TLR_bits, TLR_on, *scsi_cdb; 2497 int dir = 0, i; 2498 u16 alloc_len; 2499 struct mprsas_target *target; 2500 target_id_t target_id; 2501 2502 MPR_FUNCTRACE(sc); 2503 mpr_dprint(sc, MPR_TRACE, 2504 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm, 2505 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply, 2506 cm->cm_targ->outstanding); 2507 2508 callout_stop(&cm->cm_callout); 2509 KKASSERT(lockowned(&sc->mpr_lock)); 2510 2511 sassc = sc->sassc; 2512 ccb = cm->cm_complete_data; 2513 csio = &ccb->csio; 2514 target_id = csio->ccb_h.target_id; 2515 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply; 2516 /* 2517 * XXX KDM if the chain allocation fails, does it matter if we do 2518 * the sync and unload here? It is simpler to do it in every case, 2519 * assuming it doesn't cause problems. 2520 */ 2521 if (cm->cm_data != NULL) { 2522 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) 2523 dir = BUS_DMASYNC_POSTREAD; 2524 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) 2525 dir = BUS_DMASYNC_POSTWRITE; 2526 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); 2527 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); 2528 } 2529 2530 cm->cm_targ->completed++; 2531 cm->cm_targ->outstanding--; 2532 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link); 2533 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED); 2534 2535 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) { 2536 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery); 2537 cm->cm_state = MPR_CM_STATE_BUSY; 2538 if (cm->cm_reply != NULL) 2539 mprsas_log_command(cm, MPR_RECOVERY, 2540 "completed timedout cm %p ccb %p during recovery " 2541 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb, 2542 le16toh(rep->IOCStatus), rep->SCSIStatus, 2543 rep->SCSIState, le32toh(rep->TransferCount)); 2544 else 2545 mprsas_log_command(cm, MPR_RECOVERY, 2546 "completed timedout cm %p ccb %p during recovery\n", 2547 cm, cm->cm_ccb); 2548 } else if (cm->cm_targ->tm != NULL) { 2549 if (cm->cm_reply != NULL) 2550 mprsas_log_command(cm, MPR_RECOVERY, 2551 "completed cm %p ccb %p during recovery " 2552 "ioc %x scsi %x state %x xfer %u\n", 2553 cm, cm->cm_ccb, le16toh(rep->IOCStatus), 2554 rep->SCSIStatus, rep->SCSIState, 2555 le32toh(rep->TransferCount)); 2556 else 2557 mprsas_log_command(cm, MPR_RECOVERY, 2558 "completed cm %p ccb %p during recovery\n", 2559 cm, cm->cm_ccb); 2560 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { 2561 mprsas_log_command(cm, MPR_RECOVERY, 2562 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb); 2563 } 2564 2565 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 2566 /* 2567 * We ran into an error after we tried to map the command, 2568 * so we're getting a callback without queueing the command 2569 * to the hardware. So we set the status here, and it will 2570 * be retained below. We'll go through the "fast path", 2571 * because there can be no reply when we haven't actually 2572 * gone out to the hardware. 2573 */ 2574 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ); 2575 2576 /* 2577 * Currently the only error included in the mask is 2578 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of 2579 * chain frames. We need to freeze the queue until we get 2580 * a command that completed without this error, which will 2581 * hopefully have some chain frames attached that we can 2582 * use. If we wanted to get smarter about it, we would 2583 * only unfreeze the queue in this condition when we're 2584 * sure that we're getting some chain frames back. That's 2585 * probably unnecessary. 2586 */ 2587 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) { 2588 xpt_freeze_simq(sassc->sim, 1); 2589 sassc->flags |= MPRSAS_QUEUE_FROZEN; 2590 mpr_dprint(sc, MPR_XINFO, "Error sending command, " 2591 "freezing SIM queue\n"); 2592 } 2593 } 2594 2595 /* 2596 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER 2597 * flag, and use it in a few places in the rest of this function for 2598 * convenience. Use the macro if available. 2599 */ 2600 #if 0 /* __FreeBSD_version >= 1100103 */ 2601 scsi_cdb = scsiio_cdb_ptr(csio); 2602 #else 2603 if (csio->ccb_h.flags & CAM_CDB_POINTER) 2604 scsi_cdb = csio->cdb_io.cdb_ptr; 2605 else 2606 scsi_cdb = csio->cdb_io.cdb_bytes; 2607 #endif 2608 2609 /* 2610 * If this is a Start Stop Unit command and it was issued by the driver 2611 * during shutdown, decrement the refcount to account for all of the 2612 * commands that were sent. All SSU commands should be completed before 2613 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started 2614 * is TRUE. 2615 */ 2616 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) { 2617 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n"); 2618 sc->SSU_refcount--; 2619 } 2620 2621 /* Take the fast path to completion */ 2622 if (cm->cm_reply == NULL) { 2623 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) { 2624 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) 2625 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET); 2626 else { 2627 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 2628 csio->scsi_status = SCSI_STATUS_OK; 2629 } 2630 if (sassc->flags & MPRSAS_QUEUE_FROZEN) { 2631 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2632 sassc->flags &= ~MPRSAS_QUEUE_FROZEN; 2633 mpr_dprint(sc, MPR_XINFO, 2634 "Unfreezing SIM queue\n"); 2635 } 2636 } 2637 2638 /* 2639 * There are two scenarios where the status won't be 2640 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is 2641 * set, the second is in the MPR_FLAGS_DIAGRESET above. 2642 */ 2643 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) { 2644 /* 2645 * Freeze the dev queue so that commands are 2646 * executed in the correct order after error 2647 * recovery. 2648 */ 2649 ccb->ccb_h.status |= CAM_DEV_QFRZN; 2650 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); 2651 } 2652 mpr_free_command(sc, cm); 2653 xpt_done(ccb); 2654 return; 2655 } 2656 2657 #if 0 /* XXX swildner: NVMe support */ 2658 target = &sassc->targets[target_id]; 2659 if (scsi_cdb[0] == UNMAP && 2660 target->is_nvme && 2661 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { 2662 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm); 2663 csio->scsi_status = rep->SCSIStatus; 2664 } 2665 #endif 2666 2667 mprsas_log_command(cm, MPR_XINFO, 2668 "ioc %x scsi %x state %x xfer %u\n", 2669 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, 2670 le32toh(rep->TransferCount)); 2671 2672 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) { 2673 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: 2674 csio->resid = cm->cm_length - le32toh(rep->TransferCount); 2675 /* FALLTHROUGH */ 2676 case MPI2_IOCSTATUS_SUCCESS: 2677 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: 2678 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) == 2679 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR) 2680 mprsas_log_command(cm, MPR_XINFO, "recovered error\n"); 2681 2682 /* Completion failed at the transport level. */ 2683 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS | 2684 MPI2_SCSI_STATE_TERMINATED)) { 2685 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 2686 break; 2687 } 2688 2689 /* In a modern packetized environment, an autosense failure 2690 * implies that there's not much else that can be done to 2691 * recover the command. 2692 */ 2693 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) { 2694 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL); 2695 break; 2696 } 2697 2698 /* 2699 * CAM doesn't care about SAS Response Info data, but if this is 2700 * the state check if TLR should be done. If not, clear the 2701 * TLR_bits for the target. 2702 */ 2703 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) && 2704 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) 2705 == MPR_SCSI_RI_INVALID_FRAME)) { 2706 sc->mapping_table[target_id].TLR_bits = 2707 (u8)MPI2_SCSIIO_CONTROL_NO_TLR; 2708 } 2709 2710 /* 2711 * Intentionally override the normal SCSI status reporting 2712 * for these two cases. These are likely to happen in a 2713 * multi-initiator environment, and we want to make sure that 2714 * CAM retries these commands rather than fail them. 2715 */ 2716 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) || 2717 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) { 2718 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED); 2719 break; 2720 } 2721 2722 /* Handle normal status and sense */ 2723 csio->scsi_status = rep->SCSIStatus; 2724 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD) 2725 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 2726 else 2727 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR); 2728 2729 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) { 2730 int sense_len, returned_sense_len; 2731 2732 returned_sense_len = min(le32toh(rep->SenseCount), 2733 sizeof(struct scsi_sense_data)); 2734 if (returned_sense_len < csio->sense_len) 2735 csio->sense_resid = csio->sense_len - 2736 returned_sense_len; 2737 else 2738 csio->sense_resid = 0; 2739 2740 sense_len = min(returned_sense_len, 2741 csio->sense_len - csio->sense_resid); 2742 bzero(&csio->sense_data, sizeof(csio->sense_data)); 2743 bcopy(cm->cm_sense, &csio->sense_data, sense_len); 2744 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 2745 } 2746 2747 /* 2748 * Check if this is an INQUIRY command. If it's a VPD inquiry, 2749 * and it's page code 0 (Supported Page List), and there is 2750 * inquiry data, and this is for a sequential access device, and 2751 * the device is an SSP target, and TLR is supported by the 2752 * controller, turn the TLR_bits value ON if page 0x90 is 2753 * supported. 2754 */ 2755 if ((scsi_cdb[0] == INQUIRY) && 2756 (scsi_cdb[1] & SI_EVPD) && 2757 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) && 2758 #if 0 /* XXX swildner */ 2759 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) && 2760 #endif 2761 (csio->data_ptr != NULL) && 2762 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) && 2763 (sc->control_TLR) && 2764 (sc->mapping_table[target_id].device_info & 2765 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) { 2766 vpd_list = (struct scsi_vpd_supported_page_list *) 2767 csio->data_ptr; 2768 TLR_bits = &sc->mapping_table[target_id].TLR_bits; 2769 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; 2770 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON; 2771 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4]; 2772 alloc_len -= csio->resid; 2773 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) { 2774 if (vpd_list->list[i] == 0x90) { 2775 *TLR_bits = TLR_on; 2776 break; 2777 } 2778 } 2779 } 2780 2781 /* 2782 * If this is a SATA direct-access end device, mark it so that 2783 * a SCSI StartStopUnit command will be sent to it when the 2784 * driver is being shutdown. 2785 */ 2786 if ((scsi_cdb[0] == INQUIRY) && 2787 (csio->data_ptr != NULL) && 2788 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) && 2789 (sc->mapping_table[target_id].device_info & 2790 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) && 2791 ((sc->mapping_table[target_id].device_info & 2792 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) == 2793 MPI2_SAS_DEVICE_INFO_END_DEVICE)) { 2794 target = &sassc->targets[target_id]; 2795 target->supports_SSU = TRUE; 2796 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n", 2797 target_id); 2798 } 2799 break; 2800 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: 2801 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 2802 /* 2803 * If devinfo is 0 this will be a volume. In that case don't 2804 * tell CAM that the volume is not there. We want volumes to 2805 * be enumerated until they are deleted/removed, not just 2806 * failed. 2807 */ 2808 if (cm->cm_targ->devinfo == 0) 2809 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 2810 else 2811 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 2812 break; 2813 case MPI2_IOCSTATUS_INVALID_SGL: 2814 mpr_print_scsiio_cmd(sc, cm); 2815 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR); 2816 break; 2817 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: 2818 /* 2819 * This is one of the responses that comes back when an I/O 2820 * has been aborted. If it is because of a timeout that we 2821 * initiated, just set the status to CAM_CMD_TIMEOUT. 2822 * Otherwise set it to CAM_REQ_ABORTED. The effect on the 2823 * command is the same (it gets retried, subject to the 2824 * retry counter), the only difference is what gets printed 2825 * on the console. 2826 */ 2827 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) 2828 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT); 2829 else 2830 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED); 2831 break; 2832 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: 2833 /* resid is ignored for this condition */ 2834 csio->resid = 0; 2835 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR); 2836 break; 2837 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: 2838 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 2839 /* 2840 * These can sometimes be transient transport-related 2841 * errors, and sometimes persistent drive-related errors. 2842 * We used to retry these without decrementing the retry 2843 * count by returning CAM_REQUEUE_REQ. Unfortunately, if 2844 * we hit a persistent drive problem that returns one of 2845 * these error codes, we would retry indefinitely. So, 2846 * return CAM_REQ_CMP_ERROR so that we decrement the retry 2847 * count and avoid infinite retries. We're taking the 2848 * potential risk of flagging false failures in the event 2849 * of a topology-related error (e.g. a SAS expander problem 2850 * causes a command addressed to a drive to fail), but 2851 * avoiding getting into an infinite retry loop. 2852 */ 2853 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 2854 mpr_dprint(sc, MPR_INFO, 2855 "Controller reported %s tgt %u SMID %u loginfo %x\n", 2856 mpr_describe_table(mpr_iocstatus_string, 2857 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK), 2858 target_id, cm->cm_desc.Default.SMID, 2859 le32toh(rep->IOCLogInfo)); 2860 mpr_dprint(sc, MPR_XINFO, 2861 "SCSIStatus %x SCSIState %x xfercount %u\n", 2862 rep->SCSIStatus, rep->SCSIState, 2863 le32toh(rep->TransferCount)); 2864 break; 2865 case MPI2_IOCSTATUS_INVALID_FUNCTION: 2866 case MPI2_IOCSTATUS_INTERNAL_ERROR: 2867 case MPI2_IOCSTATUS_INVALID_VPID: 2868 case MPI2_IOCSTATUS_INVALID_FIELD: 2869 case MPI2_IOCSTATUS_INVALID_STATE: 2870 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: 2871 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: 2872 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: 2873 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 2874 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 2875 default: 2876 mprsas_log_command(cm, MPR_XINFO, 2877 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n", 2878 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo), 2879 rep->SCSIStatus, rep->SCSIState, 2880 le32toh(rep->TransferCount)); 2881 csio->resid = cm->cm_length; 2882 2883 #if 0 /* XXX swildner: NVMe support */ 2884 if (scsi_cdb[0] == UNMAP && 2885 target->is_nvme && 2886 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) 2887 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 2888 else 2889 #endif 2890 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 2891 2892 break; 2893 } 2894 2895 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ); 2896 2897 if (sassc->flags & MPRSAS_QUEUE_FROZEN) { 2898 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2899 sassc->flags &= ~MPRSAS_QUEUE_FROZEN; 2900 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM " 2901 "queue\n"); 2902 } 2903 2904 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) { 2905 ccb->ccb_h.status |= CAM_DEV_QFRZN; 2906 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); 2907 } 2908 2909 mpr_free_command(sc, cm); 2910 xpt_done(ccb); 2911 } 2912 2913 #if 0 /* __FreeBSD_version >= 900026 */ 2914 static void 2915 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm) 2916 { 2917 MPI2_SMP_PASSTHROUGH_REPLY *rpl; 2918 MPI2_SMP_PASSTHROUGH_REQUEST *req; 2919 uint64_t sasaddr; 2920 union ccb *ccb; 2921 2922 ccb = cm->cm_complete_data; 2923 2924 /* 2925 * Currently there should be no way we can hit this case. It only 2926 * happens when we have a failure to allocate chain frames, and SMP 2927 * commands require two S/G elements only. That should be handled 2928 * in the standard request size. 2929 */ 2930 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 2931 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP " 2932 "request!\n", __func__, cm->cm_flags); 2933 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 2934 goto bailout; 2935 } 2936 2937 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply; 2938 if (rpl == NULL) { 2939 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__); 2940 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 2941 goto bailout; 2942 } 2943 2944 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; 2945 sasaddr = le32toh(req->SASAddress.Low); 2946 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32; 2947 2948 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) != 2949 MPI2_IOCSTATUS_SUCCESS || 2950 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) { 2951 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n", 2952 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus); 2953 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 2954 goto bailout; 2955 } 2956 2957 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx " 2958 "completed successfully\n", __func__, (uintmax_t)sasaddr); 2959 2960 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED) 2961 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 2962 else 2963 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR); 2964 2965 bailout: 2966 /* 2967 * We sync in both directions because we had DMAs in the S/G list 2968 * in both directions. 2969 */ 2970 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, 2971 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2972 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); 2973 mpr_free_command(sc, cm); 2974 xpt_done(ccb); 2975 } 2976 2977 static void 2978 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr) 2979 { 2980 struct mpr_command *cm; 2981 uint8_t *request, *response; 2982 MPI2_SMP_PASSTHROUGH_REQUEST *req; 2983 struct mpr_softc *sc; 2984 struct sglist *sg; 2985 int error; 2986 2987 sc = sassc->sc; 2988 sg = NULL; 2989 error = 0; 2990 2991 #if 0 /* (__FreeBSD_version >= 1000028) || \ 2992 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000)) */ 2993 switch (ccb->ccb_h.flags & CAM_DATA_MASK) { 2994 case CAM_DATA_PADDR: 2995 case CAM_DATA_SG_PADDR: 2996 /* 2997 * XXX We don't yet support physical addresses here. 2998 */ 2999 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not " 3000 "supported\n", __func__); 3001 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); 3002 xpt_done(ccb); 3003 return; 3004 case CAM_DATA_SG: 3005 /* 3006 * The chip does not support more than one buffer for the 3007 * request or response. 3008 */ 3009 if ((ccb->smpio.smp_request_sglist_cnt > 1) 3010 || (ccb->smpio.smp_response_sglist_cnt > 1)) { 3011 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or " 3012 "response buffer segments not supported for SMP\n", 3013 __func__); 3014 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); 3015 xpt_done(ccb); 3016 return; 3017 } 3018 3019 /* 3020 * The CAM_SCATTER_VALID flag was originally implemented 3021 * for the XPT_SCSI_IO CCB, which only has one data pointer. 3022 * We have two. So, just take that flag to mean that we 3023 * might have S/G lists, and look at the S/G segment count 3024 * to figure out whether that is the case for each individual 3025 * buffer. 3026 */ 3027 if (ccb->smpio.smp_request_sglist_cnt != 0) { 3028 bus_dma_segment_t *req_sg; 3029 3030 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request; 3031 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr; 3032 } else 3033 request = ccb->smpio.smp_request; 3034 3035 if (ccb->smpio.smp_response_sglist_cnt != 0) { 3036 bus_dma_segment_t *rsp_sg; 3037 3038 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response; 3039 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; 3040 } else 3041 response = ccb->smpio.smp_response; 3042 break; 3043 case CAM_DATA_VADDR: 3044 request = ccb->smpio.smp_request; 3045 response = ccb->smpio.smp_response; 3046 break; 3047 default: 3048 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); 3049 xpt_done(ccb); 3050 return; 3051 } 3052 #else /* __FreeBSD_version < 1000028 */ 3053 /* 3054 * XXX We don't yet support physical addresses here. 3055 */ 3056 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) { 3057 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not " 3058 "supported\n", __func__); 3059 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); 3060 xpt_done(ccb); 3061 return; 3062 } 3063 3064 /* 3065 * If the user wants to send an S/G list, check to make sure they 3066 * have single buffers. 3067 */ 3068 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { 3069 /* 3070 * The chip does not support more than one buffer for the 3071 * request or response. 3072 */ 3073 if ((ccb->smpio.smp_request_sglist_cnt > 1) 3074 || (ccb->smpio.smp_response_sglist_cnt > 1)) { 3075 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or " 3076 "response buffer segments not supported for SMP\n", 3077 __func__); 3078 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); 3079 xpt_done(ccb); 3080 return; 3081 } 3082 3083 /* 3084 * The CAM_SCATTER_VALID flag was originally implemented 3085 * for the XPT_SCSI_IO CCB, which only has one data pointer. 3086 * We have two. So, just take that flag to mean that we 3087 * might have S/G lists, and look at the S/G segment count 3088 * to figure out whether that is the case for each individual 3089 * buffer. 3090 */ 3091 if (ccb->smpio.smp_request_sglist_cnt != 0) { 3092 bus_dma_segment_t *req_sg; 3093 3094 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request; 3095 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr; 3096 } else 3097 request = ccb->smpio.smp_request; 3098 3099 if (ccb->smpio.smp_response_sglist_cnt != 0) { 3100 bus_dma_segment_t *rsp_sg; 3101 3102 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response; 3103 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; 3104 } else 3105 response = ccb->smpio.smp_response; 3106 } else { 3107 request = ccb->smpio.smp_request; 3108 response = ccb->smpio.smp_response; 3109 } 3110 #endif /* __FreeBSD_version < 1000028 */ 3111 3112 cm = mpr_alloc_command(sc); 3113 if (cm == NULL) { 3114 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n", 3115 __func__); 3116 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); 3117 xpt_done(ccb); 3118 return; 3119 } 3120 3121 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; 3122 bzero(req, sizeof(*req)); 3123 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; 3124 3125 /* Allow the chip to use any route to this SAS address. */ 3126 req->PhysicalPort = 0xff; 3127 3128 req->RequestDataLength = htole16(ccb->smpio.smp_request_len); 3129 req->SGLFlags = 3130 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI; 3131 3132 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address " 3133 "%#jx\n", __func__, (uintmax_t)sasaddr); 3134 3135 mpr_init_sge(cm, req, &req->SGL); 3136 3137 /* 3138 * Set up a uio to pass into mpr_map_command(). This allows us to 3139 * do one map command, and one busdma call in there. 3140 */ 3141 cm->cm_uio.uio_iov = cm->cm_iovec; 3142 cm->cm_uio.uio_iovcnt = 2; 3143 cm->cm_uio.uio_segflg = UIO_SYSSPACE; 3144 3145 /* 3146 * The read/write flag isn't used by busdma, but set it just in 3147 * case. This isn't exactly accurate, either, since we're going in 3148 * both directions. 3149 */ 3150 cm->cm_uio.uio_rw = UIO_WRITE; 3151 3152 cm->cm_iovec[0].iov_base = request; 3153 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength); 3154 cm->cm_iovec[1].iov_base = response; 3155 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len; 3156 3157 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len + 3158 cm->cm_iovec[1].iov_len; 3159 3160 /* 3161 * Trigger a warning message in mpr_data_cb() for the user if we 3162 * wind up exceeding two S/G segments. The chip expects one 3163 * segment for the request and another for the response. 3164 */ 3165 cm->cm_max_segs = 2; 3166 3167 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3168 cm->cm_complete = mprsas_smpio_complete; 3169 cm->cm_complete_data = ccb; 3170 3171 /* 3172 * Tell the mapping code that we're using a uio, and that this is 3173 * an SMP passthrough request. There is a little special-case 3174 * logic there (in mpr_data_cb()) to handle the bidirectional 3175 * transfer. 3176 */ 3177 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS | 3178 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT; 3179 3180 /* The chip data format is little endian. */ 3181 req->SASAddress.High = htole32(sasaddr >> 32); 3182 req->SASAddress.Low = htole32(sasaddr); 3183 3184 /* 3185 * XXX Note that we don't have a timeout/abort mechanism here. 3186 * From the manual, it looks like task management requests only 3187 * work for SCSI IO and SATA passthrough requests. We may need to 3188 * have a mechanism to retry requests in the event of a chip reset 3189 * at least. Hopefully the chip will insure that any errors short 3190 * of that are relayed back to the driver. 3191 */ 3192 error = mpr_map_command(sc, cm); 3193 if ((error != 0) && (error != EINPROGRESS)) { 3194 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from " 3195 "mpr_map_command()\n", __func__, error); 3196 goto bailout_error; 3197 } 3198 3199 return; 3200 3201 bailout_error: 3202 mpr_free_command(sc, cm); 3203 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); 3204 xpt_done(ccb); 3205 return; 3206 } 3207 3208 static void 3209 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb) 3210 { 3211 struct mpr_softc *sc; 3212 struct mprsas_target *targ; 3213 uint64_t sasaddr = 0; 3214 3215 sc = sassc->sc; 3216 3217 /* 3218 * Make sure the target exists. 3219 */ 3220 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, 3221 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id)); 3222 targ = &sassc->targets[ccb->ccb_h.target_id]; 3223 if (targ->handle == 0x0) { 3224 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n", 3225 __func__, ccb->ccb_h.target_id); 3226 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); 3227 xpt_done(ccb); 3228 return; 3229 } 3230 3231 /* 3232 * If this device has an embedded SMP target, we'll talk to it 3233 * directly. 3234 * figure out what the expander's address is. 3235 */ 3236 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0) 3237 sasaddr = targ->sasaddr; 3238 3239 /* 3240 * If we don't have a SAS address for the expander yet, try 3241 * grabbing it from the page 0x83 information cached in the 3242 * transport layer for this target. LSI expanders report the 3243 * expander SAS address as the port-associated SAS address in 3244 * Inquiry VPD page 0x83. Maxim expanders don't report it in page 3245 * 0x83. 3246 * 3247 * XXX KDM disable this for now, but leave it commented out so that 3248 * it is obvious that this is another possible way to get the SAS 3249 * address. 3250 * 3251 * The parent handle method below is a little more reliable, and 3252 * the other benefit is that it works for devices other than SES 3253 * devices. So you can send a SMP request to a da(4) device and it 3254 * will get routed to the expander that device is attached to. 3255 * (Assuming the da(4) device doesn't contain an SMP target...) 3256 */ 3257 #if 0 3258 if (sasaddr == 0) 3259 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path); 3260 #endif 3261 3262 /* 3263 * If we still don't have a SAS address for the expander, look for 3264 * the parent device of this device, which is probably the expander. 3265 */ 3266 if (sasaddr == 0) { 3267 #ifdef OLD_MPR_PROBE 3268 struct mprsas_target *parent_target; 3269 #endif 3270 3271 if (targ->parent_handle == 0x0) { 3272 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have " 3273 "a valid parent handle!\n", __func__, targ->handle); 3274 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3275 goto bailout; 3276 } 3277 #ifdef OLD_MPR_PROBE 3278 parent_target = mprsas_find_target_by_handle(sassc, 0, 3279 targ->parent_handle); 3280 3281 if (parent_target == NULL) { 3282 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have " 3283 "a valid parent target!\n", __func__, targ->handle); 3284 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3285 goto bailout; 3286 } 3287 3288 if ((parent_target->devinfo & 3289 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { 3290 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d " 3291 "does not have an SMP target!\n", __func__, 3292 targ->handle, parent_target->handle); 3293 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3294 goto bailout; 3295 } 3296 3297 sasaddr = parent_target->sasaddr; 3298 #else /* OLD_MPR_PROBE */ 3299 if ((targ->parent_devinfo & 3300 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { 3301 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d " 3302 "does not have an SMP target!\n", __func__, 3303 targ->handle, targ->parent_handle); 3304 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3305 goto bailout; 3306 3307 } 3308 if (targ->parent_sasaddr == 0x0) { 3309 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle " 3310 "%d does not have a valid SAS address!\n", __func__, 3311 targ->handle, targ->parent_handle); 3312 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3313 goto bailout; 3314 } 3315 3316 sasaddr = targ->parent_sasaddr; 3317 #endif /* OLD_MPR_PROBE */ 3318 3319 } 3320 3321 if (sasaddr == 0) { 3322 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for " 3323 "handle %d\n", __func__, targ->handle); 3324 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3325 goto bailout; 3326 } 3327 mprsas_send_smpcmd(sassc, ccb, sasaddr); 3328 3329 return; 3330 3331 bailout: 3332 xpt_done(ccb); 3333 3334 } 3335 #endif //__FreeBSD_version >= 900026 3336 3337 static void 3338 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb) 3339 { 3340 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 3341 struct mpr_softc *sc; 3342 struct mpr_command *tm; 3343 struct mprsas_target *targ; 3344 3345 MPR_FUNCTRACE(sassc->sc); 3346 KKASSERT(lockowned(&sassc->sc->mpr_lock)); 3347 3348 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of " 3349 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id)); 3350 sc = sassc->sc; 3351 tm = mpr_alloc_command(sc); 3352 if (tm == NULL) { 3353 mpr_dprint(sc, MPR_ERROR, "command alloc failure in " 3354 "mprsas_action_resetdev\n"); 3355 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); 3356 xpt_done(ccb); 3357 return; 3358 } 3359 3360 targ = &sassc->targets[ccb->ccb_h.target_id]; 3361 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 3362 req->DevHandle = htole16(targ->handle); 3363 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 3364 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 3365 3366 /* SAS Hard Link Reset / SATA Link Reset */ 3367 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 3368 3369 tm->cm_data = NULL; 3370 tm->cm_desc.HighPriority.RequestFlags = 3371 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 3372 tm->cm_complete = mprsas_resetdev_complete; 3373 tm->cm_complete_data = ccb; 3374 3375 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", 3376 __func__, targ->tid); 3377 tm->cm_targ = targ; 3378 targ->flags |= MPRSAS_TARGET_INRESET; 3379 3380 mpr_map_command(sc, tm); 3381 } 3382 3383 static void 3384 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm) 3385 { 3386 MPI2_SCSI_TASK_MANAGE_REPLY *resp; 3387 union ccb *ccb; 3388 3389 MPR_FUNCTRACE(sc); 3390 KKASSERT(lockowned(&sc->mpr_lock)); 3391 3392 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 3393 ccb = tm->cm_complete_data; 3394 3395 /* 3396 * Currently there should be no way we can hit this case. It only 3397 * happens when we have a failure to allocate chain frames, and 3398 * task management commands don't have S/G lists. 3399 */ 3400 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3401 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 3402 3403 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 3404 3405 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of " 3406 "handle %#04x! This should not happen!\n", __func__, 3407 tm->cm_flags, req->DevHandle); 3408 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 3409 goto bailout; 3410 } 3411 3412 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", 3413 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode)); 3414 3415 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) { 3416 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 3417 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 3418 CAM_LUN_WILDCARD); 3419 } 3420 else 3421 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 3422 3423 bailout: 3424 3425 mprsas_free_tm(sc, tm); 3426 xpt_done(ccb); 3427 } 3428 3429 static void 3430 mprsas_poll(struct cam_sim *sim) 3431 { 3432 struct mprsas_softc *sassc; 3433 3434 sassc = cam_sim_softc(sim); 3435 3436 if (sassc->sc->mpr_debug & MPR_TRACE) { 3437 /* frequent debug messages during a panic just slow 3438 * everything down too much. 3439 */ 3440 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n", 3441 __func__); 3442 sassc->sc->mpr_debug &= ~MPR_TRACE; 3443 } 3444 3445 mpr_intr_locked(sassc->sc); 3446 } 3447 3448 static void 3449 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path, 3450 void *arg) 3451 { 3452 struct mpr_softc *sc; 3453 3454 sc = (struct mpr_softc *)callback_arg; 3455 3456 switch (code) { 3457 #if 0 /* (__FreeBSD_version >= 1000006) || \ 3458 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000)) */ 3459 case AC_ADVINFO_CHANGED: { 3460 struct mprsas_target *target; 3461 struct mprsas_softc *sassc; 3462 struct scsi_read_capacity_data_long rcap_buf; 3463 struct ccb_dev_advinfo *cdai; 3464 struct mprsas_lun *lun; 3465 lun_id_t lunid; 3466 int found_lun; 3467 uintptr_t buftype; 3468 3469 buftype = (uintptr_t)arg; 3470 3471 found_lun = 0; 3472 sassc = sc->sassc; 3473 3474 /* 3475 * We're only interested in read capacity data changes. 3476 */ 3477 if (buftype != CDAI_TYPE_RCAPLONG) 3478 break; 3479 3480 /* 3481 * See the comment in mpr_attach_sas() for a detailed 3482 * explanation. In these versions of FreeBSD we register 3483 * for all events and filter out the events that don't 3484 * apply to us. 3485 */ 3486 #if 1 /* (__FreeBSD_version < 1000703) || \ 3487 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) */ 3488 if (xpt_path_path_id(path) != sassc->sim->path_id) 3489 break; 3490 #endif 3491 3492 /* 3493 * We should have a handle for this, but check to make sure. 3494 */ 3495 KASSERT(xpt_path_target_id(path) < sassc->maxtargets, 3496 ("Target %d out of bounds in mprsas_async\n", 3497 xpt_path_target_id(path))); 3498 target = &sassc->targets[xpt_path_target_id(path)]; 3499 if (target->handle == 0) 3500 break; 3501 3502 lunid = xpt_path_lun_id(path); 3503 3504 SLIST_FOREACH(lun, &target->luns, lun_link) { 3505 if (lun->lun_id == lunid) { 3506 found_lun = 1; 3507 break; 3508 } 3509 } 3510 3511 if (found_lun == 0) { 3512 lun = kmalloc(sizeof(struct mprsas_lun), M_MPR, 3513 M_NOWAIT | M_ZERO); 3514 if (lun == NULL) { 3515 mpr_dprint(sc, MPR_ERROR, "Unable to alloc " 3516 "LUN for EEDP support.\n"); 3517 break; 3518 } 3519 lun->lun_id = lunid; 3520 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3521 } 3522 3523 bzero(&rcap_buf, sizeof(rcap_buf)); 3524 cdai = xpt_alloc_ccb(); 3525 xpt_setup_ccb(&cdai->ccb_h, path, CAM_PRIORITY_NORMAL); 3526 cdai->ccb_h.func_code = XPT_DEV_ADVINFO; 3527 cdai->ccb_h.flags = CAM_DIR_IN; 3528 cdai->buftype = CDAI_TYPE_RCAPLONG; 3529 #if 0 /* (__FreeBSD_version >= 1100061) || \ 3530 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000)) */ 3531 cdai->flags = CDAI_FLAG_NONE; 3532 #else 3533 cdai->flags = 0; 3534 #endif 3535 cdai->bufsiz = sizeof(rcap_buf); 3536 cdai->buf = (uint8_t *)&rcap_buf; 3537 xpt_action((union ccb *)cdai); 3538 if ((cdai->ccb_h.status & CAM_DEV_QFRZN) != 0) 3539 cam_release_devq(cdai->ccb_h.path, 0, 0, 0, FALSE); 3540 3541 xpt_free_ccb(&cdai->ccb_h); 3542 3543 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP) 3544 && (rcap_buf.prot & SRC16_PROT_EN)) { 3545 switch (rcap_buf.prot & SRC16_P_TYPE) { 3546 case SRC16_PTYPE_1: 3547 case SRC16_PTYPE_3: 3548 lun->eedp_formatted = TRUE; 3549 lun->eedp_block_size = 3550 scsi_4btoul(rcap_buf.length); 3551 break; 3552 case SRC16_PTYPE_2: 3553 default: 3554 lun->eedp_formatted = FALSE; 3555 lun->eedp_block_size = 0; 3556 break; 3557 } 3558 } else { 3559 lun->eedp_formatted = FALSE; 3560 lun->eedp_block_size = 0; 3561 } 3562 break; 3563 } 3564 #endif 3565 case AC_FOUND_DEVICE: { 3566 struct ccb_getdev *cgd; 3567 3568 /* 3569 * See the comment in mpr_attach_sas() for a detailed 3570 * explanation. In these versions of FreeBSD we register 3571 * for all events and filter out the events that don't 3572 * apply to us. 3573 */ 3574 #if 1 /* (__FreeBSD_version < 1000703) || \ 3575 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) */ 3576 if (xpt_path_path_id(path) != sc->sassc->sim->path_id) 3577 break; 3578 #endif 3579 3580 cgd = arg; 3581 #if 1 /* (__FreeBSD_version < 901503) || \ 3582 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 3583 mprsas_check_eedp(sc, path, cgd); 3584 #endif 3585 break; 3586 } 3587 default: 3588 break; 3589 } 3590 } 3591 3592 #if 1 /* (__FreeBSD_version < 901503) || \ 3593 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 3594 static void 3595 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path, 3596 struct ccb_getdev *cgd) 3597 { 3598 struct mprsas_softc *sassc = sc->sassc; 3599 struct ccb_scsiio *csio; 3600 struct scsi_read_capacity_16 *scsi_cmd; 3601 struct scsi_read_capacity_eedp *rcap_buf; 3602 path_id_t pathid; 3603 target_id_t targetid; 3604 lun_id_t lunid; 3605 union ccb *ccb; 3606 struct cam_path *local_path; 3607 struct mprsas_target *target; 3608 struct mprsas_lun *lun; 3609 uint8_t found_lun; 3610 char path_str[64]; 3611 3612 pathid = cam_sim_path(sassc->sim); 3613 targetid = xpt_path_target_id(path); 3614 lunid = xpt_path_lun_id(path); 3615 3616 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in " 3617 "mprsas_check_eedp\n", targetid)); 3618 target = &sassc->targets[targetid]; 3619 if (target->handle == 0x0) 3620 return; 3621 3622 /* 3623 * Determine if the device is EEDP capable. 3624 * 3625 * If this flag is set in the inquiry data, the device supports 3626 * protection information, and must support the 16 byte read capacity 3627 * command, otherwise continue without sending read cap 16. 3628 */ 3629 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0) 3630 return; 3631 3632 /* 3633 * Issue a READ CAPACITY 16 command. This info is used to determine if 3634 * the LUN is formatted for EEDP support. 3635 */ 3636 ccb = xpt_alloc_ccb(); 3637 if (ccb == NULL) { 3638 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP " 3639 "support.\n"); 3640 return; 3641 } 3642 3643 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) != 3644 CAM_REQ_CMP) { 3645 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP " 3646 "support.\n"); 3647 xpt_free_ccb(&ccb->ccb_h); 3648 return; 3649 } 3650 3651 /* 3652 * If LUN is already in list, don't create a new one. 3653 */ 3654 found_lun = FALSE; 3655 SLIST_FOREACH(lun, &target->luns, lun_link) { 3656 if (lun->lun_id == lunid) { 3657 found_lun = TRUE; 3658 break; 3659 } 3660 } 3661 if (!found_lun) { 3662 lun = kmalloc(sizeof(struct mprsas_lun), M_MPR, 3663 M_NOWAIT | M_ZERO); 3664 if (lun == NULL) { 3665 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for " 3666 "EEDP support.\n"); 3667 xpt_free_path(local_path); 3668 xpt_free_ccb(&ccb->ccb_h); 3669 return; 3670 } 3671 lun->lun_id = lunid; 3672 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3673 } 3674 3675 xpt_path_string(local_path, path_str, sizeof(path_str)); 3676 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n", 3677 path_str, target->handle); 3678 3679 /* 3680 * Issue a READ CAPACITY 16 command for the LUN. The 3681 * mprsas_read_cap_done function will load the read cap info into the 3682 * LUN struct. 3683 */ 3684 rcap_buf = kmalloc(sizeof(struct scsi_read_capacity_eedp), M_MPR, 3685 M_NOWAIT | M_ZERO); 3686 if (rcap_buf == NULL) { 3687 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity " 3688 "buffer for EEDP support.\n"); 3689 xpt_free_path(ccb->ccb_h.path); 3690 xpt_free_ccb(&ccb->ccb_h); 3691 return; 3692 } 3693 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_NORMAL); 3694 csio = &ccb->csio; 3695 csio->ccb_h.func_code = XPT_SCSI_IO; 3696 csio->ccb_h.flags = CAM_DIR_IN; 3697 csio->ccb_h.retry_count = 4; 3698 csio->ccb_h.cbfcnp = mprsas_read_cap_done; 3699 csio->ccb_h.timeout = 60000; 3700 csio->data_ptr = (uint8_t *)rcap_buf; 3701 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp); 3702 csio->sense_len = MPR_SENSE_LEN; 3703 csio->cdb_len = sizeof(*scsi_cmd); 3704 csio->tag_action = MSG_SIMPLE_Q_TAG; 3705 3706 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; 3707 bzero(scsi_cmd, sizeof(*scsi_cmd)); 3708 scsi_cmd->opcode = 0x9E; 3709 scsi_cmd->service_action = SRC16_SERVICE_ACTION; 3710 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp); 3711 3712 ccb->ccb_h.ppriv_ptr1 = sassc; 3713 xpt_action(ccb); 3714 } 3715 3716 static void 3717 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb) 3718 { 3719 struct mprsas_softc *sassc; 3720 struct mprsas_target *target; 3721 struct mprsas_lun *lun; 3722 struct scsi_read_capacity_eedp *rcap_buf; 3723 3724 if (done_ccb == NULL) 3725 return; 3726 3727 /* Driver need to release devq, it Scsi command is 3728 * generated by driver internally. 3729 * Currently there is a single place where driver 3730 * calls scsi command internally. In future if driver 3731 * calls more scsi command internally, it needs to release 3732 * devq internally, since those command will not go back to 3733 * cam_periph. 3734 */ 3735 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) { 3736 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 3737 xpt_release_devq(done_ccb->ccb_h.path, 3738 /*count*/ 1, /*run_queue*/TRUE); 3739 } 3740 3741 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr; 3742 3743 /* 3744 * Get the LUN ID for the path and look it up in the LUN list for the 3745 * target. 3746 */ 3747 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1; 3748 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out " 3749 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id)); 3750 target = &sassc->targets[done_ccb->ccb_h.target_id]; 3751 SLIST_FOREACH(lun, &target->luns, lun_link) { 3752 if (lun->lun_id != done_ccb->ccb_h.target_lun) 3753 continue; 3754 3755 /* 3756 * Got the LUN in the target's LUN list. Fill it in with EEDP 3757 * info. If the READ CAP 16 command had some SCSI error (common 3758 * if command is not supported), mark the lun as not supporting 3759 * EEDP and set the block size to 0. 3760 */ 3761 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) || 3762 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) { 3763 lun->eedp_formatted = FALSE; 3764 lun->eedp_block_size = 0; 3765 break; 3766 } 3767 3768 if (rcap_buf->protect & 0x01) { 3769 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID " 3770 "%d is formatted for EEDP support.\n", 3771 done_ccb->ccb_h.target_lun, 3772 done_ccb->ccb_h.target_id); 3773 lun->eedp_formatted = TRUE; 3774 lun->eedp_block_size = scsi_4btoul(rcap_buf->length); 3775 } 3776 break; 3777 } 3778 3779 // Finished with this CCB and path. 3780 kfree(rcap_buf, M_MPR); 3781 xpt_free_path(done_ccb->ccb_h.path); 3782 xpt_free_ccb(&done_ccb->ccb_h); 3783 } 3784 #endif /* (__FreeBSD_version < 901503) || \ 3785 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 3786 3787 void 3788 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm, 3789 struct mprsas_target *target, lun_id_t lun_id) 3790 { 3791 union ccb *ccb; 3792 path_id_t path_id; 3793 3794 /* 3795 * Set the INRESET flag for this target so that no I/O will be sent to 3796 * the target until the reset has completed. If an I/O request does 3797 * happen, the devq will be frozen. The CCB holds the path which is 3798 * used to release the devq. The devq is released and the CCB is freed 3799 * when the TM completes. 3800 */ 3801 ccb = xpt_alloc_ccb(); 3802 if (ccb) { 3803 path_id = cam_sim_path(sc->sassc->sim); 3804 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id, 3805 target->tid, lun_id) != CAM_REQ_CMP) { 3806 xpt_free_ccb(&ccb->ccb_h); 3807 } else { 3808 tm->cm_ccb = ccb; 3809 tm->cm_targ = target; 3810 target->flags |= MPRSAS_TARGET_INRESET; 3811 } 3812 } 3813 } 3814 3815 int 3816 mprsas_startup(struct mpr_softc *sc) 3817 { 3818 /* 3819 * Send the port enable message and set the wait_for_port_enable flag. 3820 * This flag helps to keep the simq frozen until all discovery events 3821 * are processed. 3822 */ 3823 sc->wait_for_port_enable = 1; 3824 mprsas_send_portenable(sc); 3825 return (0); 3826 } 3827 3828 static int 3829 mprsas_send_portenable(struct mpr_softc *sc) 3830 { 3831 MPI2_PORT_ENABLE_REQUEST *request; 3832 struct mpr_command *cm; 3833 3834 MPR_FUNCTRACE(sc); 3835 3836 if ((cm = mpr_alloc_command(sc)) == NULL) 3837 return (EBUSY); 3838 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; 3839 request->Function = MPI2_FUNCTION_PORT_ENABLE; 3840 request->MsgFlags = 0; 3841 request->VP_ID = 0; 3842 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3843 cm->cm_complete = mprsas_portenable_complete; 3844 cm->cm_data = NULL; 3845 cm->cm_sge = NULL; 3846 3847 mpr_map_command(sc, cm); 3848 mpr_dprint(sc, MPR_XINFO, 3849 "mpr_send_portenable finished cm %p req %p complete %p\n", 3850 cm, cm->cm_req, cm->cm_complete); 3851 return (0); 3852 } 3853 3854 static void 3855 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm) 3856 { 3857 MPI2_PORT_ENABLE_REPLY *reply; 3858 struct mprsas_softc *sassc; 3859 3860 MPR_FUNCTRACE(sc); 3861 sassc = sc->sassc; 3862 3863 /* 3864 * Currently there should be no way we can hit this case. It only 3865 * happens when we have a failure to allocate chain frames, and 3866 * port enable commands don't have S/G lists. 3867 */ 3868 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3869 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! " 3870 "This should not happen!\n", __func__, cm->cm_flags); 3871 } 3872 3873 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; 3874 if (reply == NULL) 3875 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n"); 3876 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != 3877 MPI2_IOCSTATUS_SUCCESS) 3878 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n"); 3879 3880 mpr_free_command(sc, cm); 3881 /* 3882 * Done waiting for port enable to complete. Decrement the refcount. 3883 * If refcount is 0, discovery is complete and a rescan of the bus can 3884 * take place. 3885 */ 3886 sc->wait_for_port_enable = 0; 3887 sc->port_enable_complete = 1; 3888 wakeup(&sc->port_enable_complete); 3889 mprsas_startup_decrement(sassc); 3890 } 3891 3892 int 3893 mprsas_check_id(struct mprsas_softc *sassc, int id) 3894 { 3895 struct mpr_softc *sc = sassc->sc; 3896 char *ids; 3897 char *name; 3898 3899 ids = &sc->exclude_ids[0]; 3900 while((name = strsep(&ids, ",")) != NULL) { 3901 if (name[0] == '\0') 3902 continue; 3903 if (strtol(name, NULL, 0) == (long)id) 3904 return (1); 3905 } 3906 3907 return (0); 3908 } 3909 3910 void 3911 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets) 3912 { 3913 struct mprsas_softc *sassc; 3914 struct mprsas_lun *lun, *lun_tmp; 3915 struct mprsas_target *targ; 3916 int i; 3917 3918 sassc = sc->sassc; 3919 /* 3920 * The number of targets is based on IOC Facts, so free all of 3921 * the allocated LUNs for each target and then the target buffer 3922 * itself. 3923 */ 3924 for (i=0; i< maxtargets; i++) { 3925 targ = &sassc->targets[i]; 3926 SLIST_FOREACH_MUTABLE(lun, &targ->luns, lun_link, lun_tmp) { 3927 kfree(lun, M_MPR); 3928 } 3929 } 3930 kfree(sassc->targets, M_MPR); 3931 3932 sassc->targets = kmalloc(sizeof(struct mprsas_target) * maxtargets, 3933 M_MPR, M_WAITOK|M_ZERO); 3934 if (!sassc->targets) { 3935 panic("%s failed to alloc targets with error %d\n", 3936 __func__, ENOMEM); 3937 } 3938 } 3939