1 /*- 2 * Common functions for CAM "type" (peripheral) drivers. 3 * 4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 5 * 6 * Copyright (c) 1997, 1998 Justin T. Gibbs. 7 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry. 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification, immediately at the beginning of the file. 16 * 2. The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/types.h> 38 #include <sys/malloc.h> 39 #include <sys/kernel.h> 40 #include <sys/bio.h> 41 #include <sys/conf.h> 42 #include <sys/lock.h> 43 #include <sys/mutex.h> 44 #include <sys/buf.h> 45 #include <sys/proc.h> 46 #include <sys/devicestat.h> 47 #include <sys/bus.h> 48 #include <sys/sbuf.h> 49 #include <sys/sysctl.h> 50 #include <vm/vm.h> 51 #include <vm/vm_extern.h> 52 53 #include <cam/cam.h> 54 #include <cam/cam_ccb.h> 55 #include <cam/cam_queue.h> 56 #include <cam/cam_xpt_periph.h> 57 #include <cam/cam_periph.h> 58 #include <cam/cam_debug.h> 59 #include <cam/cam_sim.h> 60 #include <cam/cam_xpt_internal.h> /* For KASSERTs only */ 61 62 #include <cam/scsi/scsi_all.h> 63 #include <cam/scsi/scsi_message.h> 64 #include <cam/scsi/scsi_pass.h> 65 66 static u_int camperiphnextunit(struct periph_driver *p_drv, 67 u_int newunit, int wired, 68 path_id_t pathid, target_id_t target, 69 lun_id_t lun); 70 static u_int camperiphunit(struct periph_driver *p_drv, 71 path_id_t pathid, target_id_t target, 72 lun_id_t lun); 73 static void camperiphdone(struct cam_periph *periph, 74 union ccb *done_ccb); 75 static void camperiphfree(struct cam_periph *periph); 76 static int camperiphscsistatuserror(union ccb *ccb, 77 union ccb **orig_ccb, 78 cam_flags camflags, 79 u_int32_t sense_flags, 80 int *openings, 81 u_int32_t *relsim_flags, 82 u_int32_t *timeout, 83 u_int32_t *action, 84 const char **action_string); 85 static int camperiphscsisenseerror(union ccb *ccb, 86 union ccb **orig_ccb, 87 cam_flags camflags, 88 u_int32_t sense_flags, 89 int *openings, 90 u_int32_t *relsim_flags, 91 u_int32_t *timeout, 92 u_int32_t *action, 93 const char **action_string); 94 static void cam_periph_devctl_notify(union ccb *ccb); 95 96 static int nperiph_drivers; 97 static int initialized = 0; 98 struct periph_driver **periph_drivers; 99 100 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers"); 101 102 static int periph_selto_delay = 1000; 103 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay); 104 static int periph_noresrc_delay = 500; 105 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay); 106 static int periph_busy_delay = 500; 107 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay); 108 109 static u_int periph_mapmem_thresh = 65536; 110 SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN, 111 &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping"); 112 113 void 114 periphdriver_register(void *data) 115 { 116 struct periph_driver *drv = (struct periph_driver *)data; 117 struct periph_driver **newdrivers, **old; 118 int ndrivers; 119 120 again: 121 ndrivers = nperiph_drivers + 2; 122 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH, 123 M_WAITOK); 124 xpt_lock_buses(); 125 if (ndrivers != nperiph_drivers + 2) { 126 /* 127 * Lost race against itself; go around. 128 */ 129 xpt_unlock_buses(); 130 free(newdrivers, M_CAMPERIPH); 131 goto again; 132 } 133 if (periph_drivers) 134 bcopy(periph_drivers, newdrivers, 135 sizeof(*newdrivers) * nperiph_drivers); 136 newdrivers[nperiph_drivers] = drv; 137 newdrivers[nperiph_drivers + 1] = NULL; 138 old = periph_drivers; 139 periph_drivers = newdrivers; 140 nperiph_drivers++; 141 xpt_unlock_buses(); 142 if (old) 143 free(old, M_CAMPERIPH); 144 /* If driver marked as early or it is late now, initialize it. */ 145 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) || 146 initialized > 1) 147 (*drv->init)(); 148 } 149 150 int 151 periphdriver_unregister(void *data) 152 { 153 struct periph_driver *drv = (struct periph_driver *)data; 154 int error, n; 155 156 /* If driver marked as early or it is late now, deinitialize it. */ 157 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) || 158 initialized > 1) { 159 if (drv->deinit == NULL) { 160 printf("CAM periph driver '%s' doesn't have deinit.\n", 161 drv->driver_name); 162 return (EOPNOTSUPP); 163 } 164 error = drv->deinit(); 165 if (error != 0) 166 return (error); 167 } 168 169 xpt_lock_buses(); 170 for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++) 171 ; 172 KASSERT(n < nperiph_drivers, 173 ("Periph driver '%s' was not registered", drv->driver_name)); 174 for (; n + 1 < nperiph_drivers; n++) 175 periph_drivers[n] = periph_drivers[n + 1]; 176 periph_drivers[n + 1] = NULL; 177 nperiph_drivers--; 178 xpt_unlock_buses(); 179 return (0); 180 } 181 182 void 183 periphdriver_init(int level) 184 { 185 int i, early; 186 187 initialized = max(initialized, level); 188 for (i = 0; periph_drivers[i] != NULL; i++) { 189 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2; 190 if (early == initialized) 191 (*periph_drivers[i]->init)(); 192 } 193 } 194 195 cam_status 196 cam_periph_alloc(periph_ctor_t *periph_ctor, 197 periph_oninv_t *periph_oninvalidate, 198 periph_dtor_t *periph_dtor, periph_start_t *periph_start, 199 char *name, cam_periph_type type, struct cam_path *path, 200 ac_callback_t *ac_callback, ac_code code, void *arg) 201 { 202 struct periph_driver **p_drv; 203 struct cam_sim *sim; 204 struct cam_periph *periph; 205 struct cam_periph *cur_periph; 206 path_id_t path_id; 207 target_id_t target_id; 208 lun_id_t lun_id; 209 cam_status status; 210 u_int init_level; 211 212 init_level = 0; 213 /* 214 * Handle Hot-Plug scenarios. If there is already a peripheral 215 * of our type assigned to this path, we are likely waiting for 216 * final close on an old, invalidated, peripheral. If this is 217 * the case, queue up a deferred call to the peripheral's async 218 * handler. If it looks like a mistaken re-allocation, complain. 219 */ 220 if ((periph = cam_periph_find(path, name)) != NULL) { 221 222 if ((periph->flags & CAM_PERIPH_INVALID) != 0 223 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) { 224 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND; 225 periph->deferred_callback = ac_callback; 226 periph->deferred_ac = code; 227 return (CAM_REQ_INPROG); 228 } else { 229 printf("cam_periph_alloc: attempt to re-allocate " 230 "valid device %s%d rejected flags %#x " 231 "refcount %d\n", periph->periph_name, 232 periph->unit_number, periph->flags, 233 periph->refcount); 234 } 235 return (CAM_REQ_INVALID); 236 } 237 238 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH, 239 M_NOWAIT|M_ZERO); 240 241 if (periph == NULL) 242 return (CAM_RESRC_UNAVAIL); 243 244 init_level++; 245 246 247 sim = xpt_path_sim(path); 248 path_id = xpt_path_path_id(path); 249 target_id = xpt_path_target_id(path); 250 lun_id = xpt_path_lun_id(path); 251 periph->periph_start = periph_start; 252 periph->periph_dtor = periph_dtor; 253 periph->periph_oninval = periph_oninvalidate; 254 periph->type = type; 255 periph->periph_name = name; 256 periph->scheduled_priority = CAM_PRIORITY_NONE; 257 periph->immediate_priority = CAM_PRIORITY_NONE; 258 periph->refcount = 1; /* Dropped by invalidation. */ 259 periph->sim = sim; 260 SLIST_INIT(&periph->ccb_list); 261 status = xpt_create_path(&path, periph, path_id, target_id, lun_id); 262 if (status != CAM_REQ_CMP) 263 goto failure; 264 periph->path = path; 265 266 xpt_lock_buses(); 267 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 268 if (strcmp((*p_drv)->driver_name, name) == 0) 269 break; 270 } 271 if (*p_drv == NULL) { 272 printf("cam_periph_alloc: invalid periph name '%s'\n", name); 273 xpt_unlock_buses(); 274 xpt_free_path(periph->path); 275 free(periph, M_CAMPERIPH); 276 return (CAM_REQ_INVALID); 277 } 278 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id); 279 cur_periph = TAILQ_FIRST(&(*p_drv)->units); 280 while (cur_periph != NULL 281 && cur_periph->unit_number < periph->unit_number) 282 cur_periph = TAILQ_NEXT(cur_periph, unit_links); 283 if (cur_periph != NULL) { 284 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list")); 285 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links); 286 } else { 287 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links); 288 (*p_drv)->generation++; 289 } 290 xpt_unlock_buses(); 291 292 init_level++; 293 294 status = xpt_add_periph(periph); 295 if (status != CAM_REQ_CMP) 296 goto failure; 297 298 init_level++; 299 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n")); 300 301 status = periph_ctor(periph, arg); 302 303 if (status == CAM_REQ_CMP) 304 init_level++; 305 306 failure: 307 switch (init_level) { 308 case 4: 309 /* Initialized successfully */ 310 break; 311 case 3: 312 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n")); 313 xpt_remove_periph(periph); 314 /* FALLTHROUGH */ 315 case 2: 316 xpt_lock_buses(); 317 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); 318 xpt_unlock_buses(); 319 xpt_free_path(periph->path); 320 /* FALLTHROUGH */ 321 case 1: 322 free(periph, M_CAMPERIPH); 323 /* FALLTHROUGH */ 324 case 0: 325 /* No cleanup to perform. */ 326 break; 327 default: 328 panic("%s: Unknown init level", __func__); 329 } 330 return(status); 331 } 332 333 /* 334 * Find a peripheral structure with the specified path, target, lun, 335 * and (optionally) type. If the name is NULL, this function will return 336 * the first peripheral driver that matches the specified path. 337 */ 338 struct cam_periph * 339 cam_periph_find(struct cam_path *path, char *name) 340 { 341 struct periph_driver **p_drv; 342 struct cam_periph *periph; 343 344 xpt_lock_buses(); 345 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 346 347 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0)) 348 continue; 349 350 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { 351 if (xpt_path_comp(periph->path, path) == 0) { 352 xpt_unlock_buses(); 353 cam_periph_assert(periph, MA_OWNED); 354 return(periph); 355 } 356 } 357 if (name != NULL) { 358 xpt_unlock_buses(); 359 return(NULL); 360 } 361 } 362 xpt_unlock_buses(); 363 return(NULL); 364 } 365 366 /* 367 * Find peripheral driver instances attached to the specified path. 368 */ 369 int 370 cam_periph_list(struct cam_path *path, struct sbuf *sb) 371 { 372 struct sbuf local_sb; 373 struct periph_driver **p_drv; 374 struct cam_periph *periph; 375 int count; 376 int sbuf_alloc_len; 377 378 sbuf_alloc_len = 16; 379 retry: 380 sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN); 381 count = 0; 382 xpt_lock_buses(); 383 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 384 385 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { 386 if (xpt_path_comp(periph->path, path) != 0) 387 continue; 388 389 if (sbuf_len(&local_sb) != 0) 390 sbuf_cat(&local_sb, ","); 391 392 sbuf_printf(&local_sb, "%s%d", periph->periph_name, 393 periph->unit_number); 394 395 if (sbuf_error(&local_sb) == ENOMEM) { 396 sbuf_alloc_len *= 2; 397 xpt_unlock_buses(); 398 sbuf_delete(&local_sb); 399 goto retry; 400 } 401 count++; 402 } 403 } 404 xpt_unlock_buses(); 405 sbuf_finish(&local_sb); 406 if (sbuf_len(sb) != 0) 407 sbuf_cat(sb, ","); 408 sbuf_cat(sb, sbuf_data(&local_sb)); 409 sbuf_delete(&local_sb); 410 return (count); 411 } 412 413 int 414 cam_periph_acquire(struct cam_periph *periph) 415 { 416 int status; 417 418 if (periph == NULL) 419 return (EINVAL); 420 421 status = ENOENT; 422 xpt_lock_buses(); 423 if ((periph->flags & CAM_PERIPH_INVALID) == 0) { 424 periph->refcount++; 425 status = 0; 426 } 427 xpt_unlock_buses(); 428 429 return (status); 430 } 431 432 void 433 cam_periph_doacquire(struct cam_periph *periph) 434 { 435 436 xpt_lock_buses(); 437 KASSERT(periph->refcount >= 1, 438 ("cam_periph_doacquire() with refcount == %d", periph->refcount)); 439 periph->refcount++; 440 xpt_unlock_buses(); 441 } 442 443 void 444 cam_periph_release_locked_buses(struct cam_periph *periph) 445 { 446 447 cam_periph_assert(periph, MA_OWNED); 448 KASSERT(periph->refcount >= 1, ("periph->refcount >= 1")); 449 if (--periph->refcount == 0) 450 camperiphfree(periph); 451 } 452 453 void 454 cam_periph_release_locked(struct cam_periph *periph) 455 { 456 457 if (periph == NULL) 458 return; 459 460 xpt_lock_buses(); 461 cam_periph_release_locked_buses(periph); 462 xpt_unlock_buses(); 463 } 464 465 void 466 cam_periph_release(struct cam_periph *periph) 467 { 468 struct mtx *mtx; 469 470 if (periph == NULL) 471 return; 472 473 cam_periph_assert(periph, MA_NOTOWNED); 474 mtx = cam_periph_mtx(periph); 475 mtx_lock(mtx); 476 cam_periph_release_locked(periph); 477 mtx_unlock(mtx); 478 } 479 480 /* 481 * hold/unhold act as mutual exclusion for sections of the code that 482 * need to sleep and want to make sure that other sections that 483 * will interfere are held off. This only protects exclusive sections 484 * from each other. 485 */ 486 int 487 cam_periph_hold(struct cam_periph *periph, int priority) 488 { 489 int error; 490 491 /* 492 * Increment the reference count on the peripheral 493 * while we wait for our lock attempt to succeed 494 * to ensure the peripheral doesn't disappear out 495 * from user us while we sleep. 496 */ 497 498 if (cam_periph_acquire(periph) != 0) 499 return (ENXIO); 500 501 cam_periph_assert(periph, MA_OWNED); 502 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) { 503 periph->flags |= CAM_PERIPH_LOCK_WANTED; 504 if ((error = cam_periph_sleep(periph, periph, priority, 505 "caplck", 0)) != 0) { 506 cam_periph_release_locked(periph); 507 return (error); 508 } 509 if (periph->flags & CAM_PERIPH_INVALID) { 510 cam_periph_release_locked(periph); 511 return (ENXIO); 512 } 513 } 514 515 periph->flags |= CAM_PERIPH_LOCKED; 516 return (0); 517 } 518 519 void 520 cam_periph_unhold(struct cam_periph *periph) 521 { 522 523 cam_periph_assert(periph, MA_OWNED); 524 525 periph->flags &= ~CAM_PERIPH_LOCKED; 526 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) { 527 periph->flags &= ~CAM_PERIPH_LOCK_WANTED; 528 wakeup(periph); 529 } 530 531 cam_periph_release_locked(periph); 532 } 533 534 /* 535 * Look for the next unit number that is not currently in use for this 536 * peripheral type starting at "newunit". Also exclude unit numbers that 537 * are reserved by for future "hardwiring" unless we already know that this 538 * is a potential wired device. Only assume that the device is "wired" the 539 * first time through the loop since after that we'll be looking at unit 540 * numbers that did not match a wiring entry. 541 */ 542 static u_int 543 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired, 544 path_id_t pathid, target_id_t target, lun_id_t lun) 545 { 546 struct cam_periph *periph; 547 char *periph_name; 548 int i, val, dunit, r; 549 const char *dname, *strval; 550 551 periph_name = p_drv->driver_name; 552 for (;;newunit++) { 553 554 for (periph = TAILQ_FIRST(&p_drv->units); 555 periph != NULL && periph->unit_number != newunit; 556 periph = TAILQ_NEXT(periph, unit_links)) 557 ; 558 559 if (periph != NULL && periph->unit_number == newunit) { 560 if (wired != 0) { 561 xpt_print(periph->path, "Duplicate Wired " 562 "Device entry!\n"); 563 xpt_print(periph->path, "Second device (%s " 564 "device at scbus%d target %d lun %d) will " 565 "not be wired\n", periph_name, pathid, 566 target, lun); 567 wired = 0; 568 } 569 continue; 570 } 571 if (wired) 572 break; 573 574 /* 575 * Don't match entries like "da 4" as a wired down 576 * device, but do match entries like "da 4 target 5" 577 * or even "da 4 scbus 1". 578 */ 579 i = 0; 580 dname = periph_name; 581 for (;;) { 582 r = resource_find_dev(&i, dname, &dunit, NULL, NULL); 583 if (r != 0) 584 break; 585 /* if no "target" and no specific scbus, skip */ 586 if (resource_int_value(dname, dunit, "target", &val) && 587 (resource_string_value(dname, dunit, "at",&strval)|| 588 strcmp(strval, "scbus") == 0)) 589 continue; 590 if (newunit == dunit) 591 break; 592 } 593 if (r != 0) 594 break; 595 } 596 return (newunit); 597 } 598 599 static u_int 600 camperiphunit(struct periph_driver *p_drv, path_id_t pathid, 601 target_id_t target, lun_id_t lun) 602 { 603 u_int unit; 604 int wired, i, val, dunit; 605 const char *dname, *strval; 606 char pathbuf[32], *periph_name; 607 608 periph_name = p_drv->driver_name; 609 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid); 610 unit = 0; 611 i = 0; 612 dname = periph_name; 613 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0; 614 wired = 0) { 615 if (resource_string_value(dname, dunit, "at", &strval) == 0) { 616 if (strcmp(strval, pathbuf) != 0) 617 continue; 618 wired++; 619 } 620 if (resource_int_value(dname, dunit, "target", &val) == 0) { 621 if (val != target) 622 continue; 623 wired++; 624 } 625 if (resource_int_value(dname, dunit, "lun", &val) == 0) { 626 if (val != lun) 627 continue; 628 wired++; 629 } 630 if (wired != 0) { 631 unit = dunit; 632 break; 633 } 634 } 635 636 /* 637 * Either start from 0 looking for the next unit or from 638 * the unit number given in the resource config. This way, 639 * if we have wildcard matches, we don't return the same 640 * unit number twice. 641 */ 642 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun); 643 644 return (unit); 645 } 646 647 void 648 cam_periph_invalidate(struct cam_periph *periph) 649 { 650 651 cam_periph_assert(periph, MA_OWNED); 652 /* 653 * We only call this routine the first time a peripheral is 654 * invalidated. 655 */ 656 if ((periph->flags & CAM_PERIPH_INVALID) != 0) 657 return; 658 659 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n")); 660 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) { 661 struct sbuf sb; 662 char buffer[160]; 663 664 sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN); 665 xpt_denounce_periph_sbuf(periph, &sb); 666 sbuf_finish(&sb); 667 sbuf_putbuf(&sb); 668 } 669 periph->flags |= CAM_PERIPH_INVALID; 670 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND; 671 if (periph->periph_oninval != NULL) 672 periph->periph_oninval(periph); 673 cam_periph_release_locked(periph); 674 } 675 676 static void 677 camperiphfree(struct cam_periph *periph) 678 { 679 struct periph_driver **p_drv; 680 struct periph_driver *drv; 681 682 cam_periph_assert(periph, MA_OWNED); 683 KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating", 684 periph->periph_name, periph->unit_number)); 685 KASSERT(periph->path->device->ccbq.dev_active == 0, 686 ("%s%d: freed with %d active CCBs\n", 687 periph->periph_name, periph->unit_number, 688 periph->path->device->ccbq.dev_active)); 689 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 690 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0) 691 break; 692 } 693 if (*p_drv == NULL) { 694 printf("camperiphfree: attempt to free non-existant periph\n"); 695 return; 696 } 697 /* 698 * Cache a pointer to the periph_driver structure. If a 699 * periph_driver is added or removed from the array (see 700 * periphdriver_register()) while we drop the toplogy lock 701 * below, p_drv may change. This doesn't protect against this 702 * particular periph_driver going away. That will require full 703 * reference counting in the periph_driver infrastructure. 704 */ 705 drv = *p_drv; 706 707 /* 708 * We need to set this flag before dropping the topology lock, to 709 * let anyone who is traversing the list that this peripheral is 710 * about to be freed, and there will be no more reference count 711 * checks. 712 */ 713 periph->flags |= CAM_PERIPH_FREE; 714 715 /* 716 * The peripheral destructor semantics dictate calling with only the 717 * SIM mutex held. Since it might sleep, it should not be called 718 * with the topology lock held. 719 */ 720 xpt_unlock_buses(); 721 722 /* 723 * We need to call the peripheral destructor prior to removing the 724 * peripheral from the list. Otherwise, we risk running into a 725 * scenario where the peripheral unit number may get reused 726 * (because it has been removed from the list), but some resources 727 * used by the peripheral are still hanging around. In particular, 728 * the devfs nodes used by some peripherals like the pass(4) driver 729 * aren't fully cleaned up until the destructor is run. If the 730 * unit number is reused before the devfs instance is fully gone, 731 * devfs will panic. 732 */ 733 if (periph->periph_dtor != NULL) 734 periph->periph_dtor(periph); 735 736 /* 737 * The peripheral list is protected by the topology lock. 738 */ 739 xpt_lock_buses(); 740 741 TAILQ_REMOVE(&drv->units, periph, unit_links); 742 drv->generation++; 743 744 xpt_remove_periph(periph); 745 746 xpt_unlock_buses(); 747 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) 748 xpt_print(periph->path, "Periph destroyed\n"); 749 else 750 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n")); 751 752 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) { 753 union ccb ccb; 754 void *arg; 755 756 switch (periph->deferred_ac) { 757 case AC_FOUND_DEVICE: 758 ccb.ccb_h.func_code = XPT_GDEV_TYPE; 759 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 760 xpt_action(&ccb); 761 arg = &ccb; 762 break; 763 case AC_PATH_REGISTERED: 764 xpt_path_inq(&ccb.cpi, periph->path); 765 arg = &ccb; 766 break; 767 default: 768 arg = NULL; 769 break; 770 } 771 periph->deferred_callback(NULL, periph->deferred_ac, 772 periph->path, arg); 773 } 774 xpt_free_path(periph->path); 775 free(periph, M_CAMPERIPH); 776 xpt_lock_buses(); 777 } 778 779 /* 780 * Map user virtual pointers into kernel virtual address space, so we can 781 * access the memory. This is now a generic function that centralizes most 782 * of the sanity checks on the data flags, if any. 783 * This also only works for up to MAXPHYS memory. Since we use 784 * buffers to map stuff in and out, we're limited to the buffer size. 785 */ 786 int 787 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo, 788 u_int maxmap) 789 { 790 int numbufs, i; 791 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; 792 u_int32_t lengths[CAM_PERIPH_MAXMAPS]; 793 u_int32_t dirs[CAM_PERIPH_MAXMAPS]; 794 bool misaligned[CAM_PERIPH_MAXMAPS]; 795 796 bzero(mapinfo, sizeof(*mapinfo)); 797 if (maxmap == 0) 798 maxmap = DFLTPHYS; /* traditional default */ 799 else if (maxmap > MAXPHYS) 800 maxmap = MAXPHYS; /* for safety */ 801 switch(ccb->ccb_h.func_code) { 802 case XPT_DEV_MATCH: 803 if (ccb->cdm.match_buf_len == 0) { 804 printf("cam_periph_mapmem: invalid match buffer " 805 "length 0\n"); 806 return(EINVAL); 807 } 808 if (ccb->cdm.pattern_buf_len > 0) { 809 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; 810 lengths[0] = ccb->cdm.pattern_buf_len; 811 dirs[0] = CAM_DIR_OUT; 812 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; 813 lengths[1] = ccb->cdm.match_buf_len; 814 dirs[1] = CAM_DIR_IN; 815 numbufs = 2; 816 } else { 817 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; 818 lengths[0] = ccb->cdm.match_buf_len; 819 dirs[0] = CAM_DIR_IN; 820 numbufs = 1; 821 } 822 /* 823 * This request will not go to the hardware, no reason 824 * to be so strict. vmapbuf() is able to map up to MAXPHYS. 825 */ 826 maxmap = MAXPHYS; 827 break; 828 case XPT_SCSI_IO: 829 case XPT_CONT_TARGET_IO: 830 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) 831 return(0); 832 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) 833 return (EINVAL); 834 data_ptrs[0] = &ccb->csio.data_ptr; 835 lengths[0] = ccb->csio.dxfer_len; 836 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 837 numbufs = 1; 838 break; 839 case XPT_ATA_IO: 840 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) 841 return(0); 842 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) 843 return (EINVAL); 844 data_ptrs[0] = &ccb->ataio.data_ptr; 845 lengths[0] = ccb->ataio.dxfer_len; 846 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 847 numbufs = 1; 848 break; 849 case XPT_MMC_IO: 850 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) 851 return(0); 852 /* Two mappings: one for cmd->data and one for cmd->data->data */ 853 data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data; 854 lengths[0] = sizeof(struct mmc_data *); 855 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 856 data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data; 857 lengths[1] = ccb->mmcio.cmd.data->len; 858 dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK; 859 numbufs = 2; 860 break; 861 case XPT_SMP_IO: 862 data_ptrs[0] = &ccb->smpio.smp_request; 863 lengths[0] = ccb->smpio.smp_request_len; 864 dirs[0] = CAM_DIR_OUT; 865 data_ptrs[1] = &ccb->smpio.smp_response; 866 lengths[1] = ccb->smpio.smp_response_len; 867 dirs[1] = CAM_DIR_IN; 868 numbufs = 2; 869 break; 870 case XPT_NVME_IO: 871 case XPT_NVME_ADMIN: 872 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) 873 return (0); 874 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) 875 return (EINVAL); 876 data_ptrs[0] = &ccb->nvmeio.data_ptr; 877 lengths[0] = ccb->nvmeio.dxfer_len; 878 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 879 numbufs = 1; 880 break; 881 case XPT_DEV_ADVINFO: 882 if (ccb->cdai.bufsiz == 0) 883 return (0); 884 885 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; 886 lengths[0] = ccb->cdai.bufsiz; 887 dirs[0] = CAM_DIR_IN; 888 numbufs = 1; 889 890 /* 891 * This request will not go to the hardware, no reason 892 * to be so strict. vmapbuf() is able to map up to MAXPHYS. 893 */ 894 maxmap = MAXPHYS; 895 break; 896 default: 897 return(EINVAL); 898 break; /* NOTREACHED */ 899 } 900 901 /* 902 * Check the transfer length and permissions first, so we don't 903 * have to unmap any previously mapped buffers. 904 */ 905 for (i = 0; i < numbufs; i++) { 906 if (lengths[i] > maxmap) { 907 printf("cam_periph_mapmem: attempt to map %lu bytes, " 908 "which is greater than %lu\n", 909 (long)(lengths[i]), (u_long)maxmap); 910 return (E2BIG); 911 } 912 913 /* 914 * The userland data pointer passed in may not be page 915 * aligned. vmapbuf() truncates the address to a page 916 * boundary, so if the address isn't page aligned, we'll 917 * need enough space for the given transfer length, plus 918 * whatever extra space is necessary to make it to the page 919 * boundary. 920 */ 921 misaligned[i] = (lengths[i] + 922 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK) > MAXPHYS); 923 } 924 925 /* 926 * This keeps the kernel stack of current thread from getting 927 * swapped. In low-memory situations where the kernel stack might 928 * otherwise get swapped out, this holds it and allows the thread 929 * to make progress and release the kernel mapped pages sooner. 930 * 931 * XXX KDM should I use P_NOSWAP instead? 932 */ 933 PHOLD(curproc); 934 935 for (i = 0; i < numbufs; i++) { 936 937 /* Save the user's data address. */ 938 mapinfo->orig[i] = *data_ptrs[i]; 939 940 /* 941 * For small buffers use malloc+copyin/copyout instead of 942 * mapping to KVA to avoid expensive TLB shootdowns. For 943 * small allocations malloc is backed by UMA, and so much 944 * cheaper on SMP systems. 945 */ 946 if ((lengths[i] <= periph_mapmem_thresh || misaligned[i]) && 947 ccb->ccb_h.func_code != XPT_MMC_IO) { 948 *data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH, 949 M_WAITOK); 950 if (dirs[i] != CAM_DIR_IN) { 951 if (copyin(mapinfo->orig[i], *data_ptrs[i], 952 lengths[i]) != 0) { 953 free(*data_ptrs[i], M_CAMPERIPH); 954 *data_ptrs[i] = mapinfo->orig[i]; 955 goto fail; 956 } 957 } else 958 bzero(*data_ptrs[i], lengths[i]); 959 continue; 960 } 961 962 /* 963 * Get the buffer. 964 */ 965 mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK); 966 967 /* put our pointer in the data slot */ 968 mapinfo->bp[i]->b_data = *data_ptrs[i]; 969 970 /* set the transfer length, we know it's < MAXPHYS */ 971 mapinfo->bp[i]->b_bufsize = lengths[i]; 972 973 /* set the direction */ 974 mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ? 975 BIO_WRITE : BIO_READ; 976 977 /* 978 * Map the buffer into kernel memory. 979 * 980 * Note that useracc() alone is not a sufficient test. 981 * vmapbuf() can still fail due to a smaller file mapped 982 * into a larger area of VM, or if userland races against 983 * vmapbuf() after the useracc() check. 984 */ 985 if (vmapbuf(mapinfo->bp[i], 1) < 0) { 986 uma_zfree(pbuf_zone, mapinfo->bp[i]); 987 goto fail; 988 } 989 990 /* set our pointer to the new mapped area */ 991 *data_ptrs[i] = mapinfo->bp[i]->b_data; 992 } 993 994 /* 995 * Now that we've gotten this far, change ownership to the kernel 996 * of the buffers so that we don't run afoul of returning to user 997 * space with locks (on the buffer) held. 998 */ 999 for (i = 0; i < numbufs; i++) { 1000 if (mapinfo->bp[i]) 1001 BUF_KERNPROC(mapinfo->bp[i]); 1002 } 1003 1004 mapinfo->num_bufs_used = numbufs; 1005 return(0); 1006 1007 fail: 1008 for (i--; i >= 0; i--) { 1009 if (mapinfo->bp[i]) { 1010 vunmapbuf(mapinfo->bp[i]); 1011 uma_zfree(pbuf_zone, mapinfo->bp[i]); 1012 } else 1013 free(*data_ptrs[i], M_CAMPERIPH); 1014 *data_ptrs[i] = mapinfo->orig[i]; 1015 } 1016 PRELE(curproc); 1017 return(EACCES); 1018 } 1019 1020 /* 1021 * Unmap memory segments mapped into kernel virtual address space by 1022 * cam_periph_mapmem(). 1023 */ 1024 void 1025 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo) 1026 { 1027 int numbufs, i; 1028 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; 1029 u_int32_t lengths[CAM_PERIPH_MAXMAPS]; 1030 u_int32_t dirs[CAM_PERIPH_MAXMAPS]; 1031 1032 if (mapinfo->num_bufs_used <= 0) { 1033 /* nothing to free and the process wasn't held. */ 1034 return; 1035 } 1036 1037 switch (ccb->ccb_h.func_code) { 1038 case XPT_DEV_MATCH: 1039 if (ccb->cdm.pattern_buf_len > 0) { 1040 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; 1041 lengths[0] = ccb->cdm.pattern_buf_len; 1042 dirs[0] = CAM_DIR_OUT; 1043 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; 1044 lengths[1] = ccb->cdm.match_buf_len; 1045 dirs[1] = CAM_DIR_IN; 1046 numbufs = 2; 1047 } else { 1048 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; 1049 lengths[0] = ccb->cdm.match_buf_len; 1050 dirs[0] = CAM_DIR_IN; 1051 numbufs = 1; 1052 } 1053 break; 1054 case XPT_SCSI_IO: 1055 case XPT_CONT_TARGET_IO: 1056 data_ptrs[0] = &ccb->csio.data_ptr; 1057 lengths[0] = ccb->csio.dxfer_len; 1058 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 1059 numbufs = 1; 1060 break; 1061 case XPT_ATA_IO: 1062 data_ptrs[0] = &ccb->ataio.data_ptr; 1063 lengths[0] = ccb->ataio.dxfer_len; 1064 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 1065 numbufs = 1; 1066 break; 1067 case XPT_MMC_IO: 1068 data_ptrs[0] = (u_int8_t **)&ccb->mmcio.cmd.data; 1069 lengths[0] = sizeof(struct mmc_data *); 1070 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 1071 data_ptrs[1] = (u_int8_t **)&ccb->mmcio.cmd.data->data; 1072 lengths[1] = ccb->mmcio.cmd.data->len; 1073 dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK; 1074 numbufs = 2; 1075 break; 1076 case XPT_SMP_IO: 1077 data_ptrs[0] = &ccb->smpio.smp_request; 1078 lengths[0] = ccb->smpio.smp_request_len; 1079 dirs[0] = CAM_DIR_OUT; 1080 data_ptrs[1] = &ccb->smpio.smp_response; 1081 lengths[1] = ccb->smpio.smp_response_len; 1082 dirs[1] = CAM_DIR_IN; 1083 numbufs = 2; 1084 break; 1085 case XPT_NVME_IO: 1086 case XPT_NVME_ADMIN: 1087 data_ptrs[0] = &ccb->nvmeio.data_ptr; 1088 lengths[0] = ccb->nvmeio.dxfer_len; 1089 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 1090 numbufs = 1; 1091 break; 1092 case XPT_DEV_ADVINFO: 1093 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; 1094 lengths[0] = ccb->cdai.bufsiz; 1095 dirs[0] = CAM_DIR_IN; 1096 numbufs = 1; 1097 break; 1098 default: 1099 /* allow ourselves to be swapped once again */ 1100 PRELE(curproc); 1101 return; 1102 break; /* NOTREACHED */ 1103 } 1104 1105 for (i = 0; i < numbufs; i++) { 1106 if (mapinfo->bp[i]) { 1107 /* unmap the buffer */ 1108 vunmapbuf(mapinfo->bp[i]); 1109 1110 /* release the buffer */ 1111 uma_zfree(pbuf_zone, mapinfo->bp[i]); 1112 } else { 1113 if (dirs[i] != CAM_DIR_OUT) { 1114 copyout(*data_ptrs[i], mapinfo->orig[i], 1115 lengths[i]); 1116 } 1117 free(*data_ptrs[i], M_CAMPERIPH); 1118 } 1119 1120 /* Set the user's pointer back to the original value */ 1121 *data_ptrs[i] = mapinfo->orig[i]; 1122 } 1123 1124 /* allow ourselves to be swapped once again */ 1125 PRELE(curproc); 1126 } 1127 1128 int 1129 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr, 1130 int (*error_routine)(union ccb *ccb, 1131 cam_flags camflags, 1132 u_int32_t sense_flags)) 1133 { 1134 union ccb *ccb; 1135 int error; 1136 int found; 1137 1138 error = found = 0; 1139 1140 switch(cmd){ 1141 case CAMGETPASSTHRU: 1142 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 1143 xpt_setup_ccb(&ccb->ccb_h, 1144 ccb->ccb_h.path, 1145 CAM_PRIORITY_NORMAL); 1146 ccb->ccb_h.func_code = XPT_GDEVLIST; 1147 1148 /* 1149 * Basically, the point of this is that we go through 1150 * getting the list of devices, until we find a passthrough 1151 * device. In the current version of the CAM code, the 1152 * only way to determine what type of device we're dealing 1153 * with is by its name. 1154 */ 1155 while (found == 0) { 1156 ccb->cgdl.index = 0; 1157 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; 1158 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) { 1159 1160 /* we want the next device in the list */ 1161 xpt_action(ccb); 1162 if (strncmp(ccb->cgdl.periph_name, 1163 "pass", 4) == 0){ 1164 found = 1; 1165 break; 1166 } 1167 } 1168 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) && 1169 (found == 0)) { 1170 ccb->cgdl.periph_name[0] = '\0'; 1171 ccb->cgdl.unit_number = 0; 1172 break; 1173 } 1174 } 1175 1176 /* copy the result back out */ 1177 bcopy(ccb, addr, sizeof(union ccb)); 1178 1179 /* and release the ccb */ 1180 xpt_release_ccb(ccb); 1181 1182 break; 1183 default: 1184 error = ENOTTY; 1185 break; 1186 } 1187 return(error); 1188 } 1189 1190 static void 1191 cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb) 1192 { 1193 1194 panic("%s: already done with ccb %p", __func__, done_ccb); 1195 } 1196 1197 static void 1198 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb) 1199 { 1200 1201 /* Caller will release the CCB */ 1202 xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED); 1203 done_ccb->ccb_h.cbfcnp = cam_periph_done_panic; 1204 wakeup(&done_ccb->ccb_h.cbfcnp); 1205 } 1206 1207 static void 1208 cam_periph_ccbwait(union ccb *ccb) 1209 { 1210 1211 if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) { 1212 while (ccb->ccb_h.cbfcnp != cam_periph_done_panic) 1213 xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, 1214 PRIBIO, "cbwait", 0); 1215 } 1216 KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX && 1217 (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG, 1218 ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, " 1219 "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code, 1220 ccb->ccb_h.status, ccb->ccb_h.pinfo.index)); 1221 } 1222 1223 /* 1224 * Dispatch a CCB and wait for it to complete. If the CCB has set a 1225 * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost. 1226 */ 1227 int 1228 cam_periph_runccb(union ccb *ccb, 1229 int (*error_routine)(union ccb *ccb, 1230 cam_flags camflags, 1231 u_int32_t sense_flags), 1232 cam_flags camflags, u_int32_t sense_flags, 1233 struct devstat *ds) 1234 { 1235 struct bintime *starttime; 1236 struct bintime ltime; 1237 int error; 1238 bool must_poll; 1239 uint32_t timeout = 1; 1240 1241 starttime = NULL; 1242 xpt_path_assert(ccb->ccb_h.path, MA_OWNED); 1243 KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0, 1244 ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb, 1245 ccb->ccb_h.func_code, ccb->ccb_h.flags)); 1246 1247 /* 1248 * If the user has supplied a stats structure, and if we understand 1249 * this particular type of ccb, record the transaction start. 1250 */ 1251 if (ds != NULL && 1252 (ccb->ccb_h.func_code == XPT_SCSI_IO || 1253 ccb->ccb_h.func_code == XPT_ATA_IO || 1254 ccb->ccb_h.func_code == XPT_NVME_IO)) { 1255 starttime = <ime; 1256 binuptime(starttime); 1257 devstat_start_transaction(ds, starttime); 1258 } 1259 1260 /* 1261 * We must poll the I/O while we're dumping. The scheduler is normally 1262 * stopped for dumping, except when we call doadump from ddb. While the 1263 * scheduler is running in this case, we still need to poll the I/O to 1264 * avoid sleeping waiting for the ccb to complete. 1265 * 1266 * A panic triggered dump stops the scheduler, any callback from the 1267 * shutdown_post_sync event will run with the scheduler stopped, but 1268 * before we're officially dumping. To avoid hanging in adashutdown 1269 * initiated commands (or other similar situations), we have to test for 1270 * either SCHEDULER_STOPPED() here as well. 1271 * 1272 * To avoid locking problems, dumping/polling callers must call 1273 * without a periph lock held. 1274 */ 1275 must_poll = dumping || SCHEDULER_STOPPED(); 1276 ccb->ccb_h.cbfcnp = cam_periph_done; 1277 1278 /* 1279 * If we're polling, then we need to ensure that we have ample resources 1280 * in the periph. cam_periph_error can reschedule the ccb by calling 1281 * xpt_action and returning ERESTART, so we have to effect the polling 1282 * in the do loop below. 1283 */ 1284 if (must_poll) { 1285 timeout = xpt_poll_setup(ccb); 1286 } 1287 1288 if (timeout == 0) { 1289 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1290 error = EBUSY; 1291 } else { 1292 xpt_action(ccb); 1293 do { 1294 if (must_poll) { 1295 xpt_pollwait(ccb, timeout); 1296 timeout = ccb->ccb_h.timeout * 10; 1297 } else { 1298 cam_periph_ccbwait(ccb); 1299 } 1300 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) 1301 error = 0; 1302 else if (error_routine != NULL) { 1303 ccb->ccb_h.cbfcnp = cam_periph_done; 1304 error = (*error_routine)(ccb, camflags, sense_flags); 1305 } else 1306 error = 0; 1307 } while (error == ERESTART); 1308 } 1309 1310 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1311 cam_release_devq(ccb->ccb_h.path, 1312 /* relsim_flags */0, 1313 /* openings */0, 1314 /* timeout */0, 1315 /* getcount_only */ FALSE); 1316 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1317 } 1318 1319 if (ds != NULL) { 1320 uint32_t bytes; 1321 devstat_tag_type tag; 1322 bool valid = true; 1323 1324 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 1325 bytes = ccb->csio.dxfer_len - ccb->csio.resid; 1326 tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3); 1327 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1328 bytes = ccb->ataio.dxfer_len - ccb->ataio.resid; 1329 tag = (devstat_tag_type)0; 1330 } else if (ccb->ccb_h.func_code == XPT_NVME_IO) { 1331 bytes = ccb->nvmeio.dxfer_len; /* NB: resid no possible */ 1332 tag = (devstat_tag_type)0; 1333 } else { 1334 valid = false; 1335 } 1336 if (valid) 1337 devstat_end_transaction(ds, bytes, tag, 1338 ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? 1339 DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1340 DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime); 1341 } 1342 1343 return(error); 1344 } 1345 1346 void 1347 cam_freeze_devq(struct cam_path *path) 1348 { 1349 struct ccb_hdr ccb_h; 1350 1351 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n")); 1352 xpt_setup_ccb(&ccb_h, path, /*priority*/1); 1353 ccb_h.func_code = XPT_NOOP; 1354 ccb_h.flags = CAM_DEV_QFREEZE; 1355 xpt_action((union ccb *)&ccb_h); 1356 } 1357 1358 u_int32_t 1359 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags, 1360 u_int32_t openings, u_int32_t arg, 1361 int getcount_only) 1362 { 1363 struct ccb_relsim crs; 1364 1365 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n", 1366 relsim_flags, openings, arg, getcount_only)); 1367 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 1368 crs.ccb_h.func_code = XPT_REL_SIMQ; 1369 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0; 1370 crs.release_flags = relsim_flags; 1371 crs.openings = openings; 1372 crs.release_timeout = arg; 1373 xpt_action((union ccb *)&crs); 1374 return (crs.qfrozen_cnt); 1375 } 1376 1377 #define saved_ccb_ptr ppriv_ptr0 1378 static void 1379 camperiphdone(struct cam_periph *periph, union ccb *done_ccb) 1380 { 1381 union ccb *saved_ccb; 1382 cam_status status; 1383 struct scsi_start_stop_unit *scsi_cmd; 1384 int error = 0, error_code, sense_key, asc, ascq; 1385 1386 scsi_cmd = (struct scsi_start_stop_unit *) 1387 &done_ccb->csio.cdb_io.cdb_bytes; 1388 status = done_ccb->ccb_h.status; 1389 1390 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1391 if (scsi_extract_sense_ccb(done_ccb, 1392 &error_code, &sense_key, &asc, &ascq)) { 1393 /* 1394 * If the error is "invalid field in CDB", 1395 * and the load/eject flag is set, turn the 1396 * flag off and try again. This is just in 1397 * case the drive in question barfs on the 1398 * load eject flag. The CAM code should set 1399 * the load/eject flag by default for 1400 * removable media. 1401 */ 1402 if ((scsi_cmd->opcode == START_STOP_UNIT) && 1403 ((scsi_cmd->how & SSS_LOEJ) != 0) && 1404 (asc == 0x24) && (ascq == 0x00)) { 1405 scsi_cmd->how &= ~SSS_LOEJ; 1406 if (status & CAM_DEV_QFRZN) { 1407 cam_release_devq(done_ccb->ccb_h.path, 1408 0, 0, 0, 0); 1409 done_ccb->ccb_h.status &= 1410 ~CAM_DEV_QFRZN; 1411 } 1412 xpt_action(done_ccb); 1413 goto out; 1414 } 1415 } 1416 error = cam_periph_error(done_ccb, 0, 1417 SF_RETRY_UA | SF_NO_PRINT); 1418 if (error == ERESTART) 1419 goto out; 1420 if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) { 1421 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); 1422 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1423 } 1424 } else { 1425 /* 1426 * If we have successfully taken a device from the not 1427 * ready to ready state, re-scan the device and re-get 1428 * the inquiry information. Many devices (mostly disks) 1429 * don't properly report their inquiry information unless 1430 * they are spun up. 1431 */ 1432 if (scsi_cmd->opcode == START_STOP_UNIT) 1433 xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL); 1434 } 1435 1436 /* If we tried long wait and still failed, remember that. */ 1437 if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) && 1438 (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) { 1439 periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT; 1440 if (error != 0 && done_ccb->ccb_h.retry_count == 0) 1441 periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED; 1442 } 1443 1444 /* 1445 * After recovery action(s) completed, return to the original CCB. 1446 * If the recovery CCB has failed, considering its own possible 1447 * retries and recovery, assume we are back in state where we have 1448 * been originally, but without recovery hopes left. In such case, 1449 * after the final attempt below, we cancel any further retries, 1450 * blocking by that also any new recovery attempts for this CCB, 1451 * and the result will be the final one returned to the CCB owher. 1452 */ 1453 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr; 1454 bcopy(saved_ccb, done_ccb, sizeof(*done_ccb)); 1455 xpt_free_ccb(saved_ccb); 1456 if (done_ccb->ccb_h.cbfcnp != camperiphdone) 1457 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG; 1458 if (error != 0) 1459 done_ccb->ccb_h.retry_count = 0; 1460 xpt_action(done_ccb); 1461 1462 out: 1463 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ 1464 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); 1465 } 1466 1467 /* 1468 * Generic Async Event handler. Peripheral drivers usually 1469 * filter out the events that require personal attention, 1470 * and leave the rest to this function. 1471 */ 1472 void 1473 cam_periph_async(struct cam_periph *periph, u_int32_t code, 1474 struct cam_path *path, void *arg) 1475 { 1476 switch (code) { 1477 case AC_LOST_DEVICE: 1478 cam_periph_invalidate(periph); 1479 break; 1480 default: 1481 break; 1482 } 1483 } 1484 1485 void 1486 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle) 1487 { 1488 struct ccb_getdevstats cgds; 1489 1490 xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 1491 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1492 xpt_action((union ccb *)&cgds); 1493 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle); 1494 } 1495 1496 void 1497 cam_periph_freeze_after_event(struct cam_periph *periph, 1498 struct timeval* event_time, u_int duration_ms) 1499 { 1500 struct timeval delta; 1501 struct timeval duration_tv; 1502 1503 if (!timevalisset(event_time)) 1504 return; 1505 1506 microtime(&delta); 1507 timevalsub(&delta, event_time); 1508 duration_tv.tv_sec = duration_ms / 1000; 1509 duration_tv.tv_usec = (duration_ms % 1000) * 1000; 1510 if (timevalcmp(&delta, &duration_tv, <)) { 1511 timevalsub(&duration_tv, &delta); 1512 1513 duration_ms = duration_tv.tv_sec * 1000; 1514 duration_ms += duration_tv.tv_usec / 1000; 1515 cam_freeze_devq(periph->path); 1516 cam_release_devq(periph->path, 1517 RELSIM_RELEASE_AFTER_TIMEOUT, 1518 /*reduction*/0, 1519 /*timeout*/duration_ms, 1520 /*getcount_only*/0); 1521 } 1522 1523 } 1524 1525 static int 1526 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb, 1527 cam_flags camflags, u_int32_t sense_flags, 1528 int *openings, u_int32_t *relsim_flags, 1529 u_int32_t *timeout, u_int32_t *action, const char **action_string) 1530 { 1531 struct cam_periph *periph; 1532 int error; 1533 1534 switch (ccb->csio.scsi_status) { 1535 case SCSI_STATUS_OK: 1536 case SCSI_STATUS_COND_MET: 1537 case SCSI_STATUS_INTERMED: 1538 case SCSI_STATUS_INTERMED_COND_MET: 1539 error = 0; 1540 break; 1541 case SCSI_STATUS_CMD_TERMINATED: 1542 case SCSI_STATUS_CHECK_COND: 1543 error = camperiphscsisenseerror(ccb, orig_ccb, 1544 camflags, 1545 sense_flags, 1546 openings, 1547 relsim_flags, 1548 timeout, 1549 action, 1550 action_string); 1551 break; 1552 case SCSI_STATUS_QUEUE_FULL: 1553 { 1554 /* no decrement */ 1555 struct ccb_getdevstats cgds; 1556 1557 /* 1558 * First off, find out what the current 1559 * transaction counts are. 1560 */ 1561 xpt_setup_ccb(&cgds.ccb_h, 1562 ccb->ccb_h.path, 1563 CAM_PRIORITY_NORMAL); 1564 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1565 xpt_action((union ccb *)&cgds); 1566 1567 /* 1568 * If we were the only transaction active, treat 1569 * the QUEUE FULL as if it were a BUSY condition. 1570 */ 1571 if (cgds.dev_active != 0) { 1572 int total_openings; 1573 1574 /* 1575 * Reduce the number of openings to 1576 * be 1 less than the amount it took 1577 * to get a queue full bounded by the 1578 * minimum allowed tag count for this 1579 * device. 1580 */ 1581 total_openings = cgds.dev_active + cgds.dev_openings; 1582 *openings = cgds.dev_active; 1583 if (*openings < cgds.mintags) 1584 *openings = cgds.mintags; 1585 if (*openings < total_openings) 1586 *relsim_flags = RELSIM_ADJUST_OPENINGS; 1587 else { 1588 /* 1589 * Some devices report queue full for 1590 * temporary resource shortages. For 1591 * this reason, we allow a minimum 1592 * tag count to be entered via a 1593 * quirk entry to prevent the queue 1594 * count on these devices from falling 1595 * to a pessimisticly low value. We 1596 * still wait for the next successful 1597 * completion, however, before queueing 1598 * more transactions to the device. 1599 */ 1600 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT; 1601 } 1602 *timeout = 0; 1603 error = ERESTART; 1604 *action &= ~SSQ_PRINT_SENSE; 1605 break; 1606 } 1607 /* FALLTHROUGH */ 1608 } 1609 case SCSI_STATUS_BUSY: 1610 /* 1611 * Restart the queue after either another 1612 * command completes or a 1 second timeout. 1613 */ 1614 periph = xpt_path_periph(ccb->ccb_h.path); 1615 if (periph->flags & CAM_PERIPH_INVALID) { 1616 error = EIO; 1617 *action_string = "Periph was invalidated"; 1618 } else if ((sense_flags & SF_RETRY_BUSY) != 0 || 1619 ccb->ccb_h.retry_count > 0) { 1620 if ((sense_flags & SF_RETRY_BUSY) == 0) 1621 ccb->ccb_h.retry_count--; 1622 error = ERESTART; 1623 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT 1624 | RELSIM_RELEASE_AFTER_CMDCMPLT; 1625 *timeout = 1000; 1626 } else { 1627 error = EIO; 1628 *action_string = "Retries exhausted"; 1629 } 1630 break; 1631 case SCSI_STATUS_RESERV_CONFLICT: 1632 default: 1633 error = EIO; 1634 break; 1635 } 1636 return (error); 1637 } 1638 1639 static int 1640 camperiphscsisenseerror(union ccb *ccb, union ccb **orig, 1641 cam_flags camflags, u_int32_t sense_flags, 1642 int *openings, u_int32_t *relsim_flags, 1643 u_int32_t *timeout, u_int32_t *action, const char **action_string) 1644 { 1645 struct cam_periph *periph; 1646 union ccb *orig_ccb = ccb; 1647 int error, recoveryccb; 1648 1649 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 1650 if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL) 1651 biotrack(ccb->csio.bio, __func__); 1652 #endif 1653 1654 periph = xpt_path_periph(ccb->ccb_h.path); 1655 recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone); 1656 if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) { 1657 /* 1658 * If error recovery is already in progress, don't attempt 1659 * to process this error, but requeue it unconditionally 1660 * and attempt to process it once error recovery has 1661 * completed. This failed command is probably related to 1662 * the error that caused the currently active error recovery 1663 * action so our current recovery efforts should also 1664 * address this command. Be aware that the error recovery 1665 * code assumes that only one recovery action is in progress 1666 * on a particular peripheral instance at any given time 1667 * (e.g. only one saved CCB for error recovery) so it is 1668 * imperitive that we don't violate this assumption. 1669 */ 1670 error = ERESTART; 1671 *action &= ~SSQ_PRINT_SENSE; 1672 } else { 1673 scsi_sense_action err_action; 1674 struct ccb_getdev cgd; 1675 1676 /* 1677 * Grab the inquiry data for this device. 1678 */ 1679 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); 1680 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 1681 xpt_action((union ccb *)&cgd); 1682 1683 err_action = scsi_error_action(&ccb->csio, &cgd.inq_data, 1684 sense_flags); 1685 error = err_action & SS_ERRMASK; 1686 1687 /* 1688 * Do not autostart sequential access devices 1689 * to avoid unexpected tape loading. 1690 */ 1691 if ((err_action & SS_MASK) == SS_START && 1692 SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) { 1693 *action_string = "Will not autostart a " 1694 "sequential access device"; 1695 goto sense_error_done; 1696 } 1697 1698 /* 1699 * Avoid recovery recursion if recovery action is the same. 1700 */ 1701 if ((err_action & SS_MASK) >= SS_START && recoveryccb) { 1702 if (((err_action & SS_MASK) == SS_START && 1703 ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) || 1704 ((err_action & SS_MASK) == SS_TUR && 1705 (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) { 1706 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO; 1707 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1708 *timeout = 500; 1709 } 1710 } 1711 1712 /* 1713 * If the recovery action will consume a retry, 1714 * make sure we actually have retries available. 1715 */ 1716 if ((err_action & SSQ_DECREMENT_COUNT) != 0) { 1717 if (ccb->ccb_h.retry_count > 0 && 1718 (periph->flags & CAM_PERIPH_INVALID) == 0) 1719 ccb->ccb_h.retry_count--; 1720 else { 1721 *action_string = "Retries exhausted"; 1722 goto sense_error_done; 1723 } 1724 } 1725 1726 if ((err_action & SS_MASK) >= SS_START) { 1727 /* 1728 * Do common portions of commands that 1729 * use recovery CCBs. 1730 */ 1731 orig_ccb = xpt_alloc_ccb_nowait(); 1732 if (orig_ccb == NULL) { 1733 *action_string = "Can't allocate recovery CCB"; 1734 goto sense_error_done; 1735 } 1736 /* 1737 * Clear freeze flag for original request here, as 1738 * this freeze will be dropped as part of ERESTART. 1739 */ 1740 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1741 bcopy(ccb, orig_ccb, sizeof(*orig_ccb)); 1742 } 1743 1744 switch (err_action & SS_MASK) { 1745 case SS_NOP: 1746 *action_string = "No recovery action needed"; 1747 error = 0; 1748 break; 1749 case SS_RETRY: 1750 *action_string = "Retrying command (per sense data)"; 1751 error = ERESTART; 1752 break; 1753 case SS_FAIL: 1754 *action_string = "Unretryable error"; 1755 break; 1756 case SS_START: 1757 { 1758 int le; 1759 1760 /* 1761 * Send a start unit command to the device, and 1762 * then retry the command. 1763 */ 1764 *action_string = "Attempting to start unit"; 1765 periph->flags |= CAM_PERIPH_RECOVERY_INPROG; 1766 1767 /* 1768 * Check for removable media and set 1769 * load/eject flag appropriately. 1770 */ 1771 if (SID_IS_REMOVABLE(&cgd.inq_data)) 1772 le = TRUE; 1773 else 1774 le = FALSE; 1775 1776 scsi_start_stop(&ccb->csio, 1777 /*retries*/1, 1778 camperiphdone, 1779 MSG_SIMPLE_Q_TAG, 1780 /*start*/TRUE, 1781 /*load/eject*/le, 1782 /*immediate*/FALSE, 1783 SSD_FULL_SIZE, 1784 /*timeout*/50000); 1785 break; 1786 } 1787 case SS_TUR: 1788 { 1789 /* 1790 * Send a Test Unit Ready to the device. 1791 * If the 'many' flag is set, we send 120 1792 * test unit ready commands, one every half 1793 * second. Otherwise, we just send one TUR. 1794 * We only want to do this if the retry 1795 * count has not been exhausted. 1796 */ 1797 int retries; 1798 1799 if ((err_action & SSQ_MANY) != 0 && (periph->flags & 1800 CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) { 1801 periph->flags |= CAM_PERIPH_RECOVERY_WAIT; 1802 *action_string = "Polling device for readiness"; 1803 retries = 120; 1804 } else { 1805 *action_string = "Testing device for readiness"; 1806 retries = 1; 1807 } 1808 periph->flags |= CAM_PERIPH_RECOVERY_INPROG; 1809 scsi_test_unit_ready(&ccb->csio, 1810 retries, 1811 camperiphdone, 1812 MSG_SIMPLE_Q_TAG, 1813 SSD_FULL_SIZE, 1814 /*timeout*/5000); 1815 1816 /* 1817 * Accomplish our 500ms delay by deferring 1818 * the release of our device queue appropriately. 1819 */ 1820 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1821 *timeout = 500; 1822 break; 1823 } 1824 default: 1825 panic("Unhandled error action %x", err_action); 1826 } 1827 1828 if ((err_action & SS_MASK) >= SS_START) { 1829 /* 1830 * Drop the priority, so that the recovery 1831 * CCB is the first to execute. Freeze the queue 1832 * after this command is sent so that we can 1833 * restore the old csio and have it queued in 1834 * the proper order before we release normal 1835 * transactions to the device. 1836 */ 1837 ccb->ccb_h.pinfo.priority--; 1838 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1839 ccb->ccb_h.saved_ccb_ptr = orig_ccb; 1840 error = ERESTART; 1841 *orig = orig_ccb; 1842 } 1843 1844 sense_error_done: 1845 *action = err_action; 1846 } 1847 return (error); 1848 } 1849 1850 /* 1851 * Generic error handler. Peripheral drivers usually filter 1852 * out the errors that they handle in a unique manner, then 1853 * call this function. 1854 */ 1855 int 1856 cam_periph_error(union ccb *ccb, cam_flags camflags, 1857 u_int32_t sense_flags) 1858 { 1859 struct cam_path *newpath; 1860 union ccb *orig_ccb, *scan_ccb; 1861 struct cam_periph *periph; 1862 const char *action_string; 1863 cam_status status; 1864 int frozen, error, openings, devctl_err; 1865 u_int32_t action, relsim_flags, timeout; 1866 1867 action = SSQ_PRINT_SENSE; 1868 periph = xpt_path_periph(ccb->ccb_h.path); 1869 action_string = NULL; 1870 status = ccb->ccb_h.status; 1871 frozen = (status & CAM_DEV_QFRZN) != 0; 1872 status &= CAM_STATUS_MASK; 1873 devctl_err = openings = relsim_flags = timeout = 0; 1874 orig_ccb = ccb; 1875 1876 /* Filter the errors that should be reported via devctl */ 1877 switch (ccb->ccb_h.status & CAM_STATUS_MASK) { 1878 case CAM_CMD_TIMEOUT: 1879 case CAM_REQ_ABORTED: 1880 case CAM_REQ_CMP_ERR: 1881 case CAM_REQ_TERMIO: 1882 case CAM_UNREC_HBA_ERROR: 1883 case CAM_DATA_RUN_ERR: 1884 case CAM_SCSI_STATUS_ERROR: 1885 case CAM_ATA_STATUS_ERROR: 1886 case CAM_SMP_STATUS_ERROR: 1887 devctl_err++; 1888 break; 1889 default: 1890 break; 1891 } 1892 1893 switch (status) { 1894 case CAM_REQ_CMP: 1895 error = 0; 1896 action &= ~SSQ_PRINT_SENSE; 1897 break; 1898 case CAM_SCSI_STATUS_ERROR: 1899 error = camperiphscsistatuserror(ccb, &orig_ccb, 1900 camflags, sense_flags, &openings, &relsim_flags, 1901 &timeout, &action, &action_string); 1902 break; 1903 case CAM_AUTOSENSE_FAIL: 1904 error = EIO; /* we have to kill the command */ 1905 break; 1906 case CAM_UA_ABORT: 1907 case CAM_UA_TERMIO: 1908 case CAM_MSG_REJECT_REC: 1909 /* XXX Don't know that these are correct */ 1910 error = EIO; 1911 break; 1912 case CAM_SEL_TIMEOUT: 1913 if ((camflags & CAM_RETRY_SELTO) != 0) { 1914 if (ccb->ccb_h.retry_count > 0 && 1915 (periph->flags & CAM_PERIPH_INVALID) == 0) { 1916 ccb->ccb_h.retry_count--; 1917 error = ERESTART; 1918 1919 /* 1920 * Wait a bit to give the device 1921 * time to recover before we try again. 1922 */ 1923 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1924 timeout = periph_selto_delay; 1925 break; 1926 } 1927 action_string = "Retries exhausted"; 1928 } 1929 /* FALLTHROUGH */ 1930 case CAM_DEV_NOT_THERE: 1931 error = ENXIO; 1932 action = SSQ_LOST; 1933 break; 1934 case CAM_REQ_INVALID: 1935 case CAM_PATH_INVALID: 1936 case CAM_NO_HBA: 1937 case CAM_PROVIDE_FAIL: 1938 case CAM_REQ_TOO_BIG: 1939 case CAM_LUN_INVALID: 1940 case CAM_TID_INVALID: 1941 case CAM_FUNC_NOTAVAIL: 1942 error = EINVAL; 1943 break; 1944 case CAM_SCSI_BUS_RESET: 1945 case CAM_BDR_SENT: 1946 /* 1947 * Commands that repeatedly timeout and cause these 1948 * kinds of error recovery actions, should return 1949 * CAM_CMD_TIMEOUT, which allows us to safely assume 1950 * that this command was an innocent bystander to 1951 * these events and should be unconditionally 1952 * retried. 1953 */ 1954 case CAM_REQUEUE_REQ: 1955 /* Unconditional requeue if device is still there */ 1956 if (periph->flags & CAM_PERIPH_INVALID) { 1957 action_string = "Periph was invalidated"; 1958 error = EIO; 1959 } else if (sense_flags & SF_NO_RETRY) { 1960 error = EIO; 1961 action_string = "Retry was blocked"; 1962 } else { 1963 error = ERESTART; 1964 action &= ~SSQ_PRINT_SENSE; 1965 } 1966 break; 1967 case CAM_RESRC_UNAVAIL: 1968 /* Wait a bit for the resource shortage to abate. */ 1969 timeout = periph_noresrc_delay; 1970 /* FALLTHROUGH */ 1971 case CAM_BUSY: 1972 if (timeout == 0) { 1973 /* Wait a bit for the busy condition to abate. */ 1974 timeout = periph_busy_delay; 1975 } 1976 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1977 /* FALLTHROUGH */ 1978 case CAM_ATA_STATUS_ERROR: 1979 case CAM_REQ_CMP_ERR: 1980 case CAM_CMD_TIMEOUT: 1981 case CAM_UNEXP_BUSFREE: 1982 case CAM_UNCOR_PARITY: 1983 case CAM_DATA_RUN_ERR: 1984 default: 1985 if (periph->flags & CAM_PERIPH_INVALID) { 1986 error = EIO; 1987 action_string = "Periph was invalidated"; 1988 } else if (ccb->ccb_h.retry_count == 0) { 1989 error = EIO; 1990 action_string = "Retries exhausted"; 1991 } else if (sense_flags & SF_NO_RETRY) { 1992 error = EIO; 1993 action_string = "Retry was blocked"; 1994 } else { 1995 ccb->ccb_h.retry_count--; 1996 error = ERESTART; 1997 } 1998 break; 1999 } 2000 2001 if ((sense_flags & SF_PRINT_ALWAYS) || 2002 CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO)) 2003 action |= SSQ_PRINT_SENSE; 2004 else if (sense_flags & SF_NO_PRINT) 2005 action &= ~SSQ_PRINT_SENSE; 2006 if ((action & SSQ_PRINT_SENSE) != 0) 2007 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL); 2008 if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) { 2009 if (error != ERESTART) { 2010 if (action_string == NULL) 2011 action_string = "Unretryable error"; 2012 xpt_print(ccb->ccb_h.path, "Error %d, %s\n", 2013 error, action_string); 2014 } else if (action_string != NULL) 2015 xpt_print(ccb->ccb_h.path, "%s\n", action_string); 2016 else { 2017 xpt_print(ccb->ccb_h.path, 2018 "Retrying command, %d more tries remain\n", 2019 ccb->ccb_h.retry_count); 2020 } 2021 } 2022 2023 if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0)) 2024 cam_periph_devctl_notify(orig_ccb); 2025 2026 if ((action & SSQ_LOST) != 0) { 2027 lun_id_t lun_id; 2028 2029 /* 2030 * For a selection timeout, we consider all of the LUNs on 2031 * the target to be gone. If the status is CAM_DEV_NOT_THERE, 2032 * then we only get rid of the device(s) specified by the 2033 * path in the original CCB. 2034 */ 2035 if (status == CAM_SEL_TIMEOUT) 2036 lun_id = CAM_LUN_WILDCARD; 2037 else 2038 lun_id = xpt_path_lun_id(ccb->ccb_h.path); 2039 2040 /* Should we do more if we can't create the path?? */ 2041 if (xpt_create_path(&newpath, periph, 2042 xpt_path_path_id(ccb->ccb_h.path), 2043 xpt_path_target_id(ccb->ccb_h.path), 2044 lun_id) == CAM_REQ_CMP) { 2045 2046 /* 2047 * Let peripheral drivers know that this 2048 * device has gone away. 2049 */ 2050 xpt_async(AC_LOST_DEVICE, newpath, NULL); 2051 xpt_free_path(newpath); 2052 } 2053 } 2054 2055 /* Broadcast UNIT ATTENTIONs to all periphs. */ 2056 if ((action & SSQ_UA) != 0) 2057 xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb); 2058 2059 /* Rescan target on "Reported LUNs data has changed" */ 2060 if ((action & SSQ_RESCAN) != 0) { 2061 if (xpt_create_path(&newpath, NULL, 2062 xpt_path_path_id(ccb->ccb_h.path), 2063 xpt_path_target_id(ccb->ccb_h.path), 2064 CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 2065 2066 scan_ccb = xpt_alloc_ccb_nowait(); 2067 if (scan_ccb != NULL) { 2068 scan_ccb->ccb_h.path = newpath; 2069 scan_ccb->ccb_h.func_code = XPT_SCAN_TGT; 2070 scan_ccb->crcn.flags = 0; 2071 xpt_rescan(scan_ccb); 2072 } else { 2073 xpt_print(newpath, 2074 "Can't allocate CCB to rescan target\n"); 2075 xpt_free_path(newpath); 2076 } 2077 } 2078 } 2079 2080 /* Attempt a retry */ 2081 if (error == ERESTART || error == 0) { 2082 if (frozen != 0) 2083 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 2084 if (error == ERESTART) 2085 xpt_action(ccb); 2086 if (frozen != 0) 2087 cam_release_devq(ccb->ccb_h.path, 2088 relsim_flags, 2089 openings, 2090 timeout, 2091 /*getcount_only*/0); 2092 } 2093 2094 return (error); 2095 } 2096 2097 #define CAM_PERIPH_DEVD_MSG_SIZE 256 2098 2099 static void 2100 cam_periph_devctl_notify(union ccb *ccb) 2101 { 2102 struct cam_periph *periph; 2103 struct ccb_getdev *cgd; 2104 struct sbuf sb; 2105 int serr, sk, asc, ascq; 2106 char *sbmsg, *type; 2107 2108 sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT); 2109 if (sbmsg == NULL) 2110 return; 2111 2112 sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN); 2113 2114 periph = xpt_path_periph(ccb->ccb_h.path); 2115 sbuf_printf(&sb, "device=%s%d ", periph->periph_name, 2116 periph->unit_number); 2117 2118 sbuf_printf(&sb, "serial=\""); 2119 if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) { 2120 xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path, 2121 CAM_PRIORITY_NORMAL); 2122 cgd->ccb_h.func_code = XPT_GDEV_TYPE; 2123 xpt_action((union ccb *)cgd); 2124 2125 if (cgd->ccb_h.status == CAM_REQ_CMP) 2126 sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len); 2127 xpt_free_ccb((union ccb *)cgd); 2128 } 2129 sbuf_printf(&sb, "\" "); 2130 sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status); 2131 2132 switch (ccb->ccb_h.status & CAM_STATUS_MASK) { 2133 case CAM_CMD_TIMEOUT: 2134 sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout); 2135 type = "timeout"; 2136 break; 2137 case CAM_SCSI_STATUS_ERROR: 2138 sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status); 2139 if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq)) 2140 sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ", 2141 serr, sk, asc, ascq); 2142 type = "error"; 2143 break; 2144 case CAM_ATA_STATUS_ERROR: 2145 sbuf_printf(&sb, "RES=\""); 2146 ata_res_sbuf(&ccb->ataio.res, &sb); 2147 sbuf_printf(&sb, "\" "); 2148 type = "error"; 2149 break; 2150 default: 2151 type = "error"; 2152 break; 2153 } 2154 2155 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 2156 sbuf_printf(&sb, "CDB=\""); 2157 scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb); 2158 sbuf_printf(&sb, "\" "); 2159 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) { 2160 sbuf_printf(&sb, "ACB=\""); 2161 ata_cmd_sbuf(&ccb->ataio.cmd, &sb); 2162 sbuf_printf(&sb, "\" "); 2163 } 2164 2165 if (sbuf_finish(&sb) == 0) 2166 devctl_notify("CAM", "periph", type, sbuf_data(&sb)); 2167 sbuf_delete(&sb); 2168 free(sbmsg, M_CAMPERIPH); 2169 } 2170 2171 /* 2172 * Sysctl to force an invalidation of the drive right now. Can be 2173 * called with CTLFLAG_MPSAFE since we take periph lock. 2174 */ 2175 int 2176 cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS) 2177 { 2178 struct cam_periph *periph; 2179 int error, value; 2180 2181 periph = arg1; 2182 value = 0; 2183 error = sysctl_handle_int(oidp, &value, 0, req); 2184 if (error != 0 || req->newptr == NULL || value != 1) 2185 return (error); 2186 2187 cam_periph_lock(periph); 2188 cam_periph_invalidate(periph); 2189 cam_periph_unlock(periph); 2190 2191 return (0); 2192 } 2193