/*
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_periph.c,v 1.70 2008/02/12 11:07:33 raj Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_xpt_periph.h"
#include "cam_periph.h"
#include "cam_debug.h"
#include "cam_sim.h"

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_pass.h>

static u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static u_int		camperiphunit(struct periph_driver *p_drv,
				      struct cam_sim *sim, path_id_t pathid,
				      target_id_t target, lun_id_t lun);
static void		camperiphdone(struct cam_periph *periph,
				      union ccb *done_ccb);
static void		camperiphfree(struct cam_periph *periph);
static int		camperiphscsistatuserror(union ccb *ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 union ccb *save_ccb,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout);
static int		camperiphscsisenseerror(union ccb *ccb,
						cam_flags camflags,
						u_int32_t sense_flags,
						union ccb *save_ccb,
						int *openings,
						u_int32_t *relsim_flags,
						u_int32_t *timeout);
static void		cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo,
					     u_int8_t ***data_ptrs, int numbufs);

static int nperiph_drivers;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
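/*
 * These delays are pulled from the kernel environment at boot via
 * TUNABLE_INT(), so on a typical system they can be overridden from
 * /boot/loader.conf; for example (value in milliseconds, shown purely
 * as an illustration):
 *
 *	kern.cam.periph_selto_delay="2000"
 */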
void
periphdriver_register(void *data)
{
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = kmalloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			     M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		kfree(old, M_CAMPERIPH);
	nperiph_drivers++;
}

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			kprintf("cam_periph_alloc: attempt to re-allocate "
				"valid device %s%d rejected\n",
				periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = kmalloc(sizeof(*periph), M_CAMPERIPH, M_INTWAIT | M_ZERO);

	init_level++;	/* 1 */

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	xpt_unlock_buses();

	sim = xpt_path_sim(path);
	CAM_SIM_LOCK(sim);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;	/* 2 */

	periph->path = path;

	/*
	 * Finalize with buses locked.  Allocate unit number and add to
	 * list to reserve the unit number.  Undo later if the XPT fails.
	 */
	xpt_lock_buses();
	periph->unit_number = camperiphunit(*p_drv, sim, path_id,
					    target_id, lun_id);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL &&
	       cur_periph->unit_number < periph->unit_number) {
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	}
	if (cur_periph != NULL) {
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;	/* 3 */

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;	/* 4 */

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		CAM_SIM_UNLOCK(sim);
		break;
	case 3:
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		if (init_level == 3)
			xpt_remove_periph(periph);
		periph->path = NULL;
		/* FALLTHROUGH */
	case 1:
		CAM_SIM_UNLOCK(sim);	/* sim was retrieved from path */
		xpt_free_path(path);
		kfree(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}
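/*
 * A peripheral driver normally invokes cam_periph_alloc() from its
 * async callback when it sees AC_FOUND_DEVICE.  A minimal sketch, with
 * the "xa" driver name and the xa* callbacks purely hypothetical:
 *
 *	status = cam_periph_alloc(xaregister, xaoninvalidate, xacleanup,
 *				  xastart, "xa", CAM_PERIPH_BIO,
 *				  cgd->ccb_h.path, xaasync,
 *				  AC_FOUND_DEVICE, cgd);
 *	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *		kprintf("xaasync: unable to attach new device\n");
 */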
/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

/*
 * Release the peripheral.  The XPT is not locked and the SIM may or may
 * not be locked on entry.
 *
 * The last release on a peripheral marked invalid frees it.  In this
 * case we must be sure to hold both the XPT lock and the SIM lock,
 * requiring a bit of fancy footwork if the SIM lock already happens
 * to be held.
 */
void
cam_periph_release(struct cam_periph *periph)
{
	struct cam_sim *sim;
	int doun;

	while (periph) {
		/*
		 * First try the critical path case
		 */
		sim = periph->sim;
		xpt_lock_buses();
		if ((periph->flags & CAM_PERIPH_INVALID) == 0 ||
		    periph->refcount != 1) {
			--periph->refcount;
			xpt_unlock_buses();
			break;
		}

		/*
		 * Otherwise we also need to free the peripheral and must
		 * acquire the sim lock and xpt lock in the correct order
		 * to do so.
		 *
		 * The condition must be re-checked after the locks have
		 * been reacquired.
		 */
		xpt_unlock_buses();
		doun = CAM_SIM_COND_LOCK(sim);
		xpt_lock_buses();
		--periph->refcount;
		if ((periph->flags & CAM_PERIPH_INVALID) &&
		    periph->refcount == 0) {
			camperiphfree(periph);
		}
		xpt_unlock_buses();
		CAM_SIM_COND_UNLOCK(sim, doun);
		break;
	}
}
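/*
 * cam_periph_acquire() and cam_periph_release() must be strictly
 * paired.  A sketch of the usual idiom around a blocking operation:
 *
 *	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *		return (ENXIO);
 *	... use the periph, possibly sleeping ...
 *	cam_periph_release(periph);
 */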
int
cam_periph_hold(struct cam_periph *periph, int flags)
{
	int error;

	sim_lock_assert_owned(periph->sim->lock);

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = sim_lock_sleep(periph, flags, "caplck", 0,
					    periph->sim->lock)) != 0) {
			cam_periph_release(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph, int unlock)
{
	struct cam_sim *sim;

	sim_lock_assert_owned(periph->sim->lock);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}
	if (unlock) {
		sim = periph->sim;
		cam_periph_release(periph);
		/* periph may be garbage now */
		CAM_SIM_UNLOCK(sim);
	} else {
		cam_periph_release(periph);
	}
}

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int i, val, dunit;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;) {
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			++newunit;
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = -1;
		while ((i = resource_locate(i, periph_name)) != -1) {
			dname = resource_query_name(i);
			dunit = resource_query_unit(i);
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0)) {
				continue;
			}
			if (newunit == dunit)
				break;
		}
		if (i == -1)
			break;
		++newunit;
	}
	return (newunit);
}
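/*
 * Wiring entries come from the kernel environment resource hints that
 * the resource_*_value() calls above query.  For example (illustrative
 * only), hints such as the following would wire unit da4 to target 5
 * on scbus1 and reserve that unit number:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 */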
static u_int
camperiphunit(struct periph_driver *p_drv,
	      struct cam_sim *sim, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int hit, i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	unit = 0;

	periph_name = p_drv->driver_name;
	ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	i = -1;
	for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
		dname = resource_query_name(i);
		dunit = resource_query_unit(i);
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			hit++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			hit++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			hit++;
		}
		if (hit != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * If no wired units are in the kernel config do an auto unit
	 * start selection.  We want usb mass storage out of the way
	 * so it doesn't steal low numbered da%d slots from ahci, sili,
	 * or other scsi attachments.
	 */
	if (hit == 0 && sim) {
		if (strncmp(sim->sim_name, "umass", 5) == 0 && unit < 8)
			unit = 8;
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
				 target, lun);

	return (unit);
}

void
cam_periph_invalidate(struct cam_periph *periph)
{
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		kprintf("cam_invalidate_periph: refcount < 0!!\n");
	xpt_unlock_buses();
}
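/*
 * Free the peripheral once its reference count has dropped to zero.
 * Called with the XPT bus lock held; the lock is dropped while the
 * destructor and deferred callback run and is re-acquired before
 * returning.
 */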
static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}

	if (*p_drv == NULL) {
		kprintf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	kfree(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * We don't map user pointers into KVM, instead we use pbufs.
 *
 * This won't work on physical pointers(?OLD), for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	int numbufs;
	int error;
	int i;
	struct buf *bp;

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			kprintf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (void *)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			mapinfo->dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (void *)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			mapinfo->dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (void *)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			mapinfo->dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		mapinfo->dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {
		/*
		 * It's kind of bogus, we need an R+W command.  For now the
		 * buffer needs some sort of command.  Use BUF_CMD_WRITE
		 * to indicate a write and BUF_CMD_READ to indicate R+W.
		 */
		cmd[i] = BUF_CMD_WRITE;

		if (lengths[i] > MAXPHYS) {
			kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than MAXPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       MAXPHYS);
			return(E2BIG);
		}

		if (mapinfo->dirs[i] & CAM_DIR_OUT) {
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_READ)) {
				kprintf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for READ\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);
				return(EACCES);
			}
		}

		if (mapinfo->dirs[i] & CAM_DIR_IN) {
			cmd[i] = BUF_CMD_READ;
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_WRITE)) {
				kprintf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for WRITE\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);
				return(EACCES);
			}
		}
	}

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		bp = getpbuf_mem(NULL);

		/* save the original user pointer */
		mapinfo->saved_ptrs[i] = *data_ptrs[i];

		/* set the flags */
		bp->b_cmd = cmd[i];

		/*
		 * Always bounce the I/O through kernel memory.
		 */
		bp->b_bcount = lengths[i];
		if (mapinfo->dirs[i] & CAM_DIR_OUT) {
			error = copyin(*data_ptrs[i], bp->b_data, bp->b_bcount);
		} else {
			error = 0;
		}
		if (error) {
			relpbuf(bp, NULL);
			cam_periph_unmapbufs(mapinfo, data_ptrs, i);
			mapinfo->num_bufs_used -= i;
			return(error);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = bp->b_data;

		mapinfo->bp[i] = bp;
		mapinfo->num_bufs_used++;
	}

	return(0);
}
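/*
 * The usual calling pattern, as in a passthrough ioctl path (a sketch
 * only; error handling and the errfunc/softc names are illustrative):
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, errfunc, cam_flags,
 *					  sense_flags, &softc->device_stats);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */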
/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (void *)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (void *)&ccb->cdm.patterns;
			data_ptrs[1] = (void *)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		return;
		break; /* NOTREACHED */
	}
	cam_periph_unmapbufs(mapinfo, data_ptrs, numbufs);
}

static void
cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo,
		     u_int8_t ***data_ptrs, int numbufs)
{
	struct buf *bp;
	int i;

	for (i = 0; i < numbufs; i++) {
		bp = mapinfo->bp[i];

		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->saved_ptrs[i];

		if (mapinfo->dirs[i] & CAM_DIR_IN) {
			/* XXX return error */
			copyout(bp->b_data, *data_ptrs[i], bp->b_bcount);
		}
		relpbuf(bp, NULL);
		mapinfo->bp[i] = NULL;
	}
}
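/*
 * Allocate a CCB for a peripheral, sleeping until one is available.
 * The CCB is delivered through the peripheral's start routine, which
 * is expected to notice immediate_priority, hang a CCB on ccb_list,
 * and wake any sleeper (the handshake assumed by the loop below).
 */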
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	sim_lock_assert_owned(periph->sim->lock);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		sim_lock_sleep(&periph->ccb_list, 0, "cgticb", 0,
			       periph->sim->lock);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	while ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)) {
		sim_lock_sleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0, sim->lock);
	}
}

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
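/*
 * Synchronous commands are typically built on cam_periph_getccb() and
 * cam_periph_runccb(): runccb() issues the CCB, ccbwait() sleeps until
 * it leaves the queue and completes, and the CCB is re-issued for as
 * long as the error routine returns ERESTART.  A sketch (xadone and
 * xaerror are hypothetical; xadone's completion path is assumed to
 * wakeup(&ccb->ccb_h.cbfcnp) to satisfy cam_periph_ccbwait()):
 *
 *	ccb = cam_periph_getccb(periph, 1);
 *	scsi_test_unit_ready(&ccb->csio, 0, xadone, MSG_SIMPLE_Q_TAG,
 *			     SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, xaerror, 0, SF_NO_PRINT, NULL);
 *	xpt_release_ccb(ccb);
 */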
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	sim_lock_assert_owned(sim->lock);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;
	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ);

	return(error);
}

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
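/*
 * cam_freeze_devq() and cam_release_devq() nest: each freeze bumps the
 * device queue's frozen count and each release (without getcount_only)
 * drops it.  A sketch of a timed settle delay, as used by
 * cam_periph_freeze_after_event() below:
 *
 *	cam_freeze_devq(periph->path);
 *	cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT,
 *			 0, 1000, 0);
 */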
#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;
	int xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen.
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					xpt_done_ccb = TRUE;
				}
			}
		}
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {
				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);
			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {
				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
				    RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;
			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);
		}
		break;
	default:
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 * so that the retry count is only decremented on an
	 * actual retry.  Remember that the original ccb had its
	 * retry count dropped before entering recovery, so
	 * doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}
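/*
 * camperiphdone() above completes the recovery commands issued by
 * camperiphscsisenseerror() below: the original CCB is stashed in
 * saved_ccb_ptr and, once recovery finishes, is either re-issued via
 * xpt_action() or handed back to the owner through its completion
 * callback.
 */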
/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		cam_periph_bus_settle(periph, scsi_delay);
		break;
	}
	default:
		break;
	}
}
void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microuptime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				 RELSIM_RELEASE_AFTER_TIMEOUT,
				 /*reduction*/0,
				 /*timeout*/duration_ms,
				 /*getcount_only*/0);
	}
}

static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						save_ccb,
						openings,
						relsim_flags,
						timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue Full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
			  ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}
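/*
 * Note that ERESTART from these helpers means the CCB has been (or
 * will be) re-issued rather than failed: cam_periph_runccb() loops
 * until the error routine returns something other than ERESTART, so
 * it never surfaces to the caller as a final error.
 */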
static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			kprintf("%s\n", action_string);
		}
	}
	return (error);
}
/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status status;
	int frozen;
	int error, printed = 0;
	int openings;
	u_int32_t relsim_flags;
	u_int32_t timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 save_ccb,
						 &openings,
						 &relsim_flags,
						 &timeout);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
				  "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
				  "Uncorrected Parity Error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data Overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries Exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {
				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
						  "Selection Timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				kprintf("Bus Device Reset sent\n");
			else
				kprintf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request Requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
					  status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries Exhausted";
		}
		break;
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		if (error == ERESTART) {
			action_string = "Retrying Command";
			xpt_action(ccb);
		}

		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose && (sense_flags & SF_NO_PRINT) == 0 &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print(ccb->ccb_h.path, "error %d\n", error);
		}
		xpt_print(ccb->ccb_h.path, "%s\n", action_string);
	}

	return (error);
}
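/*
 * A peripheral driver's error routine usually just filters a few cases
 * of its own and defers the rest here.  A minimal sketch (the xa*
 * names and the saved_ccb softc member are hypothetical):
 *
 *	static int
 *	xaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		struct xa_softc *softc;
 *
 *		softc = (struct xa_softc *)xpt_path_periph(
 *		    ccb->ccb_h.path)->softc;
 *		return (cam_periph_error(ccb, cam_flags, sense_flags,
 *					 &softc->saved_ccb));
 *	}
 */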