/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/linker_set.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static u_int	camperiphnextunit(struct periph_driver *p_drv,
				  u_int newunit, int wired,
				  path_id_t pathid, target_id_t target,
				  lun_id_t lun);
static u_int	camperiphunit(struct periph_driver *p_drv,
			      path_id_t pathid, target_id_t target,
			      lun_id_t lun);
static void	camperiphdone(struct cam_periph *periph,
			      union ccb *done_ccb);
static void	camperiphfree(struct cam_periph *periph);
static int	camperiphscsistatuserror(union ccb *ccb,
					 cam_flags camflags,
					 u_int32_t sense_flags,
					 union ccb *save_ccb,
					 int *openings,
					 u_int32_t *relsim_flags,
					 u_int32_t *timeout);
static int	camperiphscsisenseerror(union ccb *ccb,
					cam_flags camflags,
					u_int32_t sense_flags,
					union ccb *save_ccb,
					int *openings,
					u_int32_t *relsim_flags,
					u_int32_t *timeout);

static int nperiph_drivers;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);

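/*
 * Example (a sketch, not part of the original source): TUNABLE_INT()
 * registers each delay as a loader tunable, so the values above can be
 * overridden from loader.conf(5) without recompiling, e.g.:
 *
 *	kern.cam.periph_selto_delay="2000"	# ms after selection timeout
 *	kern.cam.periph_busy_delay="250"	# ms to back off on SCSI BUSY
 */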

void
periphdriver_register(void *data)
{
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			    M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		free(old, M_CAMPERIPH);
	nperiph_drivers++;
}
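/*
 * Sketch (not from the original file) of how a peripheral driver reaches
 * periphdriver_register(): it declares a struct periph_driver and hands it
 * to PERIPHDRIVER_DECLARE() from <cam/cam_periph.h>, which registers it at
 * module load.  The "xx" driver below is hypothetical; xxinit's definition
 * is omitted.
 */
#ifdef CAM_PERIPH_EXAMPLE	/* illustrative only, never compiled */
static periph_init_t xxinit;

static struct periph_driver xxdriver = {
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif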

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected\n",
			       periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	xpt_unlock_buses();
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		return (CAM_REQ_INVALID);
	}

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	bzero(periph, sizeof(*periph));
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	periph->path = path;
	init_level++;

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);

	if (cur_periph != NULL)
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}

	init_level++;

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{

	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct cam_sim *sim;

	if (periph == NULL)
		return;

	sim = periph->sim;
	mtx_assert(sim->mtx, MA_NOTOWNED);
	mtx_lock(sim->mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(sim->mtx);
}

int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	mtx_assert(periph->sim->mtx, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	mtx_assert(periph->sim->mtx, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}
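/*
 * Sketch (not from the original file) of the hold/unhold pattern as a
 * peripheral's open routine might use it: take the SIM lock, hold the
 * periph across the operation, then drop both.  xxopen() is hypothetical;
 * cam_periph_lock()/unlock() are the usual wrappers around the SIM mutex.
 */
#ifdef CAM_PERIPH_EXAMPLE	/* illustrative only, never compiled */
static int
xxopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct cam_periph *periph = (struct cam_periph *)dev->si_drv1;
	int error;

	cam_periph_lock(periph);	/* acquires periph->sim->mtx */
	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
		cam_periph_unlock(periph);
		return (error);
	}

	/* ... device-specific open work goes here ... */

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}
#endif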

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",&strval)||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int wired, i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
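/*
 * Example of the wiring entries camperiphunit() parses.  These live in
 * /boot/device.hints; the unit and bus numbers here are illustrative:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * With these set, the disk at scbus1/target 5/lun 0 always attaches as
 * da4, and camperiphnextunit() skips unit 4 when numbering other disks.
 */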

void
cam_periph_invalidate(struct cam_periph *periph)
{

	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		printf("cam_periph_invalidate: refcount < 0!!\n");
	xpt_unlock_buses();
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than DFLTPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       DFLTPHYS);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return(0);
}
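/*
 * Sketch (not from the original file, loosely modeled on the pass(4)
 * send-CCB path) of the map/run/unmap pairing: map the user buffers the
 * CCB references, run the CCB, then restore the user pointers.  xxsendccb()
 * and xxerror() are hypothetical (xxerror is sketched at the end of this
 * file).
 */
#ifdef CAM_PERIPH_EXAMPLE	/* illustrative only, never compiled */
static int
xxsendccb(struct cam_periph *periph, union ccb *ccb)
{
	struct cam_periph_map_info mapinfo;
	int error;

	bzero(&mapinfo, sizeof(mapinfo));

	/*
	 * cam_periph_mapmem() can sleep, so it must not be called with
	 * the SIM lock held.
	 */
	error = cam_periph_mapmem(ccb, &mapinfo);
	if (error != 0)
		return (error);	/* user addresses were bad or too large */

	cam_periph_lock(periph);
	error = cam_periph_runccb(ccb, xxerror, /*camflags*/0,
				  /*sense_flags*/0, /*devstat*/NULL);
	cam_periph_unlock(periph);

	/* Always undo the mapping, even on error. */
	cam_periph_unmapmem(ccb, &mapinfo);
	return (error);
}
#endif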

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		mtx_assert(periph->sim->mtx, MA_OWNED);
		mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
		    0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
}

int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);

	return(error);
}
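/*
 * Sketch (not from the original file) of a synchronous command issued
 * through cam_periph_runccb(): build a TEST UNIT READY in a CCB obtained
 * from cam_periph_getccb() and run it to completion with the SIM lock
 * held.  The completion callback must wake the sleeper in
 * cam_periph_ccbwait(), which sleeps on &ccb->ccb_h.cbfcnp.  xxdone(),
 * xxtestready(), and xxerror() are hypothetical.
 */
#ifdef CAM_PERIPH_EXAMPLE	/* illustrative only, never compiled */
static void
xxdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* The caller of cam_periph_runccb() sleeps on this address. */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xxtestready(struct cam_periph *periph)
{
	union ccb *ccb;
	int error;

	ccb = cam_periph_getccb(periph, /*priority*/1);

	scsi_test_unit_ready(&ccb->csio,
			     /*retries*/4,
			     xxdone,
			     MSG_SIMPLE_Q_TAG,
			     SSD_FULL_SIZE,
			     /*timeout*/5000);

	error = cam_periph_runccb(ccb, xxerror, /*camflags*/0,
				  /*sense_flags*/SF_RETRY_UA,
				  /*devstat*/NULL);

	xpt_release_ccb(ccb);
	return (error);
}
#endif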

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;
	int xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen.
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					xpt_done_ccb = TRUE;
				}
			}
		}
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			     (asc == 0x24) && (ascq == 0x00) &&
			     (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
					RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		cam_periph_bus_settle(periph, scsi_delay);
		break;
	}
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}
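/*
 * Sketch (not from the original file) of a peripheral async callback that
 * handles the one event it cares about and delegates the rest to
 * cam_periph_async() above.  xxasync() and its AC_FOUND_DEVICE handling
 * are schematic.
 */
#ifdef CAM_PERIPH_EXAMPLE	/* illustrative only, never compiled */
static void
xxasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_periph *periph = (struct cam_periph *)callback_arg;

	switch (code) {
	case AC_FOUND_DEVICE:
		/* ... try to attach to the newly arrived device ... */
		break;
	default:
		/* Lost-device and bus-reset handling is generic. */
		cam_periph_async(periph, code, path, arg);
		break;
	}
}
#endif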

static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						save_ccb,
						openings,
						relsim_flags,
						timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue Full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
		    ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {

		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			printf("%s\n", action_string);
		}
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status status;
	int frozen;
	int error, printed = 0;
	int openings;
	u_int32_t relsim_flags;
	u_int32_t timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 save_ccb,
						 &openings,
						 &relsim_flags,
						 &timeout);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected Parity Error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data Overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries Exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection Timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				printf("Bus Device Reset sent\n");
			else
				printf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request Requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries Exhausted";
		}
		break;
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		if (error == ERESTART) {
			action_string = "Retrying Command";
			xpt_action(ccb);
		}

		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {

		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print(ccb->ccb_h.path, "error %d\n", error);
		}
		xpt_print(ccb->ccb_h.path, "%s\n", action_string);
	}

	return (error);
}
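/*
 * Sketch (not from the original file) of how a peripheral driver's error
 * callback typically wraps cam_periph_error(): handle anything the driver
 * treats specially, then fall through to the generic handler.  The xx
 * softc and its saved_ccb field are hypothetical; a recovery CCB must be
 * supplied so the SS_START/SS_TUR/SS_REQSENSE actions above have somewhere
 * to save the failed CCB.
 */
#ifdef CAM_PERIPH_EXAMPLE	/* illustrative only, never compiled */
static int
xxerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct xx_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct xx_softc *)periph->softc;

	/* ... driver-specific filtering would go here ... */

	return (cam_periph_error(ccb, cam_flags, sense_flags,
				 &softc->saved_ccb));
}
#endif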