1 /* 2 * Common functions for CAM "type" (peripheral) drivers. 3 * 4 * Copyright (c) 1997, 1998 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 * 29 * $FreeBSD: src/sys/cam/cam_periph.c,v 1.70 2008/02/12 11:07:33 raj Exp $ 30 */ 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/types.h> 35 #include <sys/malloc.h> 36 #include <sys/kernel.h> 37 #include <sys/lock.h> 38 #include <sys/buf.h> 39 #include <sys/proc.h> 40 #include <sys/devicestat.h> 41 #include <sys/bus.h> 42 #include <vm/vm.h> 43 #include <vm/vm_extern.h> 44 45 #include <sys/thread2.h> 46 47 #include "cam.h" 48 #include "cam_ccb.h" 49 #include "cam_xpt_periph.h" 50 #include "cam_periph.h" 51 #include "cam_debug.h" 52 #include "cam_sim.h" 53 54 #include <bus/cam/scsi/scsi_all.h> 55 #include <bus/cam/scsi/scsi_message.h> 56 #include <bus/cam/scsi/scsi_pass.h> 57 58 static u_int camperiphnextunit(struct periph_driver *p_drv, 59 u_int newunit, int wired, 60 path_id_t pathid, target_id_t target, 61 lun_id_t lun); 62 static u_int camperiphunit(struct periph_driver *p_drv, 63 struct cam_sim *sim, path_id_t pathid, 64 target_id_t target, lun_id_t lun); 65 static void camperiphdone(struct cam_periph *periph, 66 union ccb *done_ccb); 67 static void camperiphfree(struct cam_periph *periph); 68 static int camperiphscsistatuserror(union ccb *ccb, 69 cam_flags camflags, 70 u_int32_t sense_flags, 71 union ccb *save_ccb, 72 int *openings, 73 u_int32_t *relsim_flags, 74 u_int32_t *timeout); 75 static int camperiphscsisenseerror(union ccb *ccb, 76 cam_flags camflags, 77 u_int32_t sense_flags, 78 union ccb *save_ccb, 79 int *openings, 80 u_int32_t *relsim_flags, 81 u_int32_t *timeout); 82 static void cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo, 83 u_int8_t ***data_ptrs, int numbufs); 84 85 static int nperiph_drivers; 86 struct periph_driver **periph_drivers; 87 88 MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers"); 89 90 static int periph_selto_delay = 1000; 91 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay); 92 static int periph_noresrc_delay = 500; 93 
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay); 94 static int periph_busy_delay = 500; 95 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay); 96 97 /* 98 * This is a horrible hack. The CAM code was just bulk-copying the ccb 99 * to 'restore' it from the saved version. This completely destroys list 100 * linkages and such, so hack the hack to not copy-over fields that cannot 101 * be safely copied over. 102 * 103 * This fixes list races when scsi errors occur simultaneously on multiple 104 * requests. 105 */ 106 #define RESTORE_CCB(saved, ccbh, field) \ 107 bcopy(&(saved)->field, &(ccbh)->field, sizeof((ccbh)->field)) 108 109 #define saved_ccb_ptr ppriv_ptr0 110 111 static void 112 restore_ccb(struct ccb_hdr *ccb_h) 113 { 114 struct ccb_hdr *saved; 115 116 saved = ccb_h->saved_ccb_ptr; 117 bcopy(saved + 1, ccb_h + 1, sizeof(union ccb) - sizeof(*saved)); 118 RESTORE_CCB(saved, ccb_h, retry_count); 119 RESTORE_CCB(saved, ccb_h, cbfcnp); 120 RESTORE_CCB(saved, ccb_h, func_code); 121 RESTORE_CCB(saved, ccb_h, status); 122 RESTORE_CCB(saved, ccb_h, path); 123 RESTORE_CCB(saved, ccb_h, path_id); 124 RESTORE_CCB(saved, ccb_h, target_id); 125 RESTORE_CCB(saved, ccb_h, target_lun); 126 RESTORE_CCB(saved, ccb_h, flags); 127 RESTORE_CCB(saved, ccb_h, periph_priv); 128 RESTORE_CCB(saved, ccb_h, sim_priv); 129 RESTORE_CCB(saved, ccb_h, timeout); 130 } 131 132 void 133 periphdriver_register(void *data) 134 { 135 struct periph_driver **newdrivers, **old; 136 int ndrivers; 137 138 ndrivers = nperiph_drivers + 2; 139 newdrivers = kmalloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH, 140 M_WAITOK); 141 if (periph_drivers) 142 bcopy(periph_drivers, newdrivers, 143 sizeof(*newdrivers) * nperiph_drivers); 144 newdrivers[nperiph_drivers] = (struct periph_driver *)data; 145 newdrivers[nperiph_drivers + 1] = NULL; 146 old = periph_drivers; 147 periph_drivers = newdrivers; 148 if (old) 149 kfree(old, M_CAMPERIPH); 150 nperiph_drivers++; 151 } 152 153 
/*
 * Allocate and initialize a new peripheral instance of the named driver
 * for the given path and attach it to the driver's unit list.  Cleanup on
 * failure is driven by init_level, which records how far initialization
 * progressed before the error (see the switch at "failure:").
 */
cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			/* Defer until the old instance is finally freed. */
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			kprintf("cam_periph_alloc: attempt to re-allocate "
				"valid device %s%d rejected\n",
				periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = kmalloc(sizeof(*periph), M_CAMPERIPH, M_INTWAIT | M_ZERO);

	init_level++;	/* 1: periph allocated */

	/* Locate the registered driver entry matching "name". */
	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	xpt_unlock_buses();

	sim = xpt_path_sim(path);
	CAM_SIM_LOCK(sim);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;	/* 2: private path created */

	periph->path = path;

	/*
	 * Finalize with buses locked.  Allocate unit number and add to
	 * list to reserve the unit number.  Undo later if the XPT fails.
	 */
	xpt_lock_buses();
	periph->unit_number = camperiphunit(*p_drv, sim, path_id,
					    target_id, lun_id);
	/* Insert in ascending unit-number order. */
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL &&
	       cur_periph->unit_number < periph->unit_number) {
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	}
	if (cur_periph != NULL) {
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;	/* 3: registered with the XPT */

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;	/* 4: driver constructor succeeded */

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		CAM_SIM_UNLOCK(sim);
		break;
	case 3:
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		if (init_level == 3)
			xpt_remove_periph(periph);
		periph->path = NULL;
		/* FALLTHROUGH */
	case 1:
		CAM_SIM_UNLOCK(sim);	/* sim was retrieved from path */
		xpt_free_path(path);
		kfree(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				return(periph);
			}
		}
		if (name != NULL) {
			/* Only one driver can match a given name. */
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

/*
 * Take a reference on the peripheral, preventing it from being freed
 * while the caller uses it.  Pair with cam_periph_release().
 */
cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

/*
 * Release the peripheral.  The XPT is not locked and the SIM may or may
 * not be locked on entry.
 *
 * The last release on a peripheral marked invalid frees it.  In this
 * case we must be sure to hold both the XPT lock and the SIM lock,
 * requiring a bit of fancy footwork if the SIM lock already happens
 * to be held.
 */
void
cam_periph_release(struct cam_periph *periph)
{
	struct cam_sim *sim;
	int doun;

	while (periph) {
		/*
		 * First try the critical path case
		 */
		sim = periph->sim;
		xpt_lock_buses();
		if ((periph->flags & CAM_PERIPH_INVALID) == 0 ||
		    periph->refcount != 1) {
			--periph->refcount;
			xpt_unlock_buses();
			break;
		}

		/*
		 * Otherwise we also need to free the peripheral and must
		 * acquire the sim lock and xpt lock in the correct order
		 * to do so.
		 *
		 * The condition must be re-checked after the locks have
		 * been reacquired.
		 */
		xpt_unlock_buses();
		doun = CAM_SIM_COND_LOCK(sim);
		xpt_lock_buses();
		--periph->refcount;
		if ((periph->flags & CAM_PERIPH_INVALID) &&
		    periph->refcount == 0) {
			camperiphfree(periph);
		}
		xpt_unlock_buses();
		CAM_SIM_COND_UNLOCK(sim, doun);
		break;
	}
}

/*
 * Acquire an exclusive hold (CAM_PERIPH_LOCKED) on the peripheral,
 * sleeping if another holder has it.  Returns 0 on success, ENXIO if
 * the reference could not be taken, or the sleep error.  The caller
 * must hold the SIM lock.
 */
int
cam_periph_hold(struct cam_periph *periph, int flags)
{
	int error;

	sim_lock_assert_owned(periph->sim->lock);

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = sim_lock_sleep(periph, flags, "caplck", 0,
					    periph->sim->lock)) != 0) {
			cam_periph_release(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

/*
 * Drop the hold taken by cam_periph_hold() and wake any waiters.  If
 * "unlock" is set the SIM lock is released as well; note the periph may
 * be freed by the release, so the sim pointer is cached first.
 */
void
cam_periph_unhold(struct cam_periph *periph, int unlock)
{
	struct cam_sim *sim;

	sim_lock_assert_owned(periph->sim->lock);
	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}
	if (unlock) {
		sim = periph->sim;
		cam_periph_release(periph);
		/* periph may be garbage now */
		CAM_SIM_UNLOCK(sim);
	} else {
		cam_periph_release(periph);
	}
}

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int i, val, dunit;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;) {
		/* Scan the unit list for an existing user of newunit. */
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			++newunit;
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = -1;
		while ((i = resource_locate(i, periph_name)) != -1) {
			dname = resource_query_name(i);
			dunit = resource_query_unit(i);
			/*
			 * if no "target" and no specific scbus, skip.
			 * NOTE(review): strval is only dereferenced when
			 * resource_string_value() returned 0 (success),
			 * via || short-circuit — confirm before restyling.
			 */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",&strval)||
			     strcmp(strval, "scbus") == 0)) {
				continue;
			}
			if (newunit == dunit)
				break;
		}
		/* No hint reserves this unit number; it is free. */
		if (i == -1)
			break;
		++newunit;
	}
	return (newunit);
}

/*
 * Choose the starting unit number for a new peripheral: either the unit
 * wired down in the kernel configuration hints (scbus/target/lun match)
 * or an automatic selection.
 */
static u_int
camperiphunit(struct periph_driver *p_drv,
	      struct cam_sim *sim, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int hit, i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	unit = 0;

	periph_name = p_drv->driver_name;
	ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	i = -1;
	/* hit counts how many hint clauses matched this path. */
	for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
		dname = resource_query_name(i);
		dunit = resource_query_unit(i);
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			hit++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			hit++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			hit++;
		}
		if (hit != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * If no wired units are in the kernel config do an auto unit
	 * start selection.  We want usb mass storage out of the way
	 * so it doesn't steal low numbered da%d slots from ahci, sili,
	 * or other scsi attachments.
	 *
	 * NOTE(review): strncmp length 4 compares only "umas" — a length
	 * of 5 would be needed to match the full "umass"; confirm intent.
	 */
	if (hit == 0 && sim) {
		if (strncmp(sim->sim_name, "umass", 4) == 0 && unit < 8)
			unit = 8;
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
				 target, lun);

	return (unit);
}

/*
 * Mark a peripheral invalid, running its oninvalidate hook exactly once
 * and freeing it immediately if no references remain.
 */
void
cam_periph_invalidate(struct cam_periph *periph)
{
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		kprintf("cam_invalidate_periph: refcount < 0!!\n");
	xpt_unlock_buses();
}

/*
 * Tear down and free a peripheral.  Called with the XPT buses locked;
 * the lock is temporarily dropped around the destructor and deferred
 * async delivery, and reacquired before returning.
 */
static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}

	if (*p_drv == NULL) {
		kprintf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		/* Deliver the async event deferred by cam_periph_alloc(). */
		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	kfree(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * We don't map user pointers into KVM, instead we use pbufs.
 *
 * This won't work on physical pointers(?OLD), for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	int numbufs;
	int error;
	int i;
	struct buf *bp;

	/* Collect the user pointer slots and lengths for this ccb type. */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			kprintf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (void *)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			mapinfo->dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (void *)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			mapinfo->dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (void *)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			mapinfo->dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		mapinfo->dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {
		/*
		 * It's kinda bogus, we need a R+W command.  For now the
		 * buffer needs some sort of command.  Use BUF_CMD_WRITE
		 * to indicate a write and BUF_CMD_READ to indicate R+W.
		 */
		cmd[i] = BUF_CMD_WRITE;

		if (lengths[i] > MAXPHYS) {
			kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than MAXPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       MAXPHYS);
			return(E2BIG);
		}

		/* CAM_DIR_OUT means we copy user data out to the device. */
		if (mapinfo->dirs[i] & CAM_DIR_OUT) {
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_READ)) {
				kprintf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for READ\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);
				return(EACCES);
			}
		}

		if (mapinfo->dirs[i] & CAM_DIR_IN) {
			cmd[i] = BUF_CMD_READ;
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_WRITE)) {
				kprintf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for WRITE\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);

				return(EACCES);
			}
		}

	}

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		bp = getpbuf_mem(NULL);

		/* save the original user pointer */
		mapinfo->saved_ptrs[i] = *data_ptrs[i];

		/* set the flags */
		bp->b_cmd = cmd[i];

		/*
		 * Always bounce the I/O through kernel memory.
		 */
		bp->b_bcount = lengths[i];
		if (mapinfo->dirs[i] & CAM_DIR_OUT) {
			error = copyin(*data_ptrs[i], bp->b_data, bp->b_bcount);
		} else {
			error = 0;
		}
		if (error) {
			/* Undo the buffers mapped so far and bail. */
			relpbuf(bp, NULL);
			cam_periph_unmapbufs(mapinfo, data_ptrs, i);
			mapinfo->num_bufs_used -= i;
			return(error);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = bp->b_data;

		mapinfo->bp[i] = bp;
		mapinfo->num_bufs_used++;
	}

	return(0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		return;
	}

	/* Recompute the same pointer slots cam_periph_mapmem() used. */
	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (void *)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (void *)&ccb->cdm.patterns;
			data_ptrs[1] = (void *)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		return;
		break; /* NOTREACHED */
	}
	cam_periph_unmapbufs(mapinfo, data_ptrs, numbufs);
}

/*
 * Release the pbufs used for a mapping: copy IN data back to the user,
 * restore the original user pointers, and return each pbuf.
 */
static void
cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo,
		     u_int8_t ***data_ptrs, int numbufs)
{
	struct buf *bp;
	int i;

	for (i = 0; i < numbufs; i++) {
		bp = mapinfo->bp[i];

		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->saved_ptrs[i];

		if (mapinfo->dirs[i] & CAM_DIR_IN) {
			/* XXX return error */
			copyout(bp->b_data, *data_ptrs[i], bp->b_bcount);
		}
		relpbuf(bp, NULL);
		mapinfo->bp[i] = NULL;
	}
}

/*
 * Obtain a ccb for the peripheral at the given priority, scheduling the
 * peripheral and sleeping until one of at least that priority is queued.
 * The caller must hold the SIM lock.
 */
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	sim_lock_assert_owned(periph->sim->lock);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		sim_lock_sleep(&periph->ccb_list, 0, "cgticb", 0,
			       periph->sim->lock);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

/*
 * Sleep until the ccb has been dequeued and is no longer in progress.
 * Wakeups arrive via the ccb's cbfcnp address.
 */
void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	while ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)) {
		sim_lock_sleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0, sim->lock);
	}
}

/*
 * Common ioctl support for peripheral drivers.  Currently only handles
 * CAMGETPASSTHRU: walk the device list for this path until a "pass"
 * peripheral is found and copy the resulting ccb back to the caller.
 */
int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				/* No pass device: return empty result. */
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

/*
 * Dispatch a ccb and wait for it to complete, invoking the supplied
 * error routine (retrying on ERESTART), releasing any device queue
 * freeze and recording devstat statistics for XPT_SCSI_IO requests.
 * The caller must hold the SIM lock.
 */
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	sim_lock_assert_owned(sim->lock);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ);

	return(error);
}

/*
 * Freeze the SIM queue for the given path by issuing a no-op ccb with
 * CAM_DEV_QFREEZE set.
 */
void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

/*
 * Release (or just query, if getcount_only) the device queue freeze
 * count via XPT_REL_SIMQ.  Returns the remaining freeze count.
 */
u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ?
	    CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

/*
 * Completion handler for error-recovery commands issued on behalf of a
 * failed ccb.  When recovery finishes, the original ccb (stashed via
 * saved_ccb_ptr) is restored and re-dispatched or completed.
 */
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;
	int xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen..
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					xpt_done_ccb = TRUE;
				}
			}
		}
		restore_ccb(&done_ccb->ccb_h);

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			/* NOTE(review): shadows the outer int "sense" flag. */
			struct scsi_sense_data *sense;
			int    error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
				    RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				restore_ccb(&done_ccb->ccb_h);

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			restore_ccb(&done_ccb->ccb_h);

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		restore_ccb(&done_ccb->ccb_h);

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 * so that the retry count is only decremented on an
	 * actual retry.  Remember that the original ccb had its
	 * retry count dropped before entering recovery, so
	 * doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		cam_periph_bus_settle(periph, scsi_delay);
		break;
	}
	default:
		break;
	}
}

/*
 * Freeze the device queue for the scsi_delay settle period following a
 * bus reset or BDR, measured from the device's last-reset timestamp.
 */
void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

/*
 * Freeze the device queue if less than duration_ms has elapsed since
 * event_time.  (Definition continues beyond this view of the file.)
 */
void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microuptime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
cam_release_devq(periph->path,
				 RELSIM_RELEASE_AFTER_TIMEOUT,
				 /*reduction*/0,
				 /*timeout*/duration_ms,
				 /*getcount_only*/0);
	}

}

/*
 * Map the SCSI status byte of a completed CCB to a disposition: 0
 * (success), ERESTART (requeue), or an errno.  Queue tuning hints are
 * returned through the openings/relsim_flags/timeout out-parameters.
 * CHECK CONDITION and COMMAND TERMINATED are delegated to
 * camperiphscsisenseerror() for sense-based recovery.
 */
static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						save_ccb,
						openings,
						relsim_flags,
						timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue Full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
			  ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

/*
 * Handle a CHECK CONDITION (or manually-recovered sense) result: decide
 * on a recovery action — retry, start unit, TUR polling, request sense,
 * or fail — based on the sense data and the device's inquiry data, and
 * kick off any recovery CCB with camperiphdone() as its callback.
 */
static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {

		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.
This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			/* No sense yet; fetch it manually (see SS_REQSENSE). */
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		/* Low bits of the action encode the suggested errno. */
		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		/*
		 * NOTE(review): action_string is set on every reachable
		 * path here; SS_REQSENSE leaves it NULL but is only chosen
		 * when CAM_AUTOSNS_VALID is clear, which fails this test.
		 */
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			kprintf("%s\n", action_string);
		}
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
*/
/*
 * Returns 0 on success, ERESTART after the CCB has been requeued via
 * xpt_action() for a retry, or an errno (EIO, EINVAL, ENXIO) when the
 * command must be failed.  'printed' damps duplicate messages across
 * the FALLTHROUGH chains below.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status  status;
	int	    frozen;
	int	    error, printed = 0;
	int	    openings;
	u_int32_t   relsim_flags;
	u_int32_t   timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 save_ccb,
						 &openings,
						 &relsim_flags,
						 &timeout);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected Parity Error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data Overrun\n");
			printed++;
		}
		/*
		 * NOTE(review): this store is dead — both branches of the
		 * retry check below overwrite 'error'.
		 */
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries Exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection Timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				kprintf("Bus Device Reset sent\n");
			else
				kprintf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request Requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries Exhausted";
		}
		break;
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		if (error == ERESTART) {
			action_string = "Retrying Command";
			xpt_action(ccb);
		}

		/* Release the queue using any hints set up above. */
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose && (sense_flags & SF_NO_PRINT) == 0 &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {

		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print(ccb->ccb_h.path, "error %d\n", error);
		}
		xpt_print(ccb->ccb_h.path, "%s\n", action_string);
	}

	return (error);
}