/*
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/cam_periph.c,v 1.70 2008/02/12 11:07:33 raj Exp $
 * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.41 2008/07/18 00:07:21 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>

#include "cam.h"
#include "cam_ccb.h"
#include "cam_xpt_periph.h"
#include "cam_periph.h"
#include "cam_debug.h"
#include "cam_sim.h"

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_pass.h>

static u_int	camperiphnextunit(struct periph_driver *p_drv,
				  u_int newunit, int wired,
				  path_id_t pathid, target_id_t target,
				  lun_id_t lun);
static u_int	camperiphunit(struct periph_driver *p_drv,
			      path_id_t pathid, target_id_t target,
			      lun_id_t lun);
static void	camperiphdone(struct cam_periph *periph,
			      union ccb *done_ccb);
static void	camperiphfree(struct cam_periph *periph);
static int	camperiphscsistatuserror(union ccb *ccb,
					 cam_flags camflags,
					 u_int32_t sense_flags,
					 union ccb *save_ccb,
					 int *openings,
					 u_int32_t *relsim_flags,
					 u_int32_t *timeout);
static int	camperiphscsisenseerror(union ccb *ccb,
					cam_flags camflags,
					u_int32_t sense_flags,
					union ccb *save_ccb,
					int *openings,
					u_int32_t *relsim_flags,
					u_int32_t *timeout);
static void	cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo,
				     u_int8_t ***data_ptrs, int numbufs);

static int nperiph_drivers;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
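
/*
 * The three delays above are in milliseconds and pace error recovery after
 * selection timeouts, resource shortages, and BUSY status respectively.
 * Since they are declared with TUNABLE_INT(), they can be overridden at
 * boot time from /boot/loader.conf, e.g. (values shown are illustrative):
 *
 *	kern.cam.periph_selto_delay="2000"
 *	kern.cam.periph_busy_delay="1000"
 */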

void
periphdriver_register(void *data)
{
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = kmalloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			     M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		kfree(old, M_CAMPERIPH);
	nperiph_drivers++;
}
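
/*
 * Registration sketch: a peripheral driver declares a periph_driver
 * structure and arranges for periphdriver_register() to be called at boot,
 * normally via the PERIPHDRIVER_DECLARE() macro from cam_periph.h.  The
 * "xx" driver and its xxinit routine below are hypothetical:
 */
#if 0
static struct periph_driver xxdriver =
{
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif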

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			kprintf("cam_periph_alloc: attempt to re-allocate "
				"valid device %s%d rejected\n",
				periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = kmalloc(sizeof(*periph), M_CAMPERIPH, M_INTWAIT | M_ZERO);

	init_level++;		/* 1: periph structure allocated */

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	xpt_unlock_buses();

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	periph->path = path;
	init_level++;		/* 2: private path created */

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);

	if (cur_periph != NULL)
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}

	init_level++;		/* 3: on the driver's unit list */

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;	/* 4: fully constructed */

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		kfree(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

void
cam_periph_release(struct cam_periph *periph)
{
	if (periph == NULL)
		return;

	xpt_lock_buses();
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	xpt_unlock_buses();
}

int
cam_periph_hold(struct cam_periph *periph, int flags)
{
	int error;

	sim_lock_assert_owned(periph->sim->lock);

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = sim_lock_sleep(periph, flags, "caplck", 0,
					    periph->sim->lock)) != 0) {
			cam_periph_release(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph, int unlock)
{
	struct cam_sim *sim;

	sim_lock_assert_owned(periph->sim->lock);
	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}
	if (unlock) {
		sim = periph->sim;
		cam_periph_release(periph);
		/* periph may be garbage now */
		CAM_SIM_UNLOCK(sim);
	} else {
		cam_periph_release(periph);
	}
}
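
/*
 * Sketch of the intended cam_periph_hold()/cam_periph_unhold() pairing,
 * e.g. around open-time processing in a hypothetical driver (SIM lock
 * held; surrounding code and error handling abbreviated):
 */
#if 0
	if ((error = cam_periph_hold(periph, PCATCH)) != 0)
		return (error);
	/* ... serialized open/close-time work ... */
	cam_periph_unhold(periph, /*unlock*/0);
#endif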

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name, *strval;
	int i, val, dunit;
	const char *dname;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = -1;
		while ((i = resource_locate(i, periph_name)) != -1) {
			dname = resource_query_name(i);
			dunit = resource_query_unit(i);
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (i == -1)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int hit, i, val, dunit;
	const char *dname;
	char pathbuf[32], *strval, *periph_name;

	unit = 0;

	periph_name = p_drv->driver_name;
	ksnprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	i = -1;
	for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) {
		dname = resource_query_name(i);
		dunit = resource_query_unit(i);
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			hit++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			hit++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			hit++;
		}
		if (hit != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid,
				 target, lun);

	return (unit);
}
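
/*
 * Wiring sketch: the resource_*() lookups above consult the kernel
 * environment.  Assuming the usual loader hint syntax, entries such as
 * the following in /boot/loader.conf would wire unit 4 of a hypothetical
 * "xx" peripheral driver to scbus1, target 5, lun 0:
 *
 *	hint.xx.4.at="scbus1"
 *	hint.xx.4.target="5"
 *	hint.xx.4.lun="0"
 *
 * A bare entry naming only "scbus" with no target is deliberately not
 * treated as a wiring entry by camperiphnextunit().
 */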

void
cam_periph_invalidate(struct cam_periph *periph)
{
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		kprintf("cam_invalidate_periph: refcount < 0!!\n");
	xpt_unlock_buses();
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}

	if (*p_drv == NULL) {
		kprintf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	kfree(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to DFLTPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	buf_cmd_t cmd[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	int numbufs;
	int error;
	int i;
	struct buf *bp;

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			kprintf("cam_periph_mapmem: invalid match buffer "
				"length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			mapinfo->dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			mapinfo->dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			mapinfo->dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		mapinfo->dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {
		/*
		 * It's kind of bogus, we need an R+W command.  For now
		 * the buffer needs some sort of command.  Use BUF_CMD_WRITE
		 * to indicate a write and BUF_CMD_READ to indicate R+W.
		 */
		cmd[i] = BUF_CMD_WRITE;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS) {
			kprintf("cam_periph_mapmem: attempt to map %lu bytes, "
				"which is greater than DFLTPHYS(%d)\n",
				(long)(lengths[i] +
				(((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
				DFLTPHYS);
			return(E2BIG);
		}

		if (mapinfo->dirs[i] & CAM_DIR_OUT) {
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_READ)) {
				kprintf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for READ\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);
				return(EACCES);
			}
		}

		if (mapinfo->dirs[i] & CAM_DIR_IN) {
			cmd[i] = BUF_CMD_READ;
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_WRITE)) {
				kprintf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for WRITE\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);
				return(EACCES);
			}
		}
	}

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		bp = getpbuf(NULL);

		/* save the original user pointer */
		mapinfo->saved_ptrs[i] = *data_ptrs[i];

		/* set the flags */
		bp->b_cmd = cmd[i];

		/*
		 * Require 16-byte alignment and bounce if we don't get it.
		 * (NATA does not realign buffers for DMA).
		 */
		if ((intptr_t)*data_ptrs[i] & 15)
			mapinfo->bounce[i] = 1;
		else
			mapinfo->bounce[i] = 0;

		/*
		 * Map the user buffer into kernel memory.  If the user
		 * buffer is not aligned we have to allocate a bounce buffer
		 * and copy.
		 */
		if (mapinfo->bounce[i]) {
			bp->b_data = bp->b_kvabase;
			bp->b_bcount = lengths[i];
			vm_hold_load_pages(bp, (vm_offset_t)bp->b_data,
				       (vm_offset_t)bp->b_data + bp->b_bcount);
			if (mapinfo->dirs[i] & CAM_DIR_OUT) {
				error = copyin(*data_ptrs[i], bp->b_data,
					       bp->b_bcount);
				if (error) {
					vm_hold_free_pages(bp,
					    (vm_offset_t)bp->b_data,
					    (vm_offset_t)bp->b_data +
					     bp->b_bcount);
				}
			} else {
				error = 0;
			}
		} else if (vmapbuf(bp, *data_ptrs[i], lengths[i]) < 0) {
			kprintf("cam_periph_mapmem: error, "
				"address %p, length %lu isn't "
				"user accessible any more\n",
				(void *)*data_ptrs[i],
				(u_long)lengths[i]);
			error = EACCES;
		} else {
			error = 0;
		}
		if (error) {
			relpbuf(bp, NULL);
			cam_periph_unmapbufs(mapinfo, data_ptrs, i);
			mapinfo->num_bufs_used -= i;
			return(error);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = bp->b_data;

		mapinfo->bp[i] = bp;
		mapinfo->num_bufs_used++;
	}

	return(0);
}
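
/*
 * Worked example of the sizing check in cam_periph_mapmem(): with 4KB
 * pages and the usual DFLTPHYS of 64KB, a full 64KB transfer is only
 * mappable when it is page aligned.  One that starts 0x800 bytes into a
 * page needs 64KB + 0x800 of buffer space once vmapbuf() truncates the
 * address to the page boundary, so it is rejected with E2BIG even though
 * its raw length fits.
 */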

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		return;
		break; /* NOTREACHED */
	}
	cam_periph_unmapbufs(mapinfo, data_ptrs, numbufs);
}

static void
cam_periph_unmapbufs(struct cam_periph_map_info *mapinfo,
		     u_int8_t ***data_ptrs, int numbufs)
{
	struct buf *bp;
	int i;

	for (i = 0; i < numbufs; i++) {
		bp = mapinfo->bp[i];

		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->saved_ptrs[i];

		/* unmap the buffer */
		if (mapinfo->bounce[i]) {
			if (mapinfo->dirs[i] & CAM_DIR_IN) {
				/* XXX return error */
				copyout(bp->b_data, *data_ptrs[i],
					bp->b_bcount);
			}
			vm_hold_free_pages(bp, (vm_offset_t)bp->b_data,
				       (vm_offset_t)bp->b_data + bp->b_bcount);
		} else {
			vunmapbuf(bp);
		}
		relpbuf(bp, NULL);
		mapinfo->bp[i] = NULL;
	}
}
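
/*
 * Typical usage sketch for the mapping helpers above, in the style of a
 * pass-through ioctl handler.  "xxerror" is a hypothetical error callback,
 * and a real caller may pass a devstat structure instead of NULL:
 */
#if 0
	struct cam_periph_map_info mapinfo;
	int error;

	bzero(&mapinfo, sizeof(mapinfo));
	error = cam_periph_mapmem(ccb, &mapinfo);
	if (error == 0) {
		error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
					  SF_RETRY_UA, /*devstat*/NULL);
		cam_periph_unmapmem(ccb, &mapinfo);
	}
#endif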

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	sim_lock_assert_owned(periph->sim->lock);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		sim_lock_sleep(&periph->ccb_list, 0, "cgticb", 0,
			       periph->sim->lock);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	while ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)) {
		sim_lock_sleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0, sim->lock);
	}
}

int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd) {
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0) {
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	sim_lock_assert_owned(sim->lock);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;
	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ);

	return(error);
}
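
/*
 * A hedged sketch of a synchronous command built from the helpers above:
 * allocate a CCB, fill in a TEST UNIT READY, run it to completion, then
 * release the CCB.  The SIM lock must be held; "xxdone" and "xxerror" are
 * hypothetical driver routines, and the done routine must eventually wake
 * cam_periph_ccbwait() by calling wakeup(&ccb->ccb_h.cbfcnp):
 */
#if 0
	union ccb *ccb;
	int error;

	ccb = cam_periph_getccb(periph, /*priority*/1);
	scsi_test_unit_ready(&ccb->csio,
			     /*retries*/1,
			     xxdone,
			     MSG_SIMPLE_Q_TAG,
			     SSD_FULL_SIZE,
			     /*timeout*/5000);
	error = cam_periph_runccb(ccb, xxerror, /*camflags*/0,
				  /*sense_flags*/SF_RETRY_UA, /*ds*/NULL);
	xpt_release_ccb(ccb);
#endif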

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;
	int xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen..
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					xpt_done_ccb = TRUE;
				}
			}
		}
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
				    RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);
		}
		break;
	default:
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		cam_periph_bus_settle(periph, scsi_delay);
		break;
	}
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microuptime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				 RELSIM_RELEASE_AFTER_TIMEOUT,
				 /*reduction*/0,
				 /*timeout*/duration_ms,
				 /*getcount_only*/0);
	}
}
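
/*
 * Worked example for cam_periph_freeze_after_event(): if the last bus
 * reset was observed 300ms ago and bus_settle is 1000ms, delta (0.3s) is
 * less than the settle time (1.0s), so the device queue is frozen and
 * scheduled for release after the remaining 1000 - 300 = 700ms.  If the
 * event is already older than the settle time, nothing is frozen.
 */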

static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						save_ccb,
						openings,
						relsim_flags,
						timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue Full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
			  ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {

		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			kprintf("%s\n", action_string);
		}
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status status;
	int frozen;
	int error, printed = 0;
	int openings;
	u_int32_t relsim_flags;
	u_int32_t timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 save_ccb,
						 &openings,
						 &relsim_flags,
						 &timeout);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected Parity Error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data Overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries Exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection Timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				kprintf("Bus Device Reset sent\n");
			else
				kprintf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request Requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries Exhausted";
		}
		break;
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		if (error == ERESTART) {
			action_string = "Retrying Command";
			xpt_action(ccb);
		}

		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {

		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print(ccb->ccb_h.path, "error %d\n", error);
		}
		xpt_print(ccb->ccb_h.path, "%s\n", action_string);
	}

	return (error);
}
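
/*
 * Closing sketch: a peripheral driver's error callback typically screens
 * out conditions it handles privately and defers the rest to
 * cam_periph_error().  The "xx" names and the softc layout below are
 * hypothetical:
 */
#if 0
static int
xxerror(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)
{
	struct xx_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct xx_softc *)periph->softc;

	/* driver-private screening of errors would go here */

	return (cam_periph_error(ccb, camflags, sense_flags,
				 &softc->saved_ccb));
}
#endif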