1 /* 2 * Common functions for CAM "type" (peripheral) drivers. 3 * 4 * Copyright (c) 1997, 1998 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD: src/sys/cam/cam_periph.c,v 1.24.2.3 2003/01/25 19:04:40 dillon Exp $ 30 * $DragonFly: src/sys/bus/cam/cam_periph.c,v 1.8 2004/03/12 03:23:13 dillon Exp $ 31 */ 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/types.h> 36 #include <sys/malloc.h> 37 #include <sys/linker_set.h> 38 #include <sys/buf.h> 39 #include <sys/proc.h> 40 #include <sys/devicestat.h> 41 #include <sys/bus.h> 42 #include <vm/vm.h> 43 #include <vm/vm_extern.h> 44 45 #include "cam.h" 46 #include "cam_ccb.h" 47 #include "cam_xpt_periph.h" 48 #include "cam_periph.h" 49 #include "cam_debug.h" 50 51 #include <bus/cam/scsi/scsi_all.h> 52 #include <bus/cam/scsi/scsi_message.h> 53 #include <bus/cam/scsi/scsi_da.h> 54 #include <bus/cam/scsi/scsi_pass.h> 55 56 static u_int camperiphnextunit(struct periph_driver *p_drv, 57 u_int newunit, int wired, 58 path_id_t pathid, target_id_t target, 59 lun_id_t lun); 60 static u_int camperiphunit(struct periph_driver *p_drv, 61 path_id_t pathid, target_id_t target, 62 lun_id_t lun); 63 static void camperiphdone(struct cam_periph *periph, 64 union ccb *done_ccb); 65 static void camperiphfree(struct cam_periph *periph); 66 67 cam_status 68 cam_periph_alloc(periph_ctor_t *periph_ctor, 69 periph_oninv_t *periph_oninvalidate, 70 periph_dtor_t *periph_dtor, periph_start_t *periph_start, 71 char *name, cam_periph_type type, struct cam_path *path, 72 ac_callback_t *ac_callback, ac_code code, void *arg) 73 { 74 struct periph_driver **p_drv; 75 struct cam_periph *periph; 76 struct cam_periph *cur_periph; 77 path_id_t path_id; 78 target_id_t target_id; 79 lun_id_t lun_id; 80 cam_status status; 81 u_int init_level; 82 int s; 83 84 init_level = 0; 85 /* 86 * Handle Hot-Plug scenarios. If there is already a peripheral 87 * of our type assigned to this path, we are likely waiting for 88 * final close on an old, invalidated, peripheral. If this is 89 * the case, queue up a deferred call to the peripheral's async 90 * handler. If it looks like a mistaken re-alloation, complain. 91 */ 92 if ((periph = cam_periph_find(path, name)) != NULL) { 93 94 if ((periph->flags & CAM_PERIPH_INVALID) != 0 95 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) { 96 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND; 97 periph->deferred_callback = ac_callback; 98 periph->deferred_ac = code; 99 return (CAM_REQ_INPROG); 100 } else { 101 printf("cam_periph_alloc: attempt to re-allocate " 102 "valid device %s%d rejected\n", 103 periph->periph_name, periph->unit_number); 104 } 105 return (CAM_REQ_INVALID); 106 } 107 108 periph = malloc(sizeof(*periph), M_DEVBUF, M_INTWAIT); 109 110 init_level++; 111 112 SET_FOREACH(p_drv, periphdriver_set) { 113 if (strcmp((*p_drv)->driver_name, name) == 0) 114 break; 115 } 116 117 path_id = xpt_path_path_id(path); 118 target_id = xpt_path_target_id(path); 119 lun_id = xpt_path_lun_id(path); 120 bzero(periph, sizeof(*periph)); 121 cam_init_pinfo(&periph->pinfo); 122 periph->periph_start = periph_start; 123 periph->periph_dtor = periph_dtor; 124 periph->periph_oninval = periph_oninvalidate; 125 periph->type = type; 126 periph->periph_name = name; 127 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id); 128 periph->immediate_priority = CAM_PRIORITY_NONE; 129 periph->refcount = 0; 130 SLIST_INIT(&periph->ccb_list); 131 status = xpt_create_path(&path, periph, path_id, target_id, lun_id); 132 if (status != CAM_REQ_CMP) 133 goto failure; 134 135 periph->path = path; 136 init_level++; 137 138 status = xpt_add_periph(periph); 139 140 if (status != CAM_REQ_CMP) 141 goto failure; 142 143 s = splsoftcam(); 144 cur_periph = TAILQ_FIRST(&(*p_drv)->units); 145 while (cur_periph != NULL 146 && cur_periph->unit_number < periph->unit_number) 147 cur_periph = TAILQ_NEXT(cur_periph, unit_links); 148 149 if (cur_periph != NULL) 150 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links); 151 else { 152 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links); 153 (*p_drv)->generation++; 154 } 155 156 splx(s); 157 158 init_level++; 159 160 status = periph_ctor(periph, arg); 161 162 if (status == CAM_REQ_CMP) 163 init_level++; 164 165 failure: 166 switch (init_level) { 167 case 4: 168 /* Initialized successfully */ 169 break; 170 case 3: 171 s = splsoftcam(); 172 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); 173 splx(s); 174 xpt_remove_periph(periph); 175 case 2: 176 xpt_free_path(periph->path); 177 case 1: 178 free(periph, M_DEVBUF); 179 case 0: 180 /* No cleanup to perform. */ 181 break; 182 default: 183 panic("cam_periph_alloc: Unkown init level"); 184 } 185 return(status); 186 } 187 188 /* 189 * Find a peripheral structure with the specified path, target, lun, 190 * and (optionally) type. If the name is NULL, this function will return 191 * the first peripheral driver that matches the specified path. 192 */ 193 struct cam_periph * 194 cam_periph_find(struct cam_path *path, char *name) 195 { 196 struct periph_driver **p_drv; 197 struct cam_periph *periph; 198 int s; 199 200 SET_FOREACH(p_drv, periphdriver_set) { 201 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0)) 202 continue; 203 204 s = splsoftcam(); 205 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 206 periph = TAILQ_NEXT(periph, unit_links)) { 207 if (xpt_path_comp(periph->path, path) == 0) { 208 splx(s); 209 return(periph); 210 } 211 } 212 splx(s); 213 if (name != NULL) 214 return(NULL); 215 } 216 return(NULL); 217 } 218 219 cam_status 220 cam_periph_acquire(struct cam_periph *periph) 221 { 222 int s; 223 224 if (periph == NULL) 225 return(CAM_REQ_CMP_ERR); 226 227 s = splsoftcam(); 228 periph->refcount++; 229 splx(s); 230 231 return(CAM_REQ_CMP); 232 } 233 234 void 235 cam_periph_release(struct cam_periph *periph) 236 { 237 int s; 238 239 if (periph == NULL) 240 return; 241 242 s = splsoftcam(); 243 if ((--periph->refcount == 0) 244 && (periph->flags & CAM_PERIPH_INVALID)) { 245 camperiphfree(periph); 246 } 247 splx(s); 248 249 } 250 251 /* 252 * Look for the next unit number that is not currently in use for this 253 * peripheral type starting at "newunit". Also exclude unit numbers that 254 * are reserved by for future "hardwiring" unless we already know that this 255 * is a potential wired device. Only assume that the device is "wired" the 256 * first time through the loop since after that we'll be looking at unit 257 * numbers that did not match a wiring entry. 258 */ 259 static u_int 260 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired, 261 path_id_t pathid, target_id_t target, lun_id_t lun) 262 { 263 struct cam_periph *periph; 264 char *periph_name, *strval; 265 int s; 266 int i, val, dunit; 267 const char *dname; 268 269 s = splsoftcam(); 270 periph_name = p_drv->driver_name; 271 for (;;newunit++) { 272 273 for (periph = TAILQ_FIRST(&p_drv->units); 274 periph != NULL && periph->unit_number != newunit; 275 periph = TAILQ_NEXT(periph, unit_links)) 276 ; 277 278 if (periph != NULL && periph->unit_number == newunit) { 279 if (wired != 0) { 280 xpt_print_path(periph->path); 281 printf("Duplicate Wired Device entry!\n"); 282 xpt_print_path(periph->path); 283 printf("Second device (%s device at scbus%d " 284 "target %d lun %d) will not be wired\n", 285 periph_name, pathid, target, lun); 286 wired = 0; 287 } 288 continue; 289 } 290 if (wired) 291 break; 292 293 /* 294 * Don't match entries like "da 4" as a wired down 295 * device, but do match entries like "da 4 target 5" 296 * or even "da 4 scbus 1". 297 */ 298 i = -1; 299 while ((i = resource_locate(i, periph_name)) != -1) { 300 dname = resource_query_name(i); 301 dunit = resource_query_unit(i); 302 /* if no "target" and no specific scbus, skip */ 303 if (resource_int_value(dname, dunit, "target", &val) && 304 (resource_string_value(dname, dunit, "at",&strval)|| 305 strcmp(strval, "scbus") == 0)) 306 continue; 307 if (newunit == dunit) 308 break; 309 } 310 if (i == -1) 311 break; 312 } 313 splx(s); 314 return (newunit); 315 } 316 317 static u_int 318 camperiphunit(struct periph_driver *p_drv, path_id_t pathid, 319 target_id_t target, lun_id_t lun) 320 { 321 u_int unit; 322 int hit, i, val, dunit; 323 const char *dname; 324 char pathbuf[32], *strval, *periph_name; 325 326 unit = 0; 327 328 periph_name = p_drv->driver_name; 329 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid); 330 i = -1; 331 for (hit = 0; (i = resource_locate(i, periph_name)) != -1; hit = 0) { 332 dname = resource_query_name(i); 333 dunit = resource_query_unit(i); 334 if (resource_string_value(dname, dunit, "at", &strval) == 0) { 335 if (strcmp(strval, pathbuf) != 0) 336 continue; 337 hit++; 338 } 339 if (resource_int_value(dname, dunit, "target", &val) == 0) { 340 if (val != target) 341 continue; 342 hit++; 343 } 344 if (resource_int_value(dname, dunit, "lun", &val) == 0) { 345 if (val != lun) 346 continue; 347 hit++; 348 } 349 if (hit != 0) { 350 unit = dunit; 351 break; 352 } 353 } 354 355 /* 356 * Either start from 0 looking for the next unit or from 357 * the unit number given in the resource config. This way, 358 * if we have wildcard matches, we don't return the same 359 * unit number twice. 360 */ 361 unit = camperiphnextunit(p_drv, unit, /*wired*/hit, pathid, 362 target, lun); 363 364 return (unit); 365 } 366 367 void 368 cam_periph_invalidate(struct cam_periph *periph) 369 { 370 int s; 371 372 s = splsoftcam(); 373 /* 374 * We only call this routine the first time a peripheral is 375 * invalidated. The oninvalidate() routine is always called at 376 * splsoftcam(). 377 */ 378 if (((periph->flags & CAM_PERIPH_INVALID) == 0) 379 && (periph->periph_oninval != NULL)) 380 periph->periph_oninval(periph); 381 382 periph->flags |= CAM_PERIPH_INVALID; 383 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND; 384 385 if (periph->refcount == 0) 386 camperiphfree(periph); 387 else if (periph->refcount < 0) 388 printf("cam_invalidate_periph: refcount < 0!!\n"); 389 splx(s); 390 } 391 392 static void 393 camperiphfree(struct cam_periph *periph) 394 { 395 int s; 396 struct periph_driver **p_drv; 397 398 SET_FOREACH(p_drv, periphdriver_set) { 399 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0) 400 break; 401 } 402 403 if (periph->periph_dtor != NULL) 404 periph->periph_dtor(periph); 405 406 s = splsoftcam(); 407 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); 408 (*p_drv)->generation++; 409 splx(s); 410 411 xpt_remove_periph(periph); 412 413 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) { 414 union ccb ccb; 415 void *arg; 416 417 switch (periph->deferred_ac) { 418 case AC_FOUND_DEVICE: 419 ccb.ccb_h.func_code = XPT_GDEV_TYPE; 420 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1); 421 xpt_action(&ccb); 422 arg = &ccb; 423 break; 424 case AC_PATH_REGISTERED: 425 ccb.ccb_h.func_code = XPT_PATH_INQ; 426 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1); 427 xpt_action(&ccb); 428 arg = &ccb; 429 break; 430 default: 431 arg = NULL; 432 break; 433 } 434 periph->deferred_callback(NULL, periph->deferred_ac, 435 periph->path, arg); 436 } 437 xpt_free_path(periph->path); 438 free(periph, M_DEVBUF); 439 } 440 441 /* 442 * Wait interruptibly for an exclusive lock. 443 */ 444 int 445 cam_periph_lock(struct cam_periph *periph, int flags) 446 { 447 int error; 448 449 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) { 450 periph->flags |= CAM_PERIPH_LOCK_WANTED; 451 if ((error = tsleep(periph, flags, "caplck", 0)) != 0) 452 return error; 453 } 454 455 if (cam_periph_acquire(periph) != CAM_REQ_CMP) 456 return(ENXIO); 457 458 periph->flags |= CAM_PERIPH_LOCKED; 459 return 0; 460 } 461 462 /* 463 * Unlock and wake up any waiters. 464 */ 465 void 466 cam_periph_unlock(struct cam_periph *periph) 467 { 468 periph->flags &= ~CAM_PERIPH_LOCKED; 469 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) { 470 periph->flags &= ~CAM_PERIPH_LOCK_WANTED; 471 wakeup(periph); 472 } 473 474 cam_periph_release(periph); 475 } 476 477 /* 478 * Map user virtual pointers into kernel virtual address space, so we can 479 * access the memory. This won't work on physical pointers, for now it's 480 * up to the caller to check for that. (XXX KDM -- should we do that here 481 * instead?) This also only works for up to MAXPHYS memory. Since we use 482 * buffers to map stuff in and out, we're limited to the buffer size. 483 */ 484 int 485 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo) 486 { 487 int numbufs, i, j; 488 int flags[CAM_PERIPH_MAXMAPS]; 489 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; 490 u_int32_t lengths[CAM_PERIPH_MAXMAPS]; 491 u_int32_t dirs[CAM_PERIPH_MAXMAPS]; 492 493 switch(ccb->ccb_h.func_code) { 494 case XPT_DEV_MATCH: 495 if (ccb->cdm.match_buf_len == 0) { 496 printf("cam_periph_mapmem: invalid match buffer " 497 "length 0\n"); 498 return(EINVAL); 499 } 500 if (ccb->cdm.pattern_buf_len > 0) { 501 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; 502 lengths[0] = ccb->cdm.pattern_buf_len; 503 dirs[0] = CAM_DIR_OUT; 504 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; 505 lengths[1] = ccb->cdm.match_buf_len; 506 dirs[1] = CAM_DIR_IN; 507 numbufs = 2; 508 } else { 509 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; 510 lengths[0] = ccb->cdm.match_buf_len; 511 dirs[0] = CAM_DIR_IN; 512 numbufs = 1; 513 } 514 break; 515 case XPT_SCSI_IO: 516 case XPT_CONT_TARGET_IO: 517 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) 518 return(0); 519 520 data_ptrs[0] = &ccb->csio.data_ptr; 521 lengths[0] = ccb->csio.dxfer_len; 522 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 523 numbufs = 1; 524 break; 525 default: 526 return(EINVAL); 527 break; /* NOTREACHED */ 528 } 529 530 /* 531 * Check the transfer length and permissions first, so we don't 532 * have to unmap any previously mapped buffers. 533 */ 534 for (i = 0; i < numbufs; i++) { 535 536 flags[i] = 0; 537 538 /* 539 * The userland data pointer passed in may not be page 540 * aligned. vmapbuf() truncates the address to a page 541 * boundary, so if the address isn't page aligned, we'll 542 * need enough space for the given transfer length, plus 543 * whatever extra space is necessary to make it to the page 544 * boundary. 545 */ 546 if ((lengths[i] + 547 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){ 548 printf("cam_periph_mapmem: attempt to map %lu bytes, " 549 "which is greater than DFLTPHYS(%d)\n", 550 (long)(lengths[i] + 551 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)), 552 DFLTPHYS); 553 return(E2BIG); 554 } 555 556 if (dirs[i] & CAM_DIR_OUT) { 557 flags[i] = B_WRITE; 558 if (!useracc(*data_ptrs[i], lengths[i], 559 VM_PROT_READ)) { 560 printf("cam_periph_mapmem: error, " 561 "address %p, length %lu isn't " 562 "user accessible for READ\n", 563 (void *)*data_ptrs[i], 564 (u_long)lengths[i]); 565 return(EACCES); 566 } 567 } 568 569 /* 570 * XXX this check is really bogus, since B_WRITE currently 571 * is all 0's, and so it is "set" all the time. 572 */ 573 if (dirs[i] & CAM_DIR_IN) { 574 flags[i] |= B_READ; 575 if (!useracc(*data_ptrs[i], lengths[i], 576 VM_PROT_WRITE)) { 577 printf("cam_periph_mapmem: error, " 578 "address %p, length %lu isn't " 579 "user accessible for WRITE\n", 580 (void *)*data_ptrs[i], 581 (u_long)lengths[i]); 582 583 return(EACCES); 584 } 585 } 586 587 } 588 589 for (i = 0; i < numbufs; i++) { 590 /* 591 * Get the buffer. 592 */ 593 mapinfo->bp[i] = getpbuf(NULL); 594 595 /* save the buffer's data address */ 596 mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data; 597 598 /* put our pointer in the data slot */ 599 mapinfo->bp[i]->b_data = *data_ptrs[i]; 600 601 /* set the transfer length, we know it's < DFLTPHYS */ 602 mapinfo->bp[i]->b_bufsize = lengths[i]; 603 604 /* set the flags */ 605 mapinfo->bp[i]->b_flags = flags[i] | B_PHYS; 606 607 /* map the buffer into kernel memory */ 608 if (vmapbuf(mapinfo->bp[i]) < 0) { 609 printf("cam_periph_mapmem: error, " 610 "address %p, length %lu isn't " 611 "user accessible any more\n", 612 (void *)*data_ptrs[i], 613 (u_long)lengths[i]); 614 for (j = 0; j < i; ++j) { 615 *data_ptrs[j] = mapinfo->bp[j]->b_saveaddr; 616 mapinfo->bp[j]->b_flags &= ~B_PHYS; 617 relpbuf(mapinfo->bp[j], NULL); 618 } 619 return(EACCES); 620 } 621 622 /* set our pointer to the new mapped area */ 623 *data_ptrs[i] = mapinfo->bp[i]->b_data; 624 625 mapinfo->num_bufs_used++; 626 } 627 628 return(0); 629 } 630 631 /* 632 * Unmap memory segments mapped into kernel virtual address space by 633 * cam_periph_mapmem(). 634 */ 635 void 636 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo) 637 { 638 int numbufs, i; 639 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; 640 641 if (mapinfo->num_bufs_used <= 0) { 642 /* allow ourselves to be swapped once again */ 643 return; 644 } 645 646 switch (ccb->ccb_h.func_code) { 647 case XPT_DEV_MATCH: 648 numbufs = min(mapinfo->num_bufs_used, 2); 649 650 if (numbufs == 1) { 651 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; 652 } else { 653 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; 654 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; 655 } 656 break; 657 case XPT_SCSI_IO: 658 case XPT_CONT_TARGET_IO: 659 data_ptrs[0] = &ccb->csio.data_ptr; 660 numbufs = min(mapinfo->num_bufs_used, 1); 661 break; 662 default: 663 /* allow ourselves to be swapped once again */ 664 return; 665 break; /* NOTREACHED */ 666 } 667 668 for (i = 0; i < numbufs; i++) { 669 /* Set the user's pointer back to the original value */ 670 *data_ptrs[i] = mapinfo->bp[i]->b_saveaddr; 671 672 /* unmap the buffer */ 673 vunmapbuf(mapinfo->bp[i]); 674 675 /* clear the flags we set above */ 676 mapinfo->bp[i]->b_flags &= ~B_PHYS; 677 678 /* release the buffer */ 679 relpbuf(mapinfo->bp[i], NULL); 680 } 681 682 /* allow ourselves to be swapped once again */ 683 } 684 685 union ccb * 686 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) 687 { 688 struct ccb_hdr *ccb_h; 689 int s; 690 691 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n")); 692 693 s = splsoftcam(); 694 695 while (periph->ccb_list.slh_first == NULL) { 696 if (periph->immediate_priority > priority) 697 periph->immediate_priority = priority; 698 xpt_schedule(periph, priority); 699 if ((periph->ccb_list.slh_first != NULL) 700 && (periph->ccb_list.slh_first->pinfo.priority == priority)) 701 break; 702 tsleep(&periph->ccb_list, 0, "cgticb", 0); 703 } 704 705 ccb_h = periph->ccb_list.slh_first; 706 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 707 splx(s); 708 return ((union ccb *)ccb_h); 709 } 710 711 void 712 cam_periph_ccbwait(union ccb *ccb) 713 { 714 int s; 715 716 s = splsoftcam(); 717 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX) 718 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)) 719 tsleep(&ccb->ccb_h.cbfcnp, 0, "cbwait", 0); 720 721 splx(s); 722 } 723 724 int 725 cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr, 726 int (*error_routine)(union ccb *ccb, 727 cam_flags camflags, 728 u_int32_t sense_flags)) 729 { 730 union ccb *ccb; 731 int error; 732 int found; 733 734 error = found = 0; 735 736 switch(cmd){ 737 case CAMGETPASSTHRU: 738 ccb = cam_periph_getccb(periph, /* priority */ 1); 739 xpt_setup_ccb(&ccb->ccb_h, 740 ccb->ccb_h.path, 741 /*priority*/1); 742 ccb->ccb_h.func_code = XPT_GDEVLIST; 743 744 /* 745 * Basically, the point of this is that we go through 746 * getting the list of devices, until we find a passthrough 747 * device. In the current version of the CAM code, the 748 * only way to determine what type of device we're dealing 749 * with is by its name. 750 */ 751 while (found == 0) { 752 ccb->cgdl.index = 0; 753 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; 754 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) { 755 756 /* we want the next device in the list */ 757 xpt_action(ccb); 758 if (strncmp(ccb->cgdl.periph_name, 759 "pass", 4) == 0){ 760 found = 1; 761 break; 762 } 763 } 764 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) && 765 (found == 0)) { 766 ccb->cgdl.periph_name[0] = '\0'; 767 ccb->cgdl.unit_number = 0; 768 break; 769 } 770 } 771 772 /* copy the result back out */ 773 bcopy(ccb, addr, sizeof(union ccb)); 774 775 /* and release the ccb */ 776 xpt_release_ccb(ccb); 777 778 break; 779 default: 780 error = ENOTTY; 781 break; 782 } 783 return(error); 784 } 785 786 int 787 cam_periph_runccb(union ccb *ccb, 788 int (*error_routine)(union ccb *ccb, 789 cam_flags camflags, 790 u_int32_t sense_flags), 791 cam_flags camflags, u_int32_t sense_flags, 792 struct devstat *ds) 793 { 794 int error; 795 796 error = 0; 797 798 /* 799 * If the user has supplied a stats structure, and if we understand 800 * this particular type of ccb, record the transaction start. 801 */ 802 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO)) 803 devstat_start_transaction(ds); 804 805 xpt_action(ccb); 806 807 do { 808 cam_periph_ccbwait(ccb); 809 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) 810 error = 0; 811 else if (error_routine != NULL) 812 error = (*error_routine)(ccb, camflags, sense_flags); 813 else 814 error = 0; 815 816 } while (error == ERESTART); 817 818 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 819 cam_release_devq(ccb->ccb_h.path, 820 /* relsim_flags */0, 821 /* openings */0, 822 /* timeout */0, 823 /* getcount_only */ FALSE); 824 825 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO)) 826 devstat_end_transaction(ds, 827 ccb->csio.dxfer_len, 828 ccb->csio.tag_action & 0xf, 829 ((ccb->ccb_h.flags & CAM_DIR_MASK) == 830 CAM_DIR_NONE) ? DEVSTAT_NO_DATA : 831 (ccb->ccb_h.flags & CAM_DIR_OUT) ? 832 DEVSTAT_WRITE : 833 DEVSTAT_READ); 834 835 return(error); 836 } 837 838 void 839 cam_freeze_devq(struct cam_path *path) 840 { 841 struct ccb_hdr ccb_h; 842 843 xpt_setup_ccb(&ccb_h, path, /*priority*/1); 844 ccb_h.func_code = XPT_NOOP; 845 ccb_h.flags = CAM_DEV_QFREEZE; 846 xpt_action((union ccb *)&ccb_h); 847 } 848 849 u_int32_t 850 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags, 851 u_int32_t openings, u_int32_t timeout, 852 int getcount_only) 853 { 854 struct ccb_relsim crs; 855 856 xpt_setup_ccb(&crs.ccb_h, path, 857 /*priority*/1); 858 crs.ccb_h.func_code = XPT_REL_SIMQ; 859 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0; 860 crs.release_flags = relsim_flags; 861 crs.openings = openings; 862 crs.release_timeout = timeout; 863 xpt_action((union ccb *)&crs); 864 return (crs.qfrozen_cnt); 865 } 866 867 #define saved_ccb_ptr ppriv_ptr0 868 static void 869 camperiphdone(struct cam_periph *periph, union ccb *done_ccb) 870 { 871 cam_status status; 872 int frozen; 873 int sense; 874 struct scsi_start_stop_unit *scsi_cmd; 875 u_int32_t relsim_flags, timeout; 876 u_int32_t qfrozen_cnt; 877 878 status = done_ccb->ccb_h.status; 879 frozen = (status & CAM_DEV_QFRZN) != 0; 880 sense = (status & CAM_AUTOSNS_VALID) != 0; 881 status &= CAM_STATUS_MASK; 882 883 timeout = 0; 884 relsim_flags = 0; 885 886 /* 887 * Unfreeze the queue once if it is already frozen.. 888 */ 889 if (frozen != 0) { 890 qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path, 891 /*relsim_flags*/0, 892 /*openings*/0, 893 /*timeout*/0, 894 /*getcount_only*/0); 895 } 896 897 switch (status) { 898 899 case CAM_REQ_CMP: 900 901 /* 902 * If we have successfully taken a device from the not 903 * ready to ready state, re-scan the device and re-get the 904 * inquiry information. Many devices (mostly disks) don't 905 * properly report their inquiry information unless they 906 * are spun up. 907 */ 908 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) { 909 scsi_cmd = (struct scsi_start_stop_unit *) 910 &done_ccb->csio.cdb_io.cdb_bytes; 911 912 if (scsi_cmd->opcode == START_STOP_UNIT) 913 xpt_async(AC_INQ_CHANGED, 914 done_ccb->ccb_h.path, NULL); 915 } 916 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb, 917 sizeof(union ccb)); 918 919 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG; 920 921 xpt_action(done_ccb); 922 923 break; 924 case CAM_SCSI_STATUS_ERROR: 925 scsi_cmd = (struct scsi_start_stop_unit *) 926 &done_ccb->csio.cdb_io.cdb_bytes; 927 if (sense != 0) { 928 struct scsi_sense_data *sense; 929 int error_code, sense_key, asc, ascq; 930 931 sense = &done_ccb->csio.sense_data; 932 scsi_extract_sense(sense, &error_code, 933 &sense_key, &asc, &ascq); 934 935 /* 936 * If the error is "invalid field in CDB", 937 * and the load/eject flag is set, turn the 938 * flag off and try again. This is just in 939 * case the drive in question barfs on the 940 * load eject flag. The CAM code should set 941 * the load/eject flag by default for 942 * removable media. 943 */ 944 945 /* XXX KDM 946 * Should we check to see what the specific 947 * scsi status is?? Or does it not matter 948 * since we already know that there was an 949 * error, and we know what the specific 950 * error code was, and we know what the 951 * opcode is.. 952 */ 953 if ((scsi_cmd->opcode == START_STOP_UNIT) && 954 ((scsi_cmd->how & SSS_LOEJ) != 0) && 955 (asc == 0x24) && (ascq == 0x00) && 956 (done_ccb->ccb_h.retry_count > 0)) { 957 958 scsi_cmd->how &= ~SSS_LOEJ; 959 960 xpt_action(done_ccb); 961 962 } else if (done_ccb->ccb_h.retry_count > 0) { 963 /* 964 * In this case, the error recovery 965 * command failed, but we've got 966 * some retries left on it. Give 967 * it another try. 968 */ 969 970 /* set the timeout to .5 sec */ 971 relsim_flags = 972 RELSIM_RELEASE_AFTER_TIMEOUT; 973 timeout = 500; 974 975 xpt_action(done_ccb); 976 977 break; 978 979 } else { 980 /* 981 * Copy the original CCB back and 982 * send it back to the caller. 983 */ 984 bcopy(done_ccb->ccb_h.saved_ccb_ptr, 985 done_ccb, sizeof(union ccb)); 986 987 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG; 988 989 xpt_action(done_ccb); 990 } 991 } else { 992 /* 993 * Eh?? The command failed, but we don't 994 * have any sense. What's up with that? 995 * Fire the CCB again to return it to the 996 * caller. 997 */ 998 bcopy(done_ccb->ccb_h.saved_ccb_ptr, 999 done_ccb, sizeof(union ccb)); 1000 1001 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG; 1002 1003 xpt_action(done_ccb); 1004 1005 } 1006 break; 1007 default: 1008 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb, 1009 sizeof(union ccb)); 1010 1011 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG; 1012 1013 xpt_action(done_ccb); 1014 1015 break; 1016 } 1017 1018 /* decrement the retry count */ 1019 if (done_ccb->ccb_h.retry_count > 0) 1020 done_ccb->ccb_h.retry_count--; 1021 1022 qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path, 1023 /*relsim_flags*/relsim_flags, 1024 /*openings*/0, 1025 /*timeout*/timeout, 1026 /*getcount_only*/0); 1027 } 1028 1029 /* 1030 * Generic Async Event handler. Peripheral drivers usually 1031 * filter out the events that require personal attention, 1032 * and leave the rest to this function. 1033 */ 1034 void 1035 cam_periph_async(struct cam_periph *periph, u_int32_t code, 1036 struct cam_path *path, void *arg) 1037 { 1038 switch (code) { 1039 case AC_LOST_DEVICE: 1040 cam_periph_invalidate(periph); 1041 break; 1042 case AC_SENT_BDR: 1043 case AC_BUS_RESET: 1044 { 1045 cam_periph_bus_settle(periph, SCSI_DELAY); 1046 break; 1047 } 1048 default: 1049 break; 1050 } 1051 } 1052 1053 void 1054 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle) 1055 { 1056 struct ccb_getdevstats cgds; 1057 1058 xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1); 1059 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1060 xpt_action((union ccb *)&cgds); 1061 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle); 1062 } 1063 1064 void 1065 cam_periph_freeze_after_event(struct cam_periph *periph, 1066 struct timeval* event_time, u_int duration_ms) 1067 { 1068 struct timeval delta; 1069 struct timeval duration_tv; 1070 1071 microuptime(&delta); 1072 timevalsub(&delta, event_time); 1073 duration_tv.tv_sec = duration_ms / 1000; 1074 duration_tv.tv_usec = (duration_ms % 1000) * 1000; 1075 if (timevalcmp(&delta, &duration_tv, <)) { 1076 timevalsub(&duration_tv, &delta); 1077 1078 duration_ms = duration_tv.tv_sec * 1000; 1079 duration_ms += duration_tv.tv_usec / 1000; 1080 cam_freeze_devq(periph->path); 1081 cam_release_devq(periph->path, 1082 RELSIM_RELEASE_AFTER_TIMEOUT, 1083 /*reduction*/0, 1084 /*timeout*/duration_ms, 1085 /*getcount_only*/0); 1086 } 1087 1088 } 1089 1090 /* 1091 * Generic error handler. Peripheral drivers usually filter 1092 * out the errors that they handle in a unique mannor, then 1093 * call this function. 1094 */ 1095 int 1096 cam_periph_error(union ccb *ccb, cam_flags camflags, 1097 u_int32_t sense_flags, union ccb *save_ccb) 1098 { 1099 cam_status status; 1100 int frozen; 1101 int sense; 1102 int error; 1103 int openings; 1104 int retry; 1105 u_int32_t relsim_flags; 1106 u_int32_t timeout; 1107 1108 status = ccb->ccb_h.status; 1109 frozen = (status & CAM_DEV_QFRZN) != 0; 1110 sense = (status & CAM_AUTOSNS_VALID) != 0; 1111 status &= CAM_STATUS_MASK; 1112 relsim_flags = 0; 1113 1114 switch (status) { 1115 case CAM_REQ_CMP: 1116 /* decrement the number of retries */ 1117 retry = ccb->ccb_h.retry_count > 0; 1118 if (retry) 1119 ccb->ccb_h.retry_count--; 1120 error = 0; 1121 break; 1122 case CAM_AUTOSENSE_FAIL: 1123 case CAM_SCSI_STATUS_ERROR: 1124 1125 switch (ccb->csio.scsi_status) { 1126 case SCSI_STATUS_OK: 1127 case SCSI_STATUS_COND_MET: 1128 case SCSI_STATUS_INTERMED: 1129 case SCSI_STATUS_INTERMED_COND_MET: 1130 error = 0; 1131 break; 1132 case SCSI_STATUS_CMD_TERMINATED: 1133 case SCSI_STATUS_CHECK_COND: 1134 if (sense != 0) { 1135 struct scsi_sense_data *sense; 1136 int error_code, sense_key, asc, ascq; 1137 struct cam_periph *periph; 1138 scsi_sense_action err_action; 1139 struct ccb_getdev cgd; 1140 1141 sense = &ccb->csio.sense_data; 1142 scsi_extract_sense(sense, &error_code, 1143 &sense_key, &asc, &ascq); 1144 periph = xpt_path_periph(ccb->ccb_h.path); 1145 1146 /* 1147 * Grab the inquiry data for this device. 1148 */ 1149 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, 1150 /*priority*/ 1); 1151 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 1152 xpt_action((union ccb *)&cgd); 1153 1154 err_action = scsi_error_action(asc, ascq, 1155 &cgd.inq_data); 1156 1157 /* 1158 * Send a Test Unit Ready to the device. 1159 * If the 'many' flag is set, we send 120 1160 * test unit ready commands, one every half 1161 * second. Otherwise, we just send one TUR. 1162 * We only want to do this if the retry 1163 * count has not been exhausted. 1164 */ 1165 if (((err_action & SS_MASK) == SS_TUR) 1166 && save_ccb != NULL 1167 && ccb->ccb_h.retry_count > 0) { 1168 1169 /* 1170 * Since error recovery is already 1171 * in progress, don't attempt to 1172 * process this error. It is probably 1173 * related to the error that caused 1174 * the currently active error recovery 1175 * action. Also, we only have 1176 * space for one saved CCB, so if we 1177 * had two concurrent error recovery 1178 * actions, we would end up 1179 * over-writing one error recovery 1180 * CCB with another one. 1181 */ 1182 if (periph->flags & 1183 CAM_PERIPH_RECOVERY_INPROG) { 1184 error = ERESTART; 1185 break; 1186 } 1187 1188 periph->flags |= 1189 CAM_PERIPH_RECOVERY_INPROG; 1190 1191 /* decrement the number of retries */ 1192 if ((err_action & 1193 SSQ_DECREMENT_COUNT) != 0) { 1194 retry = 1; 1195 ccb->ccb_h.retry_count--; 1196 } 1197 1198 bcopy(ccb, save_ccb, sizeof(*save_ccb)); 1199 1200 /* 1201 * We retry this one every half 1202 * second for a minute. If the 1203 * device hasn't become ready in a 1204 * minute's time, it's unlikely to 1205 * ever become ready. If the table 1206 * doesn't specify SSQ_MANY, we can 1207 * only try this once. Oh well. 1208 */ 1209 if ((err_action & SSQ_MANY) != 0) 1210 scsi_test_unit_ready(&ccb->csio, 1211 /*retries*/120, 1212 camperiphdone, 1213 MSG_SIMPLE_Q_TAG, 1214 SSD_FULL_SIZE, 1215 /*timeout*/5000); 1216 else 1217 scsi_test_unit_ready(&ccb->csio, 1218 /*retries*/1, 1219 camperiphdone, 1220 MSG_SIMPLE_Q_TAG, 1221 SSD_FULL_SIZE, 1222 /*timeout*/5000); 1223 1224 /* release the queue after .5 sec. */ 1225 relsim_flags = 1226 RELSIM_RELEASE_AFTER_TIMEOUT; 1227 timeout = 500; 1228 /* 1229 * Drop the priority to 0 so that 1230 * we are the first to execute. Also 1231 * freeze the queue after this command 1232 * is sent so that we can restore the 1233 * old csio and have it queued in the 1234 * proper order before we let normal 1235 * transactions go to the drive. 1236 */ 1237 ccb->ccb_h.pinfo.priority = 0; 1238 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1239 1240 /* 1241 * Save a pointer to the original 1242 * CCB in the new CCB. 1243 */ 1244 ccb->ccb_h.saved_ccb_ptr = save_ccb; 1245 1246 error = ERESTART; 1247 } 1248 /* 1249 * Send a start unit command to the device, 1250 * and then retry the command. We only 1251 * want to do this if the retry count has 1252 * not been exhausted. If the user 1253 * specified 0 retries, then we follow 1254 * their request and do not retry. 1255 */ 1256 else if (((err_action & SS_MASK) == SS_START) 1257 && save_ccb != NULL 1258 && ccb->ccb_h.retry_count > 0) { 1259 int le; 1260 1261 /* 1262 * Only one error recovery action 1263 * at a time. See above. 1264 */ 1265 if (periph->flags & 1266 CAM_PERIPH_RECOVERY_INPROG) { 1267 error = ERESTART; 1268 break; 1269 } 1270 1271 periph->flags |= 1272 CAM_PERIPH_RECOVERY_INPROG; 1273 1274 /* decrement the number of retries */ 1275 retry = 1; 1276 ccb->ccb_h.retry_count--; 1277 1278 /* 1279 * Check for removable media and 1280 * set load/eject flag 1281 * appropriately. 1282 */ 1283 if (SID_IS_REMOVABLE(&cgd.inq_data)) 1284 le = TRUE; 1285 else 1286 le = FALSE; 1287 1288 /* 1289 * Attempt to start the drive up. 1290 * 1291 * Save the current ccb so it can 1292 * be restored and retried once the 1293 * drive is started up. 1294 */ 1295 bcopy(ccb, save_ccb, sizeof(*save_ccb)); 1296 1297 scsi_start_stop(&ccb->csio, 1298 /*retries*/1, 1299 camperiphdone, 1300 MSG_SIMPLE_Q_TAG, 1301 /*start*/TRUE, 1302 /*load/eject*/le, 1303 /*immediate*/FALSE, 1304 SSD_FULL_SIZE, 1305 /*timeout*/50000); 1306 /* 1307 * Drop the priority to 0 so that 1308 * we are the first to execute. Also 1309 * freeze the queue after this command 1310 * is sent so that we can restore the 1311 * old csio and have it queued in the 1312 * proper order before we let normal 1313 * transactions go to the drive. 1314 */ 1315 ccb->ccb_h.pinfo.priority = 0; 1316 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1317 1318 /* 1319 * Save a pointer to the original 1320 * CCB in the new CCB. 1321 */ 1322 ccb->ccb_h.saved_ccb_ptr = save_ccb; 1323 1324 error = ERESTART; 1325 } else if ((sense_flags & SF_RETRY_UA) != 0) { 1326 /* 1327 * XXX KDM this is a *horrible* 1328 * hack. 1329 */ 1330 error = scsi_interpret_sense(ccb, 1331 sense_flags, 1332 &relsim_flags, 1333 &openings, 1334 &timeout, 1335 err_action); 1336 } 1337 1338 /* 1339 * Theoretically, this code should send a 1340 * test unit ready to the given device, and 1341 * if it returns and error, send a start 1342 * unit command. Since we don't yet have 1343 * the capability to do two-command error 1344 * recovery, just send a start unit. 1345 * XXX KDM fix this! 1346 */ 1347 else if (((err_action & SS_MASK) == SS_TURSTART) 1348 && save_ccb != NULL 1349 && ccb->ccb_h.retry_count > 0) { 1350 int le; 1351 1352 /* 1353 * Only one error recovery action 1354 * at a time. See above. 1355 */ 1356 if (periph->flags & 1357 CAM_PERIPH_RECOVERY_INPROG) { 1358 error = ERESTART; 1359 break; 1360 } 1361 1362 periph->flags |= 1363 CAM_PERIPH_RECOVERY_INPROG; 1364 1365 /* decrement the number of retries */ 1366 retry = 1; 1367 ccb->ccb_h.retry_count--; 1368 1369 /* 1370 * Check for removable media and 1371 * set load/eject flag 1372 * appropriately. 1373 */ 1374 if (SID_IS_REMOVABLE(&cgd.inq_data)) 1375 le = TRUE; 1376 else 1377 le = FALSE; 1378 1379 /* 1380 * Attempt to start the drive up. 1381 * 1382 * Save the current ccb so it can 1383 * be restored and retried once the 1384 * drive is started up. 1385 */ 1386 bcopy(ccb, save_ccb, sizeof(*save_ccb)); 1387 1388 scsi_start_stop(&ccb->csio, 1389 /*retries*/1, 1390 camperiphdone, 1391 MSG_SIMPLE_Q_TAG, 1392 /*start*/TRUE, 1393 /*load/eject*/le, 1394 /*immediate*/FALSE, 1395 SSD_FULL_SIZE, 1396 /*timeout*/50000); 1397 1398 /* release the queue after .5 sec. */ 1399 relsim_flags = 1400 RELSIM_RELEASE_AFTER_TIMEOUT; 1401 timeout = 500; 1402 /* 1403 * Drop the priority to 0 so that 1404 * we are the first to execute. Also 1405 * freeze the queue after this command 1406 * is sent so that we can restore the 1407 * old csio and have it queued in the 1408 * proper order before we let normal 1409 * transactions go to the drive. 1410 */ 1411 ccb->ccb_h.pinfo.priority = 0; 1412 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1413 1414 /* 1415 * Save a pointer to the original 1416 * CCB in the new CCB. 1417 */ 1418 ccb->ccb_h.saved_ccb_ptr = save_ccb; 1419 1420 error = ERESTART; 1421 } else { 1422 error = scsi_interpret_sense(ccb, 1423 sense_flags, 1424 &relsim_flags, 1425 &openings, 1426 &timeout, 1427 err_action); 1428 } 1429 } else if (ccb->csio.scsi_status == 1430 SCSI_STATUS_CHECK_COND 1431 && status != CAM_AUTOSENSE_FAIL) { 1432 /* no point in decrementing the retry count */ 1433 panic("cam_periph_error: scsi status of " 1434 "CHECK COND returned but no sense " 1435 "information is availible. " 1436 "Controller should have returned " 1437 "CAM_AUTOSENSE_FAILED"); 1438 /* NOTREACHED */ 1439 error = EIO; 1440 } else if (ccb->ccb_h.retry_count == 0) { 1441 /* 1442 * XXX KDM shouldn't there be a better 1443 * argument to return?? 1444 */ 1445 error = EIO; 1446 } else { 1447 /* decrement the number of retries */ 1448 retry = ccb->ccb_h.retry_count > 0; 1449 if (retry) 1450 ccb->ccb_h.retry_count--; 1451 /* 1452 * If it was aborted with no 1453 * clue as to the reason, just 1454 * retry it again. 1455 */ 1456 error = ERESTART; 1457 } 1458 break; 1459 case SCSI_STATUS_QUEUE_FULL: 1460 { 1461 /* no decrement */ 1462 struct ccb_getdevstats cgds; 1463 1464 /* 1465 * First off, find out what the current 1466 * transaction counts are. 1467 */ 1468 xpt_setup_ccb(&cgds.ccb_h, 1469 ccb->ccb_h.path, 1470 /*priority*/1); 1471 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1472 xpt_action((union ccb *)&cgds); 1473 1474 /* 1475 * If we were the only transaction active, treat 1476 * the QUEUE FULL as if it were a BUSY condition. 1477 */ 1478 if (cgds.dev_active != 0) { 1479 int total_openings; 1480 1481 /* 1482 * Reduce the number of openings to 1483 * be 1 less than the amount it took 1484 * to get a queue full bounded by the 1485 * minimum allowed tag count for this 1486 * device. 1487 */ 1488 total_openings = 1489 cgds.dev_active+cgds.dev_openings; 1490 openings = cgds.dev_active; 1491 if (openings < cgds.mintags) 1492 openings = cgds.mintags; 1493 if (openings < total_openings) 1494 relsim_flags = RELSIM_ADJUST_OPENINGS; 1495 else { 1496 /* 1497 * Some devices report queue full for 1498 * temporary resource shortages. For 1499 * this reason, we allow a minimum 1500 * tag count to be entered via a 1501 * quirk entry to prevent the queue 1502 * count on these devices from falling 1503 * to a pessimisticly low value. We 1504 * still wait for the next successful 1505 * completion, however, before queueing 1506 * more transactions to the device. 1507 */ 1508 relsim_flags = 1509 RELSIM_RELEASE_AFTER_CMDCMPLT; 1510 } 1511 timeout = 0; 1512 error = ERESTART; 1513 break; 1514 } 1515 /* FALLTHROUGH */ 1516 } 1517 case SCSI_STATUS_BUSY: 1518 /* 1519 * Restart the queue after either another 1520 * command completes or a 1 second timeout. 1521 * If we have any retries left, that is. 1522 */ 1523 retry = ccb->ccb_h.retry_count > 0; 1524 if (retry) { 1525 ccb->ccb_h.retry_count--; 1526 error = ERESTART; 1527 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT 1528 | RELSIM_RELEASE_AFTER_CMDCMPLT; 1529 timeout = 1000; 1530 } else { 1531 error = EIO; 1532 } 1533 break; 1534 case SCSI_STATUS_RESERV_CONFLICT: 1535 error = EIO; 1536 break; 1537 default: 1538 error = EIO; 1539 break; 1540 } 1541 break; 1542 case CAM_REQ_CMP_ERR: 1543 case CAM_CMD_TIMEOUT: 1544 case CAM_UNEXP_BUSFREE: 1545 case CAM_UNCOR_PARITY: 1546 case CAM_DATA_RUN_ERR: 1547 /* decrement the number of retries */ 1548 retry = ccb->ccb_h.retry_count > 0; 1549 if (retry) { 1550 ccb->ccb_h.retry_count--; 1551 error = ERESTART; 1552 } else { 1553 error = EIO; 1554 } 1555 break; 1556 case CAM_UA_ABORT: 1557 case CAM_UA_TERMIO: 1558 case CAM_MSG_REJECT_REC: 1559 /* XXX Don't know that these are correct */ 1560 error = EIO; 1561 break; 1562 case CAM_SEL_TIMEOUT: 1563 { 1564 /* 1565 * XXX 1566 * A single selection timeout should not be enough 1567 * to invalidate a device. We should retry for multiple 1568 * seconds assuming this isn't a probe. We'll probably 1569 * need a special flag for that. 1570 */ 1571 #if 0 1572 struct cam_path *newpath; 1573 1574 /* Should we do more if we can't create the path?? */ 1575 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path), 1576 xpt_path_path_id(ccb->ccb_h.path), 1577 xpt_path_target_id(ccb->ccb_h.path), 1578 CAM_LUN_WILDCARD) != CAM_REQ_CMP) 1579 break; 1580 /* 1581 * Let peripheral drivers know that this device has gone 1582 * away. 1583 */ 1584 xpt_async(AC_LOST_DEVICE, newpath, NULL); 1585 xpt_free_path(newpath); 1586 #endif 1587 if ((sense_flags & SF_RETRY_SELTO) != 0) { 1588 retry = ccb->ccb_h.retry_count > 0; 1589 if (retry) { 1590 ccb->ccb_h.retry_count--; 1591 error = ERESTART; 1592 /* 1593 * Wait half a second to give the device 1594 * time to recover before we try again. 1595 */ 1596 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1597 timeout = 500; 1598 } else { 1599 error = ENXIO; 1600 } 1601 } else { 1602 error = ENXIO; 1603 } 1604 break; 1605 } 1606 case CAM_REQ_INVALID: 1607 case CAM_PATH_INVALID: 1608 case CAM_DEV_NOT_THERE: 1609 case CAM_NO_HBA: 1610 case CAM_PROVIDE_FAIL: 1611 case CAM_REQ_TOO_BIG: 1612 error = EINVAL; 1613 break; 1614 case CAM_SCSI_BUS_RESET: 1615 case CAM_BDR_SENT: 1616 case CAM_REQUEUE_REQ: 1617 /* Unconditional requeue, dammit */ 1618 error = ERESTART; 1619 break; 1620 case CAM_RESRC_UNAVAIL: 1621 case CAM_BUSY: 1622 /* timeout??? */ 1623 default: 1624 /* decrement the number of retries */ 1625 retry = ccb->ccb_h.retry_count > 0; 1626 if (retry) { 1627 ccb->ccb_h.retry_count--; 1628 error = ERESTART; 1629 } else { 1630 /* Check the sense codes */ 1631 error = EIO; 1632 } 1633 break; 1634 } 1635 1636 /* Attempt a retry */ 1637 if (error == ERESTART || error == 0) { 1638 if (frozen != 0) 1639 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1640 1641 if (error == ERESTART) 1642 xpt_action(ccb); 1643 1644 if (frozen != 0) { 1645 cam_release_devq(ccb->ccb_h.path, 1646 relsim_flags, 1647 openings, 1648 timeout, 1649 /*getcount_only*/0); 1650 } 1651 } 1652 1653 1654 return (error); 1655 } 1656