/*
 * Implementation of the Target Mode 'Black Hole device' for CAM.
 *
 * Copyright (c) 1999 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/cam/scsi/scsi_targ_bh.c,v 1.4.2.6 2003/11/14 11:31:25 simokawa Exp $
 * $DragonFly: src/sys/bus/cam/scsi/scsi_targ_bh.c,v 1.19 2008/05/18 20:30:20 pavalos Exp $
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/thread2.h>
#include <sys/uio.h>

#include "../cam.h"
#include "../cam_ccb.h"
#include "../cam_extend.h"
#include "../cam_periph.h"
#include "../cam_queue.h"
#include "../cam_xpt_periph.h"
#include "../cam_debug.h"
#include "../cam_sim.h"

#include "scsi_all.h"
#include "scsi_message.h"

MALLOC_DEFINE(M_SCSIBH, "SCSI bh", "SCSI blackhole buffers");

typedef enum {
	TARGBH_STATE_NORMAL,
	TARGBH_STATE_EXCEPTION,
	TARGBH_STATE_TEARDOWN
} targbh_state;

typedef enum {
	TARGBH_FLAG_NONE	= 0x00,
	TARGBH_FLAG_LUN_ENABLED	= 0x01
} targbh_flags;

typedef enum {
	TARGBH_CCB_WORKQ,
	TARGBH_CCB_WAITING
} targbh_ccb_types;

#define MAX_ACCEPT	8
#define MAX_IMMEDIATE	16
#define MAX_BUF_SIZE	256	/* Max inquiry/sense/mode page transfer */

/* Offsets into our private CCB area for storing accept information */
#define ccb_type	ppriv_field0
#define ccb_descr	ppriv_ptr1

/* We stick a pointer to the originating accept TIO in each continue I/O CCB */
#define ccb_atio	ppriv_ptr1

TAILQ_HEAD(ccb_queue, ccb_hdr);

struct targbh_softc {
	struct ccb_queue	pending_queue;
	struct ccb_queue	work_queue;
	struct ccb_queue	unknown_atio_queue;
	struct devstat		device_stats;
	targbh_state		state;
	targbh_flags		flags;
	u_int			init_level;
	u_int			inq_data_len;
	struct ccb_accept_tio	*accept_tio_list;
	struct ccb_hdr_slist	immed_notify_slist;
};

struct targbh_cmd_desc {
	struct ccb_accept_tio	*atio_link;
	u_int		data_resid;	/* How much left to transfer */
	u_int		data_increment;	/* Amount to send before next disconnect */
	void		*data;		/* The data.  Can be from backing_store or not */
	void		*backing_store;	/* Backing store allocated for this descriptor */
	u_int		max_size;	/* Size of backing_store */
	u_int32_t	timeout;
	u_int8_t	status;		/* Status to return to initiator */
};

static struct scsi_inquiry_data no_lun_inq_data =
{
	T_NODEVICE | (SID_QUAL_BAD_LU << 5), 0,
	/* version */2, /* format version */2
};

static struct scsi_sense_data no_lun_sense_data =
{
	SSD_CURRENT_ERROR|SSD_ERRCODE_VALID,
	0,
	SSD_KEY_NOT_READY,
	{ 0, 0, 0, 0 },
	/*extra_len*/offsetof(struct scsi_sense_data, fru)
		   - offsetof(struct scsi_sense_data, extra_len),
	{ 0, 0, 0, 0 },
	/* Logical Unit Not Supported */
	/*ASC*/0x25, /*ASCQ*/0
};

static const int request_sense_size = offsetof(struct scsi_sense_data, fru);

static periph_init_t	targbhinit;
static void		targbhasync(void *callback_arg, u_int32_t code,
				    struct cam_path *path, void *arg);
static cam_status	targbhenlun(struct cam_periph *periph);
static cam_status	targbhdislun(struct cam_periph *periph);
static periph_ctor_t	targbhctor;
static periph_dtor_t	targbhdtor;
static periph_start_t	targbhstart;
static void		targbhdone(struct cam_periph *periph,
				   union ccb *done_ccb);
#ifdef NOTYET
static int		targbherror(union ccb *ccb, u_int32_t cam_flags,
				    u_int32_t sense_flags);
#endif
static struct targbh_cmd_desc*	targbhallocdescr(void);
static void		targbhfreedescr(struct targbh_cmd_desc *buf);

static struct periph_driver targbhdriver =
{
	targbhinit, "targbh",
	TAILQ_HEAD_INITIALIZER(targbhdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(targbh, targbhdriver);

static void
targbhinit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new path registered".
	 */
	status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED,
				    targbhasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		kprintf("targbh: Failed to attach master async callback "
			"due to status 0x%x!\n", status);
	}
}

static void
targbhasync(void *callback_arg, u_int32_t code,
	    struct cam_path *path, void *arg)
{
	struct cam_path *new_path;
	struct ccb_pathinq *cpi;
	path_id_t bus_path_id;
	cam_status status;

	cpi = (struct ccb_pathinq *)arg;
	if (code == AC_PATH_REGISTERED)
		bus_path_id = cpi->ccb_h.path_id;
	else
		bus_path_id = xpt_path_path_id(path);
	/*
	 * Allocate a peripheral instance for
	 * this target instance.
	 */
	status = xpt_create_path(&new_path, NULL,
				 bus_path_id,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		kprintf("targbhasync: Unable to create path "
			"due to status 0x%x\n", status);
		return;
	}

	switch (code) {
	case AC_PATH_REGISTERED:
	{
		/* Only attach to controllers that support target mode */
		if ((cpi->target_sprt & PIT_PROCESSOR) == 0)
			break;

		status = cam_periph_alloc(targbhctor, NULL, targbhdtor,
					  targbhstart,
					  "targbh", CAM_PERIPH_BIO,
					  new_path, targbhasync,
					  AC_PATH_REGISTERED,
					  cpi);
		break;
	}
	case AC_PATH_DEREGISTERED:
	{
		struct cam_periph *periph;

		if ((periph = cam_periph_find(new_path, "targbh")) != NULL)
			cam_periph_invalidate(periph);
		break;
	}
	default:
		break;
	}
	xpt_free_path(new_path);
}

/* Attempt to enable our lun */
static cam_status
targbhenlun(struct cam_periph *periph)
{
	union ccb immed_ccb;
	struct targbh_softc *softc;
	cam_status status;
	int i;

	softc = (struct targbh_softc *)periph->softc;

	if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) != 0)
		return (CAM_REQ_CMP);

	xpt_setup_ccb(&immed_ccb.ccb_h, periph->path, /*priority*/1);
	immed_ccb.ccb_h.func_code = XPT_EN_LUN;

	/* Don't need support for any vendor specific commands */
	immed_ccb.cel.grp6_len = 0;
	immed_ccb.cel.grp7_len = 0;
	immed_ccb.cel.enable = 1;
	xpt_action(&immed_ccb);
	status = immed_ccb.ccb_h.status;
	if (status != CAM_REQ_CMP) {
		xpt_print(periph->path,
		    "targbhenlun - Enable Lun Rejected with status 0x%x\n",
		    status);
		return (status);
	}

	softc->flags |= TARGBH_FLAG_LUN_ENABLED;

	/*
	 * Build up a buffer of accept target I/O
	 * operations for incoming selections.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_accept_tio *atio;

		atio = kmalloc(sizeof(*atio), M_SCSIBH, M_INTWAIT);

		atio->ccb_h.ccb_descr = targbhallocdescr();

		xpt_setup_ccb(&atio->ccb_h, periph->path, /*priority*/1);
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.cbfcnp = targbhdone;
		xpt_action((union ccb *)atio);
		status = atio->ccb_h.status;
		if (status != CAM_REQ_INPROG) {
			targbhfreedescr(atio->ccb_h.ccb_descr);
			kfree(atio, M_SCSIBH);
			break;
		}
		((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link =
		    softc->accept_tio_list;
		softc->accept_tio_list = atio;
	}

	if (i == 0) {
		xpt_print(periph->path,
		    "targbhenlun - Could not allocate accept tio CCBs: status "
		    "= 0x%x\n", status);
		targbhdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Build up a buffer of immediate notify CCBs
	 * so the SIM can tell us of asynchronous target mode events.
	 */
	for (i = 0; i < MAX_ACCEPT; i++) {
		struct ccb_immed_notify *inot;

		inot = kmalloc(sizeof(*inot), M_SCSIBH, M_INTWAIT);

		xpt_setup_ccb(&inot->ccb_h, periph->path, /*priority*/1);
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		inot->ccb_h.cbfcnp = targbhdone;
		xpt_action((union ccb *)inot);
		status = inot->ccb_h.status;
		if (status != CAM_REQ_INPROG) {
			kfree(inot, M_SCSIBH);
			break;
		}
		SLIST_INSERT_HEAD(&softc->immed_notify_slist, &inot->ccb_h,
				  periph_links.sle);
	}

	if (i == 0) {
		xpt_print(periph->path,
		    "targbhenlun - Could not allocate immediate notify "
		    "CCBs: status = 0x%x\n", status);
		targbhdislun(periph);
		return (CAM_REQ_CMP_ERR);
	}

	return (CAM_REQ_CMP);
}

static cam_status
targbhdislun(struct cam_periph *periph)
{
	union ccb ccb;
	struct targbh_softc *softc;
	struct ccb_accept_tio *atio;
	struct ccb_hdr *ccb_h;

	softc = (struct targbh_softc *)periph->softc;
	if ((softc->flags & TARGBH_FLAG_LUN_ENABLED) == 0)
		return CAM_REQ_CMP;

	/* XXX Block for Continue I/O completion */

	/* Kill off all ACCEPT and IMMEDIATE CCBs */
	while ((atio = softc->accept_tio_list) != NULL) {
		softc->accept_tio_list =
		    ((struct targbh_cmd_desc*)atio->ccb_h.ccb_descr)->atio_link;
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)atio;
		xpt_action(&ccb);
	}

	while ((ccb_h = SLIST_FIRST(&softc->immed_notify_slist)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->immed_notify_slist, periph_links.sle);
		xpt_setup_ccb(&ccb.cab.ccb_h, periph->path, /*priority*/1);
		ccb.cab.ccb_h.func_code = XPT_ABORT;
		ccb.cab.abort_ccb = (union ccb *)ccb_h;
		xpt_action(&ccb);
	}

	/*
	 * Disable this lun.
	 */
	xpt_setup_ccb(&ccb.cel.ccb_h, periph->path, /*priority*/1);
	ccb.cel.ccb_h.func_code = XPT_EN_LUN;
	ccb.cel.enable = 0;
	xpt_action(&ccb);

	if (ccb.cel.ccb_h.status != CAM_REQ_CMP)
		kprintf("targbhdislun - Disabling lun on controller failed "
			"with status 0x%x\n", ccb.cel.ccb_h.status);
	else
		softc->flags &= ~TARGBH_FLAG_LUN_ENABLED;
	return (ccb.cel.ccb_h.status);
}

static cam_status
targbhctor(struct cam_periph *periph, void *arg)
{
	struct targbh_softc *softc;

	/* Allocate our per-instance private storage */
	softc = kmalloc(sizeof(*softc), M_SCSIBH, M_INTWAIT | M_ZERO);
	TAILQ_INIT(&softc->pending_queue);
	TAILQ_INIT(&softc->work_queue);
	softc->accept_tio_list = NULL;
	SLIST_INIT(&softc->immed_notify_slist);
	softc->state = TARGBH_STATE_NORMAL;
	periph->softc = softc;
	softc->init_level++;

	return (targbhenlun(periph));
}

static void
targbhdtor(struct cam_periph *periph)
{
	struct targbh_softc *softc;

	softc = (struct targbh_softc *)periph->softc;

	softc->state = TARGBH_STATE_TEARDOWN;

	targbhdislun(periph);

	switch (softc->init_level) {
	case 0:
		panic("targbhdtor - impossible init level");
	case 1:
		/* FALLTHROUGH */
	default:
		/* XXX Wait for callback of targbhdislun() */
		sim_lock_sleep(softc, 0, "targbh", hz/2, periph->sim->lock);
		kfree(softc, M_SCSIBH);
		break;
	}
}

static void
targbhstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct targbh_softc *softc;
	struct ccb_hdr *ccbh;
	struct ccb_accept_tio *atio;
	struct targbh_cmd_desc *desc;
	struct ccb_scsiio *csio;
	ccb_flags flags;

	softc = (struct targbh_softc *)periph->softc;

	ccbh = TAILQ_FIRST(&softc->work_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		start_ccb->ccb_h.ccb_type = TARGBH_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		wakeup(&periph->ccb_list);
	} else if (ccbh == NULL) {
		xpt_release_ccb(start_ccb);
	} else {
		TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
		TAILQ_INSERT_HEAD(&softc->pending_queue, ccbh,
				  periph_links.tqe);
		atio = (struct ccb_accept_tio*)ccbh;
		desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

		/* Is this a tagged request? */
		flags = atio->ccb_h.flags &
		    (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK);

		csio = &start_ccb->csio;
		/*
		 * If we are done with the transaction, tell the
		 * controller to send status and perform a CMD_CMPLT.
		 * If we have associated sense data, see if we can
		 * send that too.
		 */
		if (desc->data_resid == desc->data_increment) {
			flags |= CAM_SEND_STATUS;
			if (atio->sense_len) {
				csio->sense_len = atio->sense_len;
				csio->sense_data = atio->sense_data;
				flags |= CAM_SEND_SENSE;
			}
		}

		cam_fill_ctio(csio,
			      /*retries*/2,
			      targbhdone,
			      flags,
			      (flags & CAM_TAG_ACTION_VALID)?
				MSG_SIMPLE_Q_TAG : 0,
			      atio->tag_id,
			      atio->init_id,
			      desc->status,
			      /*data_ptr*/desc->data_increment == 0
					  ? NULL : desc->data,
			      /*dxfer_len*/desc->data_increment,
			      /*timeout*/desc->timeout);

		/* Override our wildcard attachment */
		start_ccb->ccb_h.target_id = atio->ccb_h.target_id;
		start_ccb->ccb_h.target_lun = atio->ccb_h.target_lun;

		start_ccb->ccb_h.ccb_type = TARGBH_CCB_WORKQ;
		start_ccb->ccb_h.ccb_atio = atio;
		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Sending a CTIO\n"));
		xpt_action(start_ccb);
		/*
		 * If the queue was frozen waiting for the response
		 * to this ATIO (for instance disconnection was disallowed),
		 * then release it now that our response has been queued.
		 */
		if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			atio->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
		ccbh = TAILQ_FIRST(&softc->work_queue);
	}
	if (ccbh != NULL)
		xpt_schedule(periph, /*priority*/1);
}

static void
targbhdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct targbh_softc *softc;

	softc = (struct targbh_softc *)periph->softc;

	if (done_ccb->ccb_h.ccb_type == TARGBH_CCB_WAITING) {
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}

	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targbh_cmd_desc *descr;
		u_int8_t *cdb;
		int priority;

		atio = &done_ccb->atio;
		descr = (struct targbh_cmd_desc*)atio->ccb_h.ccb_descr;
		cdb = atio->cdb_io.cdb_bytes;
		if (softc->state == TARGBH_STATE_TEARDOWN
		 || atio->ccb_h.status == CAM_REQ_ABORTED) {
			targbhfreedescr(descr);
			xpt_free_ccb(done_ccb);
			return;
		}

		/*
		 * Determine the type of incoming command and
		 * setup our buffer for a response.
		 */
		switch (cdb[0]) {
		case INQUIRY:
		{
			struct scsi_inquiry *inq;

			inq = (struct scsi_inquiry *)cdb;
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Saw an inquiry!\n"));
			/*
			 * Validate the command.  We don't
			 * support any VPD pages, so complain
			 * if EVPD is set.
			 */
			if ((inq->byte2 & SI_EVPD) != 0
			 || inq->page_code != 0) {
				atio->ccb_h.flags &= ~CAM_DIR_MASK;
				atio->ccb_h.flags |= CAM_DIR_NONE;
				/*
				 * This needs to have other than a
				 * no_lun_sense_data response.
				 */
				atio->sense_data = no_lun_sense_data;
				atio->sense_len = sizeof(no_lun_sense_data);
				descr->data_resid = 0;
				descr->data_increment = 0;
				descr->status = SCSI_STATUS_CHECK_COND;
				break;
			}
			/*
			 * Direction is always relative
			 * to the initiator.
			 */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_IN;
			descr->data = &no_lun_inq_data;
			descr->data_resid = MIN(sizeof(no_lun_inq_data),
						SCSI_CDB6_LEN(inq->length));
			descr->data_increment = descr->data_resid;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_OK;
			break;
		}
		case REQUEST_SENSE:
		{
			struct scsi_request_sense *rsense;

			rsense = (struct scsi_request_sense *)cdb;
			/* Refer to static sense data */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_IN;
			descr->data = &no_lun_sense_data;
			descr->data_resid = request_sense_size;
			descr->data_resid = MIN(descr->data_resid,
						SCSI_CDB6_LEN(rsense->length));
			descr->data_increment = descr->data_resid;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_OK;
			break;
		}
		default:
			/* Constant CA, tell initiator */
			/* Direction is always relative to the initiator */
			atio->ccb_h.flags &= ~CAM_DIR_MASK;
			atio->ccb_h.flags |= CAM_DIR_NONE;
			atio->sense_data = no_lun_sense_data;
			atio->sense_len = sizeof (no_lun_sense_data);
			descr->data_resid = 0;
			descr->data_increment = 0;
			descr->timeout = 5 * 1000;
			descr->status = SCSI_STATUS_CHECK_COND;
			break;
		}

		/* Queue us up to receive a Continue Target I/O ccb. */
		if ((atio->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) {
			TAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			priority = 0;
		} else {
			TAILQ_INSERT_TAIL(&softc->work_queue, &atio->ccb_h,
					  periph_links.tqe);
			priority = 1;
		}
		xpt_schedule(periph, priority);
		break;
	}
	case XPT_CONT_TARGET_IO:
	{
		struct ccb_accept_tio *atio;
		struct targbh_cmd_desc *desc;

		CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
			  ("Received completed CTIO\n"));
		atio = (struct ccb_accept_tio*)done_ccb->ccb_h.ccb_atio;
		desc = (struct targbh_cmd_desc *)atio->ccb_h.ccb_descr;

		TAILQ_REMOVE(&softc->pending_queue, &atio->ccb_h,
			     periph_links.tqe);

		/*
		 * We could check for CAM_SENT_SENSE being set here,
		 * but since we're not maintaining any CA/UA state,
		 * there's no point.
		 */
		atio->sense_len = 0;
		done_ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		done_ccb->ccb_h.status &= ~CAM_SENT_SENSE;

		/*
		 * Any errors will not change the data we return,
		 * so make sure the queue is not left frozen.
		 * XXX - At some point there may be errors that
		 *       leave us in a connected state with the
		 *       initiator...
		 */
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			kprintf("Releasing Queue\n");
			cam_release_devq(done_ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
		desc->data_resid -= desc->data_increment;
		xpt_release_ccb(done_ccb);
		if (softc->state != TARGBH_STATE_TEARDOWN) {
			/*
			 * Send the original accept TIO back to the
			 * controller to handle more work.
			 */
			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
				  ("Returning ATIO to target\n"));
			/* Restore wildcards */
			atio->ccb_h.target_id = CAM_TARGET_WILDCARD;
			atio->ccb_h.target_lun = CAM_LUN_WILDCARD;
			xpt_action((union ccb *)atio);
			break;
		} else {
			targbhfreedescr(desc);
			kfree(atio, M_SCSIBH);
		}
		break;
	}
	case XPT_IMMED_NOTIFY:
	{
		int frozen;

		frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
		if (softc->state == TARGBH_STATE_TEARDOWN
		 || done_ccb->ccb_h.status == CAM_REQ_ABORTED) {
			kprintf("Freed an immediate notify\n");
			xpt_free_ccb(done_ccb);
		} else {
			/* Requeue for another immediate event */
			xpt_action(done_ccb);
		}
		if (frozen != 0)
			cam_release_devq(periph->path,
					 /*relsim_flags*/0,
					 /*opening reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		break;
	}
	default:
		panic("targbhdone: Unexpected ccb opcode");
		break;
	}
}

#ifdef NOTYET
static int
targbherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	return 0;
}
#endif

static struct targbh_cmd_desc *
targbhallocdescr(void)
{
	struct targbh_cmd_desc *descr;

	/* Allocate the targbh_descr structure */
	descr = kmalloc(sizeof(*descr), M_SCSIBH, M_INTWAIT | M_ZERO);

	/* Allocate buffer backing store */
	descr->backing_store = kmalloc(MAX_BUF_SIZE, M_SCSIBH, M_INTWAIT);
	descr->max_size = MAX_BUF_SIZE;
	return (descr);
}

static void
targbhfreedescr(struct targbh_cmd_desc *descr)
{
	kfree(descr->backing_store, M_SCSIBH);
	kfree(descr, M_SCSIBH);
}