/*-
 * Copyright (c) 2012, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO SCSI devices.
*/ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/kthread.h> 36 #include <sys/malloc.h> 37 #include <sys/module.h> 38 #include <sys/sglist.h> 39 #include <sys/sysctl.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/callout.h> 43 #include <sys/taskqueue.h> 44 #include <sys/queue.h> 45 #include <sys/sbuf.h> 46 47 #include <machine/stdarg.h> 48 49 #include <machine/bus.h> 50 #include <machine/resource.h> 51 #include <sys/bus.h> 52 #include <sys/rman.h> 53 54 #include <cam/cam.h> 55 #include <cam/cam_ccb.h> 56 #include <cam/cam_sim.h> 57 #include <cam/cam_periph.h> 58 #include <cam/cam_xpt_sim.h> 59 #include <cam/cam_debug.h> 60 #include <cam/scsi/scsi_all.h> 61 #include <cam/scsi/scsi_message.h> 62 63 #include <dev/virtio/virtio.h> 64 #include <dev/virtio/virtqueue.h> 65 #include <dev/virtio/scsi/virtio_scsi.h> 66 #include <dev/virtio/scsi/virtio_scsivar.h> 67 68 #include "virtio_if.h" 69 70 static int vtscsi_modevent(module_t, int, void *); 71 72 static int vtscsi_probe(device_t); 73 static int vtscsi_attach(device_t); 74 static int vtscsi_detach(device_t); 75 static int vtscsi_suspend(device_t); 76 static int vtscsi_resume(device_t); 77 78 static void vtscsi_negotiate_features(struct vtscsi_softc *); 79 static int vtscsi_maximum_segments(struct vtscsi_softc *, int); 80 static int vtscsi_alloc_virtqueues(struct vtscsi_softc *); 81 static void vtscsi_write_device_config(struct vtscsi_softc *); 82 static int vtscsi_reinit(struct vtscsi_softc *); 83 84 static int vtscsi_alloc_cam(struct vtscsi_softc *); 85 static int vtscsi_register_cam(struct vtscsi_softc *); 86 static void vtscsi_free_cam(struct vtscsi_softc *); 87 static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *); 88 static int vtscsi_register_async(struct vtscsi_softc *); 89 static void vtscsi_deregister_async(struct vtscsi_softc *); 90 static void 
vtscsi_cam_action(struct cam_sim *, union ccb *); 91 static void vtscsi_cam_poll(struct cam_sim *); 92 93 static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *, 94 union ccb *); 95 static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *, 96 union ccb *); 97 static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *); 98 static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *); 99 static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *); 100 static void vtscsi_cam_path_inquiry(struct vtscsi_softc *, 101 struct cam_sim *, union ccb *); 102 103 static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *, 104 struct sglist *, struct ccb_scsiio *); 105 static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *, 106 struct vtscsi_request *, int *, int *); 107 static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *, 108 struct vtscsi_request *); 109 static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *); 110 static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *, 111 struct vtscsi_request *); 112 static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *, 113 struct vtscsi_request *); 114 static void vtscsi_timedout_scsi_cmd(void *); 115 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *); 116 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *, 117 struct ccb_scsiio *, struct virtio_scsi_cmd_resp *); 118 static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *, 119 struct vtscsi_request *); 120 121 static void vtscsi_poll_ctrl_req(struct vtscsi_softc *, 122 struct vtscsi_request *); 123 static int vtscsi_execute_ctrl_req(struct vtscsi_softc *, 124 struct vtscsi_request *, struct sglist *, int, int, int); 125 static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c, 126 struct vtscsi_request *); 127 static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *, 128 struct vtscsi_request *); 129 static int 
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *, 130 struct vtscsi_request *); 131 132 static void vtscsi_get_request_lun(uint8_t lun[], target_id_t *, lun_id_t *); 133 static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []); 134 static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *, 135 struct virtio_scsi_cmd_req *); 136 static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t, 137 uintptr_t, struct virtio_scsi_ctrl_tmf_req *); 138 139 static void vtscsi_freeze_simq(struct vtscsi_softc *, int); 140 static int vtscsi_thaw_simq(struct vtscsi_softc *, int); 141 142 static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t, 143 lun_id_t); 144 static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t, 145 lun_id_t); 146 static void vtscsi_execute_rescan_bus(struct vtscsi_softc *); 147 148 static void vtscsi_handle_event(struct vtscsi_softc *, 149 struct virtio_scsi_event *); 150 static int vtscsi_enqueue_event_buf(struct vtscsi_softc *, 151 struct virtio_scsi_event *); 152 static int vtscsi_init_event_vq(struct vtscsi_softc *); 153 static void vtscsi_reinit_event_vq(struct vtscsi_softc *); 154 static void vtscsi_drain_event_vq(struct vtscsi_softc *); 155 156 static void vtscsi_complete_vqs_locked(struct vtscsi_softc *); 157 static void vtscsi_complete_vqs(struct vtscsi_softc *); 158 static void vtscsi_drain_vqs(struct vtscsi_softc *); 159 static void vtscsi_cancel_request(struct vtscsi_softc *, 160 struct vtscsi_request *); 161 static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *); 162 static void vtscsi_stop(struct vtscsi_softc *); 163 static int vtscsi_reset_bus(struct vtscsi_softc *); 164 165 static void vtscsi_init_request(struct vtscsi_softc *, 166 struct vtscsi_request *); 167 static int vtscsi_alloc_requests(struct vtscsi_softc *); 168 static void vtscsi_free_requests(struct vtscsi_softc *); 169 static void vtscsi_enqueue_request(struct vtscsi_softc *, 170 struct vtscsi_request *); 171 static 
struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *); 172 173 static void vtscsi_complete_request(struct vtscsi_request *); 174 static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *); 175 static void vtscsi_control_vq_task(void *, int); 176 static void vtscsi_event_vq_task(void *, int); 177 static void vtscsi_request_vq_task(void *, int); 178 179 static int vtscsi_control_vq_intr(void *); 180 static int vtscsi_event_vq_intr(void *); 181 static int vtscsi_request_vq_intr(void *); 182 static void vtscsi_disable_vqs_intr(struct vtscsi_softc *); 183 static void vtscsi_enable_vqs_intr(struct vtscsi_softc *); 184 185 static void vtscsi_get_tunables(struct vtscsi_softc *); 186 static void vtscsi_add_sysctl(struct vtscsi_softc *); 187 188 static void vtscsi_printf_req(struct vtscsi_request *, const char *, 189 const char *, ...); 190 191 /* Global tunables. */ 192 /* 193 * The current QEMU VirtIO SCSI implementation does not cancel in-flight 194 * IO during virtio_stop(). So in-flight requests still complete after the 195 * device reset. We would have to wait for all the in-flight IO to complete, 196 * which defeats the typical purpose of a bus reset. We could simulate the 197 * bus reset with either I_T_NEXUS_RESET of all the targets, or with 198 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the 199 * control virtqueue). But this isn't very useful if things really go off 200 * the rails, so default to disabled for now. 201 */ 202 static int vtscsi_bus_reset_disable = 1; 203 TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable); 204 205 static struct virtio_feature_desc vtscsi_feature_desc[] = { 206 { VIRTIO_SCSI_F_INOUT, "InOut" }, 207 { VIRTIO_SCSI_F_HOTPLUG, "Hotplug" }, 208 209 { 0, NULL } 210 }; 211 212 static device_method_t vtscsi_methods[] = { 213 /* Device methods. 
*/ 214 DEVMETHOD(device_probe, vtscsi_probe), 215 DEVMETHOD(device_attach, vtscsi_attach), 216 DEVMETHOD(device_detach, vtscsi_detach), 217 DEVMETHOD(device_suspend, vtscsi_suspend), 218 DEVMETHOD(device_resume, vtscsi_resume), 219 220 DEVMETHOD_END 221 }; 222 223 static driver_t vtscsi_driver = { 224 "vtscsi", 225 vtscsi_methods, 226 sizeof(struct vtscsi_softc) 227 }; 228 static devclass_t vtscsi_devclass; 229 230 DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass, 231 vtscsi_modevent, 0); 232 MODULE_VERSION(virtio_scsi, 1); 233 MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1); 234 MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1); 235 236 static int 237 vtscsi_modevent(module_t mod, int type, void *unused) 238 { 239 int error; 240 241 switch (type) { 242 case MOD_LOAD: 243 case MOD_QUIESCE: 244 case MOD_UNLOAD: 245 case MOD_SHUTDOWN: 246 error = 0; 247 break; 248 default: 249 error = EOPNOTSUPP; 250 break; 251 } 252 253 return (error); 254 } 255 256 static int 257 vtscsi_probe(device_t dev) 258 { 259 260 if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI) 261 return (ENXIO); 262 263 device_set_desc(dev, "VirtIO SCSI Adapter"); 264 265 return (BUS_PROBE_DEFAULT); 266 } 267 268 static int 269 vtscsi_attach(device_t dev) 270 { 271 struct vtscsi_softc *sc; 272 struct virtio_scsi_config scsicfg; 273 int error; 274 275 sc = device_get_softc(dev); 276 sc->vtscsi_dev = dev; 277 278 VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev)); 279 TAILQ_INIT(&sc->vtscsi_req_free); 280 281 vtscsi_get_tunables(sc); 282 vtscsi_add_sysctl(sc); 283 284 virtio_set_feature_desc(dev, vtscsi_feature_desc); 285 vtscsi_negotiate_features(sc); 286 287 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) 288 sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT; 289 if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT)) 290 sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL; 291 if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG)) 292 sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG; 293 294 
virtio_read_device_config(dev, 0, &scsicfg, 295 sizeof(struct virtio_scsi_config)); 296 297 sc->vtscsi_max_channel = scsicfg.max_channel; 298 sc->vtscsi_max_target = scsicfg.max_target; 299 sc->vtscsi_max_lun = scsicfg.max_lun; 300 sc->vtscsi_event_buf_size = scsicfg.event_info_size; 301 302 vtscsi_write_device_config(sc); 303 304 sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max); 305 sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT); 306 if (sc->vtscsi_sglist == NULL) { 307 error = ENOMEM; 308 device_printf(dev, "cannot allocate sglist\n"); 309 goto fail; 310 } 311 312 error = vtscsi_alloc_virtqueues(sc); 313 if (error) { 314 device_printf(dev, "cannot allocate virtqueues\n"); 315 goto fail; 316 } 317 318 error = vtscsi_init_event_vq(sc); 319 if (error) { 320 device_printf(dev, "cannot populate the eventvq\n"); 321 goto fail; 322 } 323 324 error = vtscsi_alloc_requests(sc); 325 if (error) { 326 device_printf(dev, "cannot allocate requests\n"); 327 goto fail; 328 } 329 330 error = vtscsi_alloc_cam(sc); 331 if (error) { 332 device_printf(dev, "cannot allocate CAM structures\n"); 333 goto fail; 334 } 335 336 TASK_INIT(&sc->vtscsi_control_intr_task, 0, 337 vtscsi_control_vq_task, sc); 338 TASK_INIT(&sc->vtscsi_event_intr_task, 0, 339 vtscsi_event_vq_task, sc); 340 TASK_INIT(&sc->vtscsi_request_intr_task, 0, 341 vtscsi_request_vq_task, sc); 342 343 sc->vtscsi_tq = taskqueue_create_fast("vtscsi_taskq", M_NOWAIT, 344 taskqueue_thread_enqueue, &sc->vtscsi_tq); 345 if (sc->vtscsi_tq == NULL) { 346 error = ENOMEM; 347 device_printf(dev, "cannot allocate taskqueue\n"); 348 goto fail; 349 } 350 error = taskqueue_start_threads(&sc->vtscsi_tq, 1, PI_DISK, "%s taskq", 351 device_get_nameunit(dev)); 352 if (error) { 353 device_printf(dev, "cannot start taskqueue threads\n"); 354 goto fail; 355 } 356 357 error = virtio_setup_intr(dev, INTR_TYPE_CAM); 358 if (error) { 359 device_printf(dev, "cannot setup virtqueue interrupts\n"); 360 goto fail; 361 
} 362 363 vtscsi_enable_vqs_intr(sc); 364 365 /* 366 * Register with CAM after interrupts are enabled so we will get 367 * notified of the probe responses. 368 */ 369 error = vtscsi_register_cam(sc); 370 if (error) { 371 device_printf(dev, "cannot register with CAM\n"); 372 goto fail; 373 } 374 375 fail: 376 if (error) 377 vtscsi_detach(dev); 378 379 return (error); 380 } 381 382 static int 383 vtscsi_detach(device_t dev) 384 { 385 struct vtscsi_softc *sc; 386 387 sc = device_get_softc(dev); 388 389 VTSCSI_LOCK(sc); 390 sc->vtscsi_flags |= VTSCSI_FLAG_DETACH; 391 if (device_is_attached(dev)) 392 vtscsi_stop(sc); 393 VTSCSI_UNLOCK(sc); 394 395 if (sc->vtscsi_tq != NULL) { 396 taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_control_intr_task); 397 taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_event_intr_task); 398 taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_request_intr_task); 399 taskqueue_free(sc->vtscsi_tq); 400 sc->vtscsi_tq = NULL; 401 } 402 403 vtscsi_complete_vqs(sc); 404 vtscsi_drain_vqs(sc); 405 406 vtscsi_free_cam(sc); 407 vtscsi_free_requests(sc); 408 409 if (sc->vtscsi_sglist != NULL) { 410 sglist_free(sc->vtscsi_sglist); 411 sc->vtscsi_sglist = NULL; 412 } 413 414 VTSCSI_LOCK_DESTROY(sc); 415 416 return (0); 417 } 418 419 static int 420 vtscsi_suspend(device_t dev) 421 { 422 423 return (0); 424 } 425 426 static int 427 vtscsi_resume(device_t dev) 428 { 429 430 return (0); 431 } 432 433 static void 434 vtscsi_negotiate_features(struct vtscsi_softc *sc) 435 { 436 device_t dev; 437 uint64_t features; 438 439 dev = sc->vtscsi_dev; 440 features = virtio_negotiate_features(dev, VTSCSI_FEATURES); 441 sc->vtscsi_features = features; 442 } 443 444 static int 445 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max) 446 { 447 int nsegs; 448 449 nsegs = VTSCSI_MIN_SEGMENTS; 450 451 if (seg_max > 0) { 452 nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1); 453 if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) 454 nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT); 455 } else 456 nsegs 
+= 1; 457 458 return (nsegs); 459 } 460 461 static int 462 vtscsi_alloc_virtqueues(struct vtscsi_softc *sc) 463 { 464 device_t dev; 465 struct vq_alloc_info vq_info[3]; 466 int nvqs; 467 468 dev = sc->vtscsi_dev; 469 nvqs = 3; 470 471 VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc, 472 &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev)); 473 474 VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc, 475 &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev)); 476 477 VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs, 478 vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq, 479 "%s request", device_get_nameunit(dev)); 480 481 return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); 482 } 483 484 static void 485 vtscsi_write_device_config(struct vtscsi_softc *sc) 486 { 487 488 virtio_write_dev_config_4(sc->vtscsi_dev, 489 offsetof(struct virtio_scsi_config, sense_size), 490 VIRTIO_SCSI_SENSE_SIZE); 491 492 /* 493 * This is the size in the virtio_scsi_cmd_req structure. Note 494 * this value (32) is larger than the maximum CAM CDB size (16). 
495 */ 496 virtio_write_dev_config_4(sc->vtscsi_dev, 497 offsetof(struct virtio_scsi_config, cdb_size), 498 VIRTIO_SCSI_CDB_SIZE); 499 } 500 501 static int 502 vtscsi_reinit(struct vtscsi_softc *sc) 503 { 504 device_t dev; 505 int error; 506 507 dev = sc->vtscsi_dev; 508 509 error = virtio_reinit(dev, sc->vtscsi_features); 510 if (error == 0) { 511 vtscsi_write_device_config(sc); 512 vtscsi_reinit_event_vq(sc); 513 virtio_reinit_complete(dev); 514 515 vtscsi_enable_vqs_intr(sc); 516 } 517 518 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error); 519 520 return (error); 521 } 522 523 static int 524 vtscsi_alloc_cam(struct vtscsi_softc *sc) 525 { 526 device_t dev; 527 struct cam_devq *devq; 528 int openings; 529 530 dev = sc->vtscsi_dev; 531 openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS; 532 533 devq = cam_simq_alloc(openings); 534 if (devq == NULL) { 535 device_printf(dev, "cannot allocate SIM queue\n"); 536 return (ENOMEM); 537 } 538 539 sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll, 540 "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1, 541 openings, devq); 542 if (sc->vtscsi_sim == NULL) { 543 cam_simq_free(devq); 544 device_printf(dev, "cannot allocate SIM\n"); 545 return (ENOMEM); 546 } 547 548 return (0); 549 } 550 551 static int 552 vtscsi_register_cam(struct vtscsi_softc *sc) 553 { 554 device_t dev; 555 int registered, error; 556 557 dev = sc->vtscsi_dev; 558 registered = 0; 559 560 VTSCSI_LOCK(sc); 561 562 if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) { 563 error = ENOMEM; 564 device_printf(dev, "cannot register XPT bus\n"); 565 goto fail; 566 } 567 568 registered = 1; 569 570 if (xpt_create_path(&sc->vtscsi_path, NULL, 571 cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD, 572 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 573 error = ENOMEM; 574 device_printf(dev, "cannot create bus path\n"); 575 goto fail; 576 } 577 578 VTSCSI_UNLOCK(sc); 579 580 /* 581 * The async register apparently needs to be done without 582 
* the lock held, otherwise it can recurse on the lock. 583 */ 584 if (vtscsi_register_async(sc) != CAM_REQ_CMP) { 585 error = EIO; 586 device_printf(dev, "cannot register async callback\n"); 587 VTSCSI_LOCK(sc); 588 goto fail; 589 } 590 591 return (0); 592 593 fail: 594 if (sc->vtscsi_path != NULL) { 595 xpt_free_path(sc->vtscsi_path); 596 sc->vtscsi_path = NULL; 597 } 598 599 if (registered != 0) 600 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); 601 602 VTSCSI_UNLOCK(sc); 603 604 return (error); 605 } 606 607 static void 608 vtscsi_free_cam(struct vtscsi_softc *sc) 609 { 610 611 VTSCSI_LOCK(sc); 612 613 if (sc->vtscsi_path != NULL) { 614 vtscsi_deregister_async(sc); 615 616 xpt_free_path(sc->vtscsi_path); 617 sc->vtscsi_path = NULL; 618 619 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); 620 } 621 622 if (sc->vtscsi_sim != NULL) { 623 cam_sim_free(sc->vtscsi_sim, 1); 624 sc->vtscsi_sim = NULL; 625 } 626 627 VTSCSI_UNLOCK(sc); 628 } 629 630 static void 631 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg) 632 { 633 struct cam_sim *sim; 634 struct vtscsi_softc *sc; 635 636 sim = cb_arg; 637 sc = cam_sim_softc(sim); 638 639 vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code); 640 641 /* 642 * TODO Once QEMU supports event reporting, we should 643 * (un)subscribe to events here. 
644 */ 645 switch (code) { 646 case AC_FOUND_DEVICE: 647 break; 648 case AC_LOST_DEVICE: 649 break; 650 } 651 } 652 653 static int 654 vtscsi_register_async(struct vtscsi_softc *sc) 655 { 656 struct ccb_setasync csa; 657 658 VTSCSI_LOCK_NOTOWNED(sc); 659 660 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); 661 csa.ccb_h.func_code = XPT_SASYNC_CB; 662 csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE; 663 csa.callback = vtscsi_cam_async; 664 csa.callback_arg = sc->vtscsi_sim; 665 666 xpt_action((union ccb *) &csa); 667 668 return (csa.ccb_h.status); 669 } 670 671 static void 672 vtscsi_deregister_async(struct vtscsi_softc *sc) 673 { 674 struct ccb_setasync csa; 675 676 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); 677 csa.ccb_h.func_code = XPT_SASYNC_CB; 678 csa.event_enable = 0; 679 csa.callback = vtscsi_cam_async; 680 csa.callback_arg = sc->vtscsi_sim; 681 682 xpt_action((union ccb *) &csa); 683 } 684 685 static void 686 vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb) 687 { 688 struct vtscsi_softc *sc; 689 struct ccb_hdr *ccbh; 690 691 sc = cam_sim_softc(sim); 692 ccbh = &ccb->ccb_h; 693 694 VTSCSI_LOCK_OWNED(sc); 695 696 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) { 697 /* 698 * The VTSCSI_MTX is briefly dropped between setting 699 * VTSCSI_FLAG_DETACH and deregistering with CAM, so 700 * drop any CCBs that come in during that window. 
701 */ 702 ccbh->status = CAM_NO_HBA; 703 xpt_done(ccb); 704 return; 705 } 706 707 switch (ccbh->func_code) { 708 case XPT_SCSI_IO: 709 vtscsi_cam_scsi_io(sc, sim, ccb); 710 break; 711 712 case XPT_SET_TRAN_SETTINGS: 713 ccbh->status = CAM_FUNC_NOTAVAIL; 714 xpt_done(ccb); 715 break; 716 717 case XPT_GET_TRAN_SETTINGS: 718 vtscsi_cam_get_tran_settings(sc, ccb); 719 break; 720 721 case XPT_RESET_BUS: 722 vtscsi_cam_reset_bus(sc, ccb); 723 break; 724 725 case XPT_RESET_DEV: 726 vtscsi_cam_reset_dev(sc, ccb); 727 break; 728 729 case XPT_ABORT: 730 vtscsi_cam_abort(sc, ccb); 731 break; 732 733 case XPT_CALC_GEOMETRY: 734 cam_calc_geometry(&ccb->ccg, 1); 735 xpt_done(ccb); 736 break; 737 738 case XPT_PATH_INQ: 739 vtscsi_cam_path_inquiry(sc, sim, ccb); 740 break; 741 742 default: 743 vtscsi_dprintf(sc, VTSCSI_ERROR, 744 "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code); 745 746 ccbh->status = CAM_REQ_INVALID; 747 xpt_done(ccb); 748 break; 749 } 750 } 751 752 static void 753 vtscsi_cam_poll(struct cam_sim *sim) 754 { 755 struct vtscsi_softc *sc; 756 757 sc = cam_sim_softc(sim); 758 759 vtscsi_complete_vqs_locked(sc); 760 } 761 762 static void 763 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim, 764 union ccb *ccb) 765 { 766 struct ccb_hdr *ccbh; 767 struct ccb_scsiio *csio; 768 int error; 769 770 ccbh = &ccb->ccb_h; 771 csio = &ccb->csio; 772 773 if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) { 774 error = EINVAL; 775 ccbh->status = CAM_REQ_INVALID; 776 goto done; 777 } 778 779 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH && 780 (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) { 781 error = EINVAL; 782 ccbh->status = CAM_REQ_INVALID; 783 goto done; 784 } 785 786 error = vtscsi_start_scsi_cmd(sc, ccb); 787 788 done: 789 if (error) { 790 vtscsi_dprintf(sc, VTSCSI_ERROR, 791 "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status); 792 xpt_done(ccb); 793 } 794 } 795 796 static void 797 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb 
*ccb) 798 { 799 struct ccb_trans_settings *cts; 800 struct ccb_trans_settings_scsi *scsi; 801 802 cts = &ccb->cts; 803 scsi = &cts->proto_specific.scsi; 804 805 cts->protocol = PROTO_SCSI; 806 cts->protocol_version = SCSI_REV_SPC3; 807 cts->transport = XPORT_SAS; 808 cts->transport_version = 0; 809 810 scsi->valid = CTS_SCSI_VALID_TQ; 811 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 812 813 ccb->ccb_h.status = CAM_REQ_CMP; 814 xpt_done(ccb); 815 } 816 817 static void 818 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb) 819 { 820 int error; 821 822 error = vtscsi_reset_bus(sc); 823 if (error == 0) 824 ccb->ccb_h.status = CAM_REQ_CMP; 825 else 826 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 827 828 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n", 829 error, ccb, ccb->ccb_h.status); 830 831 xpt_done(ccb); 832 } 833 834 static void 835 vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb) 836 { 837 struct ccb_hdr *ccbh; 838 struct vtscsi_request *req; 839 int error; 840 841 ccbh = &ccb->ccb_h; 842 843 req = vtscsi_dequeue_request(sc); 844 if (req == NULL) { 845 error = EAGAIN; 846 vtscsi_freeze_simq(sc, VTSCSI_REQUEST); 847 goto fail; 848 } 849 850 req->vsr_ccb = ccb; 851 852 error = vtscsi_execute_reset_dev_cmd(sc, req); 853 if (error == 0) 854 return; 855 856 vtscsi_enqueue_request(sc, req); 857 858 fail: 859 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", 860 error, req, ccb); 861 862 if (error == EAGAIN) 863 ccbh->status = CAM_RESRC_UNAVAIL; 864 else 865 ccbh->status = CAM_REQ_CMP_ERR; 866 867 xpt_done(ccb); 868 } 869 870 static void 871 vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb) 872 { 873 struct vtscsi_request *req; 874 struct ccb_hdr *ccbh; 875 int error; 876 877 ccbh = &ccb->ccb_h; 878 879 req = vtscsi_dequeue_request(sc); 880 if (req == NULL) { 881 error = EAGAIN; 882 vtscsi_freeze_simq(sc, VTSCSI_REQUEST); 883 goto fail; 884 } 885 886 req->vsr_ccb = ccb; 887 888 error = vtscsi_execute_abort_task_cmd(sc, req); 
889 if (error == 0) 890 return; 891 892 vtscsi_enqueue_request(sc, req); 893 894 fail: 895 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", 896 error, req, ccb); 897 898 if (error == EAGAIN) 899 ccbh->status = CAM_RESRC_UNAVAIL; 900 else 901 ccbh->status = CAM_REQ_CMP_ERR; 902 903 xpt_done(ccb); 904 } 905 906 static void 907 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim, 908 union ccb *ccb) 909 { 910 device_t dev; 911 struct ccb_pathinq *cpi; 912 913 dev = sc->vtscsi_dev; 914 cpi = &ccb->cpi; 915 916 vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb); 917 918 cpi->version_num = 1; 919 cpi->hba_inquiry = PI_TAG_ABLE; 920 cpi->target_sprt = 0; 921 cpi->hba_misc = PIM_SEQSCAN; 922 if (vtscsi_bus_reset_disable != 0) 923 cpi->hba_misc |= PIM_NOBUSRESET; 924 cpi->hba_eng_cnt = 0; 925 926 cpi->max_target = sc->vtscsi_max_target; 927 cpi->max_lun = sc->vtscsi_max_lun; 928 cpi->initiator_id = VTSCSI_INITIATOR_ID; 929 930 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 931 strncpy(cpi->hba_vid, "VirtIO", HBA_IDLEN); 932 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 933 934 cpi->unit_number = cam_sim_unit(sim); 935 cpi->bus_id = cam_sim_bus(sim); 936 937 cpi->base_transfer_speed = 300000; 938 939 cpi->protocol = PROTO_SCSI; 940 cpi->protocol_version = SCSI_REV_SPC3; 941 cpi->transport = XPORT_SAS; 942 cpi->transport_version = 0; 943 944 cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) * 945 PAGE_SIZE; 946 947 cpi->hba_vendor = virtio_get_vendor(dev); 948 cpi->hba_device = virtio_get_device(dev); 949 cpi->hba_subvendor = virtio_get_subvendor(dev); 950 cpi->hba_subdevice = virtio_get_subdevice(dev); 951 952 ccb->ccb_h.status = CAM_REQ_CMP; 953 xpt_done(ccb); 954 } 955 956 static int 957 vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg, 958 struct ccb_scsiio *csio) 959 { 960 struct ccb_hdr *ccbh; 961 struct bus_dma_segment *dseg; 962 int i, error; 963 964 ccbh = &csio->ccb_h; 965 error = 0; 966 
967 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 968 969 if ((ccbh->flags & CAM_DATA_PHYS) == 0) 970 error = sglist_append(sg, 971 csio->data_ptr, csio->dxfer_len); 972 else 973 error = sglist_append_phys(sg, 974 (vm_paddr_t) csio->data_ptr, csio->dxfer_len); 975 } else { 976 977 for (i = 0; i < csio->sglist_cnt && error == 0; i++) { 978 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; 979 980 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) 981 error = sglist_append(sg, 982 (void *) dseg->ds_addr, dseg->ds_len); 983 else 984 error = sglist_append_phys(sg, 985 (vm_paddr_t) dseg->ds_addr, dseg->ds_len); 986 } 987 } 988 989 return (error); 990 } 991 992 static int 993 vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req, 994 int *readable, int *writable) 995 { 996 struct sglist *sg; 997 struct ccb_hdr *ccbh; 998 struct ccb_scsiio *csio; 999 struct virtio_scsi_cmd_req *cmd_req; 1000 struct virtio_scsi_cmd_resp *cmd_resp; 1001 int error; 1002 1003 sg = sc->vtscsi_sglist; 1004 csio = &req->vsr_ccb->csio; 1005 ccbh = &csio->ccb_h; 1006 cmd_req = &req->vsr_cmd_req; 1007 cmd_resp = &req->vsr_cmd_resp; 1008 1009 sglist_reset(sg); 1010 1011 sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req)); 1012 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1013 error = vtscsi_sg_append_scsi_buf(sc, sg, csio); 1014 /* At least one segment must be left for the response. 
*/ 1015 if (error || sg->sg_nseg == sg->sg_maxseg) 1016 goto fail; 1017 } 1018 1019 *readable = sg->sg_nseg; 1020 1021 sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp)); 1022 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1023 error = vtscsi_sg_append_scsi_buf(sc, sg, csio); 1024 if (error) 1025 goto fail; 1026 } 1027 1028 *writable = sg->sg_nseg - *readable; 1029 1030 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d " 1031 "writable=%d\n", req, ccbh, *readable, *writable); 1032 1033 return (0); 1034 1035 fail: 1036 /* 1037 * This should never happen unless maxio was incorrectly set. 1038 */ 1039 vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0); 1040 1041 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p " 1042 "nseg=%d maxseg=%d\n", 1043 error, req, ccbh, sg->sg_nseg, sg->sg_maxseg); 1044 1045 return (EFBIG); 1046 } 1047 1048 static int 1049 vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) 1050 { 1051 struct sglist *sg; 1052 struct virtqueue *vq; 1053 struct ccb_scsiio *csio; 1054 struct ccb_hdr *ccbh; 1055 struct virtio_scsi_cmd_req *cmd_req; 1056 struct virtio_scsi_cmd_resp *cmd_resp; 1057 int readable, writable, error; 1058 1059 sg = sc->vtscsi_sglist; 1060 vq = sc->vtscsi_request_vq; 1061 csio = &req->vsr_ccb->csio; 1062 ccbh = &csio->ccb_h; 1063 cmd_req = &req->vsr_cmd_req; 1064 cmd_resp = &req->vsr_cmd_resp; 1065 1066 vtscsi_init_scsi_cmd_req(csio, cmd_req); 1067 1068 error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable); 1069 if (error) 1070 return (error); 1071 1072 req->vsr_complete = vtscsi_complete_scsi_cmd; 1073 cmd_resp->response = -1; 1074 1075 error = virtqueue_enqueue(vq, req, sg, readable, writable); 1076 if (error) { 1077 vtscsi_dprintf(sc, VTSCSI_ERROR, 1078 "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh); 1079 1080 ccbh->status = CAM_REQUEUE_REQ; 1081 vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ); 1082 return (error); 1083 } 1084 1085 ccbh->status |= CAM_SIM_QUEUED; 
1086 ccbh->ccbh_vtscsi_req = req; 1087 1088 virtqueue_notify(vq); 1089 1090 if (ccbh->timeout != CAM_TIME_INFINITY) { 1091 req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET; 1092 callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000, 1093 vtscsi_timedout_scsi_cmd, req); 1094 } 1095 1096 vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n", 1097 req, ccbh); 1098 1099 return (0); 1100 } 1101 1102 static int 1103 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb) 1104 { 1105 struct vtscsi_request *req; 1106 int error; 1107 1108 req = vtscsi_dequeue_request(sc); 1109 if (req == NULL) { 1110 ccb->ccb_h.status = CAM_REQUEUE_REQ; 1111 vtscsi_freeze_simq(sc, VTSCSI_REQUEST); 1112 return (ENOBUFS); 1113 } 1114 1115 req->vsr_ccb = ccb; 1116 1117 error = vtscsi_execute_scsi_cmd(sc, req); 1118 if (error) 1119 vtscsi_enqueue_request(sc, req); 1120 1121 return (error); 1122 } 1123 1124 static void 1125 vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, 1126 struct vtscsi_request *req) 1127 { 1128 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1129 struct vtscsi_request *to_req; 1130 uint8_t response; 1131 1132 tmf_resp = &req->vsr_tmf_resp; 1133 response = tmf_resp->response; 1134 to_req = req->vsr_timedout_req; 1135 1136 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n", 1137 req, to_req, response); 1138 1139 vtscsi_enqueue_request(sc, req); 1140 1141 /* 1142 * The timedout request could have completed between when the 1143 * abort task was sent and when the host processed it. 1144 */ 1145 if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT) 1146 return; 1147 1148 /* The timedout request was successfully aborted. */ 1149 if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) 1150 return; 1151 1152 /* Don't bother if the device is going away. */ 1153 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) 1154 return; 1155 1156 /* The timedout request will be aborted by the reset. 
*/ 1157 if (sc->vtscsi_flags & VTSCSI_FLAG_RESET) 1158 return; 1159 1160 vtscsi_reset_bus(sc); 1161 } 1162 1163 static int 1164 vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, 1165 struct vtscsi_request *to_req) 1166 { 1167 struct sglist *sg; 1168 struct ccb_hdr *to_ccbh; 1169 struct vtscsi_request *req; 1170 struct virtio_scsi_ctrl_tmf_req *tmf_req; 1171 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1172 int error; 1173 1174 sg = sc->vtscsi_sglist; 1175 to_ccbh = &to_req->vsr_ccb->ccb_h; 1176 1177 req = vtscsi_dequeue_request(sc); 1178 if (req == NULL) { 1179 error = ENOBUFS; 1180 goto fail; 1181 } 1182 1183 tmf_req = &req->vsr_tmf_req; 1184 tmf_resp = &req->vsr_tmf_resp; 1185 1186 vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, 1187 (uintptr_t) to_ccbh, tmf_req); 1188 1189 sglist_reset(sg); 1190 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); 1191 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); 1192 1193 req->vsr_timedout_req = to_req; 1194 req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd; 1195 tmf_resp->response = -1; 1196 1197 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, 1198 VTSCSI_EXECUTE_ASYNC); 1199 if (error == 0) 1200 return (0); 1201 1202 vtscsi_enqueue_request(sc, req); 1203 1204 fail: 1205 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p " 1206 "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh); 1207 1208 return (error); 1209 } 1210 1211 static void 1212 vtscsi_timedout_scsi_cmd(void *xreq) 1213 { 1214 struct vtscsi_softc *sc; 1215 struct vtscsi_request *to_req; 1216 1217 to_req = xreq; 1218 sc = to_req->vsr_softc; 1219 1220 vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n", 1221 to_req, to_req->vsr_ccb, to_req->vsr_state); 1222 1223 /* Don't bother if the device is going away. */ 1224 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) 1225 return; 1226 1227 /* 1228 * Bail if the request is not in use. 
We likely raced when 1229 * stopping the callout handler or it has already been aborted. 1230 */ 1231 if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE || 1232 (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0) 1233 return; 1234 1235 /* 1236 * Complete the request queue in case the timedout request is 1237 * actually just pending. 1238 */ 1239 vtscsi_complete_vq(sc, sc->vtscsi_request_vq); 1240 if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE) 1241 return; 1242 1243 sc->vtscsi_stats.scsi_cmd_timeouts++; 1244 to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT; 1245 1246 if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0) 1247 return; 1248 1249 vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n"); 1250 vtscsi_reset_bus(sc); 1251 } 1252 1253 static cam_status 1254 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp) 1255 { 1256 cam_status status; 1257 1258 switch (cmd_resp->response) { 1259 case VIRTIO_SCSI_S_OK: 1260 status = CAM_REQ_CMP; 1261 break; 1262 case VIRTIO_SCSI_S_OVERRUN: 1263 status = CAM_DATA_RUN_ERR; 1264 break; 1265 case VIRTIO_SCSI_S_ABORTED: 1266 status = CAM_REQ_ABORTED; 1267 break; 1268 case VIRTIO_SCSI_S_BAD_TARGET: 1269 status = CAM_TID_INVALID; 1270 break; 1271 case VIRTIO_SCSI_S_RESET: 1272 status = CAM_SCSI_BUS_RESET; 1273 break; 1274 case VIRTIO_SCSI_S_BUSY: 1275 status = CAM_SCSI_BUSY; 1276 break; 1277 case VIRTIO_SCSI_S_TRANSPORT_FAILURE: 1278 case VIRTIO_SCSI_S_TARGET_FAILURE: 1279 case VIRTIO_SCSI_S_NEXUS_FAILURE: 1280 status = CAM_SCSI_IT_NEXUS_LOST; 1281 break; 1282 default: /* VIRTIO_SCSI_S_FAILURE */ 1283 status = CAM_REQ_CMP_ERR; 1284 break; 1285 } 1286 1287 return (status); 1288 } 1289 1290 static cam_status 1291 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc, 1292 struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp) 1293 { 1294 cam_status status; 1295 1296 csio->scsi_status = cmd_resp->status; 1297 csio->resid = cmd_resp->resid; 1298 1299 if (csio->scsi_status == SCSI_STATUS_OK) 1300 status = 
CAM_REQ_CMP; 1301 else 1302 status = CAM_SCSI_STATUS_ERROR; 1303 1304 if (cmd_resp->sense_len > 0) { 1305 status |= CAM_AUTOSNS_VALID; 1306 1307 if (cmd_resp->sense_len < csio->sense_len) 1308 csio->sense_resid = csio->sense_len - 1309 cmd_resp->sense_len; 1310 else 1311 csio->sense_resid = 0; 1312 1313 bzero(&csio->sense_data, sizeof(csio->sense_data)); 1314 memcpy(cmd_resp->sense, &csio->sense_data, 1315 csio->sense_len - csio->sense_resid); 1316 } 1317 1318 vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR, 1319 "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n", 1320 csio, csio->scsi_status, csio->resid, csio->sense_resid); 1321 1322 return (status); 1323 } 1324 1325 static void 1326 vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) 1327 { 1328 struct ccb_hdr *ccbh; 1329 struct ccb_scsiio *csio; 1330 struct virtio_scsi_cmd_resp *cmd_resp; 1331 cam_status status; 1332 1333 csio = &req->vsr_ccb->csio; 1334 ccbh = &csio->ccb_h; 1335 cmd_resp = &req->vsr_cmd_resp; 1336 1337 KASSERT(ccbh->ccbh_vtscsi_req == req, 1338 ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req)); 1339 1340 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) 1341 callout_stop(&req->vsr_callout); 1342 1343 status = vtscsi_scsi_cmd_cam_status(cmd_resp); 1344 if (status == CAM_REQ_ABORTED) { 1345 if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT) 1346 status = CAM_CMD_TIMEOUT; 1347 } else if (status == CAM_REQ_CMP) 1348 status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp); 1349 1350 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1351 status |= CAM_DEV_QFRZN; 1352 xpt_freeze_devq(ccbh->path, 1); 1353 } 1354 1355 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) 1356 status |= CAM_RELEASE_SIMQ; 1357 1358 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n", 1359 req, ccbh, status); 1360 1361 ccbh->status = status; 1362 xpt_done(req->vsr_ccb); 1363 vtscsi_enqueue_request(sc, req); 1364 } 1365 1366 static 
void 1367 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req) 1368 { 1369 1370 /* XXX We probably shouldn't poll forever. */ 1371 req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED; 1372 do 1373 vtscsi_complete_vq(sc, sc->vtscsi_control_vq); 1374 while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0); 1375 1376 req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED; 1377 } 1378 1379 static int 1380 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req, 1381 struct sglist *sg, int readable, int writable, int flag) 1382 { 1383 struct virtqueue *vq; 1384 int error; 1385 1386 vq = sc->vtscsi_control_vq; 1387 1388 MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL); 1389 1390 error = virtqueue_enqueue(vq, req, sg, readable, writable); 1391 if (error) { 1392 /* 1393 * Return EAGAIN when the virtqueue does not have enough 1394 * descriptors available. 1395 */ 1396 if (error == ENOSPC || error == EMSGSIZE) 1397 error = EAGAIN; 1398 1399 return (error); 1400 } 1401 1402 virtqueue_notify(vq); 1403 if (flag == VTSCSI_EXECUTE_POLL) 1404 vtscsi_poll_ctrl_req(sc, req); 1405 1406 return (0); 1407 } 1408 1409 static void 1410 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc, 1411 struct vtscsi_request *req) 1412 { 1413 union ccb *ccb; 1414 struct ccb_hdr *ccbh; 1415 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1416 1417 ccb = req->vsr_ccb; 1418 ccbh = &ccb->ccb_h; 1419 tmf_resp = &req->vsr_tmf_resp; 1420 1421 switch (tmf_resp->response) { 1422 case VIRTIO_SCSI_S_FUNCTION_COMPLETE: 1423 ccbh->status = CAM_REQ_CMP; 1424 break; 1425 case VIRTIO_SCSI_S_FUNCTION_REJECTED: 1426 ccbh->status = CAM_UA_ABORT; 1427 break; 1428 default: 1429 ccbh->status = CAM_REQ_CMP_ERR; 1430 break; 1431 } 1432 1433 xpt_done(ccb); 1434 vtscsi_enqueue_request(sc, req); 1435 } 1436 1437 static int 1438 vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc, 1439 struct vtscsi_request *req) 1440 { 1441 struct sglist *sg; 1442 struct ccb_abort *cab; 1443 struct ccb_hdr 
*ccbh; 1444 struct ccb_hdr *abort_ccbh; 1445 struct vtscsi_request *abort_req; 1446 struct virtio_scsi_ctrl_tmf_req *tmf_req; 1447 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1448 int error; 1449 1450 sg = sc->vtscsi_sglist; 1451 cab = &req->vsr_ccb->cab; 1452 ccbh = &cab->ccb_h; 1453 tmf_req = &req->vsr_tmf_req; 1454 tmf_resp = &req->vsr_tmf_resp; 1455 1456 /* CCB header and request that's to be aborted. */ 1457 abort_ccbh = &cab->abort_ccb->ccb_h; 1458 abort_req = abort_ccbh->ccbh_vtscsi_req; 1459 1460 if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) { 1461 error = EINVAL; 1462 goto fail; 1463 } 1464 1465 /* Only attempt to abort requests that could be in-flight. */ 1466 if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) { 1467 error = EALREADY; 1468 goto fail; 1469 } 1470 1471 abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED; 1472 if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) 1473 callout_stop(&abort_req->vsr_callout); 1474 1475 vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, 1476 (uintptr_t) abort_ccbh, tmf_req); 1477 1478 sglist_reset(sg); 1479 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); 1480 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); 1481 1482 req->vsr_complete = vtscsi_complete_abort_task_cmd; 1483 tmf_resp->response = -1; 1484 1485 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, 1486 VTSCSI_EXECUTE_ASYNC); 1487 1488 fail: 1489 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p " 1490 "abort_req=%p\n", error, req, abort_ccbh, abort_req); 1491 1492 return (error); 1493 } 1494 1495 static void 1496 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc, 1497 struct vtscsi_request *req) 1498 { 1499 union ccb *ccb; 1500 struct ccb_hdr *ccbh; 1501 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1502 1503 ccb = req->vsr_ccb; 1504 ccbh = &ccb->ccb_h; 1505 tmf_resp = &req->vsr_tmf_resp; 1506 1507 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n", 1508 
req, ccb, tmf_resp->response); 1509 1510 if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) { 1511 ccbh->status = CAM_REQ_CMP; 1512 vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id, 1513 ccbh->target_lun); 1514 } else 1515 ccbh->status = CAM_REQ_CMP_ERR; 1516 1517 xpt_done(ccb); 1518 vtscsi_enqueue_request(sc, req); 1519 } 1520 1521 static int 1522 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc, 1523 struct vtscsi_request *req) 1524 { 1525 struct sglist *sg; 1526 struct ccb_resetdev *crd; 1527 struct ccb_hdr *ccbh; 1528 struct virtio_scsi_ctrl_tmf_req *tmf_req; 1529 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1530 uint32_t subtype; 1531 int error; 1532 1533 sg = sc->vtscsi_sglist; 1534 crd = &req->vsr_ccb->crd; 1535 ccbh = &crd->ccb_h; 1536 tmf_req = &req->vsr_tmf_req; 1537 tmf_resp = &req->vsr_tmf_resp; 1538 1539 if (ccbh->target_lun == CAM_LUN_WILDCARD) 1540 subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET; 1541 else 1542 subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET; 1543 1544 vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req); 1545 1546 sglist_reset(sg); 1547 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); 1548 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); 1549 1550 req->vsr_complete = vtscsi_complete_reset_dev_cmd; 1551 tmf_resp->response = -1; 1552 1553 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, 1554 VTSCSI_EXECUTE_ASYNC); 1555 1556 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n", 1557 error, req, ccbh); 1558 1559 return (error); 1560 } 1561 1562 static void 1563 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id) 1564 { 1565 1566 *target_id = lun[1]; 1567 *lun_id = (lun[2] << 8) | lun[3]; 1568 } 1569 1570 static void 1571 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[]) 1572 { 1573 1574 lun[0] = 1; 1575 lun[1] = ccbh->target_id; 1576 lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F); 1577 lun[3] = (ccbh->target_lun >> 8) & 0xFF; 1578 } 1579 1580 
static void 1581 vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio, 1582 struct virtio_scsi_cmd_req *cmd_req) 1583 { 1584 uint8_t attr; 1585 1586 switch (csio->tag_action) { 1587 case MSG_HEAD_OF_Q_TAG: 1588 attr = VIRTIO_SCSI_S_HEAD; 1589 break; 1590 case MSG_ORDERED_Q_TAG: 1591 attr = VIRTIO_SCSI_S_ORDERED; 1592 break; 1593 case MSG_ACA_TASK: 1594 attr = VIRTIO_SCSI_S_ACA; 1595 break; 1596 default: /* MSG_SIMPLE_Q_TAG */ 1597 attr = VIRTIO_SCSI_S_SIMPLE; 1598 break; 1599 } 1600 1601 vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun); 1602 cmd_req->tag = (uintptr_t) csio; 1603 cmd_req->task_attr = attr; 1604 1605 memcpy(cmd_req->cdb, 1606 csio->ccb_h.flags & CAM_CDB_POINTER ? 1607 csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes, 1608 csio->cdb_len); 1609 } 1610 1611 static void 1612 vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype, 1613 uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req) 1614 { 1615 1616 vtscsi_set_request_lun(ccbh, tmf_req->lun); 1617 1618 tmf_req->type = VIRTIO_SCSI_T_TMF; 1619 tmf_req->subtype = subtype; 1620 tmf_req->tag = tag; 1621 } 1622 1623 static void 1624 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason) 1625 { 1626 int frozen; 1627 1628 frozen = sc->vtscsi_frozen; 1629 1630 if (reason & VTSCSI_REQUEST && 1631 (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0) 1632 sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS; 1633 1634 if (reason & VTSCSI_REQUEST_VQ && 1635 (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0) 1636 sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL; 1637 1638 /* Freeze the SIMQ if transitioned to frozen. 
*/ 1639 if (frozen == 0 && sc->vtscsi_frozen != 0) { 1640 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n"); 1641 xpt_freeze_simq(sc->vtscsi_sim, 1); 1642 } 1643 } 1644 1645 static int 1646 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason) 1647 { 1648 int thawed; 1649 1650 if (sc->vtscsi_frozen == 0 || reason == 0) 1651 return (0); 1652 1653 if (reason & VTSCSI_REQUEST && 1654 sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) 1655 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS; 1656 1657 if (reason & VTSCSI_REQUEST_VQ && 1658 sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) 1659 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL; 1660 1661 thawed = sc->vtscsi_frozen == 0; 1662 if (thawed != 0) 1663 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n"); 1664 1665 return (thawed); 1666 } 1667 1668 static void 1669 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code, 1670 target_id_t target_id, lun_id_t lun_id) 1671 { 1672 struct cam_path *path; 1673 1674 xpt_async(ac_code, sc->vtscsi_path, NULL); 1675 return; 1676 1677 /* Use the wildcard path from our softc for bus announcements. 
*/ 1678 if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) { 1679 xpt_async(ac_code, sc->vtscsi_path, NULL); 1680 return; 1681 } 1682 1683 if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim), 1684 target_id, lun_id) != CAM_REQ_CMP) { 1685 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n"); 1686 return; 1687 } 1688 1689 xpt_async(ac_code, path, NULL); 1690 xpt_free_path(path); 1691 } 1692 1693 static void 1694 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id, 1695 lun_id_t lun_id) 1696 { 1697 union ccb *ccb; 1698 cam_status status; 1699 1700 ccb = xpt_alloc_ccb_nowait(); 1701 if (ccb == NULL) { 1702 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n"); 1703 return; 1704 } 1705 1706 status = xpt_create_path(&ccb->ccb_h.path, xpt_periph, 1707 cam_sim_path(sc->vtscsi_sim), target_id, lun_id); 1708 if (status != CAM_REQ_CMP) { 1709 xpt_free_ccb(ccb); 1710 return; 1711 } 1712 1713 xpt_rescan(ccb); 1714 } 1715 1716 static void 1717 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc) 1718 { 1719 1720 vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 1721 } 1722 1723 static void 1724 vtscsi_transport_reset_event(struct vtscsi_softc *sc, 1725 struct virtio_scsi_event *event) 1726 { 1727 target_id_t target_id; 1728 lun_id_t lun_id; 1729 1730 vtscsi_get_request_lun(event->lun, &target_id, &lun_id); 1731 1732 switch (event->reason) { 1733 case VIRTIO_SCSI_EVT_RESET_RESCAN: 1734 case VIRTIO_SCSI_EVT_RESET_REMOVED: 1735 vtscsi_execute_rescan(sc, target_id, lun_id); 1736 break; 1737 default: 1738 device_printf(sc->vtscsi_dev, 1739 "unhandled transport event reason: %d\n", event->reason); 1740 break; 1741 } 1742 } 1743 1744 static void 1745 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event) 1746 { 1747 int error; 1748 1749 if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) { 1750 switch (event->event) { 1751 case VIRTIO_SCSI_T_TRANSPORT_RESET: 1752 
vtscsi_transport_reset_event(sc, event); 1753 break; 1754 default: 1755 device_printf(sc->vtscsi_dev, 1756 "unhandled event: %d\n", event->event); 1757 break; 1758 } 1759 } else 1760 vtscsi_execute_rescan_bus(sc); 1761 1762 /* 1763 * This should always be successful since the buffer 1764 * was just dequeued. 1765 */ 1766 error = vtscsi_enqueue_event_buf(sc, event); 1767 KASSERT(error == 0, 1768 ("cannot requeue event buffer: %d", error)); 1769 } 1770 1771 static int 1772 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc, 1773 struct virtio_scsi_event *event) 1774 { 1775 struct sglist *sg; 1776 struct virtqueue *vq; 1777 int size, error; 1778 1779 sg = sc->vtscsi_sglist; 1780 vq = sc->vtscsi_event_vq; 1781 size = sc->vtscsi_event_buf_size; 1782 1783 bzero(event, size); 1784 1785 sglist_reset(sg); 1786 error = sglist_append(sg, event, size); 1787 if (error) 1788 return (error); 1789 1790 error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg); 1791 if (error) 1792 return (error); 1793 1794 virtqueue_notify(vq); 1795 1796 return (0); 1797 } 1798 1799 static int 1800 vtscsi_init_event_vq(struct vtscsi_softc *sc) 1801 { 1802 struct virtio_scsi_event *event; 1803 int i, size, error; 1804 1805 /* 1806 * The first release of QEMU with VirtIO SCSI support would crash 1807 * when attempting to notify the event virtqueue. This was fixed 1808 * when hotplug support was added. 1809 */ 1810 if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) 1811 size = sc->vtscsi_event_buf_size; 1812 else 1813 size = 0; 1814 1815 if (size < sizeof(struct virtio_scsi_event)) 1816 return (0); 1817 1818 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { 1819 event = &sc->vtscsi_event_bufs[i]; 1820 1821 error = vtscsi_enqueue_event_buf(sc, event); 1822 if (error) 1823 break; 1824 } 1825 1826 /* 1827 * Even just one buffer is enough. Missed events are 1828 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag. 
1829 */ 1830 if (i > 0) 1831 error = 0; 1832 1833 return (error); 1834 } 1835 1836 static void 1837 vtscsi_reinit_event_vq(struct vtscsi_softc *sc) 1838 { 1839 struct virtio_scsi_event *event; 1840 int i, error; 1841 1842 if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 || 1843 sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event)) 1844 return; 1845 1846 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { 1847 event = &sc->vtscsi_event_bufs[i]; 1848 1849 error = vtscsi_enqueue_event_buf(sc, event); 1850 if (error) 1851 break; 1852 } 1853 1854 KASSERT(i > 0, ("cannot reinit event vq: %d", error)); 1855 } 1856 1857 static void 1858 vtscsi_drain_event_vq(struct vtscsi_softc *sc) 1859 { 1860 struct virtqueue *vq; 1861 int last; 1862 1863 vq = sc->vtscsi_event_vq; 1864 last = 0; 1865 1866 while (virtqueue_drain(vq, &last) != NULL) 1867 ; 1868 1869 KASSERT(virtqueue_empty(vq), ("eventvq not empty")); 1870 } 1871 1872 static void 1873 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc) 1874 { 1875 1876 VTSCSI_LOCK_OWNED(sc); 1877 1878 if (sc->vtscsi_request_vq != NULL) 1879 vtscsi_complete_vq(sc, sc->vtscsi_request_vq); 1880 if (sc->vtscsi_control_vq != NULL) 1881 vtscsi_complete_vq(sc, sc->vtscsi_control_vq); 1882 } 1883 1884 static void 1885 vtscsi_complete_vqs(struct vtscsi_softc *sc) 1886 { 1887 1888 VTSCSI_LOCK(sc); 1889 vtscsi_complete_vqs_locked(sc); 1890 VTSCSI_UNLOCK(sc); 1891 } 1892 1893 static void 1894 vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req) 1895 { 1896 union ccb *ccb; 1897 int detach; 1898 1899 ccb = req->vsr_ccb; 1900 1901 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb); 1902 1903 /* 1904 * The callout must be drained when detaching since the request is 1905 * about to be freed. The VTSCSI_MTX must not be held for this in 1906 * case the callout is pending because there is a deadlock potential. 
1907 * Otherwise, the virtqueue is being drained because of a bus reset 1908 * so we only need to attempt to stop the callouts. 1909 */ 1910 detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0; 1911 if (detach != 0) 1912 VTSCSI_LOCK_NOTOWNED(sc); 1913 else 1914 VTSCSI_LOCK_OWNED(sc); 1915 1916 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) { 1917 if (detach != 0) 1918 callout_drain(&req->vsr_callout); 1919 else 1920 callout_stop(&req->vsr_callout); 1921 } 1922 1923 if (ccb != NULL) { 1924 if (detach != 0) { 1925 VTSCSI_LOCK(sc); 1926 ccb->ccb_h.status = CAM_NO_HBA; 1927 } else 1928 ccb->ccb_h.status = CAM_REQUEUE_REQ; 1929 xpt_done(ccb); 1930 if (detach != 0) 1931 VTSCSI_UNLOCK(sc); 1932 } 1933 1934 vtscsi_enqueue_request(sc, req); 1935 } 1936 1937 static void 1938 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq) 1939 { 1940 struct vtscsi_request *req; 1941 int last; 1942 1943 last = 0; 1944 1945 vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq); 1946 1947 while ((req = virtqueue_drain(vq, &last)) != NULL) 1948 vtscsi_cancel_request(sc, req); 1949 1950 KASSERT(virtqueue_empty(vq), ("virtqueue not empty")); 1951 } 1952 1953 static void 1954 vtscsi_drain_vqs(struct vtscsi_softc *sc) 1955 { 1956 1957 if (sc->vtscsi_control_vq != NULL) 1958 vtscsi_drain_vq(sc, sc->vtscsi_control_vq); 1959 if (sc->vtscsi_request_vq != NULL) 1960 vtscsi_drain_vq(sc, sc->vtscsi_request_vq); 1961 if (sc->vtscsi_event_vq != NULL) 1962 vtscsi_drain_event_vq(sc); 1963 } 1964 1965 static void 1966 vtscsi_stop(struct vtscsi_softc *sc) 1967 { 1968 1969 vtscsi_disable_vqs_intr(sc); 1970 virtio_stop(sc->vtscsi_dev); 1971 } 1972 1973 static int 1974 vtscsi_reset_bus(struct vtscsi_softc *sc) 1975 { 1976 int error; 1977 1978 VTSCSI_LOCK_OWNED(sc); 1979 1980 if (vtscsi_bus_reset_disable != 0) { 1981 device_printf(sc->vtscsi_dev, "bus reset disabled\n"); 1982 return (0); 1983 } 1984 1985 sc->vtscsi_flags |= VTSCSI_FLAG_RESET; 1986 1987 /* 1988 * vtscsi_stop() will cause the in-flight 
requests to be canceled. 1989 * Those requests are then completed here so CAM will retry them 1990 * after the reset is complete. 1991 */ 1992 vtscsi_stop(sc); 1993 vtscsi_complete_vqs_locked(sc); 1994 1995 /* Rid the virtqueues of any remaining requests. */ 1996 vtscsi_drain_vqs(sc); 1997 1998 /* 1999 * Any resource shortage that froze the SIMQ cannot persist across 2000 * a bus reset so ensure it gets thawed here. 2001 */ 2002 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) 2003 xpt_release_simq(sc->vtscsi_sim, 0); 2004 2005 error = vtscsi_reinit(sc); 2006 if (error) { 2007 device_printf(sc->vtscsi_dev, 2008 "reinitialization failed, stopping device...\n"); 2009 vtscsi_stop(sc); 2010 } else 2011 vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 2012 CAM_LUN_WILDCARD); 2013 2014 sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET; 2015 2016 return (error); 2017 } 2018 2019 static void 2020 vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req) 2021 { 2022 2023 #ifdef INVARIANTS 2024 int req_nsegs, resp_nsegs; 2025 2026 req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq)); 2027 resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp)); 2028 2029 KASSERT(req_nsegs == 1, ("request crossed page boundary")); 2030 KASSERT(resp_nsegs == 1, ("response crossed page boundary")); 2031 #endif 2032 2033 req->vsr_softc = sc; 2034 callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0); 2035 } 2036 2037 static int 2038 vtscsi_alloc_requests(struct vtscsi_softc *sc) 2039 { 2040 struct vtscsi_request *req; 2041 int i, nreqs; 2042 2043 /* 2044 * Commands destined for either the request or control queues come 2045 * from the same SIM queue. Use the size of the request virtqueue 2046 * as it (should) be much more frequently used. Some additional 2047 * requests are allocated for internal (TMF) use. 
2048 */ 2049 nreqs = virtqueue_size(sc->vtscsi_request_vq); 2050 if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) 2051 nreqs /= VTSCSI_MIN_SEGMENTS; 2052 nreqs += VTSCSI_RESERVED_REQUESTS; 2053 2054 for (i = 0; i < nreqs; i++) { 2055 req = malloc(sizeof(struct vtscsi_request), M_DEVBUF, 2056 M_NOWAIT); 2057 if (req == NULL) 2058 return (ENOMEM); 2059 2060 vtscsi_init_request(sc, req); 2061 2062 sc->vtscsi_nrequests++; 2063 vtscsi_enqueue_request(sc, req); 2064 } 2065 2066 return (0); 2067 } 2068 2069 static void 2070 vtscsi_free_requests(struct vtscsi_softc *sc) 2071 { 2072 struct vtscsi_request *req; 2073 2074 while ((req = vtscsi_dequeue_request(sc)) != NULL) { 2075 KASSERT(callout_active(&req->vsr_callout) == 0, 2076 ("request callout still active")); 2077 2078 sc->vtscsi_nrequests--; 2079 free(req, M_DEVBUF); 2080 } 2081 2082 KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d", 2083 sc->vtscsi_nrequests)); 2084 } 2085 2086 static void 2087 vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req) 2088 { 2089 2090 KASSERT(req->vsr_softc == sc, 2091 ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc)); 2092 2093 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); 2094 2095 /* A request is available so the SIMQ could be released. */ 2096 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0) 2097 xpt_release_simq(sc->vtscsi_sim, 1); 2098 2099 req->vsr_ccb = NULL; 2100 req->vsr_complete = NULL; 2101 req->vsr_ptr0 = NULL; 2102 req->vsr_state = VTSCSI_REQ_STATE_FREE; 2103 req->vsr_flags = 0; 2104 2105 bzero(&req->vsr_ureq, sizeof(req->vsr_ureq)); 2106 bzero(&req->vsr_uresp, sizeof(req->vsr_uresp)); 2107 2108 /* 2109 * We insert at the tail of the queue in order to make it 2110 * very unlikely a request will be reused if we race with 2111 * stopping its callout handler. 
2112 */ 2113 TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link); 2114 } 2115 2116 static struct vtscsi_request * 2117 vtscsi_dequeue_request(struct vtscsi_softc *sc) 2118 { 2119 struct vtscsi_request *req; 2120 2121 req = TAILQ_FIRST(&sc->vtscsi_req_free); 2122 if (req != NULL) { 2123 req->vsr_state = VTSCSI_REQ_STATE_INUSE; 2124 TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link); 2125 } else 2126 sc->vtscsi_stats.dequeue_no_requests++; 2127 2128 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); 2129 2130 return (req); 2131 } 2132 2133 static void 2134 vtscsi_complete_request(struct vtscsi_request *req) 2135 { 2136 2137 if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED) 2138 req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE; 2139 2140 if (req->vsr_complete != NULL) 2141 req->vsr_complete(req->vsr_softc, req); 2142 } 2143 2144 static void 2145 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq) 2146 { 2147 struct vtscsi_request *req; 2148 2149 VTSCSI_LOCK_OWNED(sc); 2150 2151 while ((req = virtqueue_dequeue(vq, NULL)) != NULL) 2152 vtscsi_complete_request(req); 2153 } 2154 2155 static void 2156 vtscsi_control_vq_task(void *arg, int pending) 2157 { 2158 struct vtscsi_softc *sc; 2159 struct virtqueue *vq; 2160 2161 sc = arg; 2162 vq = sc->vtscsi_control_vq; 2163 2164 VTSCSI_LOCK(sc); 2165 2166 vtscsi_complete_vq(sc, sc->vtscsi_control_vq); 2167 2168 if (virtqueue_enable_intr(vq) != 0) { 2169 virtqueue_disable_intr(vq); 2170 VTSCSI_UNLOCK(sc); 2171 taskqueue_enqueue_fast(sc->vtscsi_tq, 2172 &sc->vtscsi_control_intr_task); 2173 return; 2174 } 2175 2176 VTSCSI_UNLOCK(sc); 2177 } 2178 2179 static void 2180 vtscsi_event_vq_task(void *arg, int pending) 2181 { 2182 struct vtscsi_softc *sc; 2183 struct virtqueue *vq; 2184 struct virtio_scsi_event *event; 2185 2186 sc = arg; 2187 vq = sc->vtscsi_event_vq; 2188 2189 VTSCSI_LOCK(sc); 2190 2191 while ((event = virtqueue_dequeue(vq, NULL)) != NULL) 2192 vtscsi_handle_event(sc, event); 2193 2194 if (virtqueue_enable_intr(vq) 
!= 0) { 2195 virtqueue_disable_intr(vq); 2196 VTSCSI_UNLOCK(sc); 2197 taskqueue_enqueue_fast(sc->vtscsi_tq, 2198 &sc->vtscsi_control_intr_task); 2199 return; 2200 } 2201 2202 VTSCSI_UNLOCK(sc); 2203 } 2204 2205 static void 2206 vtscsi_request_vq_task(void *arg, int pending) 2207 { 2208 struct vtscsi_softc *sc; 2209 struct virtqueue *vq; 2210 2211 sc = arg; 2212 vq = sc->vtscsi_request_vq; 2213 2214 VTSCSI_LOCK(sc); 2215 2216 vtscsi_complete_vq(sc, sc->vtscsi_request_vq); 2217 2218 if (virtqueue_enable_intr(vq) != 0) { 2219 virtqueue_disable_intr(vq); 2220 VTSCSI_UNLOCK(sc); 2221 taskqueue_enqueue_fast(sc->vtscsi_tq, 2222 &sc->vtscsi_request_intr_task); 2223 return; 2224 } 2225 2226 VTSCSI_UNLOCK(sc); 2227 } 2228 2229 static int 2230 vtscsi_control_vq_intr(void *xsc) 2231 { 2232 struct vtscsi_softc *sc; 2233 2234 sc = xsc; 2235 2236 virtqueue_disable_intr(sc->vtscsi_control_vq); 2237 taskqueue_enqueue_fast(sc->vtscsi_tq, 2238 &sc->vtscsi_control_intr_task); 2239 2240 return (1); 2241 } 2242 2243 static int 2244 vtscsi_event_vq_intr(void *xsc) 2245 { 2246 struct vtscsi_softc *sc; 2247 2248 sc = xsc; 2249 2250 virtqueue_disable_intr(sc->vtscsi_event_vq); 2251 taskqueue_enqueue_fast(sc->vtscsi_tq, 2252 &sc->vtscsi_event_intr_task); 2253 2254 return (1); 2255 } 2256 2257 static int 2258 vtscsi_request_vq_intr(void *xsc) 2259 { 2260 struct vtscsi_softc *sc; 2261 2262 sc = xsc; 2263 2264 virtqueue_disable_intr(sc->vtscsi_request_vq); 2265 taskqueue_enqueue_fast(sc->vtscsi_tq, 2266 &sc->vtscsi_request_intr_task); 2267 2268 return (1); 2269 } 2270 2271 static void 2272 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc) 2273 { 2274 2275 virtqueue_disable_intr(sc->vtscsi_control_vq); 2276 virtqueue_disable_intr(sc->vtscsi_event_vq); 2277 virtqueue_disable_intr(sc->vtscsi_request_vq); 2278 } 2279 2280 static void 2281 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc) 2282 { 2283 2284 virtqueue_enable_intr(sc->vtscsi_control_vq); 2285 virtqueue_enable_intr(sc->vtscsi_event_vq); 
2286 virtqueue_enable_intr(sc->vtscsi_request_vq); 2287 } 2288 2289 static void 2290 vtscsi_get_tunables(struct vtscsi_softc *sc) 2291 { 2292 char tmpstr[64]; 2293 2294 TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug); 2295 2296 snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level", 2297 device_get_unit(sc->vtscsi_dev)); 2298 TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug); 2299 } 2300 2301 static void 2302 vtscsi_add_sysctl(struct vtscsi_softc *sc) 2303 { 2304 device_t dev; 2305 struct vtscsi_statistics *stats; 2306 struct sysctl_ctx_list *ctx; 2307 struct sysctl_oid *tree; 2308 struct sysctl_oid_list *child; 2309 2310 dev = sc->vtscsi_dev; 2311 stats = &sc->vtscsi_stats; 2312 ctx = device_get_sysctl_ctx(dev); 2313 tree = device_get_sysctl_tree(dev); 2314 child = SYSCTL_CHILDREN(tree); 2315 2316 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level", 2317 CTLFLAG_RW, &sc->vtscsi_debug, 0, 2318 "Debug level"); 2319 2320 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts", 2321 CTLFLAG_RD, &stats->scsi_cmd_timeouts, 2322 "SCSI command timeouts"); 2323 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests", 2324 CTLFLAG_RD, &stats->dequeue_no_requests, 2325 "No available requests to dequeue"); 2326 } 2327 2328 static void 2329 vtscsi_printf_req(struct vtscsi_request *req, const char *func, 2330 const char *fmt, ...) 
2331 { 2332 struct vtscsi_softc *sc; 2333 union ccb *ccb; 2334 struct sbuf sb; 2335 va_list ap; 2336 char str[192]; 2337 char path_str[64]; 2338 2339 if (req == NULL) 2340 return; 2341 2342 sc = req->vsr_softc; 2343 ccb = req->vsr_ccb; 2344 2345 va_start(ap, fmt); 2346 sbuf_new(&sb, str, sizeof(str), 0); 2347 2348 if (ccb == NULL) { 2349 sbuf_printf(&sb, "(noperiph:%s%d:%u): ", 2350 cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim), 2351 cam_sim_bus(sc->vtscsi_sim)); 2352 } else { 2353 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str)); 2354 sbuf_cat(&sb, path_str); 2355 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 2356 scsi_command_string(&ccb->csio, &sb); 2357 sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len); 2358 } 2359 } 2360 2361 sbuf_vprintf(&sb, fmt, ap); 2362 va_end(ap); 2363 2364 sbuf_finish(&sb); 2365 printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func, 2366 sbuf_data(&sb)); 2367 } 2368