1 #include "qemu/osdep.h"
2 #include "qapi/error.h"
3 #include "qemu/error-report.h"
4 #include "qemu/module.h"
5 #include "qemu/option.h"
6 #include "qemu/hw-version.h"
7 #include "hw/qdev-properties.h"
8 #include "hw/scsi/scsi.h"
9 #include "migration/qemu-file-types.h"
10 #include "migration/vmstate.h"
11 #include "scsi/constants.h"
12 #include "sysemu/block-backend.h"
13 #include "sysemu/blockdev.h"
14 #include "sysemu/sysemu.h"
15 #include "sysemu/runstate.h"
16 #include "trace.h"
17 #include "sysemu/dma.h"
18 #include "qemu/cutils.h"
19
20 static char *scsibus_get_dev_path(DeviceState *dev);
21 static char *scsibus_get_fw_dev_path(DeviceState *dev);
22 static void scsi_req_dequeue(SCSIRequest *req);
23 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
24 static void scsi_target_free_buf(SCSIRequest *req);
25 static void scsi_clear_reported_luns_changed(SCSIRequest *req);
26
27 static int next_scsi_bus;
28
/*
 * Walk the children of @bus and return the device at (channel, id, lun).
 * If there is no exact LUN match, fall back to the first device matching
 * channel/id.  Returns NULL when nothing matches, or when the match is
 * not yet realized and @include_unrealized is false.
 * Caller must hold the RCU read lock.
 */
static SCSIDevice *do_scsi_device_find(SCSIBus *bus,
                                       int channel, int id, int lun,
                                       bool include_unrealized)
{
    BusChild *kid;
    SCSIDevice *retval = NULL;

    QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                retval = dev;
                break;
            }

            /*
             * If we don't find exact match (channel/bus/lun),
             * we will return the first device which matches channel/bus
             */

            if (!retval) {
                retval = dev;
            }
        }
    }

    /*
     * This function might run on the IO thread and we might race against
     * main thread hot-plugging the device.
     * We assume that as soon as .realized is set to true we can let
     * the user access the device.
     */

    if (retval && !include_unrealized && !qdev_is_realized(&retval->qdev)) {
        retval = NULL;
    }

    return retval;
}
70
/* Find the realized device at (channel, id, lun); NULL if none. */
SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    RCU_READ_LOCK_GUARD();
    return do_scsi_device_find(bus, channel, id, lun, false);
}
76
scsi_device_get(SCSIBus * bus,int channel,int id,int lun)77 SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
78 {
79 SCSIDevice *d;
80 RCU_READ_LOCK_GUARD();
81 d = do_scsi_device_find(bus, channel, id, lun, false);
82 if (d) {
83 object_ref(d);
84 }
85 return d;
86 }
87
88 /*
89 * Invoke @fn() for each enqueued request in device @s. Must be called from the
90 * main loop thread while the guest is stopped. This is only suitable for
91 * vmstate ->put(), use scsi_device_for_each_req_async() for other cases.
92 */
scsi_device_for_each_req_sync(SCSIDevice * s,void (* fn)(SCSIRequest *,void *),void * opaque)93 static void scsi_device_for_each_req_sync(SCSIDevice *s,
94 void (*fn)(SCSIRequest *, void *),
95 void *opaque)
96 {
97 SCSIRequest *req;
98 SCSIRequest *next_req;
99
100 assert(!runstate_is_running());
101 assert(qemu_in_main_thread());
102
103 QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
104 fn(req, opaque);
105 }
106 }
107
/* Context handed to scsi_device_for_each_req_async_bh() via a oneshot BH. */
typedef struct {
    SCSIDevice *s;                      /* referenced until the BH runs */
    void (*fn)(SCSIRequest *, void *);  /* callback applied to each request */
    void *fn_opaque;                    /* opaque argument passed to fn */
} SCSIDeviceForEachReqAsyncData;
113
/* BH scheduled by scsi_device_for_each_req_async(); runs in the BB's context */
static void scsi_device_for_each_req_async_bh(void *opaque)
{
    g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
    SCSIDevice *s = data->s;
    AioContext *ctx;
    SCSIRequest *req;
    SCSIRequest *next;

    /*
     * The BB cannot have changed contexts between this BH being scheduled and
     * now: BBs' AioContexts, when they have a node attached, can only be
     * changed via bdrv_try_change_aio_context(), in a drained section. While
     * we have the in-flight counter incremented, that drain must block.
     */
    ctx = blk_get_aio_context(s->conf.blk);
    assert(ctx == qemu_get_current_aio_context());

    /* _SAFE iteration: @fn may dequeue (and free) the current request */
    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        data->fn(req, data->fn_opaque);
    }

    /* Drop the reference taken by scsi_device_for_each_req_async() */
    object_unref(OBJECT(s));

    /* Paired with blk_inc_in_flight() in scsi_device_for_each_req_async() */
    blk_dec_in_flight(s->conf.blk);
}
141
142 /*
143 * Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
144 * runs in the AioContext that is executing the request.
145 * Keeps the BlockBackend's in-flight counter incremented until everything is
146 * done, so draining it will settle all scheduled @fn() calls.
147 */
scsi_device_for_each_req_async(SCSIDevice * s,void (* fn)(SCSIRequest *,void *),void * opaque)148 static void scsi_device_for_each_req_async(SCSIDevice *s,
149 void (*fn)(SCSIRequest *, void *),
150 void *opaque)
151 {
152 assert(qemu_in_main_thread());
153
154 SCSIDeviceForEachReqAsyncData *data =
155 g_new(SCSIDeviceForEachReqAsyncData, 1);
156
157 data->s = s;
158 data->fn = fn;
159 data->fn_opaque = opaque;
160
161 /*
162 * Hold a reference to the SCSIDevice until
163 * scsi_device_for_each_req_async_bh() finishes.
164 */
165 object_ref(OBJECT(s));
166
167 /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
168 blk_inc_in_flight(s->conf.blk);
169 aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
170 scsi_device_for_each_req_async_bh,
171 data);
172 }
173
scsi_device_realize(SCSIDevice * s,Error ** errp)174 static void scsi_device_realize(SCSIDevice *s, Error **errp)
175 {
176 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
177 if (sc->realize) {
178 sc->realize(s, errp);
179 }
180 }
181
/* Invoke the device class's unrealize hook, if one is provided. */
static void scsi_device_unrealize(SCSIDevice *s)
{
    SCSIDeviceClass *klass = SCSI_DEVICE_GET_CLASS(s);

    if (!klass->unrealize) {
        return;
    }
    klass->unrealize(s);
}
189
/*
 * Parse CDB @buf into @cmd.  The common parser runs first; an HBA that
 * provides its own parse_cdb hook overrides the result.  Returns the
 * parser's return code (0 on success).
 */
int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       size_t buf_len, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf, buf_len);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, buf_len, hba_private);
    }
    return rc;
}
203
scsi_device_alloc_req(SCSIDevice * s,uint32_t tag,uint32_t lun,uint8_t * buf,void * hba_private)204 static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
205 uint8_t *buf, void *hba_private)
206 {
207 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
208 if (sc->alloc_req) {
209 return sc->alloc_req(s, tag, lun, buf, hba_private);
210 }
211
212 return NULL;
213 }
214
/* Notify the device class that its unit attention sense was delivered. */
void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *klass = SCSI_DEVICE_GET_CLASS(s);

    if (!klass->unit_attention_reported) {
        return;
    }
    klass->unit_attention_reported(s);
}
222
/* Create a scsi bus, and attach devices to it. */
void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
                         const SCSIBusInfo *info, const char *bus_name)
{
    qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;   /* sequential global bus numbering */
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus));
}
232
/* Flag @req for resubmission on VM resume (see scsi_dma_restart_req). */
void scsi_req_retry(SCSIRequest *req)
{
    req->retry = true;
}
237
/* Called in the AioContext that is executing the request */
static void scsi_dma_restart_req(SCSIRequest *req, void *opaque)
{
    scsi_req_ref(req);
    if (req->retry) {
        req->retry = false;
        switch (req->cmd.mode) {
        case SCSI_XFER_FROM_DEV:
        case SCSI_XFER_TO_DEV:
            /* a data transfer was interrupted: resume where it stopped */
            scsi_req_continue(req);
            break;
        case SCSI_XFER_NONE:
            /* no data phase: resubmit the command from scratch */
            scsi_req_dequeue(req);
            scsi_req_enqueue(req);
            break;
        }
    }
    scsi_req_unref(req);
}
257
/* VM state change handler: restart retried requests when the guest resumes. */
static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
{
    SCSIDevice *dev = opaque;

    assert(qemu_in_main_thread());

    if (running) {
        /* kick every queued request now that the VM is running again */
        scsi_device_for_each_req_async(dev, scsi_dma_restart_req, NULL);
    }
}
270
scsi_bus_is_address_free(SCSIBus * bus,int channel,int target,int lun,SCSIDevice ** p_dev)271 static bool scsi_bus_is_address_free(SCSIBus *bus,
272 int channel, int target, int lun,
273 SCSIDevice **p_dev)
274 {
275 SCSIDevice *d;
276
277 RCU_READ_LOCK_GUARD();
278 d = do_scsi_device_find(bus, channel, target, lun, true);
279 if (d && d->lun == lun) {
280 if (p_dev) {
281 *p_dev = d;
282 }
283 return false;
284 }
285 if (p_dev) {
286 *p_dev = NULL;
287 }
288 return true;
289 }
290
/* Bus check_address hook: validate channel/id/lun ranges, reject duplicates. */
static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = SCSI_BUS(qbus);

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return false;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return false;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return false;
    }

    /* id/lun of -1 mean auto-assign in scsi_qdev_realize(), so skip */
    if (dev->id != -1 && dev->lun != -1) {
        SCSIDevice *d;
        if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return false;
        }
    }

    return true;
}
319
/*
 * Common realize for SCSI devices: auto-assign a free target/LUN where the
 * user left id/lun at -1, then call the class realize hook and install the
 * VM state change handler that restarts DMA on resume.
 */
static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    bool is_free;
    Error *local_err = NULL;

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        /* scan targets 0..max_target for one where (id, lun) is free */
        do {
            is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL);
        } while (!is_free && id < bus->info->max_target);
        if (!is_free) {
            error_setg(errp, "no free target");
            return;
        }
        dev->id = id;
    } else if (dev->lun == -1) {
        int lun = -1;
        /* scan LUNs 0..max_lun on the chosen target for a free slot */
        do {
            is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL);
        } while (!is_free && lun < bus->info->max_lun);
        if (!is_free) {
            error_setg(errp, "no free lun");
            return;
        }
        dev->lun = lun;
    }

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
                                                     scsi_dma_restart_cb, dev);
}
361
/* Common unrealize: purge outstanding requests and undo realize's setup. */
static void scsi_qdev_unrealize(DeviceState *qdev)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    if (dev->vmsentry) {
        qemu_del_vm_change_state_handler(dev->vmsentry);
    }

    /* fail any requests still queued before the device goes away */
    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));

    scsi_device_unrealize(dev);

    blockdev_mark_auto_del(dev->conf.blk);
}
376
/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, BlockConf *conf,
                                      const char *serial, Error **errp)
{
    const char *driver;
    char *name;
    DeviceState *dev;
    SCSIDevice *s;
    DriveInfo *dinfo;

    /* pick a device model to match the backend: SG passthrough, CD, or disk */
    if (blk_is_sg(blk)) {
        driver = "scsi-generic";
    } else {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->media_cd) {
            driver = "scsi-cd";
        } else {
            driver = "scsi-hd";
        }
    }
    dev = qdev_new(driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev));
    g_free(name);

    s = SCSI_DEVICE(dev);
    s->conf = *conf;

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    /* not every device model has these properties; set them only if present */
    if (object_property_find(OBJECT(dev), "removable")) {
        qdev_prop_set_bit(dev, "removable", removable);
    }
    if (serial && object_property_find(OBJECT(dev), "serial")) {
        qdev_prop_set_string(dev, "serial", serial);
    }
    if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }

    if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return s;
}
424
/* Create devices for every '-drive if=scsi' drive assigned to this bus. */
void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
{
    Location loc;
    DriveInfo *dinfo;
    int unit;
    BlockConf conf = {
        .bootindex = -1,
        .share_rw = false,
        .rerror = BLOCKDEV_ON_ERROR_AUTO,
        .werror = BLOCKDEV_ON_ERROR_AUTO,
    };

    loc_push_none(&loc);
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        if (dinfo == NULL) {
            continue;
        }
        /* report any error against the -drive option's own location */
        qemu_opts_loc_restore(dinfo->opts);
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, &conf, NULL, &error_fatal);
    }
    loc_pop(&loc);
}
449
/* SCSIReqOps implementation for commands with invalid CDB fields. */

static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_field = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field
};
461
462 /* SCSIReqOps implementation for invalid commands. */
463
static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    /* fail immediately with ILLEGAL REQUEST / INVALID OPCODE sense */
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};
475
476 /* SCSIReqOps implementation for unit attention conditions. */
477
/* Move a pending unit attention condition into the request's sense data. */
static void scsi_fetch_unit_attention_sense(SCSIRequest *req)
{
    SCSISense *ua = NULL;

    /* a device-level UA takes precedence over a bus-wide one */
    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->bus->unit_attention;
    }

    /*
     * Fetch the unit attention sense immediately so that another
     * scsi_req_new does not use reqops_unit_attention.
     */
    if (ua) {
        scsi_req_build_sense(req, *ua);
        *ua = SENSE_CODE(NO_SENSE);
    }
}
497
static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    /* sense was already filled in by scsi_fetch_unit_attention_sense() */
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size = sizeof(SCSIRequest),
    .init_req = scsi_fetch_unit_attention_sense,
    .send_command = scsi_unit_attention
};
509
510 /* SCSIReqOps implementation for REPORT LUNS and for commands sent to
511 an invalid LUN. */
512
typedef struct SCSITargetReq SCSITargetReq;

/* Request state for commands emulated by the target itself. */
struct SCSITargetReq {
    SCSIRequest req;
    int len;        /* response bytes still to return to the initiator */
    uint8_t *buf;   /* response payload, allocated per request */
    int buf_len;    /* allocated size of buf */
};
521
/* Encode @lun into the first two bytes of an 8-byte REPORT LUNS entry. */
static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun >= 256) {
        /* Flat space addressing method */
        outbuf[0] = 0x40 | (lun >> 8);
        outbuf[1] = lun & 0xff;
        return;
    }
    /* Simple logical unit addressing method */
    outbuf[0] = 0;
    outbuf[1] = lun;
}
534
/* Build the REPORT LUNS response into r->buf; false on bad CDB fields. */
static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int channel, id;
    uint8_t tmp[8] = {0};
    int len = 0;
    GByteArray *buf;

    /* allocation length below 16 bytes is rejected */
    if (r->req.cmd.xfer < 16) {
        return false;
    }
    /* SELECT REPORT values above 2 are not supported */
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }

    /* reserve space for 63 LUNs*/
    buf = g_byte_array_sized_new(512);

    channel = r->req.dev->channel;
    id = r->req.dev->id;

    /* add size (will be updated later to correct value */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    /* add LUN0 */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    WITH_RCU_READ_LOCK_GUARD() {
        QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) {
            DeviceState *qdev = kid->child;
            SCSIDevice *dev = SCSI_DEVICE(qdev);

            /* report every realized LUN of this target other than LUN0 */
            if (dev->channel == channel && dev->id == id && dev->lun != 0 &&
                qdev_is_realized(&dev->qdev)) {
                store_lun(tmp, dev->lun);
                g_byte_array_append(buf, tmp, 8);
                len += 8;
            }
        }
    }

    r->buf_len = len;
    r->buf = g_byte_array_free(buf, FALSE);
    r->len = MIN(len, r->req.cmd.xfer & ~7);  /* whole 8-byte entries only */

    /* store the LUN list length */
    stl_be_p(&r->buf[0], len - 8);

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    scsi_clear_reported_luns_changed(&r->req);

    return true;
}
594
scsi_target_emulate_inquiry(SCSITargetReq * r)595 static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
596 {
597 assert(r->req.dev->lun != r->req.lun);
598
599 scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);
600
601 if (r->req.cmd.buf[1] & 0x2) {
602 /* Command support data - optional, not implemented */
603 return false;
604 }
605
606 if (r->req.cmd.buf[1] & 0x1) {
607 /* Vital product data */
608 uint8_t page_code = r->req.cmd.buf[2];
609 r->buf[r->len++] = page_code ; /* this page */
610 r->buf[r->len++] = 0x00;
611
612 switch (page_code) {
613 case 0x00: /* Supported page codes, mandatory */
614 {
615 int pages;
616 pages = r->len++;
617 r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
618 r->buf[pages] = r->len - pages - 1; /* number of pages */
619 break;
620 }
621 default:
622 return false;
623 }
624 /* done with EVPD */
625 assert(r->len < r->buf_len);
626 r->len = MIN(r->req.cmd.xfer, r->len);
627 return true;
628 }
629
630 /* Standard INQUIRY data */
631 if (r->req.cmd.buf[2] != 0) {
632 return false;
633 }
634
635 /* PAGE CODE == 0 */
636 r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
637 memset(r->buf, 0, r->len);
638 if (r->req.lun != 0) {
639 r->buf[0] = TYPE_NO_LUN;
640 } else {
641 r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
642 r->buf[2] = 5; /* Version */
643 r->buf[3] = 2 | 0x10; /* HiSup, response data format */
644 r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
645 r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. */
646 memcpy(&r->buf[8], "QEMU ", 8);
647 memcpy(&r->buf[16], "QEMU TARGET ", 16);
648 pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
649 }
650 return true;
651 }
652
scsi_sense_len(SCSIRequest * req)653 static size_t scsi_sense_len(SCSIRequest *req)
654 {
655 if (req->dev->type == TYPE_SCANNER)
656 return SCSI_SENSE_LEN_SCANNER;
657 else
658 return SCSI_SENSE_LEN;
659 }
660
/* Emulate the commands a target must answer even for nonexistent LUNs. */
static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0; /* CDB byte 1 bit 0 clear */

    /* only INQUIRY and REQUEST SENSE are emulated for unknown LUNs */
    if (req->lun != 0 &&
        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }
    switch (buf[0]) {
    case REPORT_LUNS:
        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        }
        break;
    case INQUIRY:
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        }
        break;
    case REQUEST_SENSE:
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
        if (req->lun != 0) {
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
                                          sense, fixed_sense);
        } else {
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
                                           fixed_sense);
        }
        /* delivering the sense clears a pending unit attention condition */
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
        }
        break;
    case TEST_UNIT_READY:
        break;
    default:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    illegal_request:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }

    /* no data to transfer: finish now, otherwise wait for read_data */
    if (!r->len) {
        scsi_req_complete(req, GOOD);
    }
    return r->len;
}
718
/* SCSIReqOps read_data hook: hand the whole payload over in one transfer. */
static void scsi_target_read_data(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    uint32_t remaining = r->len;

    if (remaining == 0) {
        /* everything has been transferred already */
        scsi_req_complete(&r->req, GOOD);
        return;
    }
    r->len = 0;
    scsi_req_data(&r->req, remaining);
}
732
scsi_target_get_buf(SCSIRequest * req)733 static uint8_t *scsi_target_get_buf(SCSIRequest *req)
734 {
735 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
736
737 return r->buf;
738 }
739
scsi_target_alloc_buf(SCSIRequest * req,size_t len)740 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
741 {
742 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
743
744 r->buf = g_malloc(len);
745 r->buf_len = len;
746
747 return r->buf;
748 }
749
/* SCSIReqOps free_req hook: release the emulated response payload. */
static void scsi_target_free_buf(SCSIRequest *req)
{
    g_free(DO_UPCAST(SCSITargetReq, req, req)->buf);
}
756
/* Request ops for REPORT LUNS and for commands addressed to missing LUNs. */
static const struct SCSIReqOps reqops_target_command = {
    .size = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data = scsi_target_read_data,
    .get_buf = scsi_target_get_buf,
    .free_req = scsi_target_free_buf,
};
764
765
/*
 * Allocate and minimally initialize a request of reqops->size bytes.
 * Takes a reference on the device and on the HBA for the request's
 * lifetime; released by scsi_req_unref() on the last reference.
 */
SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
{
    SCSIRequest *req;
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    /* zero the tail after 'sense' (including ops-specific trailing state) */
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->refcount = 1;
    req->bus = bus;
    req->dev = d;
    req->tag = tag;
    req->lun = lun;
    req->hba_private = hba_private;
    req->status = -1;
    req->host_status = -1;
    req->ops = reqops;
    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);

    if (reqops->init_req) {
        reqops->init_req(req);
    }

    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
    return req;
}
797
/*
 * Create a request for CDB @buf, selecting the SCSIReqOps implementation:
 * unit-attention reporting, target-level emulation, or the device's own
 * allocator.  An unparsable CDB yields a request that will fail with
 * INVALID_OPCODE / INVALID_FIELD sense when submitted.
 */
SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, size_t buf_len, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSIRequest *req;
    SCSICommand cmd = { .len = 0 };
    int ret;

    /*
     * NOTE(review): buf[0] is still read in the trace switch below even on
     * this path — relies on callers passing a readable buffer; verify.
     */
    if (buf_len == 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, 0);
        goto invalid_opcode;
    }

    /* the opcodes listed below are allowed to run under a unit attention */
    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;
    } else {
        ops = NULL;
    }

    /* target-emulated commands always use the common CDB parser */
    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf, buf_len);
    } else {
        ret = sc->parse_cdb(d, &cmd, buf, buf_len, hba_private);
    }

    if (ret != 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
invalid_opcode:
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
    } else {
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
                              cmd.mode, cmd.xfer);
        if (cmd.lba != -1) {
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
                                      cmd.lba);
        }

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
        } else if (ops) {
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
        } else {
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
        }
    }

    req->cmd = cmd;
    req->residual = req->cmd.xfer;

    /* trace the most commonly interesting opcodes */
    switch (buf[0]) {
    case INQUIRY:
        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
        break;
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        break;
    case REPORT_LUNS:
        trace_scsi_report_luns(d->id, lun, tag);
        break;
    case REQUEST_SENSE:
        trace_scsi_request_sense(d->id, lun, tag);
        break;
    default:
        break;
    }

    return req;
}
884
/* Return the request's data buffer via its SCSIReqOps get_buf hook. */
uint8_t *scsi_req_get_buf(SCSIRequest *req)
{
    return req->ops->get_buf(req);
}
889
/* Drop a pending REPORTED LUNS DATA HAS CHANGED unit attention, if any. */
static void scsi_clear_reported_luns_changed(SCSIRequest *req)
{
    SCSISense *ua;

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->bus->unit_attention;
    } else {
        return;
    }

    /* clear only this specific UA; any other condition stays pending */
    if (ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
        ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq) {
        *ua = SENSE_CODE(NO_SENSE);
    }
}
907
/*
 * Copy the request's autosense into @buf in fixed format and clear any
 * unit attention it carried.  Returns the number of bytes copied, or 0
 * when no sense data is available.
 */
int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
{
    int ret;

    assert(len >= 14);
    if (!req->sense_len) {
        return 0;
    }

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     * (SAM-5, 5.14).
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }
    return ret;
}
935
/* Copy the device's current sense data into @buf in the requested format. */
int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
{
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
}
940
/* Record @sense as the request's autosense data. */
void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
{
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);
}
947
/* Put @req on the device's request list; the list holds one reference. */
static void scsi_req_enqueue_internal(SCSIRequest *req)
{
    assert(!req->enqueued);
    scsi_req_ref(req);
    /* let the HBA supply a scatter/gather list for the data phase */
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    } else {
        req->sg = NULL;
    }
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}
960
/* Enqueue @req and submit its command; returns send_command's result. */
int32_t scsi_req_enqueue(SCSIRequest *req)
{
    int32_t rc;

    assert(!req->retry);
    scsi_req_enqueue_internal(req);
    /* keep the request alive across send_command, which may complete it */
    scsi_req_ref(req);
    rc = req->ops->send_command(req, req->cmd.buf);
    scsi_req_unref(req);
    return rc;
}
972
/* Remove @req from the device's request list, dropping the list's ref. */
static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    req->retry = false;
    if (req->enqueued) {
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
        scsi_req_unref(req);
    }
}
983
/*
 * Expected GET PERFORMANCE response length: an 8-byte header plus
 * @num_desc descriptors whose size depends on the Type field.
 */
static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7. */
    int desc_size;

    switch (type) {
    case 0:
        /*
         * Descriptors per Table 295 (Nominal performance) when the low
         * data_type bits are zero, per Table 296 (Exceptions) otherwise.
         */
        desc_size = ((data_type & 3) == 0) ? 16 : 6;
        break;
    case 1:
    case 4:
    case 5:
        desc_size = 8;
        break;
    case 2:
        desc_size = 2048;
        break;
    case 3:
        desc_size = 16;
        break;
    default:
        /* unknown type: just the header */
        return 8;
    }
    return desc_size * num_desc + 8;
}
1008
/* Transfer-unit size for an ATA PASS-THROUGH CDB (from the T_LENGTH byte). */
static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
{
    bool byte_block = (buf[2] >> 2) & 0x1;
    bool type = (buf[2] >> 4) & 0x1;

    if (!byte_block) {
        /* count is in bytes */
        return 1;
    }
    /* count is in blocks: device block size or 512-byte units */
    return type ? dev->blocksize : 512;
}
1027
/* Transfer length of an ATA PASS-THROUGH(12) command, in bytes. */
static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int unit = ata_passthrough_xfer_unit(dev, buf);
    int count;

    switch (buf[2] & 0x3) {
    case 1:
        count = buf[3];
        break;
    case 2:
        count = buf[4];
        break;
    default:
        /* 0, and the USB-specific value 3, transfer no data */
        count = 0;
        break;
    }

    return count * unit;
}
1050
/* Transfer length of an ATA PASS-THROUGH(16) command, in bytes. */
static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int unit = ata_passthrough_xfer_unit(dev, buf);
    bool extend = buf[1] & 0x1;
    int count;

    switch (buf[2] & 0x3) {
    case 1:
        /* with EXTEND set, the high count byte is also valid */
        count = buf[4] | (extend ? buf[3] << 8 : 0);
        break;
    case 2:
        count = buf[6] | (extend ? buf[5] << 8 : 0);
        break;
    default:
        /* 0, and the USB-specific value 3, transfer no data */
        count = 0;
        break;
    }

    return count * unit;
}
1076
/*
 * Fill in cmd->xfer (expected data-transfer length, in bytes) for a
 * generic device from the CDB in @buf.  cmd->xfer is preloaded from the
 * CDB's generic length field via scsi_cdb_xfer() and then adjusted per
 * opcode.  Always returns 0.
 */
static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    cmd->xfer = scsi_cdb_xfer(buf);
    switch (buf[0]) {
    /* Opcodes with no data phase at all. */
    case TEST_UNIT_READY:
    case REWIND:
    case START_STOP:
    case SET_CAPACITY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case SPACE:
    case RESERVE:
    case RELEASE:
    case ERASE:
    case ALLOW_MEDIUM_REMOVAL:
    case SEEK_10:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCATE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_CD_SPEED:
    case SET_LIMITS:
    case WRITE_LONG_10:
    case UPDATE_BLOCK:
    case RESERVE_TRACK:
    case SET_READ_AHEAD:
    case PRE_FETCH:
    case PRE_FETCH_16:
    case ALLOW_OVERWRITE:
        cmd->xfer = 0;
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* The BYTCHK bits in byte 1 decide whether data accompanies the
         * command: bit 1 clear = none, bit 2 set = a single block. */
        if ((buf[1] & 2) == 0) {
            cmd->xfer = 0;
        } else if ((buf[1] & 4) != 0) {
            cmd->xfer = 1;
        }
        cmd->xfer *= dev->blocksize;
        break;
    case MODE_SENSE:
        /* Keep the allocation length computed by scsi_cdb_xfer(). */
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        /* NDOB bit set: no data-out buffer, else exactly one block. */
        cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
        break;
    case READ_CAPACITY_10:
        cmd->xfer = 8;
        break;
    case READ_BLOCK_LIMITS:
        cmd->xfer = 6;
        break;
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands. */
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
        } else {
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case WRITE_6:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
        /* Transfer length is in blocks; convert to bytes. */
        cmd->xfer *= dev->blocksize;
        break;
    case READ_6:
    case READ_REVERSE:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case READ_10:
    case READ_12:
    case READ_16:
        /* Transfer length is in blocks; convert to bytes. */
        cmd->xfer *= dev->blocksize;
        break;
    case FORMAT_UNIT:
        /* MMC mandates the parameter list to be 12-bytes long. Parameters
         * for block devices are restricted to the header right now. */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
            cmd->xfer = 12;
        } else {
            cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
        }
        break;
    case INQUIRY:
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        /* 16-bit big-endian allocation length in bytes 3..4. */
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    case READ_CD:
    case READ_BUFFER:
    case WRITE_BUFFER:
    case SEND_CUE_SHEET:
        /* 24-bit big-endian length in bytes 6..8. */
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        break;
    case ERASE_12:
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE. */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
        }
        break;
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = 0;
        } else {
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
        }
        break;
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);
        break;
    }
    return 0;
}
1218
/*
 * Fill in cmd->xfer for a stream (tape) device from the CDB in @buf.
 * Opcodes not listed here fall back to the generic scsi_req_xfer()
 * table.  Returns 0 on success, -1 for an invalid READ POSITION
 * service action.
 */
static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* stream commands */
    case ERASE_12:
    case ERASE_16:
        cmd->xfer = 0;
        break;
    case READ_6:
    case READ_REVERSE:
    case RECOVER_BUFFERED_DATA:
    case WRITE_6:
        /* 24-bit big-endian transfer length in bytes 2..4. */
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            /* FIXED bit: length counts blocks, not bytes. */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case READ_16:
    case READ_REVERSE_16:
    case VERIFY_16:
    case WRITE_16:
        /* 24-bit big-endian transfer length in bytes 12..14. */
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case REWIND:
    case LOAD_UNLOAD:
        cmd->xfer = 0;
        break;
    case SPACE_16:
        cmd->xfer = buf[13] | (buf[12] << 8);
        break;
    case READ_POSITION:
        /* The returned data size depends on the requested report form. */
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = 20;
            break;
        case LONG_FORM:
            cmd->xfer = 32;
            break;
        case EXTENDED_FORM:
            cmd->xfer = buf[8] | (buf[7] << 8);
            break;
        default:
            return -1;
        }

        break;
    case FORMAT_UNIT:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}
1278
/*
 * Fill in cmd->xfer for a medium-changer device from the CDB in @buf.
 * Unrecognized opcodes fall back to the generic scsi_req_xfer() table.
 * Returns 0 on success, or whatever the generic helper returns.
 */
static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        /* Pure motion commands: no data phase. */
        cmd->xfer = 0;
        return 0;
    case READ_ELEMENT_STATUS:
        /* 24-bit big-endian allocation length in bytes 7..9. */
        cmd->xfer = (buf[7] << 16) | (buf[8] << 8) | buf[9];
        return 0;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
}
1300
/*
 * Fill in cmd->xfer for a scanner device from the CDB in @buf.
 * Unlisted opcodes (e.g. GET_DATA_BUFFER_STATUS) are handled by the
 * generic scsi_req_xfer() table.
 */
static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        /* No data phase. */
        cmd->xfer = 0;
        return 0;
    case SCAN:
        cmd->xfer = buf[4];
        return 0;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        /* 24-bit big-endian length in bytes 6..8. */
        cmd->xfer = (buf[6] << 16) | (buf[7] << 8) | buf[8];
        return 0;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }
}
1324
/*
 * Set cmd->mode (transfer direction) for the command in cmd->buf.
 * Zero-length transfers are SCSI_XFER_NONE; the listed write-type
 * opcodes transfer data TO the device; ATA passthrough consults the
 * T_DIR bit; everything else transfers FROM the device.
 */
static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP. START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                   SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}
1388
/*
 * Parse the CDB in @buf into @cmd for device @dev.
 *
 * Validates the CDB length against @buf_len, computes the transfer
 * length and direction according to the device type, copies the CDB
 * into cmd->buf and derives cmd->lba.
 *
 * Returns 0 on success, -1 if the CDB is invalid or truncated.
 */
int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       size_t buf_len)
{
    int rc;
    int len;

    cmd->lba = -1;
    len = scsi_cdb_length(buf);
    /* Cast is safe: len is known non-negative after the first check. */
    if (len < 0 || (size_t)len > buf_len) {
        return -1;
    }

    cmd->len = len;
    /* The transfer-length encoding depends on the device class. */
    switch (dev->type) {
    case TYPE_TAPE:
        rc = scsi_req_stream_xfer(cmd, dev, buf);
        break;
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        break;
    case TYPE_SCANNER:
        rc = scsi_req_scanner_length(cmd, dev, buf);
        break;
    default:
        rc = scsi_req_xfer(cmd, dev, buf);
        break;
    }

    if (rc != 0) {
        return rc;
    }

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
    return 0;
}
1425
/* Record @sense as a unit attention on @dev and let the HBA's change
 * hook (if it provides one) react to the event. */
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
{
    SCSIBus *sbus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (sbus->info->change != NULL) {
        sbus->info->change(sbus, dev, sense);
    }
}
1435
/* Take an extra reference on @req and return it for call chaining.
 * The request must still be alive (refcount > 0). */
SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    ++req->refcount;
    return req;
}
1442
/*
 * Drop one reference to @req.  On the final unref, free the HBA's
 * per-request private data, run the request ops' free_req hook, drop
 * the references held on the device and the HBA, and free the request.
 */
void scsi_req_unref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        /* Only notify the HBA if it actually attached private data. */
        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        }
        if (req->ops->free_req) {
            req->ops->free_req(req);
        }
        /* Counterparts of the refs taken when the request was created. */
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));
        g_free(req);
    }
}
1461
1462 /* Tell the device that we finished processing this chunk of I/O. It
1463 will start the next chunk or complete the command. */
void scsi_req_continue(SCSIRequest *req)
{
    /* A cancelled request must not be restarted. */
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
        return;
    }
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    /* Kick the device-side data callback matching the transfer direction. */
    if (req->cmd.mode != SCSI_XFER_TO_DEV) {
        req->ops->read_data(req);
    } else {
        req->ops->write_data(req);
    }
}
1477
1478 /* Called by the devices when data is ready for the HBA. The HBA should
1479 start a DMA operation to read or fill the device's data buffer.
1480 Once it completes, calling scsi_req_continue will restart I/O. */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        /* No scatter/gather list: let the HBA move the data itself and
         * account for the bytes transferred here. */
        req->residual -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    /* dma_buf_read/write update req->residual as a side effect. */
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        dma_buf_read(buf, len, &req->residual, req->sg,
                     MEMTXATTRS_UNSPECIFIED);
    } else {
        dma_buf_write(buf, len, &req->residual, req->sg,
                      MEMTXATTRS_UNSPECIFIED);
    }
    scsi_req_continue(req);
}
1512
/* Dump a request (bus, target id, command name, raw CDB bytes and the
 * transfer direction/length) to stderr for debugging. */
void scsi_req_print(SCSIRequest *req)
{
    FILE *fp = stderr;
    int i;

    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            req->dev->id,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    }
    if (req->cmd.mode == SCSI_XFER_NONE) {
        fprintf(fp, " - none\n");
    } else if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
    } else if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
    } else {
        fprintf(fp, " - Oops\n");
    }
}
1540
/*
 * Complete @req with a transport-level failure (@host_status).
 *
 * HBAs that implement the fail callback get the raw host status;
 * otherwise the host status is translated into a SCSI status and,
 * when appropriate, a CHECK CONDITION with matching sense data.
 */
void scsi_req_complete_failed(SCSIRequest *req, int host_status)
{
    SCSISense sense;
    int status;

    /* The request must not have been completed already. */
    assert(req->status == -1 && req->host_status == -1);
    assert(req->ops != &reqops_unit_attention);

    if (!req->bus->info->fail) {
        /* Fall back to reporting an emulated SCSI status. */
        status = scsi_sense_from_host_status(req->host_status, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(req, sense);
        }
        scsi_req_complete(req, status);
        return;
    }

    req->host_status = host_status;
    /* Hold a ref across the HBA callback and the notifiers. */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->fail(req);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}
1567
/*
 * Complete @req with SCSI status @status and SCSI_HOST_OK.  Propagates
 * the request's sense data to the device (for a later REQUEST SENSE),
 * dequeues the request and notifies the HBA's complete callback.
 */
void scsi_req_complete(SCSIRequest *req, int status)
{
    /* The request must not have been completed already. */
    assert(req->status == -1 && req->host_status == -1);
    req->status = status;
    req->host_status = SCSI_HOST_OK;

    assert(req->sense_len <= sizeof(req->sense));
    /* A successful command leaves no sense data behind. */
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    /* Hold a ref across the HBA callback and the notifiers. */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->residual);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}
1596
1597 /* Called by the devices when the request is canceled. */
void scsi_req_cancel_complete(SCSIRequest *req)
{
    assert(req->io_canceled);
    /* Give the HBA a chance to react, then wake everyone who registered
     * for cancellation and drop the ref taken when cancelling started. */
    if (req->bus->info->cancel != NULL) {
        req->bus->info->cancel(req);
    }
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}
1607
1608 /* Cancel @req asynchronously. @notifier is added to @req's cancellation
1609 * notifier list, the bus will be notified the requests cancellation is
1610 * completed.
1611 * */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added. Just wait for that.
         */
        assert(req->aiocb);
        return;
    }
    /* Dropped in scsi_req_cancel_complete. */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        /* In-flight AIO: completion happens later, asynchronously. */
        blk_aio_cancel_async(req->aiocb);
    } else {
        /* Nothing in flight: the cancellation completes right away. */
        scsi_req_cancel_complete(req);
    }
}
1636
/*
 * Cancel @req.  Unlike scsi_req_cancel_async, in-flight AIO is
 * cancelled with blk_aio_cancel; no cancellation notifier is added.
 * Requests that were never enqueued are left untouched.
 */
void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete. */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}
1654
/*
 * Map a UNIT ATTENTION sense to a precedence value; a LOWER value means
 * a MORE important condition.  Non-UA senses get INT_MAX (never win).
 */
static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others": the empty statement falls
         * through to the default (asc << 8) | ascq precedence below. */
        ;
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION  */
        return 8;
    }
    /* All other UAs rank below the reset-related conditions above. */
    return (sense.asc << 8) | sense.ascq;
}
1683
/* Record a bus-wide unit attention, unless a more important (lower
 * precedence value) condition is already pending. */
void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    if (scsi_ua_precedence(sense) < scsi_ua_precedence(bus->unit_attention)) {
        bus->unit_attention = sense;
    }
}
1701
/* Record a per-device unit attention, unless a more important (lower
 * precedence value) condition is already pending. */
void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return;
    }
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    if (scsi_ua_precedence(sense) < scsi_ua_precedence(sdev->unit_attention)) {
        sdev->unit_attention = sense;
    }
}
1721
/* Callback for scsi_device_for_each_req_async(): start asynchronous
 * cancellation of one request, with no completion notifier. */
static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
{
    scsi_req_cancel_async(req, NULL);
}
1726
1727 /**
1728 * Cancel all requests, and block until they are deleted.
1729 */
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    /* Kick off asynchronous cancellation of every in-flight request. */
    scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);

    /*
     * Await all the scsi_device_purge_one_req() calls scheduled by
     * scsi_device_for_each_req_async(), and all I/O requests that were
     * cancelled this way, but may still take a bit of time to settle.
     */
    blk_drain(sdev->conf.blk);

    /* Report @sense (e.g. a reset) as a unit attention on the device. */
    scsi_device_set_ua(sdev, sense);
}
1743
void scsi_device_drained_begin(SCSIDevice *sdev)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
    if (!bus) {
        return;
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bus->drain_count < INT_MAX);

    /*
     * Multiple BlockBackends can be on a SCSIBus and each may begin/end
     * draining at any time. Keep a counter so HBAs only see begin/end once.
     */
    bus->drain_count++;
    if (bus->drain_count == 1) {
        trace_scsi_bus_drained_begin(bus, sdev);
        if (bus->info->drained_begin) {
            bus->info->drained_begin(bus);
        }
    }
}
1765
void scsi_device_drained_end(SCSIDevice *sdev)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
    if (!bus) {
        return;
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bus->drain_count > 0);

    /* Mirror of scsi_device_drained_begin: only the last drained_end on
     * the bus is forwarded to the HBA. */
    bus->drain_count--;
    if (bus->drain_count == 0) {
        trace_scsi_bus_drained_end(bus, sdev);
        if (bus->info->drained_end) {
            bus->info->drained_end(bus);
        }
    }
}
1783
/* Build the canonical device path "<hba path>/<channel>:<id>:<lun>",
 * omitting the HBA prefix when it has no path of its own. */
static char *scsibus_get_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;
    char *id = qdev_get_dev_path(hba);
    char *path;

    if (id == NULL) {
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
    } else {
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
        g_free(id);
    }
    return path;
}
1800
/* Firmware device path: "channel@<ch>/<fw name>@<id>,<lun>" (hex). */
static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    const char *name = qdev_fw_name(dev);

    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel, name,
                           d->id, d->lun);
}
1807
1808 /* SCSI request list. For simplicity, pv points to the whole device */
1809
/* Serialize one in-flight request for migration.  The leading byte is
 * 1 for requests that must be retried after load, 2 otherwise; the list
 * is terminated by a 0 byte written in put_scsi_requests().  The field
 * order here must match get_scsi_requests(). */
static void put_scsi_req(SCSIRequest *req, void *opaque)
{
    QEMUFile *f = opaque;

    /* Only pristine, still-enqueued requests can be migrated. */
    assert(!req->io_canceled);
    assert(req->status == -1 && req->host_status == -1);
    assert(req->enqueued);

    qemu_put_sbyte(f, req->retry ? 1 : 2);
    qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
    qemu_put_be32s(f, &req->tag);
    qemu_put_be32s(f, &req->lun);
    /* HBA-level state first, then request-ops state, as on load. */
    if (req->bus->info->save_request) {
        req->bus->info->save_request(f, req);
    }
    if (req->ops->save_request) {
        req->ops->save_request(f, req);
    }
}
1829
/* VMState .put handler: write every in-flight request of the device via
 * put_scsi_req(), followed by a 0 terminator byte. */
static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    SCSIDevice *s = pv;

    scsi_device_for_each_req_sync(s, put_scsi_req, f);
    qemu_put_sbyte(f, 0);
    return 0;
}
1839
/* VMState .get handler: recreate and re-enqueue each migrated request
 * until the 0 terminator byte.  Must mirror put_scsi_req()'s layout. */
static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        /*
         * A too-short CDB would have been rejected by scsi_req_new, so just use
         * SCSI_CMD_BUF_SIZE as the CDB length.
         */
        req = scsi_req_new(s, tag, lun, buf, sizeof(buf), NULL);
        /* 1 marks a request that must be retried, 2 one that need not. */
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}
1882
/* Custom VMState codec for the variable-length request list; see
 * put_scsi_requests() / get_scsi_requests() for the wire format. */
static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get = get_scsi_requests,
    .put = put_scsi_requests,
};
1888
scsi_sense_state_needed(void * opaque)1889 static bool scsi_sense_state_needed(void *opaque)
1890 {
1891 SCSIDevice *s = opaque;
1892
1893 return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
1894 }
1895
/* Optional subsection carrying the tail of the sense buffer beyond the
 * legacy SCSI_SENSE_BUF_SIZE_OLD bytes migrated by the main section. */
static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};
1908
/* Migration description shared by all SCSI devices: pending unit
 * attention, sense data, and the list of in-flight requests. */
const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            /* Hand-rolled field: the request list has no fixed size, so
             * it is handled by vmstate_info_scsi_requests callbacks. */
            .name         = "requests",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_scsi_requests,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_scsi_sense_state,
        NULL
    }
};
1936
/* Addressing properties common to every SCSI device.  scsi-id and lun
 * default to -1, i.e. not set by the user — TODO confirm auto-assignment
 * happens at realize time (outside this chunk). */
static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};
1943
/* Class init for the abstract SCSI device type: wire up realize and
 * unrealize, attach the common properties, restrict to SCSI buses. */
static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->bus_type = TYPE_SCSI_BUS;
    dc->realize = scsi_qdev_realize;
    dc->unrealize = scsi_qdev_unrealize;
    device_class_set_props(dc, scsi_props);
}
1953
/* Per-instance init: expose the "bootindex" property backed by the
 * device's conf.bootindex field. */
static void scsi_dev_instance_init(Object *obj)
{
    SCSIDevice *s = SCSI_DEVICE(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
                                  &s->qdev);
}
1963
/* Abstract base QOM type for all SCSI devices; concrete devices
 * (disk, CD-ROM, ...) derive from this. */
static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};
1973
/* Class init for the SCSI bus type: device-path construction, address
 * checking and simple hotplug support. */
static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bc = BUS_CLASS(klass);
    HotplugHandlerClass *hhc = HOTPLUG_HANDLER_CLASS(klass);

    bc->get_dev_path = scsibus_get_dev_path;
    bc->get_fw_dev_path = scsibus_get_fw_dev_path;
    bc->check_address = scsi_bus_check_address;
    hhc->unplug = qdev_simple_device_unplug_cb;
}
1984
/* QOM type for the SCSI bus; implements the hotplug-handler interface
 * so devices can be plugged/unplugged at runtime. */
static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
1995
/* Register the SCSI bus and abstract SCSI device types with QOM. */
static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)
2003