xref: /qemu/hw/scsi/scsi-generic.c (revision abff1abf)
/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;
    int buflen;
    int len;
    sg_io_hdr_t io_header;
} SCSIGenericReq;

static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/* Helper function for command completion.  */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
    if (status == CHECK_CONDITION) {
        if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
            r->req.sense_len = r->io_header.sb_len_wr;
        } else {
            scsi_req_build_sense(&r->req, sense);
        }
    }

    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}

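/*
 * Completion callback for commands that transfer no data.  Clears the
 * AIOCB and finishes the request under the BlockBackend's AioContext.
 */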
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

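/*
 * Fill in the sg_io_hdr for this request and submit it asynchronously
 * via blk_aio_ioctl(SG_IO).  Returns 0 on success, -EIO if the AIO
 * request could not be issued.
 */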
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = MAX_UINT;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

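/*
 * Post-process an INQUIRY reply: record the device's SCSI version from
 * the standard INQUIRY data, limit the maximum and optimal transfer
 * lengths reported in the Block Limits VPD page to what the host allows,
 * and advertise page 0xb0 in the Supported VPD Pages list when it is
 * being emulated.
 */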
static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
{
    uint8_t page, page_idx;

    /*
     *  EVPD set to zero returns the standard INQUIRY data.
     *
     *  Check if scsi_version is unset (-1) to avoid re-defining it
     *  each time an INQUIRY with standard data is received.
     *  scsi_version is initialized with -1 in scsi_generic_reset
     *  and scsi_disk_reset, making sure that we'll set the
     *  scsi_version after a reset. If the version field of the
     *  INQUIRY response somehow changes after a guest reboot,
     *  we'll be able to keep track of it.
     *
     *  On SCSI-2 and older, the first 3 bits of byte 2 are the
     *  ANSI-approved version, while on later versions the
     *  whole byte 2 contains the version. Check if we're dealing
     *  with a newer version and, in that case, assign the
     *  whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if (s->type == TYPE_DISK && (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            uint32_t max_transfer =
                blk_get_max_transfer(s->conf.blk) / s->blocksize;

            assert(max_transfer);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                    MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to properly set up the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert.  When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
        }
    }
}

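/*
 * Build an emulated Block Limits VPD page (0xb0) in the request buffer
 * and clear any sense/status left in the io_header so that the emulated
 * reply is not discarded as an error.  Returns the reply length
 * (r->buflen).
 */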
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

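/*
 * Completion callback for data-in transfers.  Emulates the Block Limits
 * VPD page when the device rejects it, snoops READ CAPACITY replies to
 * learn the block size and maximum LBA, sets the write-protect bit in
 * MODE SENSE replies for read-only backends, post-processes INQUIRY
 * replies, and finally hands the data back to the HBA.
 */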
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize.  */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

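/*
 * Completion callback for data-out transfers.  Snoops MODE SELECT on
 * tape devices to pick up a changed block size, then completes the
 * request under the BlockBackend's AioContext.
 */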
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device.  The transfer may complete asynchronously.  */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

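/* Format the CDB as hex bytes for the scsi_generic_send_command trace event.  */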
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

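/*
 * Extract a 64-bit NAA identifier from a Device Identification
 * designator, accepting either a binary NAA designator or a SCSI name
 * string of the form "naa.XXXXXXXXXXXXXXXX".  Returns 0 on success,
 * -EINVAL if the designator carries no NAA identifier.
 */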
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

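/*
 * Issue a single synchronous SG_IO data-in command on the backend.
 * Returns 0 on success, -1 if the ioctl failed or the driver or host
 * reported an error.
 */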
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x00;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to determine whether VPD Block
         * Limits is supported.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

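/*
 * Fetch the Device Identification VPD page (0x83) and record any NAA
 * identifiers found: designators associated with the logical unit set
 * s->wwn, designators associated with the target port set s->port_wwn.
 */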
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x83;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return;
    }

    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

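/*
 * Read the block size of a stream (tape) device from the block
 * descriptor of a MODE SENSE reply.  Returns the block size in bytes,
 * or -1 on error.
 */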
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

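/*
 * Realize the passthrough device: validate the drive and its error
 * policy, check that the host driver supports SG_IO version 3 or newer,
 * query the SCSI device type via SG_GET_SCSI_ID, pick an initial block
 * size, and read the device identification and VPD capabilities.
 */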
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       blk_is_read_only(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices; we'll fix it when the guest sends
         * READ CAPACITY.  If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE.)
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean.  */
    s->default_scsi_version = -1;
    scsi_generic_read_device_inquiry(s);
}

const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_END_OF_LIST(),
};

static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_generic_realize;
    sc->alloc_req    = scsi_new_request;
    sc->parse_cdb    = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd  = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */