xref: /qemu/hw/block/virtio-blk.c (revision bfa36802)
16e790746SPaolo Bonzini /*
26e790746SPaolo Bonzini  * Virtio Block Device
36e790746SPaolo Bonzini  *
46e790746SPaolo Bonzini  * Copyright IBM, Corp. 2007
56e790746SPaolo Bonzini  *
66e790746SPaolo Bonzini  * Authors:
76e790746SPaolo Bonzini  *  Anthony Liguori   <aliguori@us.ibm.com>
86e790746SPaolo Bonzini  *
96e790746SPaolo Bonzini  * This work is licensed under the terms of the GNU GPL, version 2.  See
106e790746SPaolo Bonzini  * the COPYING file in the top-level directory.
116e790746SPaolo Bonzini  *
126e790746SPaolo Bonzini  */
136e790746SPaolo Bonzini 
1480c71a24SPeter Maydell #include "qemu/osdep.h"
15433fcea4SStefan Hajnoczi #include "qemu/defer-call.h"
16da34e65cSMarkus Armbruster #include "qapi/error.h"
17827805a2SFam Zheng #include "qemu/iov.h"
180b8fa32fSMarkus Armbruster #include "qemu/module.h"
196e790746SPaolo Bonzini #include "qemu/error-report.h"
209b92fbcfSSergio Lopez #include "qemu/main-loop.h"
214f736650SSam Li #include "block/block_int.h"
226e790746SPaolo Bonzini #include "trace.h"
236e790746SPaolo Bonzini #include "hw/block/block.h"
24a27bd6c7SMarkus Armbruster #include "hw/qdev-properties.h"
256e790746SPaolo Bonzini #include "sysemu/blockdev.h"
26baf42268SStefan Hajnoczi #include "sysemu/block-ram-registrar.h"
272f780b6aSMarkus Armbruster #include "sysemu/sysemu.h"
2854d31236SMarkus Armbruster #include "sysemu/runstate.h"
296e790746SPaolo Bonzini #include "hw/virtio/virtio-blk.h"
3008e2c9f1SPaolo Bonzini #include "scsi/constants.h"
316e790746SPaolo Bonzini #ifdef __linux__
326e790746SPaolo Bonzini # include <scsi/sg.h>
336e790746SPaolo Bonzini #endif
346e790746SPaolo Bonzini #include "hw/virtio/virtio-bus.h"
35ca77ee28SMarkus Armbruster #include "migration/qemu-file-types.h"
36783d1897SRusty Russell #include "hw/virtio/virtio-access.h"
37d9cf55a8SDaniil Tatianin #include "hw/virtio/virtio-blk-common.h"
384c41c69eSHiroki Narukawa #include "qemu/coroutine.h"
396e790746SPaolo Bonzini 
4052bff01fSHanna Czenczek static void virtio_blk_ioeventfd_attach(VirtIOBlock *s);
4152bff01fSHanna Czenczek 
42d14dde5eSGreg Kurz static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
43edaffd9fSStefan Hajnoczi                                     VirtIOBlockReq *req)
44671ec3f0SFam Zheng {
45671ec3f0SFam Zheng     req->dev = s;
46edaffd9fSStefan Hajnoczi     req->vq = vq;
47869d66afSStefan Hajnoczi     req->qiov.size = 0;
482a6cdd6dSPaolo Bonzini     req->in_len = 0;
49869d66afSStefan Hajnoczi     req->next = NULL;
5095f7142aSPeter Lieven     req->mr_next = NULL;
51671ec3f0SFam Zheng }
52671ec3f0SFam Zheng 
53d14dde5eSGreg Kurz static void virtio_blk_free_request(VirtIOBlockReq *req)
54671ec3f0SFam Zheng {
55c84b3192SPaolo Bonzini     g_free(req);
56671ec3f0SFam Zheng }
57671ec3f0SFam Zheng 
5803de2f52SPaolo Bonzini static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
596e790746SPaolo Bonzini {
606e790746SPaolo Bonzini     VirtIOBlock *s = req->dev;
616e790746SPaolo Bonzini     VirtIODevice *vdev = VIRTIO_DEVICE(s);
626e790746SPaolo Bonzini 
63a576ceacSStefan Hajnoczi     trace_virtio_blk_req_complete(vdev, req, status);
646e790746SPaolo Bonzini 
656e790746SPaolo Bonzini     stb_p(&req->in->status, status);
667bd04a04SStefan Hajnoczi     iov_discard_undo(&req->inhdr_undo);
677bd04a04SStefan Hajnoczi     iov_discard_undo(&req->outhdr_undo);
68edaffd9fSStefan Hajnoczi     virtqueue_push(req->vq, &req->elem, req->in_len);
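    /* Notify via irqfd when running in an IOThread, via the regular notifier otherwise. */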
69*bfa36802SStefan Hajnoczi     if (qemu_in_iothread()) {
703bcc17f0SStefan Hajnoczi         virtio_notify_irqfd(vdev, req->vq);
7103de2f52SPaolo Bonzini     } else {
72edaffd9fSStefan Hajnoczi         virtio_notify(vdev, req->vq);
736e790746SPaolo Bonzini     }
74bf4bd461SFam Zheng }
75bf4bd461SFam Zheng 
766e790746SPaolo Bonzini static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
7700f639fbSStefano Garzarella     bool is_read, bool acct_failed)
786e790746SPaolo Bonzini {
796e790746SPaolo Bonzini     VirtIOBlock *s = req->dev;
809a6719d5SStefano Garzarella     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
816e790746SPaolo Bonzini 
82a589569fSWenchao Xia     if (action == BLOCK_ERROR_ACTION_STOP) {
83466138dcSFam Zheng         /* Break the link as the next request is going to be parsed from the
84466138dcSFam Zheng          * ring again. Otherwise we may end up doing a double completion! */
85466138dcSFam Zheng         req->mr_next = NULL;
869c67f33fSStefan Hajnoczi 
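        /* Queue the request so it is resubmitted when the VM resumes. */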
879c67f33fSStefan Hajnoczi         WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
886e790746SPaolo Bonzini             req->next = s->rq;
896e790746SPaolo Bonzini             s->rq = req;
909c67f33fSStefan Hajnoczi         }
91a589569fSWenchao Xia     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
926e790746SPaolo Bonzini         virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
9300f639fbSStefano Garzarella         if (acct_failed) {
9401762e03SAlberto Garcia             block_acct_failed(blk_get_stats(s->blk), &req->acct);
9500f639fbSStefano Garzarella         }
96671ec3f0SFam Zheng         virtio_blk_free_request(req);
976e790746SPaolo Bonzini     }
986e790746SPaolo Bonzini 
994be74634SMarkus Armbruster     blk_error_action(s->blk, action, is_read, error);
100a589569fSWenchao Xia     return action != BLOCK_ERROR_ACTION_IGNORE;
1016e790746SPaolo Bonzini }
1026e790746SPaolo Bonzini 
1036e790746SPaolo Bonzini static void virtio_blk_rw_complete(void *opaque, int ret)
1046e790746SPaolo Bonzini {
10595f7142aSPeter Lieven     VirtIOBlockReq *next = opaque;
106b9e413ddSPaolo Bonzini     VirtIOBlock *s = next->dev;
107a576ceacSStefan Hajnoczi     VirtIODevice *vdev = VIRTIO_DEVICE(s);
1086e790746SPaolo Bonzini 
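    /* Requests merged by submit_requests() are chained via mr_next; complete each one. */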
10995f7142aSPeter Lieven     while (next) {
11095f7142aSPeter Lieven         VirtIOBlockReq *req = next;
11195f7142aSPeter Lieven         next = req->mr_next;
112a576ceacSStefan Hajnoczi         trace_virtio_blk_rw_complete(vdev, req, ret);
1136e790746SPaolo Bonzini 
11495f7142aSPeter Lieven         if (req->qiov.nalloc != -1) {
115e61809edSDongli Zhang             /* If nalloc != -1, req->qiov is a local copy of the original
1169bb192a4SYaowei Bai              * external iovec. It was allocated in submit_requests to be
1179bb192a4SYaowei Bai              * able to merge requests. */
11895f7142aSPeter Lieven             qemu_iovec_destroy(&req->qiov);
11995f7142aSPeter Lieven         }
12095f7142aSPeter Lieven 
1216e790746SPaolo Bonzini         if (ret) {
122bf4069fbSAnastasiia Rusakova             int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
123783d1897SRusty Russell             bool is_read = !(p & VIRTIO_BLK_T_OUT);
1242a6cdd6dSPaolo Bonzini             /* Note that memory may be dirtied on read failure.  If the
1252a6cdd6dSPaolo Bonzini              * virtio request is not completed here, as is the case for
1262a6cdd6dSPaolo Bonzini              * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
1272a6cdd6dSPaolo Bonzini              * correctly during live migration.  While this is ugly,
1282a6cdd6dSPaolo Bonzini              * it is acceptable because the device is free to write to
1292a6cdd6dSPaolo Bonzini              * the memory until the request is completed (which will
1302a6cdd6dSPaolo Bonzini              * happen on the other side of the migration).
1312a6cdd6dSPaolo Bonzini              */
13200f639fbSStefano Garzarella             if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
13395f7142aSPeter Lieven                 continue;
13495f7142aSPeter Lieven             }
1356e790746SPaolo Bonzini         }
1366e790746SPaolo Bonzini 
1376e790746SPaolo Bonzini         virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
138bf4069fbSAnastasiia Rusakova         block_acct_done(blk_get_stats(s->blk), &req->acct);
139671ec3f0SFam Zheng         virtio_blk_free_request(req);
1406e790746SPaolo Bonzini     }
14195f7142aSPeter Lieven }
1426e790746SPaolo Bonzini 
1436e790746SPaolo Bonzini static void virtio_blk_flush_complete(void *opaque, int ret)
1446e790746SPaolo Bonzini {
1456e790746SPaolo Bonzini     VirtIOBlockReq *req = opaque;
146b9e413ddSPaolo Bonzini     VirtIOBlock *s = req->dev;
1476e790746SPaolo Bonzini 
148c1135913SStefan Hajnoczi     if (ret && virtio_blk_handle_rw_error(req, -ret, 0, true)) {
149c1135913SStefan Hajnoczi         return;
1506e790746SPaolo Bonzini     }
1516e790746SPaolo Bonzini 
1526e790746SPaolo Bonzini     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
1539a6719d5SStefano Garzarella     block_acct_done(blk_get_stats(s->blk), &req->acct);
154671ec3f0SFam Zheng     virtio_blk_free_request(req);
1556e790746SPaolo Bonzini }
1566e790746SPaolo Bonzini 
15737b06f8dSStefano Garzarella static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
15837b06f8dSStefano Garzarella {
15937b06f8dSStefano Garzarella     VirtIOBlockReq *req = opaque;
16037b06f8dSStefano Garzarella     VirtIOBlock *s = req->dev;
16137b06f8dSStefano Garzarella     bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
16237b06f8dSStefano Garzarella                             ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;
16337b06f8dSStefano Garzarella 
164c1135913SStefan Hajnoczi     if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
165c1135913SStefan Hajnoczi         return;
16637b06f8dSStefano Garzarella     }
16737b06f8dSStefano Garzarella 
16837b06f8dSStefano Garzarella     virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
16937b06f8dSStefano Garzarella     if (is_write_zeroes) {
17037b06f8dSStefano Garzarella         block_acct_done(blk_get_stats(s->blk), &req->acct);
17137b06f8dSStefano Garzarella     }
17237b06f8dSStefano Garzarella     virtio_blk_free_request(req);
17337b06f8dSStefano Garzarella }
17437b06f8dSStefano Garzarella 
1751dc936aaSFam Zheng #ifdef __linux__
1761dc936aaSFam Zheng 
1771dc936aaSFam Zheng typedef struct {
1781dc936aaSFam Zheng     VirtIOBlockReq *req;
1791dc936aaSFam Zheng     struct sg_io_hdr hdr;
1801dc936aaSFam Zheng } VirtIOBlockIoctlReq;
1811dc936aaSFam Zheng 
1821dc936aaSFam Zheng static void virtio_blk_ioctl_complete(void *opaque, int status)
1831dc936aaSFam Zheng {
1841dc936aaSFam Zheng     VirtIOBlockIoctlReq *ioctl_req = opaque;
1851dc936aaSFam Zheng     VirtIOBlockReq *req = ioctl_req->req;
1869d456654SPaolo Bonzini     VirtIOBlock *s = req->dev;
1879d456654SPaolo Bonzini     VirtIODevice *vdev = VIRTIO_DEVICE(s);
1881dc936aaSFam Zheng     struct virtio_scsi_inhdr *scsi;
1891dc936aaSFam Zheng     struct sg_io_hdr *hdr;
1901dc936aaSFam Zheng 
1911dc936aaSFam Zheng     scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
1921dc936aaSFam Zheng 
1931dc936aaSFam Zheng     if (status) {
1941dc936aaSFam Zheng         status = VIRTIO_BLK_S_UNSUPP;
1951dc936aaSFam Zheng         virtio_stl_p(vdev, &scsi->errors, 255);
1961dc936aaSFam Zheng         goto out;
1971dc936aaSFam Zheng     }
1981dc936aaSFam Zheng 
1991dc936aaSFam Zheng     hdr = &ioctl_req->hdr;
2001dc936aaSFam Zheng     /*
2011dc936aaSFam Zheng      * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
2021dc936aaSFam Zheng      * clear the masked_status field [hence status gets cleared too, see
2031dc936aaSFam Zheng      * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
2041dc936aaSFam Zheng      * status has occurred.  However they do set DRIVER_SENSE in driver_status
2051dc936aaSFam Zheng      * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
2061dc936aaSFam Zheng      */
2071dc936aaSFam Zheng     if (hdr->status == 0 && hdr->sb_len_wr > 0) {
2081dc936aaSFam Zheng         hdr->status = CHECK_CONDITION;
2091dc936aaSFam Zheng     }
2101dc936aaSFam Zheng 
2111dc936aaSFam Zheng     virtio_stl_p(vdev, &scsi->errors,
2121dc936aaSFam Zheng                  hdr->status | (hdr->msg_status << 8) |
2131dc936aaSFam Zheng                  (hdr->host_status << 16) | (hdr->driver_status << 24));
2141dc936aaSFam Zheng     virtio_stl_p(vdev, &scsi->residual, hdr->resid);
2151dc936aaSFam Zheng     virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
2161dc936aaSFam Zheng     virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
2171dc936aaSFam Zheng 
2181dc936aaSFam Zheng out:
2191dc936aaSFam Zheng     virtio_blk_req_complete(req, status);
2201dc936aaSFam Zheng     virtio_blk_free_request(req);
2211dc936aaSFam Zheng     g_free(ioctl_req);
2221dc936aaSFam Zheng }
2231dc936aaSFam Zheng 
2241dc936aaSFam Zheng #endif
2251dc936aaSFam Zheng 
226edaffd9fSStefan Hajnoczi static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
2276e790746SPaolo Bonzini {
228edaffd9fSStefan Hajnoczi     VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));
2296e790746SPaolo Bonzini 
23051b19ebeSPaolo Bonzini     if (req) {
231edaffd9fSStefan Hajnoczi         virtio_blk_init_request(s, vq, req);
2326e790746SPaolo Bonzini     }
2336e790746SPaolo Bonzini     return req;
2346e790746SPaolo Bonzini }
2356e790746SPaolo Bonzini 
23675344fa4SFam Zheng static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
2376e790746SPaolo Bonzini {
2386e790746SPaolo Bonzini     int status = VIRTIO_BLK_S_OK;
2395a05cbeeSFam Zheng     struct virtio_scsi_inhdr *scsi = NULL;
24075344fa4SFam Zheng     VirtIOBlock *blk = req->dev;
241bf4069fbSAnastasiia Rusakova     VirtIODevice *vdev = VIRTIO_DEVICE(blk);
242bf4069fbSAnastasiia Rusakova     VirtQueueElement *elem = &req->elem;
243783d1897SRusty Russell 
2445a05cbeeSFam Zheng #ifdef __linux__
2455a05cbeeSFam Zheng     int i;
2461dc936aaSFam Zheng     VirtIOBlockIoctlReq *ioctl_req;
247a209f461SFam Zheng     BlockAIOCB *acb;
2485a05cbeeSFam Zheng #endif
2496e790746SPaolo Bonzini 
2506e790746SPaolo Bonzini     /*
2516e790746SPaolo Bonzini      * We require at least one output segment each for the virtio_blk_outhdr
2526e790746SPaolo Bonzini      * and the SCSI command block.
2536e790746SPaolo Bonzini      *
2546e790746SPaolo Bonzini      * We also require at least the virtio_blk_inhdr, the virtio_scsi_inhdr
2556e790746SPaolo Bonzini      * and the sense buffer pointer in the input segments.
2566e790746SPaolo Bonzini      */
2575a05cbeeSFam Zheng     if (elem->out_num < 2 || elem->in_num < 3) {
2585a05cbeeSFam Zheng         status = VIRTIO_BLK_S_IOERR;
2595a05cbeeSFam Zheng         goto fail;
2606e790746SPaolo Bonzini     }
2616e790746SPaolo Bonzini 
2626e790746SPaolo Bonzini     /*
2636e790746SPaolo Bonzini      * The scsi inhdr is placed in the second-to-last input segment, just
2646e790746SPaolo Bonzini      * before the regular inhdr.
2656e790746SPaolo Bonzini      */
2665a05cbeeSFam Zheng     scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;
2676e790746SPaolo Bonzini 
268bbe8bd4dSStefano Garzarella     if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
2696e790746SPaolo Bonzini         status = VIRTIO_BLK_S_UNSUPP;
2706e790746SPaolo Bonzini         goto fail;
2716e790746SPaolo Bonzini     }
2726e790746SPaolo Bonzini 
2736e790746SPaolo Bonzini     /*
2746e790746SPaolo Bonzini      * No support for bidirectional commands yet.
2756e790746SPaolo Bonzini      */
2765a05cbeeSFam Zheng     if (elem->out_num > 2 && elem->in_num > 3) {
2776e790746SPaolo Bonzini         status = VIRTIO_BLK_S_UNSUPP;
2786e790746SPaolo Bonzini         goto fail;
2796e790746SPaolo Bonzini     }
2806e790746SPaolo Bonzini 
2816e790746SPaolo Bonzini #ifdef __linux__
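    /*
     * Build the SG_IO request header: out_sg[0] holds the virtio_blk_outhdr
     * and out_sg[1] holds the SCSI command block (CDB).
     */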
2821dc936aaSFam Zheng     ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
2831dc936aaSFam Zheng     ioctl_req->req = req;
2841dc936aaSFam Zheng     ioctl_req->hdr.interface_id = 'S';
2851dc936aaSFam Zheng     ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
2861dc936aaSFam Zheng     ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
2871dc936aaSFam Zheng     ioctl_req->hdr.dxfer_len = 0;
2886e790746SPaolo Bonzini 
2895a05cbeeSFam Zheng     if (elem->out_num > 2) {
2906e790746SPaolo Bonzini         /*
2916e790746SPaolo Bonzini          * If there are more than the minimally required 2 output segments
2926e790746SPaolo Bonzini          * there is a write payload starting from the third iovec.
2936e790746SPaolo Bonzini          */
2941dc936aaSFam Zheng         ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
2951dc936aaSFam Zheng         ioctl_req->hdr.iovec_count = elem->out_num - 2;
2966e790746SPaolo Bonzini 
2971dc936aaSFam Zheng         for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
2981dc936aaSFam Zheng             ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
2991dc936aaSFam Zheng         }
3006e790746SPaolo Bonzini 
3011dc936aaSFam Zheng         ioctl_req->hdr.dxferp = elem->out_sg + 2;
3026e790746SPaolo Bonzini 
3035a05cbeeSFam Zheng     } else if (elem->in_num > 3) {
3046e790746SPaolo Bonzini         /*
3056e790746SPaolo Bonzini          * If we have more than 3 input segments the guest wants to actually
3066e790746SPaolo Bonzini          * read data.
3076e790746SPaolo Bonzini          */
3081dc936aaSFam Zheng         ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
3091dc936aaSFam Zheng         ioctl_req->hdr.iovec_count = elem->in_num - 3;
3101dc936aaSFam Zheng         for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
3111dc936aaSFam Zheng             ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
3121dc936aaSFam Zheng         }
3136e790746SPaolo Bonzini 
3141dc936aaSFam Zheng         ioctl_req->hdr.dxferp = elem->in_sg;
3156e790746SPaolo Bonzini     } else {
3166e790746SPaolo Bonzini         /*
3176e790746SPaolo Bonzini          * Some SCSI commands don't actually transfer any data.
3186e790746SPaolo Bonzini          */
3191dc936aaSFam Zheng         ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
3206e790746SPaolo Bonzini     }
3216e790746SPaolo Bonzini 
3221dc936aaSFam Zheng     ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
3231dc936aaSFam Zheng     ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;
3246e790746SPaolo Bonzini 
325a209f461SFam Zheng     acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
3261dc936aaSFam Zheng                         virtio_blk_ioctl_complete, ioctl_req);
327a209f461SFam Zheng     if (!acb) {
328a209f461SFam Zheng         g_free(ioctl_req);
329a209f461SFam Zheng         status = VIRTIO_BLK_S_UNSUPP;
330a209f461SFam Zheng         goto fail;
331a209f461SFam Zheng     }
3321dc936aaSFam Zheng     return -EINPROGRESS;
3336e790746SPaolo Bonzini #else
3346e790746SPaolo Bonzini     abort();
3356e790746SPaolo Bonzini #endif
3366e790746SPaolo Bonzini 
3376e790746SPaolo Bonzini fail:
3386e790746SPaolo Bonzini     /* Just put anything nonzero so that the ioctl fails in the guest.  */
3395a05cbeeSFam Zheng     if (scsi) {
340783d1897SRusty Russell         virtio_stl_p(vdev, &scsi->errors, 255);
3415a05cbeeSFam Zheng     }
3425a05cbeeSFam Zheng     return status;
3435a05cbeeSFam Zheng }
3445a05cbeeSFam Zheng 
3455a05cbeeSFam Zheng static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
3465a05cbeeSFam Zheng {
3475a05cbeeSFam Zheng     int status;
3485a05cbeeSFam Zheng 
34975344fa4SFam Zheng     status = virtio_blk_handle_scsi_req(req);
3501dc936aaSFam Zheng     if (status != -EINPROGRESS) {
3516e790746SPaolo Bonzini         virtio_blk_req_complete(req, status);
352671ec3f0SFam Zheng         virtio_blk_free_request(req);
3536e790746SPaolo Bonzini     }
3541dc936aaSFam Zheng }
3556e790746SPaolo Bonzini 
356baf42268SStefan Hajnoczi static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
35795f7142aSPeter Lieven                                    int start, int num_reqs, int niov)
3586e790746SPaolo Bonzini {
359baf42268SStefan Hajnoczi     BlockBackend *blk = s->blk;
36095f7142aSPeter Lieven     QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
36195f7142aSPeter Lieven     int64_t sector_num = mrb->reqs[start]->sector_num;
36295f7142aSPeter Lieven     bool is_write = mrb->is_write;
363baf42268SStefan Hajnoczi     BdrvRequestFlags flags = 0;
3646e790746SPaolo Bonzini 
36595f7142aSPeter Lieven     if (num_reqs > 1) {
36695f7142aSPeter Lieven         int i;
36795f7142aSPeter Lieven         struct iovec *tmp_iov = qiov->iov;
36895f7142aSPeter Lieven         int tmp_niov = qiov->niov;
36995f7142aSPeter Lieven 
37095f7142aSPeter Lieven         /* mrb->reqs[start]->qiov was initialized from an external iovec, so
371b5772fddSEric Blake          * we can't modify it here. We need to initialize it locally and then
37295f7142aSPeter Lieven          * add the external iovecs. */
37395f7142aSPeter Lieven         qemu_iovec_init(qiov, niov);
37495f7142aSPeter Lieven 
37595f7142aSPeter Lieven         for (i = 0; i < tmp_niov; i++) {
37695f7142aSPeter Lieven             qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
37795f7142aSPeter Lieven         }
37895f7142aSPeter Lieven 
37995f7142aSPeter Lieven         for (i = start + 1; i < start + num_reqs; i++) {
38095f7142aSPeter Lieven             qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
38195f7142aSPeter Lieven                               mrb->reqs[i]->qiov.size);
38295f7142aSPeter Lieven             mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
38395f7142aSPeter Lieven         }
38495f7142aSPeter Lieven 
385a576ceacSStefan Hajnoczi         trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
386a576ceacSStefan Hajnoczi                                          mrb, start, num_reqs,
387b5772fddSEric Blake                                          sector_num << BDRV_SECTOR_BITS,
388b5772fddSEric Blake                                          qiov->size, is_write);
38995f7142aSPeter Lieven         block_acct_merge_done(blk_get_stats(blk),
39095f7142aSPeter Lieven                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
39195f7142aSPeter Lieven                               num_reqs - 1);
39295f7142aSPeter Lieven     }
39395f7142aSPeter Lieven 
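    /* Guest RAM is registered with the block layer; use registered buffers. */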
394baf42268SStefan Hajnoczi     if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
395baf42268SStefan Hajnoczi         flags |= BDRV_REQ_REGISTERED_BUF;
396baf42268SStefan Hajnoczi     }
397baf42268SStefan Hajnoczi 
39895f7142aSPeter Lieven     if (is_write) {
399baf42268SStefan Hajnoczi         blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
400baf42268SStefan Hajnoczi                         flags, virtio_blk_rw_complete,
401baf42268SStefan Hajnoczi                         mrb->reqs[start]);
40295f7142aSPeter Lieven     } else {
403baf42268SStefan Hajnoczi         blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
404baf42268SStefan Hajnoczi                        flags, virtio_blk_rw_complete,
405baf42268SStefan Hajnoczi                        mrb->reqs[start]);
40695f7142aSPeter Lieven     }
40795f7142aSPeter Lieven }
40895f7142aSPeter Lieven 
40995f7142aSPeter Lieven static int multireq_compare(const void *a, const void *b)
41095f7142aSPeter Lieven {
41195f7142aSPeter Lieven     const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
41295f7142aSPeter Lieven                          *req2 = *(VirtIOBlockReq **)b;
41395f7142aSPeter Lieven 
41495f7142aSPeter Lieven     /*
41595f7142aSPeter Lieven      * Note that we can't simply subtract sector_num1 from sector_num2
41695f7142aSPeter Lieven      * here as that could overflow the return value.
41795f7142aSPeter Lieven      */
41895f7142aSPeter Lieven     if (req1->sector_num > req2->sector_num) {
41995f7142aSPeter Lieven         return 1;
42095f7142aSPeter Lieven     } else if (req1->sector_num < req2->sector_num) {
42195f7142aSPeter Lieven         return -1;
42295f7142aSPeter Lieven     } else {
42395f7142aSPeter Lieven         return 0;
42495f7142aSPeter Lieven     }
42595f7142aSPeter Lieven }
42695f7142aSPeter Lieven 
427baf42268SStefan Hajnoczi static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
42895f7142aSPeter Lieven {
42995f7142aSPeter Lieven     int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
4305def6b80SEric Blake     uint32_t max_transfer;
43195f7142aSPeter Lieven     int64_t sector_num = 0;
43295f7142aSPeter Lieven 
43395f7142aSPeter Lieven     if (mrb->num_reqs == 1) {
434baf42268SStefan Hajnoczi         submit_requests(s, mrb, 0, 1, -1);
43595f7142aSPeter Lieven         mrb->num_reqs = 0;
4366e790746SPaolo Bonzini         return;
4376e790746SPaolo Bonzini     }
4386e790746SPaolo Bonzini 
4395def6b80SEric Blake     max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);
44095f7142aSPeter Lieven 
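    /* Sort by sector so sequential requests can be merged below. */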
44195f7142aSPeter Lieven     qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
44295f7142aSPeter Lieven           &multireq_compare);
44395f7142aSPeter Lieven 
44495f7142aSPeter Lieven     for (i = 0; i < mrb->num_reqs; i++) {
44595f7142aSPeter Lieven         VirtIOBlockReq *req = mrb->reqs[i];
44695f7142aSPeter Lieven         if (num_reqs > 0) {
44749cffbc6SGonglei             /*
44849cffbc6SGonglei              * NOTE: We cannot merge the requests in the following situations:
44949cffbc6SGonglei              * 1. requests are not sequential
45049cffbc6SGonglei              * 2. merge would exceed maximum number of IOVs
45149cffbc6SGonglei              * 3. merge would exceed maximum transfer length of backend device
45249cffbc6SGonglei              */
45349cffbc6SGonglei             if (sector_num + nb_sectors != req->sector_num ||
454baf42268SStefan Hajnoczi                 niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
4555def6b80SEric Blake                 req->qiov.size > max_transfer ||
4565def6b80SEric Blake                 nb_sectors > (max_transfer -
4575def6b80SEric Blake                               req->qiov.size) / BDRV_SECTOR_SIZE) {
458baf42268SStefan Hajnoczi                 submit_requests(s, mrb, start, num_reqs, niov);
45995f7142aSPeter Lieven                 num_reqs = 0;
4606e790746SPaolo Bonzini             }
4616e790746SPaolo Bonzini         }
4626e790746SPaolo Bonzini 
46395f7142aSPeter Lieven         if (num_reqs == 0) {
46495f7142aSPeter Lieven             sector_num = req->sector_num;
46595f7142aSPeter Lieven             nb_sectors = niov = 0;
46695f7142aSPeter Lieven             start = i;
46795f7142aSPeter Lieven         }
46895f7142aSPeter Lieven 
46995f7142aSPeter Lieven         nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
47095f7142aSPeter Lieven         niov += req->qiov.niov;
47195f7142aSPeter Lieven         num_reqs++;
47295f7142aSPeter Lieven     }
47395f7142aSPeter Lieven 
474baf42268SStefan Hajnoczi     submit_requests(s, mrb, start, num_reqs, niov);
47595f7142aSPeter Lieven     mrb->num_reqs = 0;
4766e790746SPaolo Bonzini }
4776e790746SPaolo Bonzini 
4786e790746SPaolo Bonzini static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
4796e790746SPaolo Bonzini {
480bf4069fbSAnastasiia Rusakova     VirtIOBlock *s = req->dev;
481bf4069fbSAnastasiia Rusakova 
482bf4069fbSAnastasiia Rusakova     block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
4835366d0c8SBenoît Canet                      BLOCK_ACCT_FLUSH);
4846e790746SPaolo Bonzini 
4856e790746SPaolo Bonzini     /*
4866e790746SPaolo Bonzini      * Make sure all outstanding writes are posted to the backing device.
4876e790746SPaolo Bonzini      */
48895f7142aSPeter Lieven     if (mrb->is_write && mrb->num_reqs > 0) {
489baf42268SStefan Hajnoczi         virtio_blk_submit_multireq(s, mrb);
49095f7142aSPeter Lieven     }
491bf4069fbSAnastasiia Rusakova     blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
4926e790746SPaolo Bonzini }
4936e790746SPaolo Bonzini 
494d0e14376SMarkus Armbruster static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
495d0e14376SMarkus Armbruster                                      uint64_t sector, size_t size)
496d0e14376SMarkus Armbruster {
4973c2daac0SMarkus Armbruster     uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
4983c2daac0SMarkus Armbruster     uint64_t total_sectors;
4993c2daac0SMarkus Armbruster 
50075af1f34SPeter Lieven     if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
50195f7142aSPeter Lieven         return false;
50295f7142aSPeter Lieven     }
503d0e14376SMarkus Armbruster     if (sector & dev->sector_mask) {
504d0e14376SMarkus Armbruster         return false;
505d0e14376SMarkus Armbruster     }
5062a30307fSMarkus Armbruster     if (size % dev->conf.conf.logical_block_size) {
507d0e14376SMarkus Armbruster         return false;
508d0e14376SMarkus Armbruster     }
5094be74634SMarkus Armbruster     blk_get_geometry(dev->blk, &total_sectors);
5103c2daac0SMarkus Armbruster     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
5113c2daac0SMarkus Armbruster         return false;
5123c2daac0SMarkus Armbruster     }
513d0e14376SMarkus Armbruster     return true;
514d0e14376SMarkus Armbruster }
515d0e14376SMarkus Armbruster 
51637b06f8dSStefano Garzarella static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
51737b06f8dSStefano Garzarella     struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
51837b06f8dSStefano Garzarella {
51937b06f8dSStefano Garzarella     VirtIOBlock *s = req->dev;
52037b06f8dSStefano Garzarella     VirtIODevice *vdev = VIRTIO_DEVICE(s);
52137b06f8dSStefano Garzarella     uint64_t sector;
52237b06f8dSStefano Garzarella     uint32_t num_sectors, flags, max_sectors;
52337b06f8dSStefano Garzarella     uint8_t err_status;
52437b06f8dSStefano Garzarella     int bytes;
52537b06f8dSStefano Garzarella 
52637b06f8dSStefano Garzarella     sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
52737b06f8dSStefano Garzarella     num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
52837b06f8dSStefano Garzarella     flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
52937b06f8dSStefano Garzarella     max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
53037b06f8dSStefano Garzarella                   s->conf.max_discard_sectors;
53137b06f8dSStefano Garzarella 
53237b06f8dSStefano Garzarella     /*
53337b06f8dSStefano Garzarella      * max_sectors is at most BDRV_REQUEST_MAX_SECTORS; this check
53437b06f8dSStefano Garzarella      * makes sure that "num_sectors << BDRV_SECTOR_BITS" fits in
53537b06f8dSStefano Garzarella      * the integer variable.
53637b06f8dSStefano Garzarella      */
53737b06f8dSStefano Garzarella     if (unlikely(num_sectors > max_sectors)) {
53837b06f8dSStefano Garzarella         err_status = VIRTIO_BLK_S_IOERR;
53937b06f8dSStefano Garzarella         goto err;
54037b06f8dSStefano Garzarella     }
54137b06f8dSStefano Garzarella 
54237b06f8dSStefano Garzarella     bytes = num_sectors << BDRV_SECTOR_BITS;
54337b06f8dSStefano Garzarella 
54437b06f8dSStefano Garzarella     if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
54537b06f8dSStefano Garzarella         err_status = VIRTIO_BLK_S_IOERR;
54637b06f8dSStefano Garzarella         goto err;
54737b06f8dSStefano Garzarella     }
54837b06f8dSStefano Garzarella 
54937b06f8dSStefano Garzarella     /*
55037b06f8dSStefano Garzarella      * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
55137b06f8dSStefano Garzarella      * and write zeroes commands if any unknown flag is set.
55237b06f8dSStefano Garzarella      */
55337b06f8dSStefano Garzarella     if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
55437b06f8dSStefano Garzarella         err_status = VIRTIO_BLK_S_UNSUPP;
55537b06f8dSStefano Garzarella         goto err;
55637b06f8dSStefano Garzarella     }
55737b06f8dSStefano Garzarella 
55837b06f8dSStefano Garzarella     if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
55937b06f8dSStefano Garzarella         int blk_aio_flags = 0;
56037b06f8dSStefano Garzarella 
56137b06f8dSStefano Garzarella         if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
56237b06f8dSStefano Garzarella             blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
56337b06f8dSStefano Garzarella         }
56437b06f8dSStefano Garzarella 
56537b06f8dSStefano Garzarella         block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
56637b06f8dSStefano Garzarella                          BLOCK_ACCT_WRITE);
56737b06f8dSStefano Garzarella 
56837b06f8dSStefano Garzarella         blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
56937b06f8dSStefano Garzarella                               bytes, blk_aio_flags,
57037b06f8dSStefano Garzarella                               virtio_blk_discard_write_zeroes_complete, req);
57137b06f8dSStefano Garzarella     } else { /* VIRTIO_BLK_T_DISCARD */
57237b06f8dSStefano Garzarella         /*
57337b06f8dSStefano Garzarella          * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
57437b06f8dSStefano Garzarella          * discard commands if the unmap flag is set.
57537b06f8dSStefano Garzarella          */
57637b06f8dSStefano Garzarella         if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
57737b06f8dSStefano Garzarella             err_status = VIRTIO_BLK_S_UNSUPP;
57837b06f8dSStefano Garzarella             goto err;
57937b06f8dSStefano Garzarella         }
58037b06f8dSStefano Garzarella 
58137b06f8dSStefano Garzarella         blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
58237b06f8dSStefano Garzarella                          virtio_blk_discard_write_zeroes_complete, req);
58337b06f8dSStefano Garzarella     }
58437b06f8dSStefano Garzarella 
58537b06f8dSStefano Garzarella     return VIRTIO_BLK_S_OK;
58637b06f8dSStefano Garzarella 
58737b06f8dSStefano Garzarella err:
58837b06f8dSStefano Garzarella     if (is_write_zeroes) {
58937b06f8dSStefano Garzarella         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
59037b06f8dSStefano Garzarella     }
59137b06f8dSStefano Garzarella     return err_status;
59237b06f8dSStefano Garzarella }
59337b06f8dSStefano Garzarella 
5944f736650SSam Li typedef struct ZoneCmdData {
5954f736650SSam Li     VirtIOBlockReq *req;
5964f736650SSam Li     struct iovec *in_iov;
5974f736650SSam Li     unsigned in_num;
5984f736650SSam Li     union {
5994f736650SSam Li         struct {
6004f736650SSam Li             unsigned int nr_zones;
6014f736650SSam Li             BlockZoneDescriptor *zones;
6024f736650SSam Li         } zone_report_data;
6034f736650SSam Li         struct {
6044f736650SSam Li             int64_t offset;
6054f736650SSam Li         } zone_append_data;
6064f736650SSam Li     };
6074f736650SSam Li } ZoneCmdData;
6084f736650SSam Li 
6094f736650SSam Li /*
6104f736650SSam Li  * check_zoned_request: error checking before issuing requests. If all checks
6114f736650SSam Li  * pass, return true.
6124f736650SSam Li  * append: true only for zone append requests.
6134f736650SSam Li  */
6144f736650SSam Li static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len,
6154f736650SSam Li                              bool append, uint8_t *status) {
6164f736650SSam Li     BlockDriverState *bs = blk_bs(s->blk);
6174f736650SSam Li     int index;
6184f736650SSam Li 
6194f736650SSam Li     if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
6204f736650SSam Li         *status = VIRTIO_BLK_S_UNSUPP;
6214f736650SSam Li         return false;
6224f736650SSam Li     }
6234f736650SSam Li 
6244f736650SSam Li     if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
6254f736650SSam Li         || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
6264f736650SSam Li         *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
6274f736650SSam Li         return false;
6284f736650SSam Li     }
6294f736650SSam Li 
6304f736650SSam Li     if (append) {
6314f736650SSam Li         if (bs->bl.write_granularity) {
6324f736650SSam Li             if ((offset % bs->bl.write_granularity) != 0) {
6334f736650SSam Li                 *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP;
6344f736650SSam Li                 return false;
6354f736650SSam Li             }
6364f736650SSam Li         }
6374f736650SSam Li 
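        /* Appends to conventional zones are invalid. */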
6384f736650SSam Li         index = offset / bs->bl.zone_size;
6394f736650SSam Li         if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
6404f736650SSam Li             *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
6414f736650SSam Li             return false;
6424f736650SSam Li         }
6434f736650SSam Li 
6444f736650SSam Li         if (len / 512 > bs->bl.max_append_sectors) {
6454f736650SSam Li             if (bs->bl.max_append_sectors == 0) {
6464f736650SSam Li                 *status = VIRTIO_BLK_S_UNSUPP;
6474f736650SSam Li             } else {
6484f736650SSam Li                 *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
6494f736650SSam Li             }
6504f736650SSam Li             return false;
6514f736650SSam Li         }
6524f736650SSam Li     }
6534f736650SSam Li     return true;
6544f736650SSam Li }
6554f736650SSam Li 
6564f736650SSam Li static void virtio_blk_zone_report_complete(void *opaque, int ret)
6574f736650SSam Li {
6584f736650SSam Li     ZoneCmdData *data = opaque;
6594f736650SSam Li     VirtIOBlockReq *req = data->req;
6604f736650SSam Li     VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
6614f736650SSam Li     struct iovec *in_iov = data->in_iov;
6624f736650SSam Li     unsigned in_num = data->in_num;
6634f736650SSam Li     int64_t zrp_size, n, j = 0;
6644f736650SSam Li     int64_t nz = data->zone_report_data.nr_zones;
6654f736650SSam Li     int8_t err_status = VIRTIO_BLK_S_OK;
666b3d9bb9aSStefan Hajnoczi     struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
667b3d9bb9aSStefan Hajnoczi         .nr_zones = cpu_to_le64(nz),
668b3d9bb9aSStefan Hajnoczi     };
6694f736650SSam Li 
6704e92acf7SSam Li     trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
6714f736650SSam Li     if (ret) {
6724f736650SSam Li         err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
6734f736650SSam Li         goto out;
6744f736650SSam Li     }
6754f736650SSam Li 
6764f736650SSam Li     zrp_size = sizeof(struct virtio_blk_zone_report)
6774f736650SSam Li                + sizeof(struct virtio_blk_zone_descriptor) * nz;
6784f736650SSam Li     n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
6794f736650SSam Li     if (n != sizeof(zrp_hdr)) {
6804f736650SSam Li         virtio_error(vdev, "Driver provided input buffer that is too small!");
6814f736650SSam Li         err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
6824f736650SSam Li         goto out;
6834f736650SSam Li     }
6844f736650SSam Li 
6854f736650SSam Li     for (size_t i = sizeof(zrp_hdr); i < zrp_size;
6864f736650SSam Li         i += sizeof(struct virtio_blk_zone_descriptor), ++j) {
6874f736650SSam Li         struct virtio_blk_zone_descriptor desc =
6884f736650SSam Li             (struct virtio_blk_zone_descriptor) {
6894f736650SSam Li                 .z_start = cpu_to_le64(data->zone_report_data.zones[j].start
6904f736650SSam Li                     >> BDRV_SECTOR_BITS),
6914f736650SSam Li                 .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap
6924f736650SSam Li                     >> BDRV_SECTOR_BITS),
6934f736650SSam Li                 .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp
6944f736650SSam Li                     >> BDRV_SECTOR_BITS),
6954f736650SSam Li         };
6964f736650SSam Li 
6974f736650SSam Li         switch (data->zone_report_data.zones[j].type) {
6984f736650SSam Li         case BLK_ZT_CONV:
6994f736650SSam Li             desc.z_type = VIRTIO_BLK_ZT_CONV;
7004f736650SSam Li             break;
7014f736650SSam Li         case BLK_ZT_SWR:
7024f736650SSam Li             desc.z_type = VIRTIO_BLK_ZT_SWR;
7034f736650SSam Li             break;
7044f736650SSam Li         case BLK_ZT_SWP:
7054f736650SSam Li             desc.z_type = VIRTIO_BLK_ZT_SWP;
7064f736650SSam Li             break;
7074f736650SSam Li         default:
7084f736650SSam Li             g_assert_not_reached();
7094f736650SSam Li         }
7104f736650SSam Li 
7114f736650SSam Li         switch (data->zone_report_data.zones[j].state) {
7124f736650SSam Li         case BLK_ZS_RDONLY:
7134f736650SSam Li             desc.z_state = VIRTIO_BLK_ZS_RDONLY;
7144f736650SSam Li             break;
7154f736650SSam Li         case BLK_ZS_OFFLINE:
7164f736650SSam Li             desc.z_state = VIRTIO_BLK_ZS_OFFLINE;
7174f736650SSam Li             break;
7184f736650SSam Li         case BLK_ZS_EMPTY:
7194f736650SSam Li             desc.z_state = VIRTIO_BLK_ZS_EMPTY;
7204f736650SSam Li             break;
7214f736650SSam Li         case BLK_ZS_CLOSED:
7224f736650SSam Li             desc.z_state = VIRTIO_BLK_ZS_CLOSED;
7234f736650SSam Li             break;
7244f736650SSam Li         case BLK_ZS_FULL:
7254f736650SSam Li             desc.z_state = VIRTIO_BLK_ZS_FULL;
7264f736650SSam Li             break;
7274f736650SSam Li         case BLK_ZS_EOPEN:
7284f736650SSam Li             desc.z_state = VIRTIO_BLK_ZS_EOPEN;
7294f736650SSam Li             break;
7304f736650SSam Li         case BLK_ZS_IOPEN:
7314f736650SSam Li             desc.z_state = VIRTIO_BLK_ZS_IOPEN;
7324f736650SSam Li             break;
7334f736650SSam Li         case BLK_ZS_NOT_WP:
7344f736650SSam Li             desc.z_state = VIRTIO_BLK_ZS_NOT_WP;
7354f736650SSam Li             break;
7364f736650SSam Li         default:
7374f736650SSam Li             g_assert_not_reached();
7384f736650SSam Li         }
7394f736650SSam Li 
7404f736650SSam Li         /* TODO: this loop takes O(n^2) time; optimization required. */
7414f736650SSam Li         n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc));
7424f736650SSam Li         if (n != sizeof(desc)) {
7434f736650SSam Li             virtio_error(vdev, "Driver provided input buffer "
7444f736650SSam Li                                "for descriptors that is too small!");
7454f736650SSam Li             err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
7464f736650SSam Li         }
7474f736650SSam Li     }
7484f736650SSam Li 
7494f736650SSam Li out:
7504f736650SSam Li     virtio_blk_req_complete(req, err_status);
7514f736650SSam Li     virtio_blk_free_request(req);
7524f736650SSam Li     g_free(data->zone_report_data.zones);
7534f736650SSam Li     g_free(data);
7544f736650SSam Li }
7554f736650SSam Li 
7564f736650SSam Li static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
7574f736650SSam Li                                          struct iovec *in_iov,
7584f736650SSam Li                                          unsigned in_num)
7594f736650SSam Li {
7604f736650SSam Li     VirtIOBlock *s = req->dev;
7614f736650SSam Li     VirtIODevice *vdev = VIRTIO_DEVICE(s);
7624f736650SSam Li     unsigned int nr_zones;
7634f736650SSam Li     ZoneCmdData *data;
7644f736650SSam Li     int64_t zone_size, offset;
7654f736650SSam Li     uint8_t err_status;
7664f736650SSam Li 
7674f736650SSam Li     if (req->in_len < sizeof(struct virtio_blk_inhdr) +
7684f736650SSam Li             sizeof(struct virtio_blk_zone_report) +
7694f736650SSam Li             sizeof(struct virtio_blk_zone_descriptor)) {
7704f736650SSam Li         virtio_error(vdev, "in buffer too small for zone report");
7714f736650SSam Li         return;
7724f736650SSam Li     }
7734f736650SSam Li 
7744f736650SSam Li     /* start byte offset of the zone report */
7754f736650SSam Li     offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
7764f736650SSam Li     if (!check_zoned_request(s, offset, 0, false, &err_status)) {
7774f736650SSam Li         goto out;
7784f736650SSam Li     }
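    /* Number of zone descriptors that fit in the driver-provided in buffer */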
7794f736650SSam Li     nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
7804f736650SSam Li                 sizeof(struct virtio_blk_zone_report)) /
7814f736650SSam Li                sizeof(struct virtio_blk_zone_descriptor);
7824e92acf7SSam Li     trace_virtio_blk_handle_zone_report(vdev, req,
7834e92acf7SSam Li                                         offset >> BDRV_SECTOR_BITS, nr_zones);
7844f736650SSam Li 
7854f736650SSam Li     zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
7864f736650SSam Li     data = g_malloc(sizeof(ZoneCmdData));
7874f736650SSam Li     data->req = req;
7884f736650SSam Li     data->in_iov = in_iov;
7894f736650SSam Li     data->in_num = in_num;
7904f736650SSam Li     data->zone_report_data.nr_zones = nr_zones;
7914f736650SSam Li     data->zone_report_data.zones = g_malloc(zone_size);
7924f736650SSam Li 
7934f736650SSam Li     blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
7944f736650SSam Li                         data->zone_report_data.zones,
7954f736650SSam Li                         virtio_blk_zone_report_complete, data);
7964f736650SSam Li     return;
7974f736650SSam Li out:
7984f736650SSam Li     virtio_blk_req_complete(req, err_status);
7994f736650SSam Li     virtio_blk_free_request(req);
8004f736650SSam Li }
8014f736650SSam Li 
8024f736650SSam Li static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
8034f736650SSam Li {
8044f736650SSam Li     VirtIOBlockReq *req = opaque;
8054f736650SSam Li     VirtIOBlock *s = req->dev;
8064e92acf7SSam Li     VirtIODevice *vdev = VIRTIO_DEVICE(s);
8074f736650SSam Li     int8_t err_status = VIRTIO_BLK_S_OK;
8084e92acf7SSam Li     trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);
8094f736650SSam Li 
8104f736650SSam Li     if (ret) {
8114f736650SSam Li         err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
8124f736650SSam Li     }
8134f736650SSam Li 
8144f736650SSam Li     virtio_blk_req_complete(req, err_status);
8154f736650SSam Li     virtio_blk_free_request(req);
8164f736650SSam Li }
8174f736650SSam Li 
8184f736650SSam Li static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
8194f736650SSam Li {
8204f736650SSam Li     VirtIOBlock *s = req->dev;
8214f736650SSam Li     VirtIODevice *vdev = VIRTIO_DEVICE(s);
8224f736650SSam Li     BlockDriverState *bs = blk_bs(s->blk);
8234f736650SSam Li     int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
8244f736650SSam Li     uint64_t len;
8254f736650SSam Li     uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
8264f736650SSam Li     uint8_t err_status = VIRTIO_BLK_S_OK;
8274f736650SSam Li 
8284f736650SSam Li     uint32_t type = virtio_ldl_p(vdev, &req->out.type);
8294f736650SSam Li     if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
8304f736650SSam Li         /* Entire drive capacity */
8314f736650SSam Li         offset = 0;
8324f736650SSam Li         len = capacity;
8334e92acf7SSam Li         trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
8344e92acf7SSam Li                                                bs->total_sectors);
8354f736650SSam Li     } else {
8364f736650SSam Li         if (bs->bl.zone_size > capacity - offset) {
8374f736650SSam Li             /* The zoned device allows a smaller last zone. */
8384f736650SSam Li             len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1);
8394f736650SSam Li         } else {
8404f736650SSam Li             len = bs->bl.zone_size;
8414f736650SSam Li         }
8424e92acf7SSam Li         trace_virtio_blk_handle_zone_mgmt(vdev, req, op,
8434e92acf7SSam Li                                           offset >> BDRV_SECTOR_BITS,
8444e92acf7SSam Li                                           len >> BDRV_SECTOR_BITS);
8454f736650SSam Li     }
8464f736650SSam Li 
8474f736650SSam Li     if (!check_zoned_request(s, offset, len, false, &err_status)) {
8484f736650SSam Li         goto out;
8494f736650SSam Li     }
8504f736650SSam Li 
8514f736650SSam Li     blk_aio_zone_mgmt(s->blk, op, offset, len,
8524f736650SSam Li                       virtio_blk_zone_mgmt_complete, req);
8534f736650SSam Li 
8544f736650SSam Li     return 0;
8554f736650SSam Li out:
8564f736650SSam Li     virtio_blk_req_complete(req, err_status);
8574f736650SSam Li     virtio_blk_free_request(req);
8584f736650SSam Li     return err_status;
8594f736650SSam Li }
8604f736650SSam Li 
8614f736650SSam Li static void virtio_blk_zone_append_complete(void *opaque, int ret)
8624f736650SSam Li {
8634f736650SSam Li     ZoneCmdData *data = opaque;
8644f736650SSam Li     VirtIOBlockReq *req = data->req;
8654f736650SSam Li     VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
8664f736650SSam Li     int64_t append_sector, n;
8674f736650SSam Li     uint8_t err_status = VIRTIO_BLK_S_OK;
8684f736650SSam Li 
8694f736650SSam Li     if (ret) {
8704f736650SSam Li         err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
8714f736650SSam Li         goto out;
8724f736650SSam Li     }
8734f736650SSam Li 
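    /* Report the sector where the data was appended back to the driver. */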
8744f736650SSam Li     virtio_stq_p(vdev, &append_sector,
8754f736650SSam Li                  data->zone_append_data.offset >> BDRV_SECTOR_BITS);
8764f736650SSam Li     n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
8774f736650SSam Li                      sizeof(append_sector));
8784f736650SSam Li     if (n != sizeof(append_sector)) {
8794f736650SSam Li         virtio_error(vdev, "Driver provided an input buffer smaller than "
8804f736650SSam Li                            "append_sector");
8814f736650SSam Li         err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
8824f736650SSam Li         goto out;
8834f736650SSam Li     }
8844e92acf7SSam Li     trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);
8854f736650SSam Li 
8864f736650SSam Li out:
8874f736650SSam Li     virtio_blk_req_complete(req, err_status);
8884f736650SSam Li     virtio_blk_free_request(req);
8894f736650SSam Li     g_free(data);
8904f736650SSam Li }
8914f736650SSam Li 
8924f736650SSam Li static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
8934f736650SSam Li                                          struct iovec *out_iov,
8944f736650SSam Li                                          struct iovec *in_iov,
8954f736650SSam Li                                          uint64_t out_num,
8964f736650SSam Li                                          unsigned in_num) {
8974f736650SSam Li     VirtIOBlock *s = req->dev;
8984f736650SSam Li     VirtIODevice *vdev = VIRTIO_DEVICE(s);
8994f736650SSam Li     uint8_t err_status = VIRTIO_BLK_S_OK;
9004f736650SSam Li 
9014f736650SSam Li     int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
9024f736650SSam Li     int64_t len = iov_size(out_iov, out_num);
903b3d9bb9aSStefan Hajnoczi     ZoneCmdData *data;
9044f736650SSam Li 
9054e92acf7SSam Li     trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
9064f736650SSam Li     if (!check_zoned_request(s, offset, len, true, &err_status)) {
9074f736650SSam Li         goto out;
9084f736650SSam Li     }
9094f736650SSam Li 
910b3d9bb9aSStefan Hajnoczi     data = g_malloc(sizeof(ZoneCmdData));
9114f736650SSam Li     data->req = req;
9124f736650SSam Li     data->in_iov = in_iov;
9134f736650SSam Li     data->in_num = in_num;
9144f736650SSam Li     data->zone_append_data.offset = offset;
9154f736650SSam Li     qemu_iovec_init_external(&req->qiov, out_iov, out_num);
91652eb76f4SSam Li 
91752eb76f4SSam Li     block_acct_start(blk_get_stats(s->blk), &req->acct, len,
91852eb76f4SSam Li                      BLOCK_ACCT_ZONE_APPEND);
91952eb76f4SSam Li 
9204f736650SSam Li     blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
9214f736650SSam Li                         virtio_blk_zone_append_complete, data);
9224f736650SSam Li     return 0;
9234f736650SSam Li 
9244f736650SSam Li out:
9254f736650SSam Li     virtio_blk_req_complete(req, err_status);
9264f736650SSam Li     virtio_blk_free_request(req);
9274f736650SSam Li     return err_status;
9284f736650SSam Li }
9294f736650SSam Li 
93020ea686aSGreg Kurz static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
9316e790746SPaolo Bonzini {
9326e790746SPaolo Bonzini     uint32_t type;
933f897bf75SStefan Hajnoczi     struct iovec *in_iov = req->elem.in_sg;
9345636da76SDongli Zhang     struct iovec *out_iov = req->elem.out_sg;
935f897bf75SStefan Hajnoczi     unsigned in_num = req->elem.in_num;
936f897bf75SStefan Hajnoczi     unsigned out_num = req->elem.out_num;
93720ea686aSGreg Kurz     VirtIOBlock *s = req->dev;
93820ea686aSGreg Kurz     VirtIODevice *vdev = VIRTIO_DEVICE(s);
9396e790746SPaolo Bonzini 
940f897bf75SStefan Hajnoczi     if (req->elem.out_num < 1 || req->elem.in_num < 1) {
94120ea686aSGreg Kurz         virtio_error(vdev, "virtio-blk missing headers");
94220ea686aSGreg Kurz         return -1;
9436e790746SPaolo Bonzini     }
9446e790746SPaolo Bonzini 
9455636da76SDongli Zhang     if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
946827805a2SFam Zheng                             sizeof(req->out)) != sizeof(req->out))) {
94720ea686aSGreg Kurz         virtio_error(vdev, "virtio-blk request outhdr too short");
94820ea686aSGreg Kurz         return -1;
949827805a2SFam Zheng     }
950ee17e848SFam Zheng 
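    /* Strip the outhdr from out_iov; record the change so it can be undone. */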
9517bd04a04SStefan Hajnoczi     iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
9527bd04a04SStefan Hajnoczi                                &req->outhdr_undo);
953ee17e848SFam Zheng 
95412048545SGonglei     if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
95520ea686aSGreg Kurz         virtio_error(vdev, "virtio-blk request inhdr too short");
9567bd04a04SStefan Hajnoczi         iov_discard_undo(&req->outhdr_undo);
95720ea686aSGreg Kurz         return -1;
958ee17e848SFam Zheng     }
959ee17e848SFam Zheng 
9602a6cdd6dSPaolo Bonzini     /* We always touch the last byte, so just see how big in_iov is.  */
9612a6cdd6dSPaolo Bonzini     req->in_len = iov_size(in_iov, in_num);
962ee17e848SFam Zheng     req->in = (void *)in_iov[in_num - 1].iov_base
963ee17e848SFam Zheng               + in_iov[in_num - 1].iov_len
964ee17e848SFam Zheng               - sizeof(struct virtio_blk_inhdr);
9657bd04a04SStefan Hajnoczi     iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
9667bd04a04SStefan Hajnoczi                               &req->inhdr_undo);
9676e790746SPaolo Bonzini 
9689a6719d5SStefano Garzarella     type = virtio_ldl_p(vdev, &req->out.type);
9696e790746SPaolo Bonzini 
97095f7142aSPeter Lieven     /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
971631b22eaSStefan Weil      * is an optional flag. Although a guest should not send this flag if
97295f7142aSPeter Lieven      * not negotiated, we ignored it in the past, so keep ignoring it. */
97395f7142aSPeter Lieven     switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
97495f7142aSPeter Lieven     case VIRTIO_BLK_T_IN:
97595f7142aSPeter Lieven     {
97695f7142aSPeter Lieven         bool is_write = type & VIRTIO_BLK_T_OUT;
9779a6719d5SStefano Garzarella         req->sector_num = virtio_ldq_p(vdev, &req->out.sector);
97895f7142aSPeter Lieven 
97995f7142aSPeter Lieven         if (is_write) {
9805636da76SDongli Zhang             qemu_iovec_init_external(&req->qiov, out_iov, out_num);
981a576ceacSStefan Hajnoczi             trace_virtio_blk_handle_write(vdev, req, req->sector_num,
98295f7142aSPeter Lieven                                           req->qiov.size / BDRV_SECTOR_SIZE);
98395f7142aSPeter Lieven         } else {
98495f7142aSPeter Lieven             qemu_iovec_init_external(&req->qiov, in_iov, in_num);
985a576ceacSStefan Hajnoczi             trace_virtio_blk_handle_read(vdev, req, req->sector_num,
98695f7142aSPeter Lieven                                          req->qiov.size / BDRV_SECTOR_SIZE);
98795f7142aSPeter Lieven         }
98895f7142aSPeter Lieven 
9899a6719d5SStefano Garzarella         if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
99095f7142aSPeter Lieven             virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
9919a6719d5SStefano Garzarella             block_acct_invalid(blk_get_stats(s->blk),
99201762e03SAlberto Garcia                                is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
99395f7142aSPeter Lieven             virtio_blk_free_request(req);
99420ea686aSGreg Kurz             return 0;
99595f7142aSPeter Lieven         }
99695f7142aSPeter Lieven 
9979a6719d5SStefano Garzarella         block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
99895f7142aSPeter Lieven                          is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
99995f7142aSPeter Lieven 
100095f7142aSPeter Lieven         /* merge would exceed maximum number of requests or IO direction
100195f7142aSPeter Lieven          * changes */
100295f7142aSPeter Lieven         if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
1003c99495acSPeter Lieven                                   is_write != mrb->is_write ||
10049a6719d5SStefano Garzarella                                   !s->conf.request_merging)) {
1005baf42268SStefan Hajnoczi             virtio_blk_submit_multireq(s, mrb);
100695f7142aSPeter Lieven         }
100795f7142aSPeter Lieven 
100895f7142aSPeter Lieven         assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
100995f7142aSPeter Lieven         mrb->reqs[mrb->num_reqs++] = req;
101095f7142aSPeter Lieven         mrb->is_write = is_write;
101195f7142aSPeter Lieven         break;
101295f7142aSPeter Lieven     }
101395f7142aSPeter Lieven     case VIRTIO_BLK_T_FLUSH:
10146e790746SPaolo Bonzini         virtio_blk_handle_flush(req, mrb);
101595f7142aSPeter Lieven         break;
10164f736650SSam Li     case VIRTIO_BLK_T_ZONE_REPORT:
10174f736650SSam Li         virtio_blk_handle_zone_report(req, in_iov, in_num);
10184f736650SSam Li         break;
10194f736650SSam Li     case VIRTIO_BLK_T_ZONE_OPEN:
10204f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN);
10214f736650SSam Li         break;
10224f736650SSam Li     case VIRTIO_BLK_T_ZONE_CLOSE:
10234f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE);
10244f736650SSam Li         break;
10254f736650SSam Li     case VIRTIO_BLK_T_ZONE_FINISH:
10264f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH);
10274f736650SSam Li         break;
10284f736650SSam Li     case VIRTIO_BLK_T_ZONE_RESET:
10294f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
10304f736650SSam Li         break;
10314f736650SSam Li     case VIRTIO_BLK_T_ZONE_RESET_ALL:
10324f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
10334f736650SSam Li         break;
103495f7142aSPeter Lieven     case VIRTIO_BLK_T_SCSI_CMD:
10356e790746SPaolo Bonzini         virtio_blk_handle_scsi(req);
103695f7142aSPeter Lieven         break;
103795f7142aSPeter Lieven     case VIRTIO_BLK_T_GET_ID:
103895f7142aSPeter Lieven     {
10396e790746SPaolo Bonzini         /*
10406e790746SPaolo Bonzini          * NB: per the existing serial number string convention, the string
10416e790746SPaolo Bonzini          * is terminated by '\0' only when it is shorter than the buffer.
10426e790746SPaolo Bonzini          */
10432a30307fSMarkus Armbruster         const char *serial = s->conf.serial ? s->conf.serial : "";
1044a83ceea8SMarc Marí         size_t size = MIN(strlen(serial) + 1,
1045a83ceea8SMarc Marí                           MIN(iov_size(in_iov, in_num),
1046a83ceea8SMarc Marí                               VIRTIO_BLK_ID_BYTES));
1047a83ceea8SMarc Marí         iov_from_buf(in_iov, in_num, 0, serial, size);
10486e790746SPaolo Bonzini         virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
1049671ec3f0SFam Zheng         virtio_blk_free_request(req);
105095f7142aSPeter Lieven         break;
105195f7142aSPeter Lieven     }
10524f736650SSam Li     case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
10534f736650SSam Li         /*
10544f736650SSam Li          * Pass out_iov/out_num and in_iov/in_num here because it is not
10554f736650SSam Li          * safe to access req->elem.out_sg directly: it may have been
10564f736650SSam Li          * modified by virtio_blk_handle_request().
10574f736650SSam Li          */
10584f736650SSam Li         virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
10594f736650SSam Li         break;
106037b06f8dSStefano Garzarella     /*
106137b06f8dSStefano Garzarella      * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with the
106237b06f8dSStefano Garzarella      * VIRTIO_BLK_T_OUT flag set. The switch expression masks that flag out, so
106337b06f8dSStefano Garzarella      * the case labels mask it too; below we check that the request had it set.
106437b06f8dSStefano Garzarella      */
106537b06f8dSStefano Garzarella     case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
106637b06f8dSStefano Garzarella     case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
106737b06f8dSStefano Garzarella     {
106837b06f8dSStefano Garzarella         struct virtio_blk_discard_write_zeroes dwz_hdr;
106937b06f8dSStefano Garzarella         size_t out_len = iov_size(out_iov, out_num);
107037b06f8dSStefano Garzarella         bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
107137b06f8dSStefano Garzarella                                VIRTIO_BLK_T_WRITE_ZEROES;
107237b06f8dSStefano Garzarella         uint8_t err_status;
107337b06f8dSStefano Garzarella 
107437b06f8dSStefano Garzarella         /*
107537b06f8dSStefano Garzarella          * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
107637b06f8dSStefano Garzarella          * more than one segment.
107737b06f8dSStefano Garzarella          */
107837b06f8dSStefano Garzarella         if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
107937b06f8dSStefano Garzarella                      out_len > sizeof(dwz_hdr))) {
108037b06f8dSStefano Garzarella             virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
108137b06f8dSStefano Garzarella             virtio_blk_free_request(req);
108237b06f8dSStefano Garzarella             return 0;
108337b06f8dSStefano Garzarella         }
108437b06f8dSStefano Garzarella 
108537b06f8dSStefano Garzarella         if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
108637b06f8dSStefano Garzarella                                 sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
10877bd04a04SStefan Hajnoczi             iov_discard_undo(&req->inhdr_undo);
10887bd04a04SStefan Hajnoczi             iov_discard_undo(&req->outhdr_undo);
108937b06f8dSStefano Garzarella             virtio_error(vdev, "virtio-blk discard/write_zeroes header"
109037b06f8dSStefano Garzarella                          " too short");
109137b06f8dSStefano Garzarella             return -1;
109237b06f8dSStefano Garzarella         }
109337b06f8dSStefano Garzarella 
109437b06f8dSStefano Garzarella         err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
109537b06f8dSStefano Garzarella                                                             is_write_zeroes);
109637b06f8dSStefano Garzarella         if (err_status != VIRTIO_BLK_S_OK) {
109737b06f8dSStefano Garzarella             virtio_blk_req_complete(req, err_status);
109837b06f8dSStefano Garzarella             virtio_blk_free_request(req);
109937b06f8dSStefano Garzarella         }
110037b06f8dSStefano Garzarella 
110137b06f8dSStefano Garzarella         break;
110237b06f8dSStefano Garzarella     }
110395f7142aSPeter Lieven     default:
11046e790746SPaolo Bonzini         virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
1105671ec3f0SFam Zheng         virtio_blk_free_request(req);
11066e790746SPaolo Bonzini     }
110720ea686aSGreg Kurz     return 0;
11086e790746SPaolo Bonzini }
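/*
 * Note (editorial, for illustration): the descriptor chain handled above
 * follows the usual virtio-blk layout. req->out holds the fixed request
 * header (type/sector), the data payload sits in out_iov (writes) or in_iov
 * (reads), and iov_discard_back_undoable() split the final byte off in_iov as
 * the virtio_blk_inhdr status that virtio_blk_req_complete() fills in. The
 * undo records let the original iovecs be restored before the element is
 * pushed back onto the virtqueue.
 */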
11096e790746SPaolo Bonzini 
1110186b9691SStefan Hajnoczi void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
11116e790746SPaolo Bonzini {
11126e790746SPaolo Bonzini     VirtIOBlockReq *req;
111395f7142aSPeter Lieven     MultiReqBuffer mrb = {};
1114d0435bc5SStefan Hajnoczi     bool suppress_notifications = virtio_queue_get_notification(vq);
11156e790746SPaolo Bonzini 
1116ccee48aaSStefan Hajnoczi     defer_call_begin();
1117fc73548eSStefan Hajnoczi 
11189ef9d402SStefan Hajnoczi     do {
1119d0435bc5SStefan Hajnoczi         if (suppress_notifications) {
11209ef9d402SStefan Hajnoczi             virtio_queue_set_notification(vq, 0);
1121d0435bc5SStefan Hajnoczi         }
11229ef9d402SStefan Hajnoczi 
1123edaffd9fSStefan Hajnoczi         while ((req = virtio_blk_get_request(s, vq))) {
112420ea686aSGreg Kurz             if (virtio_blk_handle_request(req, &mrb)) {
112520ea686aSGreg Kurz                 virtqueue_detach_element(req->vq, &req->elem, 0);
112620ea686aSGreg Kurz                 virtio_blk_free_request(req);
112720ea686aSGreg Kurz                 break;
112820ea686aSGreg Kurz             }
11296e790746SPaolo Bonzini         }
11306e790746SPaolo Bonzini 
1131d0435bc5SStefan Hajnoczi         if (suppress_notifications) {
11329ef9d402SStefan Hajnoczi             virtio_queue_set_notification(vq, 1);
1133d0435bc5SStefan Hajnoczi         }
11349ef9d402SStefan Hajnoczi     } while (!virtio_queue_empty(vq));
11359ef9d402SStefan Hajnoczi 
113695f7142aSPeter Lieven     if (mrb.num_reqs) {
1137baf42268SStefan Hajnoczi         virtio_blk_submit_multireq(s, &mrb);
113895f7142aSPeter Lieven     }
1139fc73548eSStefan Hajnoczi 
1140ccee48aaSStefan Hajnoczi     defer_call_end();
11416e790746SPaolo Bonzini }
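/*
 * Note (editorial, for illustration): the loop above uses the usual virtio
 * notification-suppression pattern. Guest notifications are disabled while
 * requests are popped, re-enabled afterwards, and virtio_queue_empty() is
 * checked once more. The re-check closes the race where the guest enqueues a
 * buffer after the last pop but before notifications are back on; without it
 * such a request could sit unprocessed until the next kick. The
 * defer_call_begin()/defer_call_end() pair brackets the loop so work deferred
 * with defer_call(), such as flushing batched I/O, runs once at the end.
 */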
11426e790746SPaolo Bonzini 
11438a2fad57SMichael S. Tsirkin static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
11448a2fad57SMichael S. Tsirkin {
11458a2fad57SMichael S. Tsirkin     VirtIOBlock *s = (VirtIOBlock *)vdev;
11468a2fad57SMichael S. Tsirkin 
11473cdaf3ddSStefan Hajnoczi     if (!s->ioeventfd_disabled && !s->ioeventfd_started) {
11478a2fad57SMichael S. Tsirkin         /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK, so start
11493cdaf3ddSStefan Hajnoczi          * ioeventfd here instead of waiting for .set_status().
11508a2fad57SMichael S. Tsirkin          */
11519ffe337cSPaolo Bonzini         virtio_device_start_ioeventfd(vdev);
11523cdaf3ddSStefan Hajnoczi         if (!s->ioeventfd_disabled) {
11538a2fad57SMichael S. Tsirkin             return;
11548a2fad57SMichael S. Tsirkin         }
11558a2fad57SMichael S. Tsirkin     }
1156b6948ab0SStefan Hajnoczi 
1157186b9691SStefan Hajnoczi     virtio_blk_handle_vq(s, vq);
11588a2fad57SMichael S. Tsirkin }
11598a2fad57SMichael S. Tsirkin 
1160a937f8e8SStefan Hajnoczi static void virtio_blk_dma_restart_bh(void *opaque)
11616e790746SPaolo Bonzini {
116271ee0cddSStefan Hajnoczi     VirtIOBlockReq *req = opaque;
116371ee0cddSStefan Hajnoczi     VirtIOBlock *s = req->dev; /* we're called with at least one request */
1164a937f8e8SStefan Hajnoczi 
116595f7142aSPeter Lieven     MultiReqBuffer mrb = {};
11666e790746SPaolo Bonzini 
11676e790746SPaolo Bonzini     while (req) {
11681bdb176aSzhanghailiang         VirtIOBlockReq *next = req->next;
116920ea686aSGreg Kurz         if (virtio_blk_handle_request(req, &mrb)) {
117020ea686aSGreg Kurz             /* Device is now broken and won't do any processing until it gets
117120ea686aSGreg Kurz              * reset. Already queued requests will be lost: let's purge them.
117220ea686aSGreg Kurz              */
117320ea686aSGreg Kurz             while (req) {
117420ea686aSGreg Kurz                 next = req->next;
117520ea686aSGreg Kurz                 virtqueue_detach_element(req->vq, &req->elem, 0);
117620ea686aSGreg Kurz                 virtio_blk_free_request(req);
117720ea686aSGreg Kurz                 req = next;
117820ea686aSGreg Kurz             }
117920ea686aSGreg Kurz             break;
118020ea686aSGreg Kurz         }
11811bdb176aSzhanghailiang         req = next;
11826e790746SPaolo Bonzini     }
11836e790746SPaolo Bonzini 
118495f7142aSPeter Lieven     if (mrb.num_reqs) {
1185baf42268SStefan Hajnoczi         virtio_blk_submit_multireq(s, &mrb);
118695f7142aSPeter Lieven     }
1187a937f8e8SStefan Hajnoczi 
1188a937f8e8SStefan Hajnoczi     /* Paired with inc in virtio_blk_dma_restart_cb() */
1189680f2002SKevin Wolf     blk_dec_in_flight(s->conf.conf.blk);
11906e790746SPaolo Bonzini }
11916e790746SPaolo Bonzini 
1192538f0497SPhilippe Mathieu-Daudé static void virtio_blk_dma_restart_cb(void *opaque, bool running,
11936e790746SPaolo Bonzini                                       RunState state)
11946e790746SPaolo Bonzini {
11956e790746SPaolo Bonzini     VirtIOBlock *s = opaque;
119671ee0cddSStefan Hajnoczi     uint16_t num_queues = s->conf.num_queues;
1197b3d9bb9aSStefan Hajnoczi     g_autofree VirtIOBlockReq **vq_rq = NULL;
1198b3d9bb9aSStefan Hajnoczi     VirtIOBlockReq *rq;
11996e790746SPaolo Bonzini 
12006e790746SPaolo Bonzini     if (!running) {
12016e790746SPaolo Bonzini         return;
12026e790746SPaolo Bonzini     }
12036e790746SPaolo Bonzini 
120471ee0cddSStefan Hajnoczi     /* Split the device-wide s->rq request list into per-vq request lists */
1205b3d9bb9aSStefan Hajnoczi     vq_rq = g_new0(VirtIOBlockReq *, num_queues);
120671ee0cddSStefan Hajnoczi 
120771ee0cddSStefan Hajnoczi     WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
120871ee0cddSStefan Hajnoczi         rq = s->rq;
120971ee0cddSStefan Hajnoczi         s->rq = NULL;
121071ee0cddSStefan Hajnoczi     }
121171ee0cddSStefan Hajnoczi 
121271ee0cddSStefan Hajnoczi     while (rq) {
121371ee0cddSStefan Hajnoczi         VirtIOBlockReq *next = rq->next;
121471ee0cddSStefan Hajnoczi         uint16_t idx = virtio_get_queue_index(rq->vq);
121571ee0cddSStefan Hajnoczi 
1216f2eea93cSStefan Hajnoczi         /* Only num_queues vqs were created so vq_rq[idx] is within bounds */
1217f2eea93cSStefan Hajnoczi         assert(idx < num_queues);
121871ee0cddSStefan Hajnoczi         rq->next = vq_rq[idx];
121971ee0cddSStefan Hajnoczi         vq_rq[idx] = rq;
122071ee0cddSStefan Hajnoczi         rq = next;
122171ee0cddSStefan Hajnoczi     }
122271ee0cddSStefan Hajnoczi 
122371ee0cddSStefan Hajnoczi     /* Schedule a BH to submit the requests in each vq's AioContext */
122471ee0cddSStefan Hajnoczi     for (uint16_t i = 0; i < num_queues; i++) {
122571ee0cddSStefan Hajnoczi         if (!vq_rq[i]) {
122671ee0cddSStefan Hajnoczi             continue;
122771ee0cddSStefan Hajnoczi         }
122871ee0cddSStefan Hajnoczi 
1229a937f8e8SStefan Hajnoczi         /* Paired with dec in virtio_blk_dma_restart_bh() */
1230680f2002SKevin Wolf         blk_inc_in_flight(s->conf.conf.blk);
1231a937f8e8SStefan Hajnoczi 
123271ee0cddSStefan Hajnoczi         aio_bh_schedule_oneshot(s->vq_aio_context[i],
123371ee0cddSStefan Hajnoczi                                 virtio_blk_dma_restart_bh,
123471ee0cddSStefan Hajnoczi                                 vq_rq[i]);
123571ee0cddSStefan Hajnoczi     }
12366e790746SPaolo Bonzini }
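/*
 * Worked example (editorial): with num_queues = 2 and a saved list
 * r0(vq1) -> r1(vq0) -> r2(vq1), the splitting loop above yields
 * vq_rq[0] = r1 and vq_rq[1] = r2 -> r0. Head insertion reverses the order
 * within each per-virtqueue list; each list is then resubmitted by
 * virtio_blk_dma_restart_bh() in that virtqueue's AioContext.
 */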
12376e790746SPaolo Bonzini 
12386e790746SPaolo Bonzini static void virtio_blk_reset(VirtIODevice *vdev)
12396e790746SPaolo Bonzini {
12406e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
124126307f6aSFam Zheng     VirtIOBlockReq *req;
12426e790746SPaolo Bonzini 
12439c67f33fSStefan Hajnoczi     /* Dataplane has stopped... */
12443cdaf3ddSStefan Hajnoczi     assert(!s->ioeventfd_started);
12459c67f33fSStefan Hajnoczi 
12469c67f33fSStefan Hajnoczi     /* ...but requests may still be in flight. */
12476e40b3bfSAlexander Yarygin     blk_drain(s->blk);
12486e40b3bfSAlexander Yarygin 
124926307f6aSFam Zheng     /* We drop queued requests after blk_drain() because blk_drain() itself can
125026307f6aSFam Zheng      * produce them. */
12519c67f33fSStefan Hajnoczi     WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
125226307f6aSFam Zheng         while (s->rq) {
125326307f6aSFam Zheng             req = s->rq;
125426307f6aSFam Zheng             s->rq = req->next;
12559c67f33fSStefan Hajnoczi 
12569c67f33fSStefan Hajnoczi             /* No other threads can access req->vq here */
125797b93c8aSStefan Hajnoczi             virtqueue_detach_element(req->vq, &req->elem, 0);
12589c67f33fSStefan Hajnoczi 
125926307f6aSFam Zheng             virtio_blk_free_request(req);
126026307f6aSFam Zheng         }
12619c67f33fSStefan Hajnoczi     }
126226307f6aSFam Zheng 
12634be74634SMarkus Armbruster     blk_set_enable_write_cache(s->blk, s->original_wce);
12646e790746SPaolo Bonzini }
12656e790746SPaolo Bonzini 
12666e790746SPaolo Bonzini /* Coalesce internal state and copy it to PCI I/O region 0.
12676e790746SPaolo Bonzini  */
12686e790746SPaolo Bonzini static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
12696e790746SPaolo Bonzini {
12706e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
12712a30307fSMarkus Armbruster     BlockConf *conf = &s->conf.conf;
12724f736650SSam Li     BlockDriverState *bs = blk_bs(s->blk);
12736e790746SPaolo Bonzini     struct virtio_blk_config blkcfg;
12746e790746SPaolo Bonzini     uint64_t capacity;
127517d0bc01SStefan Hajnoczi     int64_t length;
1276f7516731SMarkus Armbruster     int blk_size = conf->logical_block_size;
12776e790746SPaolo Bonzini 
12784be74634SMarkus Armbruster     blk_get_geometry(s->blk, &capacity);
12796e790746SPaolo Bonzini     memset(&blkcfg, 0, sizeof(blkcfg));
1280783d1897SRusty Russell     virtio_stq_p(vdev, &blkcfg.capacity, capacity);
12811bf8a989SDenis Plotnikov     virtio_stl_p(vdev, &blkcfg.seg_max,
12821bf8a989SDenis Plotnikov                  s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
1283907eb3e5SMichael S. Tsirkin     virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
1284783d1897SRusty Russell     virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
1285f7516731SMarkus Armbruster     virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
12866abee260SRoman Kagan     virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
1287907eb3e5SMichael S. Tsirkin     blkcfg.geometry.heads = conf->heads;
12886e790746SPaolo Bonzini     /*
12896e790746SPaolo Bonzini      * We must ensure that the block device capacity is a multiple of
1290e03ba136SPeter Maydell      * the logical block size. If that is not the case, use
12916e790746SPaolo Bonzini      * sector_mask to adapt the geometry so that it gives a correct picture.
12926e790746SPaolo Bonzini      * For those devices where the capacity is OK for the given geometry
1293e03ba136SPeter Maydell      * we don't touch the sector value of the geometry, since some devices
12946e790746SPaolo Bonzini      * (like s390 dasd) need a specific value. Here the capacity is already
12956e790746SPaolo Bonzini      * cyls*heads*secs*blk_size and the sector value is not the block size
12966e790746SPaolo Bonzini      * divided by 512 - instead it is the number of blk_size blocks
12976e790746SPaolo Bonzini      * per track (cylinder).
12986e790746SPaolo Bonzini      */
129917d0bc01SStefan Hajnoczi     length = blk_getlength(s->blk);
130017d0bc01SStefan Hajnoczi     if (length > 0 && length / conf->heads / conf->secs % blk_size) {
1301907eb3e5SMichael S. Tsirkin         blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
13026e790746SPaolo Bonzini     } else {
1303907eb3e5SMichael S. Tsirkin         blkcfg.geometry.sectors = conf->secs;
13046e790746SPaolo Bonzini     }
13056e790746SPaolo Bonzini     blkcfg.size_max = 0;
1306f7516731SMarkus Armbruster     blkcfg.physical_block_exp = get_physical_block_exp(conf);
13076e790746SPaolo Bonzini     blkcfg.alignment_offset = 0;
13084be74634SMarkus Armbruster     blkcfg.wce = blk_enable_write_cache(s->blk);
13092f270590SStefan Hajnoczi     virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
131037b06f8dSStefano Garzarella     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
1311fb0b154cSAkihiko Odaki         uint32_t discard_granularity = conf->discard_granularity;
1312fb0b154cSAkihiko Odaki         if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
1313fb0b154cSAkihiko Odaki             discard_granularity = blk_size;
1314fb0b154cSAkihiko Odaki         }
131537b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
131637b06f8dSStefano Garzarella                      s->conf.max_discard_sectors);
131737b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
1318fb0b154cSAkihiko Odaki                      discard_granularity >> BDRV_SECTOR_BITS);
131937b06f8dSStefano Garzarella         /*
132037b06f8dSStefano Garzarella          * We support only one segment per request since multiple segments
132137b06f8dSStefano Garzarella          * are not widely used and there are no userspace APIs that allow
132237b06f8dSStefano Garzarella          * applications to submit multiple segments in a single call.
132337b06f8dSStefano Garzarella          */
132437b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
132537b06f8dSStefano Garzarella     }
132637b06f8dSStefano Garzarella     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
132737b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
132837b06f8dSStefano Garzarella                      s->conf.max_write_zeroes_sectors);
132937b06f8dSStefano Garzarella         blkcfg.write_zeroes_may_unmap = 1;
133037b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
133137b06f8dSStefano Garzarella     }
13324f736650SSam Li     if (bs->bl.zoned != BLK_Z_NONE) {
13334f736650SSam Li         switch (bs->bl.zoned) {
13344f736650SSam Li         case BLK_Z_HM:
13354f736650SSam Li             blkcfg.zoned.model = VIRTIO_BLK_Z_HM;
13364f736650SSam Li             break;
13374f736650SSam Li         case BLK_Z_HA:
13384f736650SSam Li             blkcfg.zoned.model = VIRTIO_BLK_Z_HA;
13394f736650SSam Li             break;
13404f736650SSam Li         default:
13414f736650SSam Li             g_assert_not_reached();
13424f736650SSam Li         }
13434f736650SSam Li 
13444f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
13454f736650SSam Li                      bs->bl.zone_size / 512);
13464f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
13474f736650SSam Li                      bs->bl.max_active_zones);
13484f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
13494f736650SSam Li                      bs->bl.max_open_zones);
13504f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size);
13514f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
13524f736650SSam Li                      bs->bl.max_append_sectors);
13534f736650SSam Li     } else {
13544f736650SSam Li         blkcfg.zoned.model = VIRTIO_BLK_Z_NONE;
13554f736650SSam Li     }
135620764be0SStefano Garzarella     memcpy(config, &blkcfg, s->config_size);
13576e790746SPaolo Bonzini }
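/*
 * Note (editorial): when seg-max-adjust is enabled, seg_max is reported as
 * queue_size - 2, e.g. 254 for the common queue-size of 256. The two reserved
 * descriptors leave room for the request header and the status byte, so even
 * a maximally fragmented payload still fits in one descriptor chain.
 */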
13586e790746SPaolo Bonzini 
13596e790746SPaolo Bonzini static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
13606e790746SPaolo Bonzini {
13616e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
13626e790746SPaolo Bonzini     struct virtio_blk_config blkcfg;
13636e790746SPaolo Bonzini 
136420764be0SStefano Garzarella     memcpy(&blkcfg, config, s->config_size);
13656d7e73d6SFam Zheng 
13664be74634SMarkus Armbruster     blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
13676e790746SPaolo Bonzini }
13686e790746SPaolo Bonzini 
13699d5b731dSJason Wang static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
13709d5b731dSJason Wang                                         Error **errp)
13716e790746SPaolo Bonzini {
13726e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
13736e790746SPaolo Bonzini 
1374bbe8bd4dSStefano Garzarella     /* Firstly sync all virtio-blk possible supported features */
1375bbe8bd4dSStefano Garzarella     features |= s->host_features;
1376bbe8bd4dSStefano Garzarella 
13770cd09c3aSCornelia Huck     virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
13780cd09c3aSCornelia Huck     virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
13790cd09c3aSCornelia Huck     virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
13800cd09c3aSCornelia Huck     virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
138195129d6fSCornelia Huck     if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
1382bbe8bd4dSStefano Garzarella         if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
1383efb8206cSJason Wang             error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
1384efb8206cSJason Wang             return 0;
1385efb8206cSJason Wang         }
1386efb8206cSJason Wang     } else {
1387c9b11f97SJason Wang         virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
1388efb8206cSJason Wang         virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
1389efb8206cSJason Wang     }
13906e790746SPaolo Bonzini 
13915f258577SEvgeny Yakovlev     if (blk_enable_write_cache(s->blk) ||
13925f258577SEvgeny Yakovlev         (s->conf.x_enable_wce_if_config_wce &&
13935f258577SEvgeny Yakovlev          virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
13940cd09c3aSCornelia Huck         virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
13954be74634SMarkus Armbruster     }
139686b1cf32SKevin Wolf     if (!blk_is_writable(s->blk)) {
13970cd09c3aSCornelia Huck         virtio_add_feature(&features, VIRTIO_BLK_F_RO);
13984be74634SMarkus Armbruster     }
13992f270590SStefan Hajnoczi     if (s->conf.num_queues > 1) {
14002f270590SStefan Hajnoczi         virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
14012f270590SStefan Hajnoczi     }
14026e790746SPaolo Bonzini 
14036e790746SPaolo Bonzini     return features;
14046e790746SPaolo Bonzini }
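/*
 * Note (editorial): VIRTIO_BLK_F_SCSI (SCSI command passthrough) exists only
 * for legacy devices; the VIRTIO 1.0 specification dropped it. That is why the
 * code above rejects scsi=on when VIRTIO_F_VERSION_1 is enabled and only adds
 * the feature bit on the legacy path.
 */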
14056e790746SPaolo Bonzini 
14066e790746SPaolo Bonzini static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
14076e790746SPaolo Bonzini {
14086e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
14096e790746SPaolo Bonzini 
14109ffe337cSPaolo Bonzini     if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
14113cdaf3ddSStefan Hajnoczi         assert(!s->ioeventfd_started);
14126e790746SPaolo Bonzini     }
14136e790746SPaolo Bonzini 
14146e790746SPaolo Bonzini     if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
14156e790746SPaolo Bonzini         return;
14166e790746SPaolo Bonzini     }
14176e790746SPaolo Bonzini 
1418ef5bc962SPaolo Bonzini     /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
1419ef5bc962SPaolo Bonzini      * cache flushes.  Thus, the "auto writethrough" behavior is never
1420ef5bc962SPaolo Bonzini      * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
1421ef5bc962SPaolo Bonzini      * Leaving it enabled would break the following sequence:
1422ef5bc962SPaolo Bonzini      *
1423ef5bc962SPaolo Bonzini      *     Guest started with "-drive cache=writethrough"
1424ef5bc962SPaolo Bonzini      *     Guest sets status to 0
1425ef5bc962SPaolo Bonzini      *     Guest sets DRIVER bit in status field
1426ef5bc962SPaolo Bonzini      *     Guest reads host features (WCE=0, CONFIG_WCE=1)
1427ef5bc962SPaolo Bonzini      *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
1428ef5bc962SPaolo Bonzini      *     Guest writes 1 to the WCE configuration field (writeback mode)
1429ef5bc962SPaolo Bonzini      *     Guest sets DRIVER_OK bit in status field
1430ef5bc962SPaolo Bonzini      *
14314be74634SMarkus Armbruster      * s->blk would erroneously be placed in writethrough mode.
1432ef5bc962SPaolo Bonzini      */
143395129d6fSCornelia Huck     if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
14344be74634SMarkus Armbruster         blk_set_enable_write_cache(s->blk,
143595129d6fSCornelia Huck                                    virtio_vdev_has_feature(vdev,
143695129d6fSCornelia Huck                                                            VIRTIO_BLK_F_WCE));
14376e790746SPaolo Bonzini     }
1438ef5bc962SPaolo Bonzini }
14396e790746SPaolo Bonzini 
1440b2b295a7SGreg Kurz static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
1441b2b295a7SGreg Kurz {
1442b2b295a7SGreg Kurz     VirtIOBlock *s = VIRTIO_BLK(vdev);
14439c67f33fSStefan Hajnoczi 
14449c67f33fSStefan Hajnoczi     WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
1445b2b295a7SGreg Kurz         VirtIOBlockReq *req = s->rq;
14466e790746SPaolo Bonzini 
14476e790746SPaolo Bonzini         while (req) {
14486e790746SPaolo Bonzini             qemu_put_sbyte(f, 1);
144930d8bf6dSStefan Hajnoczi 
145030d8bf6dSStefan Hajnoczi             if (s->conf.num_queues > 1) {
145130d8bf6dSStefan Hajnoczi                 qemu_put_be32(f, virtio_get_queue_index(req->vq));
145230d8bf6dSStefan Hajnoczi             }
145330d8bf6dSStefan Hajnoczi 
145486044b24SJason Wang             qemu_put_virtqueue_element(vdev, f, &req->elem);
14556e790746SPaolo Bonzini             req = req->next;
14566e790746SPaolo Bonzini         }
14579c67f33fSStefan Hajnoczi     }
14589c67f33fSStefan Hajnoczi 
14596e790746SPaolo Bonzini     qemu_put_sbyte(f, 0);
14606e790746SPaolo Bonzini }
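/*
 * Note (editorial): the migration stream written above is, for each queued
 * request, a 0x01 marker byte, a big-endian 32-bit virtqueue index (only when
 * num_queues > 1), and the serialized VirtQueueElement; a final 0x00 byte
 * terminates the list. virtio_blk_load_device() below parses exactly this
 * layout.
 */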
14616e790746SPaolo Bonzini 
1462b2b295a7SGreg Kurz static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
1463b2b295a7SGreg Kurz                                   int version_id)
1464b2b295a7SGreg Kurz {
1465b2b295a7SGreg Kurz     VirtIOBlock *s = VIRTIO_BLK(vdev);
1466b2b295a7SGreg Kurz 
14676e790746SPaolo Bonzini     while (qemu_get_sbyte(f)) {
146830d8bf6dSStefan Hajnoczi         unsigned nvqs = s->conf.num_queues;
146930d8bf6dSStefan Hajnoczi         unsigned vq_idx = 0;
1470ab281c17SPaolo Bonzini         VirtIOBlockReq *req;
147130d8bf6dSStefan Hajnoczi 
147230d8bf6dSStefan Hajnoczi         if (nvqs > 1) {
147330d8bf6dSStefan Hajnoczi             vq_idx = qemu_get_be32(f);
147430d8bf6dSStefan Hajnoczi 
147530d8bf6dSStefan Hajnoczi             if (vq_idx >= nvqs) {
147630d8bf6dSStefan Hajnoczi                 error_report("Invalid virtqueue index in request list: %#x",
147730d8bf6dSStefan Hajnoczi                              vq_idx);
147830d8bf6dSStefan Hajnoczi                 return -EINVAL;
147930d8bf6dSStefan Hajnoczi             }
148030d8bf6dSStefan Hajnoczi         }
148130d8bf6dSStefan Hajnoczi 
14828607f5c3SJason Wang         req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
148330d8bf6dSStefan Hajnoczi         virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
14849c67f33fSStefan Hajnoczi 
14859c67f33fSStefan Hajnoczi         WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
14866e790746SPaolo Bonzini             req->next = s->rq;
14876e790746SPaolo Bonzini             s->rq = req;
14886e790746SPaolo Bonzini         }
14899c67f33fSStefan Hajnoczi     }
14906e790746SPaolo Bonzini 
14916e790746SPaolo Bonzini     return 0;
14926e790746SPaolo Bonzini }
14936e790746SPaolo Bonzini 
14949b92fbcfSSergio Lopez static void virtio_resize_cb(void *opaque)
14959b92fbcfSSergio Lopez {
14969b92fbcfSSergio Lopez     VirtIODevice *vdev = opaque;
14979b92fbcfSSergio Lopez 
14989b92fbcfSSergio Lopez     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
14999b92fbcfSSergio Lopez     virtio_notify_config(vdev);
15009b92fbcfSSergio Lopez }
15019b92fbcfSSergio Lopez 
15026e790746SPaolo Bonzini static void virtio_blk_resize(void *opaque)
15036e790746SPaolo Bonzini {
15046e790746SPaolo Bonzini     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
15056e790746SPaolo Bonzini 
15069b92fbcfSSergio Lopez     /*
15070b2675c4SStefan Hajnoczi      * virtio_notify_config() needs to acquire the BQL,
15089b92fbcfSSergio Lopez      * so it can't be called from an iothread. Instead, schedule
15099b92fbcfSSergio Lopez      * it to run in a BH in the main loop context.
15109b92fbcfSSergio Lopez      */
15119b92fbcfSSergio Lopez     aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
15126e790746SPaolo Bonzini }
15136e790746SPaolo Bonzini 
15143cdaf3ddSStefan Hajnoczi static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
15153bcc17f0SStefan Hajnoczi {
15163bcc17f0SStefan Hajnoczi     VirtIODevice *vdev = VIRTIO_DEVICE(s);
15173bcc17f0SStefan Hajnoczi 
15183bcc17f0SStefan Hajnoczi     for (uint16_t i = 0; i < s->conf.num_queues; i++) {
15193bcc17f0SStefan Hajnoczi         VirtQueue *vq = virtio_get_queue(vdev, i);
15203bcc17f0SStefan Hajnoczi         virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
15213bcc17f0SStefan Hajnoczi     }
15223bcc17f0SStefan Hajnoczi }
15233bcc17f0SStefan Hajnoczi 
15243cdaf3ddSStefan Hajnoczi static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
15253bcc17f0SStefan Hajnoczi {
15263bcc17f0SStefan Hajnoczi     VirtIODevice *vdev = VIRTIO_DEVICE(s);
15273bcc17f0SStefan Hajnoczi 
15283bcc17f0SStefan Hajnoczi     for (uint16_t i = 0; i < s->conf.num_queues; i++) {
15293bcc17f0SStefan Hajnoczi         VirtQueue *vq = virtio_get_queue(vdev, i);
15303bcc17f0SStefan Hajnoczi         virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
15313bcc17f0SStefan Hajnoczi     }
15323bcc17f0SStefan Hajnoczi }
15333bcc17f0SStefan Hajnoczi 
15341665d932SStefan Hajnoczi /* Suspend virtqueue ioeventfd processing during drain */
15351665d932SStefan Hajnoczi static void virtio_blk_drained_begin(void *opaque)
15361665d932SStefan Hajnoczi {
15371665d932SStefan Hajnoczi     VirtIOBlock *s = opaque;
15381665d932SStefan Hajnoczi 
15393cdaf3ddSStefan Hajnoczi     if (s->ioeventfd_started) {
15403cdaf3ddSStefan Hajnoczi         virtio_blk_ioeventfd_detach(s);
15411665d932SStefan Hajnoczi     }
15421665d932SStefan Hajnoczi }
15431665d932SStefan Hajnoczi 
15441665d932SStefan Hajnoczi /* Resume virtqueue ioeventfd processing after drain */
15451665d932SStefan Hajnoczi static void virtio_blk_drained_end(void *opaque)
15461665d932SStefan Hajnoczi {
15471665d932SStefan Hajnoczi     VirtIOBlock *s = opaque;
15481665d932SStefan Hajnoczi 
15493cdaf3ddSStefan Hajnoczi     if (s->ioeventfd_started) {
15503cdaf3ddSStefan Hajnoczi         virtio_blk_ioeventfd_attach(s);
15511665d932SStefan Hajnoczi     }
15521665d932SStefan Hajnoczi }
15531665d932SStefan Hajnoczi 
15546e790746SPaolo Bonzini static const BlockDevOps virtio_block_ops = {
15556e790746SPaolo Bonzini     .resize_cb     = virtio_blk_resize,
15561665d932SStefan Hajnoczi     .drained_begin = virtio_blk_drained_begin,
15571665d932SStefan Hajnoczi     .drained_end   = virtio_blk_drained_end,
15586e790746SPaolo Bonzini };
15596e790746SPaolo Bonzini 
15601f995a47SStefan Hajnoczi static bool
15611f995a47SStefan Hajnoczi validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
15621f995a47SStefan Hajnoczi         uint16_t num_queues, Error **errp)
15631f995a47SStefan Hajnoczi {
15641f995a47SStefan Hajnoczi     g_autofree unsigned long *vqs = bitmap_new(num_queues);
15651f995a47SStefan Hajnoczi     g_autoptr(GHashTable) iothreads =
15661f995a47SStefan Hajnoczi         g_hash_table_new(g_str_hash, g_str_equal);
15671f995a47SStefan Hajnoczi 
15681f995a47SStefan Hajnoczi     for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
15691f995a47SStefan Hajnoczi         const char *name = node->value->iothread;
15701f995a47SStefan Hajnoczi         uint16List *vq;
15711f995a47SStefan Hajnoczi 
15721f995a47SStefan Hajnoczi         if (!iothread_by_id(name)) {
15731f995a47SStefan Hajnoczi             error_setg(errp, "IOThread \"%s\" object does not exist", name);
15741f995a47SStefan Hajnoczi             return false;
15751f995a47SStefan Hajnoczi         }
15761f995a47SStefan Hajnoczi 
15771f995a47SStefan Hajnoczi         if (!g_hash_table_add(iothreads, (gpointer)name)) {
15781f995a47SStefan Hajnoczi             error_setg(errp,
15791f995a47SStefan Hajnoczi                     "duplicate IOThread name \"%s\" in iothread-vq-mapping",
15801f995a47SStefan Hajnoczi                     name);
15811f995a47SStefan Hajnoczi             return false;
15821f995a47SStefan Hajnoczi         }
15831f995a47SStefan Hajnoczi 
15841f995a47SStefan Hajnoczi         if (node != list) {
15851f995a47SStefan Hajnoczi             if (!!node->value->vqs != !!list->value->vqs) {
15861f995a47SStefan Hajnoczi                 error_setg(errp, "either all items in iothread-vq-mapping "
15871f995a47SStefan Hajnoczi                                  "must have vqs or none of them must have it");
15881f995a47SStefan Hajnoczi                 return false;
15891f995a47SStefan Hajnoczi             }
15901f995a47SStefan Hajnoczi         }
15911f995a47SStefan Hajnoczi 
15921f995a47SStefan Hajnoczi         for (vq = node->value->vqs; vq; vq = vq->next) {
15931f995a47SStefan Hajnoczi             if (vq->value >= num_queues) {
15941f995a47SStefan Hajnoczi                 error_setg(errp, "vq index %u for IOThread \"%s\" must be "
15951f995a47SStefan Hajnoczi                         "less than num_queues %u in iothread-vq-mapping",
15961f995a47SStefan Hajnoczi                         vq->value, name, num_queues);
15971f995a47SStefan Hajnoczi                 return false;
15981f995a47SStefan Hajnoczi             }
15991f995a47SStefan Hajnoczi 
16001f995a47SStefan Hajnoczi             if (test_and_set_bit(vq->value, vqs)) {
16011f995a47SStefan Hajnoczi                 error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
16021f995a47SStefan Hajnoczi                         "because it is already assigned", vq->value, name);
16031f995a47SStefan Hajnoczi                 return false;
16041f995a47SStefan Hajnoczi             }
16051f995a47SStefan Hajnoczi         }
16061f995a47SStefan Hajnoczi     }
16071f995a47SStefan Hajnoczi 
16081f995a47SStefan Hajnoczi     if (list->value->vqs) {
16091f995a47SStefan Hajnoczi         for (uint16_t i = 0; i < num_queues; i++) {
16101f995a47SStefan Hajnoczi             if (!test_bit(i, vqs)) {
16111f995a47SStefan Hajnoczi                 error_setg(errp,
16121f995a47SStefan Hajnoczi                         "missing vq %u IOThread assignment in iothread-vq-mapping",
16131f995a47SStefan Hajnoczi                         i);
16141f995a47SStefan Hajnoczi                 return false;
16151f995a47SStefan Hajnoczi             }
16161f995a47SStefan Hajnoczi         }
16171f995a47SStefan Hajnoczi     }
16181f995a47SStefan Hajnoczi 
16191f995a47SStefan Hajnoczi     return true;
16201f995a47SStefan Hajnoczi }
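/*
 * Illustrative example (editorial, placeholder IOThread names): for a device
 * with num-queues=4, a mapping such as
 *
 *   [ { "iothread": "iot0", "vqs": [0, 1] },
 *     { "iothread": "iot1", "vqs": [2, 3] } ]
 *
 * passes the checks above, while repeating a vq index, leaving one of 0..3
 * unassigned, or mixing entries with and without "vqs" is rejected. The exact
 * command-line/QMP syntax for supplying this list is not shown here.
 */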
16211f995a47SStefan Hajnoczi 
16221f995a47SStefan Hajnoczi /**
16231f995a47SStefan Hajnoczi  * apply_iothread_vq_mapping:
16241f995a47SStefan Hajnoczi  * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
16251f995a47SStefan Hajnoczi  * @vq_aio_context: The array of AioContext pointers to fill in.
16261f995a47SStefan Hajnoczi  * @num_queues: The length of @vq_aio_context.
16271f995a47SStefan Hajnoczi  * @errp: If an error occurs, a pointer to the area to store the error.
16281f995a47SStefan Hajnoczi  *
16291f995a47SStefan Hajnoczi  * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
16301f995a47SStefan Hajnoczi  * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
16311f995a47SStefan Hajnoczi  *
16321f995a47SStefan Hajnoczi  * Returns: %true on success, %false on failure.
16331f995a47SStefan Hajnoczi  **/
16341f995a47SStefan Hajnoczi static bool apply_iothread_vq_mapping(
16351f995a47SStefan Hajnoczi         IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
16361f995a47SStefan Hajnoczi         AioContext **vq_aio_context,
16371f995a47SStefan Hajnoczi         uint16_t num_queues,
16381f995a47SStefan Hajnoczi         Error **errp)
16393bcc17f0SStefan Hajnoczi {
16403bcc17f0SStefan Hajnoczi     IOThreadVirtQueueMappingList *node;
16413bcc17f0SStefan Hajnoczi     size_t num_iothreads = 0;
16423bcc17f0SStefan Hajnoczi     size_t cur_iothread = 0;
16433bcc17f0SStefan Hajnoczi 
16441f995a47SStefan Hajnoczi     if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
16451f995a47SStefan Hajnoczi                                            num_queues, errp)) {
16461f995a47SStefan Hajnoczi         return false;
16471f995a47SStefan Hajnoczi     }
16481f995a47SStefan Hajnoczi 
16493bcc17f0SStefan Hajnoczi     for (node = iothread_vq_mapping_list; node; node = node->next) {
16503bcc17f0SStefan Hajnoczi         num_iothreads++;
16513bcc17f0SStefan Hajnoczi     }
16523bcc17f0SStefan Hajnoczi 
16533bcc17f0SStefan Hajnoczi     for (node = iothread_vq_mapping_list; node; node = node->next) {
16543bcc17f0SStefan Hajnoczi         IOThread *iothread = iothread_by_id(node->value->iothread);
16553bcc17f0SStefan Hajnoczi         AioContext *ctx = iothread_get_aio_context(iothread);
16563bcc17f0SStefan Hajnoczi 
165757bc2658SStefan Hajnoczi         /* Released in virtio_blk_vq_aio_context_cleanup() */
16583bcc17f0SStefan Hajnoczi         object_ref(OBJECT(iothread));
16593bcc17f0SStefan Hajnoczi 
16603bcc17f0SStefan Hajnoczi         if (node->value->vqs) {
16613bcc17f0SStefan Hajnoczi             uint16List *vq;
16623bcc17f0SStefan Hajnoczi 
16633bcc17f0SStefan Hajnoczi             /* Explicit vq:IOThread assignment */
16643bcc17f0SStefan Hajnoczi             for (vq = node->value->vqs; vq; vq = vq->next) {
16651f995a47SStefan Hajnoczi                 assert(vq->value < num_queues);
16663bcc17f0SStefan Hajnoczi                 vq_aio_context[vq->value] = ctx;
16673bcc17f0SStefan Hajnoczi             }
16683bcc17f0SStefan Hajnoczi         } else {
16693bcc17f0SStefan Hajnoczi             /* Round-robin vq:IOThread assignment */
16703bcc17f0SStefan Hajnoczi             for (unsigned i = cur_iothread; i < num_queues;
16713bcc17f0SStefan Hajnoczi                  i += num_iothreads) {
16723bcc17f0SStefan Hajnoczi                 vq_aio_context[i] = ctx;
16733bcc17f0SStefan Hajnoczi             }
16743bcc17f0SStefan Hajnoczi         }
16753bcc17f0SStefan Hajnoczi 
16763bcc17f0SStefan Hajnoczi         cur_iothread++;
16773bcc17f0SStefan Hajnoczi     }
16781f995a47SStefan Hajnoczi 
16791f995a47SStefan Hajnoczi     return true;
16803bcc17f0SStefan Hajnoczi }
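/*
 * Worked example (editorial): with four virtqueues and two mapping entries
 * that omit "vqs", the round-robin branch above assigns vq 0 and vq 2 to the
 * first IOThread and vq 1 and vq 3 to the second; each entry takes every
 * num_iothreads-th queue starting from its position in the list.
 */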
16813bcc17f0SStefan Hajnoczi 
16823bcc17f0SStefan Hajnoczi /* Context: BQL held */
168357bc2658SStefan Hajnoczi static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
16843bcc17f0SStefan Hajnoczi {
16853bcc17f0SStefan Hajnoczi     VirtIODevice *vdev = VIRTIO_DEVICE(s);
16863bcc17f0SStefan Hajnoczi     VirtIOBlkConf *conf = &s->conf;
16873bcc17f0SStefan Hajnoczi     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
16883bcc17f0SStefan Hajnoczi     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
16893bcc17f0SStefan Hajnoczi 
16901f995a47SStefan Hajnoczi     if (conf->iothread && conf->iothread_vq_mapping_list) {
16911f995a47SStefan Hajnoczi         error_setg(errp,
16921f995a47SStefan Hajnoczi                    "iothread and iothread-vq-mapping properties cannot be set "
16931f995a47SStefan Hajnoczi                    "at the same time");
16941f995a47SStefan Hajnoczi         return false;
16951f995a47SStefan Hajnoczi     }
16961f995a47SStefan Hajnoczi 
16973bcc17f0SStefan Hajnoczi     if (conf->iothread || conf->iothread_vq_mapping_list) {
16983bcc17f0SStefan Hajnoczi         if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
16993bcc17f0SStefan Hajnoczi             error_setg(errp,
17003bcc17f0SStefan Hajnoczi                        "device is incompatible with iothread "
17013bcc17f0SStefan Hajnoczi                        "(transport does not support notifiers)");
17023bcc17f0SStefan Hajnoczi             return false;
17033bcc17f0SStefan Hajnoczi         }
17043bcc17f0SStefan Hajnoczi         if (!virtio_device_ioeventfd_enabled(vdev)) {
17053bcc17f0SStefan Hajnoczi             error_setg(errp, "ioeventfd is required for iothread");
17063bcc17f0SStefan Hajnoczi             return false;
17073bcc17f0SStefan Hajnoczi         }
17083bcc17f0SStefan Hajnoczi 
17093bcc17f0SStefan Hajnoczi         /*
17103cdaf3ddSStefan Hajnoczi          * If ioeventfd is (re-)enabled while the guest is running there could
17113bcc17f0SStefan Hajnoczi          * be block jobs that can conflict.
17123bcc17f0SStefan Hajnoczi          */
17133bcc17f0SStefan Hajnoczi         if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
17143cdaf3ddSStefan Hajnoczi             error_prepend(errp, "cannot start virtio-blk ioeventfd: ");
17153bcc17f0SStefan Hajnoczi             return false;
17163bcc17f0SStefan Hajnoczi         }
17173bcc17f0SStefan Hajnoczi     }
17183bcc17f0SStefan Hajnoczi 
17193bcc17f0SStefan Hajnoczi     s->vq_aio_context = g_new(AioContext *, conf->num_queues);
17203bcc17f0SStefan Hajnoczi 
17213bcc17f0SStefan Hajnoczi     if (conf->iothread_vq_mapping_list) {
17221f995a47SStefan Hajnoczi         if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
17231f995a47SStefan Hajnoczi                                        s->vq_aio_context,
17241f995a47SStefan Hajnoczi                                        conf->num_queues,
17251f995a47SStefan Hajnoczi                                        errp)) {
17261f995a47SStefan Hajnoczi             g_free(s->vq_aio_context);
17271f995a47SStefan Hajnoczi             s->vq_aio_context = NULL;
17281f995a47SStefan Hajnoczi             return false;
17291f995a47SStefan Hajnoczi         }
17303bcc17f0SStefan Hajnoczi     } else if (conf->iothread) {
17313bcc17f0SStefan Hajnoczi         AioContext *ctx = iothread_get_aio_context(conf->iothread);
17323bcc17f0SStefan Hajnoczi         for (unsigned i = 0; i < conf->num_queues; i++) {
17333bcc17f0SStefan Hajnoczi             s->vq_aio_context[i] = ctx;
17343bcc17f0SStefan Hajnoczi         }
17353bcc17f0SStefan Hajnoczi 
173657bc2658SStefan Hajnoczi         /* Released in virtio_blk_vq_aio_context_cleanup() */
17373bcc17f0SStefan Hajnoczi         object_ref(OBJECT(conf->iothread));
17383bcc17f0SStefan Hajnoczi     } else {
17393bcc17f0SStefan Hajnoczi         AioContext *ctx = qemu_get_aio_context();
17403bcc17f0SStefan Hajnoczi         for (unsigned i = 0; i < conf->num_queues; i++) {
17413bcc17f0SStefan Hajnoczi             s->vq_aio_context[i] = ctx;
17423bcc17f0SStefan Hajnoczi         }
17433bcc17f0SStefan Hajnoczi     }
17443bcc17f0SStefan Hajnoczi 
17453bcc17f0SStefan Hajnoczi     return true;
17463bcc17f0SStefan Hajnoczi }
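/*
 * Note (editorial): the per-virtqueue AioContext is chosen with the following
 * precedence: an explicit iothread-vq-mapping list, else a single iothread
 * property (all virtqueues share its context), else the main loop context.
 * The two properties are mutually exclusive and both require ioeventfd and
 * guest notifier support in the transport.
 */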
17473bcc17f0SStefan Hajnoczi 
17483bcc17f0SStefan Hajnoczi /* Context: BQL held */
174957bc2658SStefan Hajnoczi static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s)
17503bcc17f0SStefan Hajnoczi {
17513bcc17f0SStefan Hajnoczi     VirtIOBlkConf *conf = &s->conf;
17523bcc17f0SStefan Hajnoczi 
17533cdaf3ddSStefan Hajnoczi     assert(!s->ioeventfd_started);
17543bcc17f0SStefan Hajnoczi 
17553bcc17f0SStefan Hajnoczi     if (conf->iothread_vq_mapping_list) {
17563bcc17f0SStefan Hajnoczi         IOThreadVirtQueueMappingList *node;
17573bcc17f0SStefan Hajnoczi 
17583bcc17f0SStefan Hajnoczi         for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
17593bcc17f0SStefan Hajnoczi             IOThread *iothread = iothread_by_id(node->value->iothread);
17603bcc17f0SStefan Hajnoczi             object_unref(OBJECT(iothread));
17613bcc17f0SStefan Hajnoczi         }
17623bcc17f0SStefan Hajnoczi     }
17633bcc17f0SStefan Hajnoczi 
17643bcc17f0SStefan Hajnoczi     if (conf->iothread) {
17653bcc17f0SStefan Hajnoczi         object_unref(OBJECT(conf->iothread));
17663bcc17f0SStefan Hajnoczi     }
17673bcc17f0SStefan Hajnoczi 
17683bcc17f0SStefan Hajnoczi     g_free(s->vq_aio_context);
17693bcc17f0SStefan Hajnoczi     s->vq_aio_context = NULL;
17703bcc17f0SStefan Hajnoczi }
17713bcc17f0SStefan Hajnoczi 
17723bcc17f0SStefan Hajnoczi /* Context: BQL held */
17733cdaf3ddSStefan Hajnoczi static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
17743bcc17f0SStefan Hajnoczi {
17753bcc17f0SStefan Hajnoczi     VirtIOBlock *s = VIRTIO_BLK(vdev);
17763bcc17f0SStefan Hajnoczi     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
17773bcc17f0SStefan Hajnoczi     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
17783bcc17f0SStefan Hajnoczi     unsigned i;
17793bcc17f0SStefan Hajnoczi     unsigned nvqs = s->conf.num_queues;
17803bcc17f0SStefan Hajnoczi     Error *local_err = NULL;
17813bcc17f0SStefan Hajnoczi     int r;
17823bcc17f0SStefan Hajnoczi 
17833cdaf3ddSStefan Hajnoczi     if (s->ioeventfd_started || s->ioeventfd_starting) {
17843bcc17f0SStefan Hajnoczi         return 0;
17853bcc17f0SStefan Hajnoczi     }
17863bcc17f0SStefan Hajnoczi 
17873cdaf3ddSStefan Hajnoczi     s->ioeventfd_starting = true;
17883bcc17f0SStefan Hajnoczi 
17893bcc17f0SStefan Hajnoczi     /* Set up guest notifier (irq) */
17903bcc17f0SStefan Hajnoczi     r = k->set_guest_notifiers(qbus->parent, nvqs, true);
17913bcc17f0SStefan Hajnoczi     if (r != 0) {
17923bcc17f0SStefan Hajnoczi         error_report("virtio-blk failed to set guest notifier (%d), "
17933bcc17f0SStefan Hajnoczi                      "ensure -accel kvm is set.", r);
17943bcc17f0SStefan Hajnoczi         goto fail_guest_notifiers;
17953bcc17f0SStefan Hajnoczi     }
17963bcc17f0SStefan Hajnoczi 
17973bcc17f0SStefan Hajnoczi     /*
17983bcc17f0SStefan Hajnoczi      * Batch all the host notifiers in a single transaction to avoid
17993bcc17f0SStefan Hajnoczi      * quadratic time complexity in address_space_update_ioeventfds().
18003bcc17f0SStefan Hajnoczi      */
18013bcc17f0SStefan Hajnoczi     memory_region_transaction_begin();
18023bcc17f0SStefan Hajnoczi 
18033bcc17f0SStefan Hajnoczi     /* Set up virtqueue notify */
18043bcc17f0SStefan Hajnoczi     for (i = 0; i < nvqs; i++) {
18053bcc17f0SStefan Hajnoczi         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
18063bcc17f0SStefan Hajnoczi         if (r != 0) {
18073bcc17f0SStefan Hajnoczi             int j = i;
18083bcc17f0SStefan Hajnoczi 
18093bcc17f0SStefan Hajnoczi             fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
18103bcc17f0SStefan Hajnoczi             while (i--) {
18113bcc17f0SStefan Hajnoczi                 virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
18123bcc17f0SStefan Hajnoczi             }
18133bcc17f0SStefan Hajnoczi 
18143bcc17f0SStefan Hajnoczi             /*
18153bcc17f0SStefan Hajnoczi              * The transaction expects the ioeventfds to be open when it
18163bcc17f0SStefan Hajnoczi              * commits. Do it now, before the cleanup loop.
18173bcc17f0SStefan Hajnoczi              */
18183bcc17f0SStefan Hajnoczi             memory_region_transaction_commit();
18193bcc17f0SStefan Hajnoczi 
18203bcc17f0SStefan Hajnoczi             while (j--) {
18213bcc17f0SStefan Hajnoczi                 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
18223bcc17f0SStefan Hajnoczi             }
18233bcc17f0SStefan Hajnoczi             goto fail_host_notifiers;
18243bcc17f0SStefan Hajnoczi         }
18253bcc17f0SStefan Hajnoczi     }
18263bcc17f0SStefan Hajnoczi 
18273bcc17f0SStefan Hajnoczi     memory_region_transaction_commit();
18283bcc17f0SStefan Hajnoczi 
1829ea0736d7SStefan Hajnoczi     /*
1830ea0736d7SStefan Hajnoczi      * Try to change the AioContext so that block jobs and other operations can
1831ea0736d7SStefan Hajnoczi      * co-locate their activity in the same AioContext. If it fails, never mind.
1832ea0736d7SStefan Hajnoczi      */
18335fbcbd50SStefan Hajnoczi     assert(nvqs > 0); /* enforced during ->realize() */
18343bcc17f0SStefan Hajnoczi     r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
18353bcc17f0SStefan Hajnoczi                             &local_err);
18363bcc17f0SStefan Hajnoczi     if (r < 0) {
1837ea0736d7SStefan Hajnoczi         warn_report_err(local_err);
18383bcc17f0SStefan Hajnoczi     }
18393bcc17f0SStefan Hajnoczi 
18403bcc17f0SStefan Hajnoczi     /*
18413bcc17f0SStefan Hajnoczi      * These fields must be visible to the IOThread when it processes the
18423cdaf3ddSStefan Hajnoczi      * virtqueue, otherwise it will think ioeventfd has not started yet.
18433bcc17f0SStefan Hajnoczi      *
18443cdaf3ddSStefan Hajnoczi      * Make sure ->ioeventfd_started is false when blk_set_aio_context() is
18453bcc17f0SStefan Hajnoczi      * called above so that draining does not cause the host notifier to be
18463bcc17f0SStefan Hajnoczi      * detached/attached prematurely.
18473bcc17f0SStefan Hajnoczi      */
18483cdaf3ddSStefan Hajnoczi     s->ioeventfd_starting = false;
18493cdaf3ddSStefan Hajnoczi     s->ioeventfd_started = true;
18503bcc17f0SStefan Hajnoczi     smp_wmb(); /* paired with aio_notify_accept() on the read side */
18513bcc17f0SStefan Hajnoczi 
185252bff01fSHanna Czenczek     /*
185352bff01fSHanna Czenczek      * Get this show started by hooking up our callbacks.  If drained now,
185452bff01fSHanna Czenczek      * virtio_blk_drained_end() will do this later.
185552bff01fSHanna Czenczek      * Attaching the notifier also kicks the virtqueues, processing any requests
185652bff01fSHanna Czenczek      * they may already have.
185752bff01fSHanna Czenczek      */
1858d3f6f294SStefan Hajnoczi     if (!blk_in_drain(s->conf.conf.blk)) {
185952bff01fSHanna Czenczek         virtio_blk_ioeventfd_attach(s);
18603bcc17f0SStefan Hajnoczi     }
18613bcc17f0SStefan Hajnoczi     return 0;
18623bcc17f0SStefan Hajnoczi 
18633bcc17f0SStefan Hajnoczi   fail_host_notifiers:
18643bcc17f0SStefan Hajnoczi     k->set_guest_notifiers(qbus->parent, nvqs, false);
18653bcc17f0SStefan Hajnoczi   fail_guest_notifiers:
18663cdaf3ddSStefan Hajnoczi     s->ioeventfd_disabled = true;
18673cdaf3ddSStefan Hajnoczi     s->ioeventfd_starting = false;
18683bcc17f0SStefan Hajnoczi     return -ENOSYS;
18693bcc17f0SStefan Hajnoczi }
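/*
 * Note (editorial): the start sequence above is: install guest notifiers
 * (irqfds), install all host notifiers (ioeventfds) inside a single memory
 * region transaction to avoid quadratic ioeventfd updates, move the
 * BlockBackend to the first virtqueue's AioContext on a best-effort basis,
 * publish ioeventfd_started with a write barrier, and finally attach the
 * per-virtqueue handlers unless a drain is in progress, in which case
 * virtio_blk_drained_end() attaches them later.
 */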
18703bcc17f0SStefan Hajnoczi 
18713bcc17f0SStefan Hajnoczi /* Stop notifications for new requests from the guest.
18723bcc17f0SStefan Hajnoczi  *
18733bcc17f0SStefan Hajnoczi  * Context: BH in IOThread
18743bcc17f0SStefan Hajnoczi  */
18753cdaf3ddSStefan Hajnoczi static void virtio_blk_ioeventfd_stop_vq_bh(void *opaque)
18763bcc17f0SStefan Hajnoczi {
18773bcc17f0SStefan Hajnoczi     VirtQueue *vq = opaque;
18783bcc17f0SStefan Hajnoczi     EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);
18793bcc17f0SStefan Hajnoczi 
18803bcc17f0SStefan Hajnoczi     virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());
18813bcc17f0SStefan Hajnoczi 
18823bcc17f0SStefan Hajnoczi     /*
18833bcc17f0SStefan Hajnoczi      * Test and clear notifier after disabling event, in case poll callback
18843bcc17f0SStefan Hajnoczi      * didn't have time to run.
18853bcc17f0SStefan Hajnoczi      */
18863bcc17f0SStefan Hajnoczi     virtio_queue_host_notifier_read(host_notifier);
18873bcc17f0SStefan Hajnoczi }
18883bcc17f0SStefan Hajnoczi 
18893bcc17f0SStefan Hajnoczi /* Context: BQL held */
18903cdaf3ddSStefan Hajnoczi static void virtio_blk_stop_ioeventfd(VirtIODevice *vdev)
18913bcc17f0SStefan Hajnoczi {
18923bcc17f0SStefan Hajnoczi     VirtIOBlock *s = VIRTIO_BLK(vdev);
18933bcc17f0SStefan Hajnoczi     BusState *qbus = qdev_get_parent_bus(DEVICE(s));
18943bcc17f0SStefan Hajnoczi     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
18953bcc17f0SStefan Hajnoczi     unsigned i;
18963bcc17f0SStefan Hajnoczi     unsigned nvqs = s->conf.num_queues;
18973bcc17f0SStefan Hajnoczi 
18983cdaf3ddSStefan Hajnoczi     if (!s->ioeventfd_started || s->ioeventfd_stopping) {
18993bcc17f0SStefan Hajnoczi         return;
19003bcc17f0SStefan Hajnoczi     }
19013bcc17f0SStefan Hajnoczi 
19023bcc17f0SStefan Hajnoczi     /* Better luck next time. */
19033cdaf3ddSStefan Hajnoczi     if (s->ioeventfd_disabled) {
19043cdaf3ddSStefan Hajnoczi         s->ioeventfd_disabled = false;
19053cdaf3ddSStefan Hajnoczi         s->ioeventfd_started = false;
19063bcc17f0SStefan Hajnoczi         return;
19073bcc17f0SStefan Hajnoczi     }
19083cdaf3ddSStefan Hajnoczi     s->ioeventfd_stopping = true;
19093bcc17f0SStefan Hajnoczi 
19103bcc17f0SStefan Hajnoczi     if (!blk_in_drain(s->conf.conf.blk)) {
19113bcc17f0SStefan Hajnoczi         for (i = 0; i < nvqs; i++) {
19123bcc17f0SStefan Hajnoczi             VirtQueue *vq = virtio_get_queue(vdev, i);
19133bcc17f0SStefan Hajnoczi             AioContext *ctx = s->vq_aio_context[i];
19143bcc17f0SStefan Hajnoczi 
19153cdaf3ddSStefan Hajnoczi             aio_wait_bh_oneshot(ctx, virtio_blk_ioeventfd_stop_vq_bh, vq);
19163bcc17f0SStefan Hajnoczi         }
19173bcc17f0SStefan Hajnoczi     }
19183bcc17f0SStefan Hajnoczi 
19193bcc17f0SStefan Hajnoczi     /*
19203bcc17f0SStefan Hajnoczi      * Batch all the host notifiers in a single transaction to avoid
19213bcc17f0SStefan Hajnoczi      * quadratic time complexity in address_space_update_ioeventfds().
19223bcc17f0SStefan Hajnoczi      */
19233bcc17f0SStefan Hajnoczi     memory_region_transaction_begin();
19243bcc17f0SStefan Hajnoczi 
19253bcc17f0SStefan Hajnoczi     for (i = 0; i < nvqs; i++) {
19263bcc17f0SStefan Hajnoczi         virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
19273bcc17f0SStefan Hajnoczi     }
19283bcc17f0SStefan Hajnoczi 
19293bcc17f0SStefan Hajnoczi     /*
19303bcc17f0SStefan Hajnoczi      * The transaction expects the ioeventfds to be open when it
19313bcc17f0SStefan Hajnoczi      * commits. Do it now, before the cleanup loop.
19323bcc17f0SStefan Hajnoczi      */
19333bcc17f0SStefan Hajnoczi     memory_region_transaction_commit();
19343bcc17f0SStefan Hajnoczi 
19353bcc17f0SStefan Hajnoczi     for (i = 0; i < nvqs; i++) {
19363bcc17f0SStefan Hajnoczi         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
19373bcc17f0SStefan Hajnoczi     }
19383bcc17f0SStefan Hajnoczi 
19393bcc17f0SStefan Hajnoczi     /*
19403cdaf3ddSStefan Hajnoczi      * Set ->ioeventfd_started to false before draining so that host notifiers
19413bcc17f0SStefan Hajnoczi      * are not detached/attached anymore.
19423bcc17f0SStefan Hajnoczi      */
19433cdaf3ddSStefan Hajnoczi     s->ioeventfd_started = false;
19443bcc17f0SStefan Hajnoczi 
19453bcc17f0SStefan Hajnoczi     /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
19463bcc17f0SStefan Hajnoczi     blk_drain(s->conf.conf.blk);
19473bcc17f0SStefan Hajnoczi 
19483bcc17f0SStefan Hajnoczi     /*
19493bcc17f0SStefan Hajnoczi      * Try to switch bs back to the QEMU main loop. If other users keep the
19503bcc17f0SStefan Hajnoczi      * BlockBackend in the iothread, that's OK.
19513bcc17f0SStefan Hajnoczi      */
19523bcc17f0SStefan Hajnoczi     blk_set_aio_context(s->conf.conf.blk, qemu_get_aio_context(), NULL);
19533bcc17f0SStefan Hajnoczi 
19543bcc17f0SStefan Hajnoczi     /* Clean up guest notifier (irq) */
19553bcc17f0SStefan Hajnoczi     k->set_guest_notifiers(qbus->parent, nvqs, false);
19563bcc17f0SStefan Hajnoczi 
19573cdaf3ddSStefan Hajnoczi     s->ioeventfd_stopping = false;
19583bcc17f0SStefan Hajnoczi }
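/*
 * Note (editorial): teardown mirrors the start path: detach each virtqueue's
 * handler in its AioContext via a BH, disable all host notifiers inside one
 * memory region transaction, clear ioeventfd_started, drain outstanding I/O
 * (including pending virtio_blk_dma_restart_bh() work), move the BlockBackend
 * back to the main loop, and remove the guest notifiers.
 */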
19593bcc17f0SStefan Hajnoczi 
196075884afdSAndreas Färber static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
19616e790746SPaolo Bonzini {
196275884afdSAndreas Färber     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1963179b417eSAndreas Färber     VirtIOBlock *s = VIRTIO_BLK(dev);
19642a30307fSMarkus Armbruster     VirtIOBlkConf *conf = &s->conf;
1965b3d9bb9aSStefan Hajnoczi     BlockDriverState *bs;
19663ffeeef7SAndreas Färber     Error *err = NULL;
19672f270590SStefan Hajnoczi     unsigned i;
19686e790746SPaolo Bonzini 
19694be74634SMarkus Armbruster     if (!conf->conf.blk) {
197075884afdSAndreas Färber         error_setg(errp, "drive property not set");
197175884afdSAndreas Färber         return;
19726e790746SPaolo Bonzini     }
19734be74634SMarkus Armbruster     if (!blk_is_inserted(conf->conf.blk)) {
197475884afdSAndreas Färber         error_setg(errp, "Device needs media, but drive is empty");
197575884afdSAndreas Färber         return;
19766e790746SPaolo Bonzini     }
19779445e1e1SStefan Hajnoczi     if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
19789445e1e1SStefan Hajnoczi         conf->num_queues = 1;
19799445e1e1SStefan Hajnoczi     }
19802f270590SStefan Hajnoczi     if (!conf->num_queues) {
19812f270590SStefan Hajnoczi         error_setg(errp, "num-queues property must be larger than 0");
19822f270590SStefan Hajnoczi         return;
19832f270590SStefan Hajnoczi     }
19841bf8a989SDenis Plotnikov     if (conf->queue_size <= 2) {
19851bf8a989SDenis Plotnikov         error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
19861bf8a989SDenis Plotnikov                    "must be > 2", conf->queue_size);
19871bf8a989SDenis Plotnikov         return;
19881bf8a989SDenis Plotnikov     }
19896040aeddSMark Kanda     if (!is_power_of_2(conf->queue_size) ||
19906040aeddSMark Kanda         conf->queue_size > VIRTQUEUE_MAX_SIZE) {
19916040aeddSMark Kanda         error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
19926040aeddSMark Kanda                    "must be a power of 2 (max %d)",
19936040aeddSMark Kanda                    conf->queue_size, VIRTQUEUE_MAX_SIZE);
19946040aeddSMark Kanda         return;
19956040aeddSMark Kanda     }
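    /*
     * Example of the effect of the two checks above, assuming the current
     * VIRTQUEUE_MAX_SIZE of 1024: queue-size=256 (the default) and 1024 are
     * accepted; 2 (too small), 100 (not a power of 2) and 2048 (too large)
     * are rejected.
     */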
19966e790746SPaolo Bonzini 
1997ceff3e1fSMao Zhongyi     if (!blkconf_apply_backend_options(&conf->conf,
199886b1cf32SKevin Wolf                                        !blk_supports_write_perm(conf->conf.blk),
199986b1cf32SKevin Wolf                                        true, errp)) {
2000a17c17a2SKevin Wolf         return;
2001a17c17a2SKevin Wolf     }
20024be74634SMarkus Armbruster     s->original_wce = blk_enable_write_cache(conf->conf.blk);
2003ceff3e1fSMao Zhongyi     if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
200475884afdSAndreas Färber         return;
20056e790746SPaolo Bonzini     }
2006ceff3e1fSMao Zhongyi 
2007c56ee92fSRoman Kagan     if (!blkconf_blocksizes(&conf->conf, errp)) {
20080a75b60cSMark Kanda         return;
20090a75b60cSMark Kanda     }
20100a75b60cSMark Kanda 
2011b3d9bb9aSStefan Hajnoczi     bs = blk_bs(conf->conf.blk);
20124f736650SSam Li     if (bs->bl.zoned != BLK_Z_NONE) {
20134f736650SSam Li         virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
20144f736650SSam Li         if (bs->bl.zoned == BLK_Z_HM) {
20154f736650SSam Li             virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
20164f736650SSam Li         }
20174f736650SSam Li     }
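    /*
     * Host-managed zoned devices (BLK_Z_HM) only accept sequential writes
     * within a zone and, presumably for that reason, regular discard is not
     * offered to the guest: the DISCARD feature bit is cleared above, while
     * host-aware (BLK_Z_HA) devices keep it.
     */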
20184f736650SSam Li 
201937b06f8dSStefano Garzarella     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
202037b06f8dSStefano Garzarella         (!conf->max_discard_sectors ||
202137b06f8dSStefano Garzarella          conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
202237b06f8dSStefano Garzarella         error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
202337b06f8dSStefano Garzarella                    ", must be between 1 and %d",
202437b06f8dSStefano Garzarella                    conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
202537b06f8dSStefano Garzarella         return;
202637b06f8dSStefano Garzarella     }
202737b06f8dSStefano Garzarella 
202837b06f8dSStefano Garzarella     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
202937b06f8dSStefano Garzarella         (!conf->max_write_zeroes_sectors ||
203037b06f8dSStefano Garzarella          conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
203137b06f8dSStefano Garzarella         error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
203237b06f8dSStefano Garzarella                    "), must be between 1 and %d",
203337b06f8dSStefano Garzarella                    conf->max_write_zeroes_sectors,
203437b06f8dSStefano Garzarella                    (int)BDRV_REQUEST_MAX_SECTORS);
203537b06f8dSStefano Garzarella         return;
203637b06f8dSStefano Garzarella     }
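    /*
     * Both limits are given in 512-byte sectors and must stay within
     * BDRV_REQUEST_MAX_SECTORS. As a hypothetical example,
     * max-discard-sectors=65536 (32 MiB per discard request) passes the
     * check above, whereas 0 is rejected.
     */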
203737b06f8dSStefano Garzarella 
2038d9cf55a8SDaniil Tatianin     s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
2039d74c30c8SDaniil Tatianin                                             s->host_features);
20403857cd5cSJonah Palmer     virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
20416e790746SPaolo Bonzini 
20429c67f33fSStefan Hajnoczi     qemu_mutex_init(&s->rq_lock);
20439c67f33fSStefan Hajnoczi 
20444be74634SMarkus Armbruster     s->blk = conf->conf.blk;
20456e790746SPaolo Bonzini     s->rq = NULL;
20462a30307fSMarkus Armbruster     s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
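    /*
     * sector_mask is later used to validate guest requests: with a 4096-byte
     * logical block size it is 7, so any request whose starting sector has
     * one of those low bits set is misaligned with respect to the logical
     * block size and is rejected.
     */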
20476e790746SPaolo Bonzini 
20482f270590SStefan Hajnoczi     for (i = 0; i < conf->num_queues; i++) {
20496040aeddSMark Kanda         virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
20502f270590SStefan Hajnoczi     }
205198e3ab35SKevin Wolf     qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);
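    /*
     * The coroutine pool is grown by queue_size / 2 entries per queue,
     * presumably because each request needs at least two vring descriptors
     * (header and status), so at most queue_size / 2 requests can be pending
     * per virtqueue; pre-sizing the pool avoids allocating coroutines in the
     * I/O hot path.
     */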
205257bc2658SStefan Hajnoczi 
20533cdaf3ddSStefan Hajnoczi     /* Don't start ioeventfd if transport does not support notifiers. */
205457bc2658SStefan Hajnoczi     if (!virtio_device_ioeventfd_enabled(vdev)) {
20553cdaf3ddSStefan Hajnoczi         s->ioeventfd_disabled = true;
205657bc2658SStefan Hajnoczi     }
205757bc2658SStefan Hajnoczi 
205857bc2658SStefan Hajnoczi     virtio_blk_vq_aio_context_init(s, &err);
20593ffeeef7SAndreas Färber     if (err != NULL) {
206075884afdSAndreas Färber         error_propagate(errp, err);
2061cfaf757eSPan Nengyuan         for (i = 0; i < conf->num_queues; i++) {
2062cfaf757eSPan Nengyuan             virtio_del_queue(vdev, i);
2063cfaf757eSPan Nengyuan         }
20646a1a8cc7SKONRAD Frederic         virtio_cleanup(vdev);
206575884afdSAndreas Färber         return;
20666e790746SPaolo Bonzini     }
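    /*
     * Error rollback: the virtqueues added in the loop above are deleted and
     * virtio_cleanup() undoes virtio_init(), so a failed realize leaves no
     * partially initialized VirtIODevice behind.
     */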
20676e790746SPaolo Bonzini 
2068a937f8e8SStefan Hajnoczi     /*
2069a937f8e8SStefan Hajnoczi      * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
2070a937f8e8SStefan Hajnoczi      * called after ->start_ioeventfd() has already set blk's AioContext.
2071a937f8e8SStefan Hajnoczi      */
2072a937f8e8SStefan Hajnoczi     s->change =
2073a937f8e8SStefan Hajnoczi         qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);
2074a937f8e8SStefan Hajnoczi 
2075baf42268SStefan Hajnoczi     blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
20764be74634SMarkus Armbruster     blk_set_dev_ops(s->blk, &virtio_block_ops, s);
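    /*
     * virtio_block_ops (defined earlier in this file) wires block-layer
     * events back into the device; for example, a block_resize on the
     * backend ends up in virtio_blk_resize(), which notifies the guest of
     * the configuration change so it re-reads the new capacity.
     */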
20776e790746SPaolo Bonzini 
20784be74634SMarkus Armbruster     blk_iostatus_enable(s->blk);
207971f571a2SSam Eiderman 
208071f571a2SSam Eiderman     add_boot_device_lchs(dev, "/disk@0,0",
208171f571a2SSam Eiderman                          conf->conf.lcyls,
208271f571a2SSam Eiderman                          conf->conf.lheads,
208371f571a2SSam Eiderman                          conf->conf.lsecs);
20846e790746SPaolo Bonzini }
20856e790746SPaolo Bonzini 
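/*
 * Unrealize below mirrors realize in reverse: the backend is drained first so
 * no requests are in flight, then the virtqueues are deleted, the coroutine
 * pool reservation is returned and the remaining per-device resources are
 * released.
 */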
2086b69c3c21SMarkus Armbruster static void virtio_blk_device_unrealize(DeviceState *dev)
20876e790746SPaolo Bonzini {
2088306ec6c3SAndreas Färber     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2089306ec6c3SAndreas Färber     VirtIOBlock *s = VIRTIO_BLK(dev);
20904a0117cfSEugenio Pérez     VirtIOBlkConf *conf = &s->conf;
20914a0117cfSEugenio Pérez     unsigned i;
2092306ec6c3SAndreas Färber 
20937bfde688SJulia Suvorova     blk_drain(s->blk);
209471f571a2SSam Eiderman     del_boot_device_lchs(dev, "/disk@0,0");
209557bc2658SStefan Hajnoczi     virtio_blk_vq_aio_context_cleanup(s);
20964a0117cfSEugenio Pérez     for (i = 0; i < conf->num_queues; i++) {
20974a0117cfSEugenio Pérez         virtio_del_queue(vdev, i);
20984a0117cfSEugenio Pérez     }
209998e3ab35SKevin Wolf     qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
21009c67f33fSStefan Hajnoczi     qemu_mutex_destroy(&s->rq_lock);
2101baf42268SStefan Hajnoczi     blk_ram_registrar_destroy(&s->blk_ram_registrar);
21026e790746SPaolo Bonzini     qemu_del_vm_change_state_handler(s->change);
21034be74634SMarkus Armbruster     blockdev_mark_auto_del(s->blk);
21046a1a8cc7SKONRAD Frederic     virtio_cleanup(vdev);
21056e790746SPaolo Bonzini }
21066e790746SPaolo Bonzini 
2107467b3f33SStefan Hajnoczi static void virtio_blk_instance_init(Object *obj)
2108467b3f33SStefan Hajnoczi {
2109467b3f33SStefan Hajnoczi     VirtIOBlock *s = VIRTIO_BLK(obj);
2110467b3f33SStefan Hajnoczi 
21112a30307fSMarkus Armbruster     device_add_bootindex_property(obj, &s->conf.conf.bootindex,
21123342ec32SGonglei                                   "bootindex", "/disk@0,0",
211340c2281cSMarkus Armbruster                                   DEVICE(obj));
2114467b3f33SStefan Hajnoczi }
2115467b3f33SStefan Hajnoczi 
2116977a117fSHalil Pasic static const VMStateDescription vmstate_virtio_blk = {
2117977a117fSHalil Pasic     .name = "virtio-blk",
2118977a117fSHalil Pasic     .minimum_version_id = 2,
2119977a117fSHalil Pasic     .version_id = 2,
21207d5dc0a3SRichard Henderson     .fields = (const VMStateField[]) {
2121977a117fSHalil Pasic         VMSTATE_VIRTIO_DEVICE,
2122977a117fSHalil Pasic         VMSTATE_END_OF_LIST()
2123977a117fSHalil Pasic     },
2124977a117fSHalil Pasic };
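/*
 * Only the common virtio state is described here; the virtio-blk specific
 * state (the list of stopped requests to be restarted) is produced and
 * consumed by virtio_blk_save_device()/virtio_blk_load_device(), which
 * VMSTATE_VIRTIO_DEVICE reaches through the VirtioDeviceClass save/load hooks
 * installed in virtio_blk_class_init() below.
 */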
2125bbded32cSDr. David Alan Gilbert 
21266e790746SPaolo Bonzini static Property virtio_blk_properties[] = {
21272a30307fSMarkus Armbruster     DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
21288c398252SKevin Wolf     DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
21292a30307fSMarkus Armbruster     DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
21302a30307fSMarkus Armbruster     DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
2131bbe8bd4dSStefano Garzarella     DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
2132bbe8bd4dSStefano Garzarella                       VIRTIO_BLK_F_CONFIG_WCE, true),
213332a877e4SStefan Hajnoczi #ifdef __linux__
2134bbe8bd4dSStefano Garzarella     DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features,
2135bbe8bd4dSStefano Garzarella                       VIRTIO_BLK_F_SCSI, false),
213632a877e4SStefan Hajnoczi #endif
2137c99495acSPeter Lieven     DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
2138c99495acSPeter Lieven                     true),
21399445e1e1SStefan Hajnoczi     DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues,
21409445e1e1SStefan Hajnoczi                        VIRTIO_BLK_AUTO_NUM_QUEUES),
2141c9b7d9ecSDenis Plotnikov     DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
21421bf8a989SDenis Plotnikov     DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
2143d679ac09SFam Zheng     DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
2144d679ac09SFam Zheng                      IOThread *),
2145b6948ab0SStefan Hajnoczi     DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
2146b6948ab0SStefan Hajnoczi                                          conf.iothread_vq_mapping_list),
21475c81161fSStefano Garzarella     DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
21485c81161fSStefano Garzarella                       VIRTIO_BLK_F_DISCARD, true),
2149fb0b154cSAkihiko Odaki     DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
2150fb0b154cSAkihiko Odaki                      conf.report_discard_granularity, true),
21515c81161fSStefano Garzarella     DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
21525c81161fSStefano Garzarella                       VIRTIO_BLK_F_WRITE_ZEROES, true),
215337b06f8dSStefano Garzarella     DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
215437b06f8dSStefano Garzarella                        conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
215537b06f8dSStefano Garzarella     DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
215637b06f8dSStefano Garzarella                        conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
21575f258577SEvgeny Yakovlev     DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock,
21585f258577SEvgeny Yakovlev                      conf.x_enable_wce_if_config_wce, true),
21596e790746SPaolo Bonzini     DEFINE_PROP_END_OF_LIST(),
21606e790746SPaolo Bonzini };
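/*
 * For illustration, a typical way these properties appear on the command line
 * (ids and file name are placeholders):
 *
 *   -object iothread,id=iothread0
 *   -drive if=none,id=drive0,file=disk.img,format=raw
 *   -device virtio-blk-pci,drive=drive0,iothread=iothread0,num-queues=4,queue-size=256,discard=on,write-zeroes=on
 *
 * Leaving num-queues unset keeps VIRTIO_BLK_AUTO_NUM_QUEUES, in which case the
 * transport may choose a value and realize falls back to 1 otherwise.
 */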
21616e790746SPaolo Bonzini 
21626e790746SPaolo Bonzini static void virtio_blk_class_init(ObjectClass *klass, void *data)
21636e790746SPaolo Bonzini {
21646e790746SPaolo Bonzini     DeviceClass *dc = DEVICE_CLASS(klass);
21656e790746SPaolo Bonzini     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
216675884afdSAndreas Färber 
21674f67d30bSMarc-André Lureau     device_class_set_props(dc, virtio_blk_properties);
2168bbded32cSDr. David Alan Gilbert     dc->vmsd = &vmstate_virtio_blk;
2169125ee0edSMarcel Apfelbaum     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
217075884afdSAndreas Färber     vdc->realize = virtio_blk_device_realize;
2171306ec6c3SAndreas Färber     vdc->unrealize = virtio_blk_device_unrealize;
21726e790746SPaolo Bonzini     vdc->get_config = virtio_blk_update_config;
21736e790746SPaolo Bonzini     vdc->set_config = virtio_blk_set_config;
21746e790746SPaolo Bonzini     vdc->get_features = virtio_blk_get_features;
21756e790746SPaolo Bonzini     vdc->set_status = virtio_blk_set_status;
21766e790746SPaolo Bonzini     vdc->reset = virtio_blk_reset;
2177b2b295a7SGreg Kurz     vdc->save = virtio_blk_save_device;
2178b2b295a7SGreg Kurz     vdc->load = virtio_blk_load_device;
21793cdaf3ddSStefan Hajnoczi     vdc->start_ioeventfd = virtio_blk_start_ioeventfd;
21803cdaf3ddSStefan Hajnoczi     vdc->stop_ioeventfd = virtio_blk_stop_ioeventfd;
21816e790746SPaolo Bonzini }
21826e790746SPaolo Bonzini 
2183b5c7ceafSChanglong Xie static const TypeInfo virtio_blk_info = {
21846e790746SPaolo Bonzini     .name = TYPE_VIRTIO_BLK,
21856e790746SPaolo Bonzini     .parent = TYPE_VIRTIO_DEVICE,
21866e790746SPaolo Bonzini     .instance_size = sizeof(VirtIOBlock),
2187467b3f33SStefan Hajnoczi     .instance_init = virtio_blk_instance_init,
21886e790746SPaolo Bonzini     .class_init = virtio_blk_class_init,
21896e790746SPaolo Bonzini };
21906e790746SPaolo Bonzini 
21916e790746SPaolo Bonzini static void virtio_register_types(void)
21926e790746SPaolo Bonzini {
2193b5c7ceafSChanglong Xie     type_register_static(&virtio_blk_info);
21946e790746SPaolo Bonzini }
21956e790746SPaolo Bonzini 
21966e790746SPaolo Bonzini type_init(virtio_register_types)
2197