/* qemu/hw/block/virtio-blk.c (revision bbdf9023) */
/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "block/block_int.h"
#include "trace.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-ram-registrar.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "hw/virtio/virtio-blk.h"
#include "scsi/constants.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk-common.h"
#include "qemu/coroutine.h"

static void virtio_blk_ioeventfd_attach(VirtIOBlock *s);

static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    g_free(req);
}

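/*
 * Complete a request: fill in the status byte of the guest-visible in-header,
 * undo any header iovec adjustments, return the descriptor chain to the
 * virtqueue and notify the guest.  When running in an IOThread the
 * irqfd-based notification path is used.
 */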
static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    iov_discard_undo(&req->inhdr_undo);
    iov_discard_undo(&req->outhdr_undo);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (qemu_in_iothread()) {
        virtio_notify_irqfd(vdev, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}

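/*
 * Apply the configured rerror/werror policy to a failed request.  With
 * BLOCK_ERROR_ACTION_STOP the request is queued on s->rq so it can be
 * restarted after the VM resumes; with BLOCK_ERROR_ACTION_REPORT the request
 * is completed with VIRTIO_BLK_S_IOERR.  Returns zero only when the error is
 * ignored and the caller should complete the request itself.
 */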
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
    bool is_read, bool acct_failed)
{
    VirtIOBlock *s = req->dev;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->blk), &req->acct);
        }
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }
}

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    if (ret && virtio_blk_handle_rw_error(req, -ret, 0, true)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(s->blk), &req->acct);
    virtio_blk_free_request(req);
}

static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    virtio_blk_free_request(req);
}

#ifdef __linux__

typedef struct {
    VirtIOBlockReq *req;
    struct sg_io_hdr hdr;
} VirtIOBlockIoctlReq;

static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred.  However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    g_free(ioctl_req);
}

#endif

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}

static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIOBlock *blk = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(blk);
    VirtQueueElement *elem = &req->elem;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    }

    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                        virtio_blk_ioctl_complete, ioctl_req);
    if (!acb) {
        g_free(ioctl_req);
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }
    return -EINPROGRESS;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest.  */
    if (scsi) {
        virtio_stl_p(vdev, &scsi->errors, 255);
    }
    return status;
}

static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }
}

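/*
 * Issue the run of requests mrb->reqs[start .. start + num_reqs) as a single
 * read or write.  When more than one request is merged, a local QEMUIOVector
 * is built that concatenates the guest iovecs of all merged requests; it is
 * freed again in virtio_blk_rw_complete().
 */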
static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    BlockBackend *blk = s->blk;
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;
    BdrvRequestFlags flags = 0;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from external so we can't
         * modify it here. We need to initialize it locally and then add the
         * external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                         mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
        flags |= BDRV_REQ_REGISTERED_BUF;
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                        flags, virtio_blk_rw_complete,
                        mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                       flags, virtio_blk_rw_complete,
                       mrb->reqs[start]);
    }
}

static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}

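/*
 * Flush the requests batched in mrb: sort them by sector number and submit
 * consecutive, mergeable runs with submit_requests(), splitting whenever a
 * run would stop being sequential or would exceed the backend's iovec or
 * transfer-size limits.
 */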
static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(s, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in the following situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(s, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(s, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    VirtIOBlock *s = req->dev;

    block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(s, mrb);
    }
    blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
}

static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}

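/*
 * Handle VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES requests.  The
 * sector, num_sectors and flags fields of dwz_hdr are validated against the
 * configured limits before blk_aio_pdiscard() or blk_aio_pwrite_zeroes() is
 * issued.  Returns VIRTIO_BLK_S_OK when the asynchronous request was started
 * and an error status otherwise.
 */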
static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
    struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint64_t sector;
    uint32_t num_sectors, flags, max_sectors;
    uint8_t err_status;
    int bytes;

    sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
    num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
    flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
    max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
                  s->conf.max_discard_sectors;

    /*
     * max_sectors is at most BDRV_REQUEST_MAX_SECTORS; this check
     * makes sure that "num_sectors << BDRV_SECTOR_BITS" can fit in
     * the integer variable.
     */
    if (unlikely(num_sectors > max_sectors)) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    bytes = num_sectors << BDRV_SECTOR_BITS;

    if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    /*
     * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
     * and write zeroes commands if any unknown flag is set.
     */
    if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
        err_status = VIRTIO_BLK_S_UNSUPP;
        goto err;
    }

    if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
        int blk_aio_flags = 0;

        if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
            blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
                         BLOCK_ACCT_WRITE);

        blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
                              bytes, blk_aio_flags,
                              virtio_blk_discard_write_zeroes_complete, req);
    } else { /* VIRTIO_BLK_T_DISCARD */
        /*
         * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
         * discard commands if the unmap flag is set.
         */
        if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
            err_status = VIRTIO_BLK_S_UNSUPP;
            goto err;
        }

        blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
                         virtio_blk_discard_write_zeroes_complete, req);
    }

    return VIRTIO_BLK_S_OK;

err:
    if (is_write_zeroes) {
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    }
    return err_status;
}

typedef struct ZoneCmdData {
    VirtIOBlockReq *req;
    struct iovec *in_iov;
    unsigned in_num;
    union {
        struct {
            unsigned int nr_zones;
            BlockZoneDescriptor *zones;
        } zone_report_data;
        struct {
            int64_t offset;
        } zone_append_data;
    };
} ZoneCmdData;

/*
 * check zoned_request: error checking before issuing requests. If all checks
 * passed, return true.
 * append: true if only zone append requests issued.
 */
static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len,
                             bool append, uint8_t *status) {
    BlockDriverState *bs = blk_bs(s->blk);
    int index;

    if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
        *status = VIRTIO_BLK_S_UNSUPP;
        return false;
    }

    if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
        || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
        *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        return false;
    }

    if (append) {
        if (bs->bl.write_granularity) {
            if ((offset % bs->bl.write_granularity) != 0) {
                *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP;
                return false;
            }
        }

        index = offset / bs->bl.zone_size;
        if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
            *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            return false;
        }

        if (len / 512 > bs->bl.max_append_sectors) {
            if (bs->bl.max_append_sectors == 0) {
                *status = VIRTIO_BLK_S_UNSUPP;
            } else {
                *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            }
            return false;
        }
    }
    return true;
}

static void virtio_blk_zone_report_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    struct iovec *in_iov = data->in_iov;
    unsigned in_num = data->in_num;
    int64_t zrp_size, n, j = 0;
    int64_t nz = data->zone_report_data.nr_zones;
    int8_t err_status = VIRTIO_BLK_S_OK;
    struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
        .nr_zones = cpu_to_le64(nz),
    };

    trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    zrp_size = sizeof(struct virtio_blk_zone_report)
               + sizeof(struct virtio_blk_zone_descriptor) * nz;
    n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
    if (n != sizeof(zrp_hdr)) {
        virtio_error(vdev, "Driver provided input buffer that is too small!");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    for (size_t i = sizeof(zrp_hdr); i < zrp_size;
        i += sizeof(struct virtio_blk_zone_descriptor), ++j) {
        struct virtio_blk_zone_descriptor desc =
            (struct virtio_blk_zone_descriptor) {
                .z_start = cpu_to_le64(data->zone_report_data.zones[j].start
                    >> BDRV_SECTOR_BITS),
                .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap
                    >> BDRV_SECTOR_BITS),
                .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp
                    >> BDRV_SECTOR_BITS),
        };

        switch (data->zone_report_data.zones[j].type) {
        case BLK_ZT_CONV:
            desc.z_type = VIRTIO_BLK_ZT_CONV;
            break;
        case BLK_ZT_SWR:
            desc.z_type = VIRTIO_BLK_ZT_SWR;
            break;
        case BLK_ZT_SWP:
            desc.z_type = VIRTIO_BLK_ZT_SWP;
            break;
        default:
            g_assert_not_reached();
        }

        switch (data->zone_report_data.zones[j].state) {
        case BLK_ZS_RDONLY:
            desc.z_state = VIRTIO_BLK_ZS_RDONLY;
            break;
        case BLK_ZS_OFFLINE:
            desc.z_state = VIRTIO_BLK_ZS_OFFLINE;
            break;
        case BLK_ZS_EMPTY:
            desc.z_state = VIRTIO_BLK_ZS_EMPTY;
            break;
        case BLK_ZS_CLOSED:
            desc.z_state = VIRTIO_BLK_ZS_CLOSED;
            break;
        case BLK_ZS_FULL:
            desc.z_state = VIRTIO_BLK_ZS_FULL;
            break;
        case BLK_ZS_EOPEN:
            desc.z_state = VIRTIO_BLK_ZS_EOPEN;
            break;
        case BLK_ZS_IOPEN:
            desc.z_state = VIRTIO_BLK_ZS_IOPEN;
            break;
        case BLK_ZS_NOT_WP:
            desc.z_state = VIRTIO_BLK_ZS_NOT_WP;
            break;
        default:
            g_assert_not_reached();
        }

        /* TODO: it takes O(n^2) time complexity. Optimizations required. */
        n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc));
        if (n != sizeof(desc)) {
            virtio_error(vdev, "Driver provided input buffer "
                               "for descriptors that is too small!");
            err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        }
    }

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    g_free(data->zone_report_data.zones);
    g_free(data);
}

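/*
 * Handle a VIRTIO_BLK_T_ZONE_REPORT request: derive the number of zone
 * descriptors that fit in the driver-provided buffer from req->in_len and
 * start an asynchronous blk_aio_zone_report(); the result is marshalled back
 * to the guest in virtio_blk_zone_report_complete().
 */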
static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
                                         struct iovec *in_iov,
                                         unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    unsigned int nr_zones;
    ZoneCmdData *data;
    int64_t zone_size, offset;
    uint8_t err_status;

    if (req->in_len < sizeof(struct virtio_blk_inhdr) +
            sizeof(struct virtio_blk_zone_report) +
            sizeof(struct virtio_blk_zone_descriptor)) {
        virtio_error(vdev, "in buffer too small for zone report");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    /* start byte offset of the zone report */
    offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    if (!check_zoned_request(s, offset, 0, false, &err_status)) {
        goto out;
    }
    nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
                sizeof(struct virtio_blk_zone_report)) /
               sizeof(struct virtio_blk_zone_descriptor);
    trace_virtio_blk_handle_zone_report(vdev, req,
                                        offset >> BDRV_SECTOR_BITS, nr_zones);

    zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_report_data.nr_zones = nr_zones;
    data->zone_report_data.zones = g_malloc(zone_size);

    blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
                        data->zone_report_data.zones,
                        virtio_blk_zone_report_complete, data);
    return;
out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}

static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int8_t err_status = VIRTIO_BLK_S_OK;

    trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
    }

    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}

static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    BlockDriverState *bs = blk_bs(s->blk);
    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    uint64_t len;
    uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    uint32_t type = virtio_ldl_p(vdev, &req->out.type);
    if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
        /* Entire drive capacity */
        offset = 0;
        len = capacity;
        trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
                                               bs->total_sectors);
    } else {
        if (bs->bl.zone_size > capacity - offset) {
            /* The zoned device allows the last smaller zone. */
            len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1);
        } else {
            len = bs->bl.zone_size;
        }
        trace_virtio_blk_handle_zone_mgmt(vdev, req, op,
                                          offset >> BDRV_SECTOR_BITS,
                                          len >> BDRV_SECTOR_BITS);
    }

    if (!check_zoned_request(s, offset, len, false, &err_status)) {
        goto out;
    }

    blk_aio_zone_mgmt(s->blk, op, offset, len,
                      virtio_blk_zone_mgmt_complete, req);

    return 0;
out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    return err_status;
}

static void virtio_blk_zone_append_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    int64_t append_sector, n;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    virtio_stq_p(vdev, &append_sector,
                 data->zone_append_data.offset >> BDRV_SECTOR_BITS);
    n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
                     sizeof(append_sector));
    if (n != sizeof(append_sector)) {
        virtio_error(vdev, "Driver provided input buffer less than size of "
                           "append_sector");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }
    trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    g_free(data);
}

static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
                                         struct iovec *out_iov,
                                         struct iovec *in_iov,
                                         uint64_t out_num,
                                         unsigned in_num) {
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint8_t err_status = VIRTIO_BLK_S_OK;

    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    int64_t len = iov_size(out_iov, out_num);
    ZoneCmdData *data;

    trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
    if (!check_zoned_request(s, offset, len, true, &err_status)) {
        goto out;
    }

    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_append_data.offset = offset;
    qemu_iovec_init_external(&req->qiov, out_iov, out_num);

    block_acct_start(blk_get_stats(s->blk), &req->acct, len,
                     BLOCK_ACCT_ZONE_APPEND);

    blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
                        virtio_blk_zone_append_complete, data);
    return 0;

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    return err_status;
}

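/*
 * Parse one request popped from the virtqueue and dispatch it by type.  The
 * descriptor chain must start with a virtio_blk_outhdr in the out iovecs and
 * end with a virtio_blk_inhdr in the in iovecs; both headers are peeled off
 * before the payload is handed to the type-specific handler.  Read and write
 * requests are batched in mrb for possible merging.  Returns -1 only on a
 * fatal virtio error.
 */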
virtio_blk_handle_request(VirtIOBlockReq * req,MultiReqBuffer * mrb)93120ea686aSGreg Kurz static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
9326e790746SPaolo Bonzini {
9336e790746SPaolo Bonzini     uint32_t type;
934f897bf75SStefan Hajnoczi     struct iovec *in_iov = req->elem.in_sg;
9355636da76SDongli Zhang     struct iovec *out_iov = req->elem.out_sg;
936f897bf75SStefan Hajnoczi     unsigned in_num = req->elem.in_num;
937f897bf75SStefan Hajnoczi     unsigned out_num = req->elem.out_num;
93820ea686aSGreg Kurz     VirtIOBlock *s = req->dev;
93920ea686aSGreg Kurz     VirtIODevice *vdev = VIRTIO_DEVICE(s);
9406e790746SPaolo Bonzini 
941f897bf75SStefan Hajnoczi     if (req->elem.out_num < 1 || req->elem.in_num < 1) {
94220ea686aSGreg Kurz         virtio_error(vdev, "virtio-blk missing headers");
94320ea686aSGreg Kurz         return -1;
9446e790746SPaolo Bonzini     }
9456e790746SPaolo Bonzini 
9465636da76SDongli Zhang     if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
947827805a2SFam Zheng                             sizeof(req->out)) != sizeof(req->out))) {
94820ea686aSGreg Kurz         virtio_error(vdev, "virtio-blk request outhdr too short");
94920ea686aSGreg Kurz         return -1;
950827805a2SFam Zheng     }
951ee17e848SFam Zheng 
9527bd04a04SStefan Hajnoczi     iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
9537bd04a04SStefan Hajnoczi                                &req->outhdr_undo);
954ee17e848SFam Zheng 
95512048545SGonglei     if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
95620ea686aSGreg Kurz         virtio_error(vdev, "virtio-blk request inhdr too short");
9577bd04a04SStefan Hajnoczi         iov_discard_undo(&req->outhdr_undo);
95820ea686aSGreg Kurz         return -1;
959ee17e848SFam Zheng     }
960ee17e848SFam Zheng 
9612a6cdd6dSPaolo Bonzini     /* We always touch the last byte, so just see how big in_iov is.  */
9622a6cdd6dSPaolo Bonzini     req->in_len = iov_size(in_iov, in_num);
963ee17e848SFam Zheng     req->in = (void *)in_iov[in_num - 1].iov_base
964ee17e848SFam Zheng               + in_iov[in_num - 1].iov_len
965ee17e848SFam Zheng               - sizeof(struct virtio_blk_inhdr);
9667bd04a04SStefan Hajnoczi     iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
9677bd04a04SStefan Hajnoczi                               &req->inhdr_undo);
9686e790746SPaolo Bonzini 
9699a6719d5SStefano Garzarella     type = virtio_ldl_p(vdev, &req->out.type);
9706e790746SPaolo Bonzini 
97195f7142aSPeter Lieven     /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
972631b22eaSStefan Weil      * is an optional flag. Although a guest should not send this flag if
97395f7142aSPeter Lieven      * not negotiated we ignored it in the past. So keep ignoring it. */
97495f7142aSPeter Lieven     switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
97595f7142aSPeter Lieven     case VIRTIO_BLK_T_IN:
97695f7142aSPeter Lieven     {
97795f7142aSPeter Lieven         bool is_write = type & VIRTIO_BLK_T_OUT;
9789a6719d5SStefano Garzarella         req->sector_num = virtio_ldq_p(vdev, &req->out.sector);
97995f7142aSPeter Lieven 
98095f7142aSPeter Lieven         if (is_write) {
9815636da76SDongli Zhang             qemu_iovec_init_external(&req->qiov, out_iov, out_num);
982a576ceacSStefan Hajnoczi             trace_virtio_blk_handle_write(vdev, req, req->sector_num,
98395f7142aSPeter Lieven                                           req->qiov.size / BDRV_SECTOR_SIZE);
98495f7142aSPeter Lieven         } else {
98595f7142aSPeter Lieven             qemu_iovec_init_external(&req->qiov, in_iov, in_num);
986a576ceacSStefan Hajnoczi             trace_virtio_blk_handle_read(vdev, req, req->sector_num,
98795f7142aSPeter Lieven                                          req->qiov.size / BDRV_SECTOR_SIZE);
98895f7142aSPeter Lieven         }
98995f7142aSPeter Lieven 
9909a6719d5SStefano Garzarella         if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
99195f7142aSPeter Lieven             virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
9929a6719d5SStefano Garzarella             block_acct_invalid(blk_get_stats(s->blk),
99301762e03SAlberto Garcia                                is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
99495f7142aSPeter Lieven             virtio_blk_free_request(req);
99520ea686aSGreg Kurz             return 0;
99695f7142aSPeter Lieven         }
99795f7142aSPeter Lieven 
9989a6719d5SStefano Garzarella         block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
99995f7142aSPeter Lieven                          is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
100095f7142aSPeter Lieven 
100195f7142aSPeter Lieven         /* Submit the pending multireq first if merging is disabled, it is full,
100295f7142aSPeter Lieven          * or the I/O direction changes. */
100395f7142aSPeter Lieven         if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
1004c99495acSPeter Lieven                                   is_write != mrb->is_write ||
10059a6719d5SStefano Garzarella                                   !s->conf.request_merging)) {
1006baf42268SStefan Hajnoczi             virtio_blk_submit_multireq(s, mrb);
100795f7142aSPeter Lieven         }
100895f7142aSPeter Lieven 
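        /*
         * Queue the request in the multireq buffer; it is submitted when the
         * buffer fills up, when the I/O direction changes, or once the caller
         * has no more requests to queue.
         */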
100995f7142aSPeter Lieven         assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
101095f7142aSPeter Lieven         mrb->reqs[mrb->num_reqs++] = req;
101195f7142aSPeter Lieven         mrb->is_write = is_write;
101295f7142aSPeter Lieven         break;
101395f7142aSPeter Lieven     }
101495f7142aSPeter Lieven     case VIRTIO_BLK_T_FLUSH:
10156e790746SPaolo Bonzini         virtio_blk_handle_flush(req, mrb);
101695f7142aSPeter Lieven         break;
10174f736650SSam Li     case VIRTIO_BLK_T_ZONE_REPORT:
10184f736650SSam Li         virtio_blk_handle_zone_report(req, in_iov, in_num);
10194f736650SSam Li         break;
10204f736650SSam Li     case VIRTIO_BLK_T_ZONE_OPEN:
10214f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN);
10224f736650SSam Li         break;
10234f736650SSam Li     case VIRTIO_BLK_T_ZONE_CLOSE:
10244f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE);
10254f736650SSam Li         break;
10264f736650SSam Li     case VIRTIO_BLK_T_ZONE_FINISH:
10274f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH);
10284f736650SSam Li         break;
10294f736650SSam Li     case VIRTIO_BLK_T_ZONE_RESET:
10304f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
10314f736650SSam Li         break;
10324f736650SSam Li     case VIRTIO_BLK_T_ZONE_RESET_ALL:
10334f736650SSam Li         virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
10344f736650SSam Li         break;
103595f7142aSPeter Lieven     case VIRTIO_BLK_T_SCSI_CMD:
10366e790746SPaolo Bonzini         virtio_blk_handle_scsi(req);
103795f7142aSPeter Lieven         break;
103895f7142aSPeter Lieven     case VIRTIO_BLK_T_GET_ID:
103995f7142aSPeter Lieven     {
10406e790746SPaolo Bonzini         /*
10416e790746SPaolo Bonzini          * NB: per existing s/n string convention the string is
10426e790746SPaolo Bonzini          * terminated by '\0' only when shorter than the buffer.
10436e790746SPaolo Bonzini          */
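        /*
         * Copy at most VIRTIO_BLK_ID_BYTES of the serial string into the guest
         * buffer, truncating to the buffer size if necessary.
         */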
10442a30307fSMarkus Armbruster         const char *serial = s->conf.serial ? s->conf.serial : "";
1045a83ceea8SMarc Marí         size_t size = MIN(strlen(serial) + 1,
1046a83ceea8SMarc Marí                           MIN(iov_size(in_iov, in_num),
1047a83ceea8SMarc Marí                               VIRTIO_BLK_ID_BYTES));
1048a83ceea8SMarc Marí         iov_from_buf(in_iov, in_num, 0, serial, size);
10496e790746SPaolo Bonzini         virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
1050671ec3f0SFam Zheng         virtio_blk_free_request(req);
105195f7142aSPeter Lieven         break;
105295f7142aSPeter Lieven     }
10534f736650SSam Li     case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
10544f736650SSam Li         /*
10554f736650SSam Li          * Pass out_iov/out_num and in_iov/in_num rather than accessing
10564f736650SSam Li          * req->elem.out_sg directly, because the latter may already have been
10574f736650SSam Li          * modified by virtio_blk_handle_request() (iov_discard_* above).
10584f736650SSam Li          */
10594f736650SSam Li         virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
10604f736650SSam Li         break;
106137b06f8dSStefano Garzarella     /*
106237b06f8dSStefano Garzarella      * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with the
106337b06f8dSStefano Garzarella      * VIRTIO_BLK_T_OUT flag set. The switch expression masks this flag off, so
106437b06f8dSStefano Garzarella      * these case labels must mask it too; below we check that the guest set it.
106537b06f8dSStefano Garzarella      */
106637b06f8dSStefano Garzarella     case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
106737b06f8dSStefano Garzarella     case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
106837b06f8dSStefano Garzarella     {
106937b06f8dSStefano Garzarella         struct virtio_blk_discard_write_zeroes dwz_hdr;
107037b06f8dSStefano Garzarella         size_t out_len = iov_size(out_iov, out_num);
107137b06f8dSStefano Garzarella         bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
107237b06f8dSStefano Garzarella                                VIRTIO_BLK_T_WRITE_ZEROES;
107337b06f8dSStefano Garzarella         uint8_t err_status;
107437b06f8dSStefano Garzarella 
107537b06f8dSStefano Garzarella         /*
107637b06f8dSStefano Garzarella          * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
107737b06f8dSStefano Garzarella          * more than one segment.
107837b06f8dSStefano Garzarella          */
107937b06f8dSStefano Garzarella         if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
108037b06f8dSStefano Garzarella                      out_len > sizeof(dwz_hdr))) {
108137b06f8dSStefano Garzarella             virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
108237b06f8dSStefano Garzarella             virtio_blk_free_request(req);
108337b06f8dSStefano Garzarella             return 0;
108437b06f8dSStefano Garzarella         }
108537b06f8dSStefano Garzarella 
108637b06f8dSStefano Garzarella         if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
108737b06f8dSStefano Garzarella                                 sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
10887bd04a04SStefan Hajnoczi             iov_discard_undo(&req->inhdr_undo);
10897bd04a04SStefan Hajnoczi             iov_discard_undo(&req->outhdr_undo);
109037b06f8dSStefano Garzarella             virtio_error(vdev, "virtio-blk discard/write_zeroes header"
109137b06f8dSStefano Garzarella                          " too short");
109237b06f8dSStefano Garzarella             return -1;
109337b06f8dSStefano Garzarella         }
109437b06f8dSStefano Garzarella 
109537b06f8dSStefano Garzarella         err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
109637b06f8dSStefano Garzarella                                                             is_write_zeroes);
109737b06f8dSStefano Garzarella         if (err_status != VIRTIO_BLK_S_OK) {
109837b06f8dSStefano Garzarella             virtio_blk_req_complete(req, err_status);
109937b06f8dSStefano Garzarella             virtio_blk_free_request(req);
110037b06f8dSStefano Garzarella         }
110137b06f8dSStefano Garzarella 
110237b06f8dSStefano Garzarella         break;
110337b06f8dSStefano Garzarella     }
110495f7142aSPeter Lieven     default:
11056e790746SPaolo Bonzini         virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
1106671ec3f0SFam Zheng         virtio_blk_free_request(req);
11076e790746SPaolo Bonzini     }
110820ea686aSGreg Kurz     return 0;
11096e790746SPaolo Bonzini }
11106e790746SPaolo Bonzini 
virtio_blk_handle_vq(VirtIOBlock * s,VirtQueue * vq)1111186b9691SStefan Hajnoczi void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
11126e790746SPaolo Bonzini {
11136e790746SPaolo Bonzini     VirtIOBlockReq *req;
111495f7142aSPeter Lieven     MultiReqBuffer mrb = {};
1115d0435bc5SStefan Hajnoczi     bool suppress_notifications = virtio_queue_get_notification(vq);
11166e790746SPaolo Bonzini 
1117ccee48aaSStefan Hajnoczi     defer_call_begin();
1118fc73548eSStefan Hajnoczi 
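    /*
     * defer_call_begin()/defer_call_end() bracket the loop so that deferred
     * work batched by lower layers is flushed once all available requests have
     * been queued. Guest->host notifications are disabled while draining the
     * virtqueue to avoid unnecessary vmexits; virtio_queue_empty() is
     * re-checked after re-enabling them so requests added in the meantime are
     * not missed.
     */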
11199ef9d402SStefan Hajnoczi     do {
1120d0435bc5SStefan Hajnoczi         if (suppress_notifications) {
11219ef9d402SStefan Hajnoczi             virtio_queue_set_notification(vq, 0);
1122d0435bc5SStefan Hajnoczi         }
11239ef9d402SStefan Hajnoczi 
1124edaffd9fSStefan Hajnoczi         while ((req = virtio_blk_get_request(s, vq))) {
112520ea686aSGreg Kurz             if (virtio_blk_handle_request(req, &mrb)) {
112620ea686aSGreg Kurz                 virtqueue_detach_element(req->vq, &req->elem, 0);
112720ea686aSGreg Kurz                 virtio_blk_free_request(req);
112820ea686aSGreg Kurz                 break;
112920ea686aSGreg Kurz             }
11306e790746SPaolo Bonzini         }
11316e790746SPaolo Bonzini 
1132d0435bc5SStefan Hajnoczi         if (suppress_notifications) {
11339ef9d402SStefan Hajnoczi             virtio_queue_set_notification(vq, 1);
1134d0435bc5SStefan Hajnoczi         }
11359ef9d402SStefan Hajnoczi     } while (!virtio_queue_empty(vq));
11369ef9d402SStefan Hajnoczi 
113795f7142aSPeter Lieven     if (mrb.num_reqs) {
1138baf42268SStefan Hajnoczi         virtio_blk_submit_multireq(s, &mrb);
113995f7142aSPeter Lieven     }
1140fc73548eSStefan Hajnoczi 
1141ccee48aaSStefan Hajnoczi     defer_call_end();
11426e790746SPaolo Bonzini }
11436e790746SPaolo Bonzini 
virtio_blk_handle_output(VirtIODevice * vdev,VirtQueue * vq)11448a2fad57SMichael S. Tsirkin static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
11458a2fad57SMichael S. Tsirkin {
11468a2fad57SMichael S. Tsirkin     VirtIOBlock *s = (VirtIOBlock *)vdev;
11478a2fad57SMichael S. Tsirkin 
11483cdaf3ddSStefan Hajnoczi     if (!s->ioeventfd_disabled && !s->ioeventfd_started) {
11498a2fad57SMichael S. Tsirkin         /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
11503cdaf3ddSStefan Hajnoczi          * ioeventfd here instead of waiting for .set_status().
11518a2fad57SMichael S. Tsirkin          */
11529ffe337cSPaolo Bonzini         virtio_device_start_ioeventfd(vdev);
11533cdaf3ddSStefan Hajnoczi         if (!s->ioeventfd_disabled) {
11548a2fad57SMichael S. Tsirkin             return;
11558a2fad57SMichael S. Tsirkin         }
11568a2fad57SMichael S. Tsirkin     }
1157b6948ab0SStefan Hajnoczi 
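    /* ioeventfd is unavailable or failed to start, so process the virtqueue
     * directly in the vCPU thread.
     */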
1158186b9691SStefan Hajnoczi     virtio_blk_handle_vq(s, vq);
11598a2fad57SMichael S. Tsirkin }
11608a2fad57SMichael S. Tsirkin 
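/*
 * Resubmit a list of requests that were parked on s->rq, for example while the
 * VM was stopped after an I/O error with werror/rerror=stop or after being
 * restored by migration. Runs as a BH in the AioContext of the virtqueue that
 * the requests belong to.
 */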
virtio_blk_dma_restart_bh(void * opaque)1161a937f8e8SStefan Hajnoczi static void virtio_blk_dma_restart_bh(void *opaque)
11626e790746SPaolo Bonzini {
116371ee0cddSStefan Hajnoczi     VirtIOBlockReq *req = opaque;
116471ee0cddSStefan Hajnoczi     VirtIOBlock *s = req->dev; /* we're called with at least one request */
1165a937f8e8SStefan Hajnoczi 
116695f7142aSPeter Lieven     MultiReqBuffer mrb = {};
11676e790746SPaolo Bonzini 
11686e790746SPaolo Bonzini     while (req) {
11691bdb176aSzhanghailiang         VirtIOBlockReq *next = req->next;
117020ea686aSGreg Kurz         if (virtio_blk_handle_request(req, &mrb)) {
117120ea686aSGreg Kurz             /* Device is now broken and won't do any processing until it gets
117220ea686aSGreg Kurz              * reset. Already queued requests will be lost: let's purge them.
117320ea686aSGreg Kurz              */
117420ea686aSGreg Kurz             while (req) {
117520ea686aSGreg Kurz                 next = req->next;
117620ea686aSGreg Kurz                 virtqueue_detach_element(req->vq, &req->elem, 0);
117720ea686aSGreg Kurz                 virtio_blk_free_request(req);
117820ea686aSGreg Kurz                 req = next;
117920ea686aSGreg Kurz             }
118020ea686aSGreg Kurz             break;
118120ea686aSGreg Kurz         }
11821bdb176aSzhanghailiang         req = next;
11836e790746SPaolo Bonzini     }
11846e790746SPaolo Bonzini 
118595f7142aSPeter Lieven     if (mrb.num_reqs) {
1186baf42268SStefan Hajnoczi         virtio_blk_submit_multireq(s, &mrb);
118795f7142aSPeter Lieven     }
1188a937f8e8SStefan Hajnoczi 
1189a937f8e8SStefan Hajnoczi     /* Paired with inc in virtio_blk_dma_restart_cb() */
1190680f2002SKevin Wolf     blk_dec_in_flight(s->conf.conf.blk);
11916e790746SPaolo Bonzini }
11926e790746SPaolo Bonzini 
virtio_blk_dma_restart_cb(void * opaque,bool running,RunState state)1193538f0497SPhilippe Mathieu-Daudé static void virtio_blk_dma_restart_cb(void *opaque, bool running,
11946e790746SPaolo Bonzini                                       RunState state)
11956e790746SPaolo Bonzini {
11966e790746SPaolo Bonzini     VirtIOBlock *s = opaque;
119771ee0cddSStefan Hajnoczi     uint16_t num_queues = s->conf.num_queues;
1198b3d9bb9aSStefan Hajnoczi     g_autofree VirtIOBlockReq **vq_rq = NULL;
1199b3d9bb9aSStefan Hajnoczi     VirtIOBlockReq *rq;
12006e790746SPaolo Bonzini 
12016e790746SPaolo Bonzini     if (!running) {
12026e790746SPaolo Bonzini         return;
12036e790746SPaolo Bonzini     }
12046e790746SPaolo Bonzini 
120571ee0cddSStefan Hajnoczi     /* Split the device-wide s->rq request list into per-vq request lists */
1206b3d9bb9aSStefan Hajnoczi     vq_rq = g_new0(VirtIOBlockReq *, num_queues);
120771ee0cddSStefan Hajnoczi 
120871ee0cddSStefan Hajnoczi     WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
120971ee0cddSStefan Hajnoczi         rq = s->rq;
121071ee0cddSStefan Hajnoczi         s->rq = NULL;
121171ee0cddSStefan Hajnoczi     }
121271ee0cddSStefan Hajnoczi 
121371ee0cddSStefan Hajnoczi     while (rq) {
121471ee0cddSStefan Hajnoczi         VirtIOBlockReq *next = rq->next;
121571ee0cddSStefan Hajnoczi         uint16_t idx = virtio_get_queue_index(rq->vq);
121671ee0cddSStefan Hajnoczi 
1217f2eea93cSStefan Hajnoczi         /* Only num_queues vqs were created so vq_rq[idx] is within bounds */
1218f2eea93cSStefan Hajnoczi         assert(idx < num_queues);
121971ee0cddSStefan Hajnoczi         rq->next = vq_rq[idx];
122071ee0cddSStefan Hajnoczi         vq_rq[idx] = rq;
122171ee0cddSStefan Hajnoczi         rq = next;
122271ee0cddSStefan Hajnoczi     }
122371ee0cddSStefan Hajnoczi 
122471ee0cddSStefan Hajnoczi     /* Schedule a BH to submit the requests in each vq's AioContext */
122571ee0cddSStefan Hajnoczi     for (uint16_t i = 0; i < num_queues; i++) {
122671ee0cddSStefan Hajnoczi         if (!vq_rq[i]) {
122771ee0cddSStefan Hajnoczi             continue;
122871ee0cddSStefan Hajnoczi         }
122971ee0cddSStefan Hajnoczi 
1230a937f8e8SStefan Hajnoczi         /* Paired with dec in virtio_blk_dma_restart_bh() */
1231680f2002SKevin Wolf         blk_inc_in_flight(s->conf.conf.blk);
1232a937f8e8SStefan Hajnoczi 
123371ee0cddSStefan Hajnoczi         aio_bh_schedule_oneshot(s->vq_aio_context[i],
123471ee0cddSStefan Hajnoczi                                 virtio_blk_dma_restart_bh,
123571ee0cddSStefan Hajnoczi                                 vq_rq[i]);
123671ee0cddSStefan Hajnoczi     }
12376e790746SPaolo Bonzini }
12386e790746SPaolo Bonzini 
virtio_blk_reset(VirtIODevice * vdev)12396e790746SPaolo Bonzini static void virtio_blk_reset(VirtIODevice *vdev)
12406e790746SPaolo Bonzini {
12416e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
124226307f6aSFam Zheng     VirtIOBlockReq *req;
12436e790746SPaolo Bonzini 
12449c67f33fSStefan Hajnoczi     /* Dataplane has stopped... */
12453cdaf3ddSStefan Hajnoczi     assert(!s->ioeventfd_started);
12469c67f33fSStefan Hajnoczi 
12479c67f33fSStefan Hajnoczi     /* ...but requests may still be in flight. */
12486e40b3bfSAlexander Yarygin     blk_drain(s->blk);
12496e40b3bfSAlexander Yarygin 
125026307f6aSFam Zheng     /* We drop queued requests after blk_drain() because blk_drain() itself can
125126307f6aSFam Zheng      * produce them. */
12529c67f33fSStefan Hajnoczi     WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
125326307f6aSFam Zheng         while (s->rq) {
125426307f6aSFam Zheng             req = s->rq;
125526307f6aSFam Zheng             s->rq = req->next;
12569c67f33fSStefan Hajnoczi 
12579c67f33fSStefan Hajnoczi             /* No other threads can access req->vq here */
125897b93c8aSStefan Hajnoczi             virtqueue_detach_element(req->vq, &req->elem, 0);
12599c67f33fSStefan Hajnoczi 
126026307f6aSFam Zheng             virtio_blk_free_request(req);
126126307f6aSFam Zheng         }
12629c67f33fSStefan Hajnoczi     }
126326307f6aSFam Zheng 
12644be74634SMarkus Armbruster     blk_set_enable_write_cache(s->blk, s->original_wce);
12656e790746SPaolo Bonzini }
12666e790746SPaolo Bonzini 
12676e790746SPaolo Bonzini /* Coalesce internal state and copy it to PCI I/O region 0.
12686e790746SPaolo Bonzini  */
virtio_blk_update_config(VirtIODevice * vdev,uint8_t * config)12696e790746SPaolo Bonzini static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
12706e790746SPaolo Bonzini {
12716e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
12722a30307fSMarkus Armbruster     BlockConf *conf = &s->conf.conf;
12734f736650SSam Li     BlockDriverState *bs = blk_bs(s->blk);
12746e790746SPaolo Bonzini     struct virtio_blk_config blkcfg;
12756e790746SPaolo Bonzini     uint64_t capacity;
127617d0bc01SStefan Hajnoczi     int64_t length;
1277f7516731SMarkus Armbruster     int blk_size = conf->logical_block_size;
12786e790746SPaolo Bonzini 
12794be74634SMarkus Armbruster     blk_get_geometry(s->blk, &capacity);
12806e790746SPaolo Bonzini     memset(&blkcfg, 0, sizeof(blkcfg));
1281783d1897SRusty Russell     virtio_stq_p(vdev, &blkcfg.capacity, capacity);
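    /*
     * seg_max is the queue size minus 2: two descriptors are reserved for the
     * request header and the status byte. Without seg-max-adjust the
     * historical default of 128 - 2 is reported.
     */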
12821bf8a989SDenis Plotnikov     virtio_stl_p(vdev, &blkcfg.seg_max,
12831bf8a989SDenis Plotnikov                  s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
1284907eb3e5SMichael S. Tsirkin     virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
1285783d1897SRusty Russell     virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
1286f7516731SMarkus Armbruster     virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
12876abee260SRoman Kagan     virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
1288907eb3e5SMichael S. Tsirkin     blkcfg.geometry.heads = conf->heads;
12896e790746SPaolo Bonzini     /*
12906e790746SPaolo Bonzini      * We must ensure that the block device capacity is a multiple of
1291e03ba136SPeter Maydell      * the logical block size. If that is not the case, let's use
12926e790746SPaolo Bonzini      * sector_mask to adapt the geometry to have a correct picture.
12936e790746SPaolo Bonzini      * For those devices where the capacity is ok for the given geometry
1294e03ba136SPeter Maydell      * we don't touch the sector value of the geometry, since some devices
12956e790746SPaolo Bonzini      * (like s390 dasd) need a specific value. Here the capacity is already
12966e790746SPaolo Bonzini      * cyls*heads*secs*blk_size and the sector value is not block size
12976e790746SPaolo Bonzini      * divided by 512 - instead it is the number of blk_size blocks
12986e790746SPaolo Bonzini      * per track (cylinder).
12996e790746SPaolo Bonzini      */
130017d0bc01SStefan Hajnoczi     length = blk_getlength(s->blk);
130117d0bc01SStefan Hajnoczi     if (length > 0 && length / conf->heads / conf->secs % blk_size) {
1302907eb3e5SMichael S. Tsirkin         blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
13036e790746SPaolo Bonzini     } else {
1304907eb3e5SMichael S. Tsirkin         blkcfg.geometry.sectors = conf->secs;
13056e790746SPaolo Bonzini     }
13066e790746SPaolo Bonzini     blkcfg.size_max = 0;
1307f7516731SMarkus Armbruster     blkcfg.physical_block_exp = get_physical_block_exp(conf);
13086e790746SPaolo Bonzini     blkcfg.alignment_offset = 0;
13094be74634SMarkus Armbruster     blkcfg.wce = blk_enable_write_cache(s->blk);
13102f270590SStefan Hajnoczi     virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
131137b06f8dSStefano Garzarella     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
1312fb0b154cSAkihiko Odaki         uint32_t discard_granularity = conf->discard_granularity;
1313fb0b154cSAkihiko Odaki         if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
1314fb0b154cSAkihiko Odaki             discard_granularity = blk_size;
1315fb0b154cSAkihiko Odaki         }
131637b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
131737b06f8dSStefano Garzarella                      s->conf.max_discard_sectors);
131837b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
1319fb0b154cSAkihiko Odaki                      discard_granularity >> BDRV_SECTOR_BITS);
132037b06f8dSStefano Garzarella         /*
132137b06f8dSStefano Garzarella          * We support only one segment per request since multiple segments
132237b06f8dSStefano Garzarella          * are not widely used and there are no userspace APIs that allow
132337b06f8dSStefano Garzarella          * applications to submit multiple segments in a single call.
132437b06f8dSStefano Garzarella          */
132537b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
132637b06f8dSStefano Garzarella     }
132737b06f8dSStefano Garzarella     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
132837b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
132937b06f8dSStefano Garzarella                      s->conf.max_write_zeroes_sectors);
133037b06f8dSStefano Garzarella         blkcfg.write_zeroes_may_unmap = 1;
133137b06f8dSStefano Garzarella         virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
133237b06f8dSStefano Garzarella     }
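    /* Report the zoned model and limits of the backing device to the guest. */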
13334f736650SSam Li     if (bs->bl.zoned != BLK_Z_NONE) {
13344f736650SSam Li         switch (bs->bl.zoned) {
13354f736650SSam Li         case BLK_Z_HM:
13364f736650SSam Li             blkcfg.zoned.model = VIRTIO_BLK_Z_HM;
13374f736650SSam Li             break;
13384f736650SSam Li         case BLK_Z_HA:
13394f736650SSam Li             blkcfg.zoned.model = VIRTIO_BLK_Z_HA;
13404f736650SSam Li             break;
13414f736650SSam Li         default:
13424f736650SSam Li             g_assert_not_reached();
13434f736650SSam Li         }
13444f736650SSam Li 
13454f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
13464f736650SSam Li                      bs->bl.zone_size / 512);
13474f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
13484f736650SSam Li                      bs->bl.max_active_zones);
13494f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
13504f736650SSam Li                      bs->bl.max_open_zones);
13514f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size);
13524f736650SSam Li         virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
13534f736650SSam Li                      bs->bl.max_append_sectors);
13544f736650SSam Li     } else {
13554f736650SSam Li         blkcfg.zoned.model = VIRTIO_BLK_Z_NONE;
13564f736650SSam Li     }
135720764be0SStefano Garzarella     memcpy(config, &blkcfg, s->config_size);
13586e790746SPaolo Bonzini }
13596e790746SPaolo Bonzini 
virtio_blk_set_config(VirtIODevice * vdev,const uint8_t * config)13606e790746SPaolo Bonzini static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
13616e790746SPaolo Bonzini {
13626e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
13636e790746SPaolo Bonzini     struct virtio_blk_config blkcfg;
13646e790746SPaolo Bonzini 
136520764be0SStefano Garzarella     memcpy(&blkcfg, config, s->config_size);
13666d7e73d6SFam Zheng 
13674be74634SMarkus Armbruster     blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
13686e790746SPaolo Bonzini }
13696e790746SPaolo Bonzini 
virtio_blk_get_features(VirtIODevice * vdev,uint64_t features,Error ** errp)13709d5b731dSJason Wang static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
13719d5b731dSJason Wang                                         Error **errp)
13726e790746SPaolo Bonzini {
13736e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
13746e790746SPaolo Bonzini 
1375bbe8bd4dSStefano Garzarella     /* First, sync in all features that virtio-blk may possibly support */
1376bbe8bd4dSStefano Garzarella     features |= s->host_features;
1377bbe8bd4dSStefano Garzarella 
13780cd09c3aSCornelia Huck     virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
13790cd09c3aSCornelia Huck     virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
13800cd09c3aSCornelia Huck     virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
13810cd09c3aSCornelia Huck     virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
138295129d6fSCornelia Huck     if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
1383bbe8bd4dSStefano Garzarella         if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
1384efb8206cSJason Wang             error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
1385efb8206cSJason Wang             return 0;
1386efb8206cSJason Wang         }
1387efb8206cSJason Wang     } else {
1388c9b11f97SJason Wang         virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
1389efb8206cSJason Wang         virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
1390efb8206cSJason Wang     }
13916e790746SPaolo Bonzini 
13925f258577SEvgeny Yakovlev     if (blk_enable_write_cache(s->blk) ||
13935f258577SEvgeny Yakovlev         (s->conf.x_enable_wce_if_config_wce &&
13945f258577SEvgeny Yakovlev          virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
13950cd09c3aSCornelia Huck         virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
13964be74634SMarkus Armbruster     }
139786b1cf32SKevin Wolf     if (!blk_is_writable(s->blk)) {
13980cd09c3aSCornelia Huck         virtio_add_feature(&features, VIRTIO_BLK_F_RO);
13994be74634SMarkus Armbruster     }
14002f270590SStefan Hajnoczi     if (s->conf.num_queues > 1) {
14012f270590SStefan Hajnoczi         virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
14022f270590SStefan Hajnoczi     }
14036e790746SPaolo Bonzini 
14046e790746SPaolo Bonzini     return features;
14056e790746SPaolo Bonzini }
14066e790746SPaolo Bonzini 
virtio_blk_set_status(VirtIODevice * vdev,uint8_t status)14076e790746SPaolo Bonzini static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
14086e790746SPaolo Bonzini {
14096e790746SPaolo Bonzini     VirtIOBlock *s = VIRTIO_BLK(vdev);
14106e790746SPaolo Bonzini 
14119ffe337cSPaolo Bonzini     if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
14123cdaf3ddSStefan Hajnoczi         assert(!s->ioeventfd_started);
14136e790746SPaolo Bonzini     }
14146e790746SPaolo Bonzini 
14156e790746SPaolo Bonzini     if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
14166e790746SPaolo Bonzini         return;
14176e790746SPaolo Bonzini     }
14186e790746SPaolo Bonzini 
1419ef5bc962SPaolo Bonzini     /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
1420ef5bc962SPaolo Bonzini      * cache flushes.  Thus, the "auto writethrough" behavior is never
1421ef5bc962SPaolo Bonzini      * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
1422ef5bc962SPaolo Bonzini      * Leaving it enabled would break the following sequence:
1423ef5bc962SPaolo Bonzini      *
1424ef5bc962SPaolo Bonzini      *     Guest started with "-drive cache=writethrough"
1425ef5bc962SPaolo Bonzini      *     Guest sets status to 0
1426ef5bc962SPaolo Bonzini      *     Guest sets DRIVER bit in status field
1427ef5bc962SPaolo Bonzini      *     Guest reads host features (WCE=0, CONFIG_WCE=1)
1428ef5bc962SPaolo Bonzini      *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
1429ef5bc962SPaolo Bonzini      *     Guest writes 1 to the WCE configuration field (writeback mode)
1430ef5bc962SPaolo Bonzini      *     Guest sets DRIVER_OK bit in status field
1431ef5bc962SPaolo Bonzini      *
14324be74634SMarkus Armbruster      * s->blk would erroneously be placed in writethrough mode.
1433ef5bc962SPaolo Bonzini      */
143495129d6fSCornelia Huck     if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
14354be74634SMarkus Armbruster         blk_set_enable_write_cache(s->blk,
143695129d6fSCornelia Huck                                    virtio_vdev_has_feature(vdev,
143795129d6fSCornelia Huck                                                            VIRTIO_BLK_F_WCE));
14386e790746SPaolo Bonzini     }
1439ef5bc962SPaolo Bonzini }
14406e790746SPaolo Bonzini 
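/*
 * Serialize the requests parked on s->rq so that they can be resubmitted on
 * the destination after migration. The virtqueue index is stored only when
 * num_queues > 1 to keep the stream compatible with older single-queue
 * versions.
 */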
virtio_blk_save_device(VirtIODevice * vdev,QEMUFile * f)1441b2b295a7SGreg Kurz static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
1442b2b295a7SGreg Kurz {
1443b2b295a7SGreg Kurz     VirtIOBlock *s = VIRTIO_BLK(vdev);
14449c67f33fSStefan Hajnoczi 
14459c67f33fSStefan Hajnoczi     WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
1446b2b295a7SGreg Kurz         VirtIOBlockReq *req = s->rq;
14476e790746SPaolo Bonzini 
14486e790746SPaolo Bonzini         while (req) {
14496e790746SPaolo Bonzini             qemu_put_sbyte(f, 1);
145030d8bf6dSStefan Hajnoczi 
145130d8bf6dSStefan Hajnoczi             if (s->conf.num_queues > 1) {
145230d8bf6dSStefan Hajnoczi                 qemu_put_be32(f, virtio_get_queue_index(req->vq));
145330d8bf6dSStefan Hajnoczi             }
145430d8bf6dSStefan Hajnoczi 
145586044b24SJason Wang             qemu_put_virtqueue_element(vdev, f, &req->elem);
14566e790746SPaolo Bonzini             req = req->next;
14576e790746SPaolo Bonzini         }
14589c67f33fSStefan Hajnoczi     }
14599c67f33fSStefan Hajnoczi 
14606e790746SPaolo Bonzini     qemu_put_sbyte(f, 0);
14616e790746SPaolo Bonzini }
14626e790746SPaolo Bonzini 
virtio_blk_load_device(VirtIODevice * vdev,QEMUFile * f,int version_id)1463b2b295a7SGreg Kurz static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
1464b2b295a7SGreg Kurz                                   int version_id)
1465b2b295a7SGreg Kurz {
1466b2b295a7SGreg Kurz     VirtIOBlock *s = VIRTIO_BLK(vdev);
1467b2b295a7SGreg Kurz 
14686e790746SPaolo Bonzini     while (qemu_get_sbyte(f)) {
146930d8bf6dSStefan Hajnoczi         unsigned nvqs = s->conf.num_queues;
147030d8bf6dSStefan Hajnoczi         unsigned vq_idx = 0;
1471ab281c17SPaolo Bonzini         VirtIOBlockReq *req;
147230d8bf6dSStefan Hajnoczi 
147330d8bf6dSStefan Hajnoczi         if (nvqs > 1) {
147430d8bf6dSStefan Hajnoczi             vq_idx = qemu_get_be32(f);
147530d8bf6dSStefan Hajnoczi 
147630d8bf6dSStefan Hajnoczi             if (vq_idx >= nvqs) {
147730d8bf6dSStefan Hajnoczi                 error_report("Invalid virtqueue index in request list: %#x",
147830d8bf6dSStefan Hajnoczi                              vq_idx);
147930d8bf6dSStefan Hajnoczi                 return -EINVAL;
148030d8bf6dSStefan Hajnoczi             }
148130d8bf6dSStefan Hajnoczi         }
148230d8bf6dSStefan Hajnoczi 
14838607f5c3SJason Wang         req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
148430d8bf6dSStefan Hajnoczi         virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
14859c67f33fSStefan Hajnoczi 
14869c67f33fSStefan Hajnoczi         WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
14876e790746SPaolo Bonzini             req->next = s->rq;
14886e790746SPaolo Bonzini             s->rq = req;
14896e790746SPaolo Bonzini         }
14909c67f33fSStefan Hajnoczi     }
14916e790746SPaolo Bonzini 
14926e790746SPaolo Bonzini     return 0;
14936e790746SPaolo Bonzini }
14946e790746SPaolo Bonzini 
virtio_resize_cb(void * opaque)14959b92fbcfSSergio Lopez static void virtio_resize_cb(void *opaque)
14969b92fbcfSSergio Lopez {
14979b92fbcfSSergio Lopez     VirtIODevice *vdev = opaque;
14989b92fbcfSSergio Lopez 
14999b92fbcfSSergio Lopez     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
15009b92fbcfSSergio Lopez     virtio_notify_config(vdev);
15019b92fbcfSSergio Lopez }
15029b92fbcfSSergio Lopez 
virtio_blk_resize(void * opaque)15036e790746SPaolo Bonzini static void virtio_blk_resize(void *opaque)
15046e790746SPaolo Bonzini {
15056e790746SPaolo Bonzini     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
15066e790746SPaolo Bonzini 
15079b92fbcfSSergio Lopez     /*
15080b2675c4SStefan Hajnoczi      * virtio_notify_config() needs to acquire the BQL, so it can't be called
15099b92fbcfSSergio Lopez      * from an IOThread. Instead, schedule it to run in a BH in the main loop
15109b92fbcfSSergio Lopez      * AioContext.
15119b92fbcfSSergio Lopez      */
15129b92fbcfSSergio Lopez     aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
15136e790746SPaolo Bonzini }
15146e790746SPaolo Bonzini 
virtio_blk_ioeventfd_detach(VirtIOBlock * s)15153cdaf3ddSStefan Hajnoczi static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
15163bcc17f0SStefan Hajnoczi {
15173bcc17f0SStefan Hajnoczi     VirtIODevice *vdev = VIRTIO_DEVICE(s);
15183bcc17f0SStefan Hajnoczi 
15193bcc17f0SStefan Hajnoczi     for (uint16_t i = 0; i < s->conf.num_queues; i++) {
15203bcc17f0SStefan Hajnoczi         VirtQueue *vq = virtio_get_queue(vdev, i);
15213bcc17f0SStefan Hajnoczi         virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
15223bcc17f0SStefan Hajnoczi     }
15233bcc17f0SStefan Hajnoczi }
15243bcc17f0SStefan Hajnoczi 
virtio_blk_ioeventfd_attach(VirtIOBlock * s)15253cdaf3ddSStefan Hajnoczi static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
15263bcc17f0SStefan Hajnoczi {
15273bcc17f0SStefan Hajnoczi     VirtIODevice *vdev = VIRTIO_DEVICE(s);
15283bcc17f0SStefan Hajnoczi 
15293bcc17f0SStefan Hajnoczi     for (uint16_t i = 0; i < s->conf.num_queues; i++) {
15303bcc17f0SStefan Hajnoczi         VirtQueue *vq = virtio_get_queue(vdev, i);
15313bcc17f0SStefan Hajnoczi         virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
15323bcc17f0SStefan Hajnoczi     }
15333bcc17f0SStefan Hajnoczi }
15343bcc17f0SStefan Hajnoczi 
15351665d932SStefan Hajnoczi /* Suspend virtqueue ioeventfd processing during drain */
virtio_blk_drained_begin(void * opaque)15361665d932SStefan Hajnoczi static void virtio_blk_drained_begin(void *opaque)
15371665d932SStefan Hajnoczi {
15381665d932SStefan Hajnoczi     VirtIOBlock *s = opaque;
15391665d932SStefan Hajnoczi 
15403cdaf3ddSStefan Hajnoczi     if (s->ioeventfd_started) {
15413cdaf3ddSStefan Hajnoczi         virtio_blk_ioeventfd_detach(s);
15421665d932SStefan Hajnoczi     }
15431665d932SStefan Hajnoczi }
15441665d932SStefan Hajnoczi 
15451665d932SStefan Hajnoczi /* Resume virtqueue ioeventfd processing after drain */
virtio_blk_drained_end(void * opaque)15461665d932SStefan Hajnoczi static void virtio_blk_drained_end(void *opaque)
15471665d932SStefan Hajnoczi {
15481665d932SStefan Hajnoczi     VirtIOBlock *s = opaque;
15491665d932SStefan Hajnoczi 
15503cdaf3ddSStefan Hajnoczi     if (s->ioeventfd_started) {
15513cdaf3ddSStefan Hajnoczi         virtio_blk_ioeventfd_attach(s);
15521665d932SStefan Hajnoczi     }
15531665d932SStefan Hajnoczi }
15541665d932SStefan Hajnoczi 
15556e790746SPaolo Bonzini static const BlockDevOps virtio_block_ops = {
15566e790746SPaolo Bonzini     .resize_cb     = virtio_blk_resize,
15571665d932SStefan Hajnoczi     .drained_begin = virtio_blk_drained_begin,
15581665d932SStefan Hajnoczi     .drained_end   = virtio_blk_drained_end,
15596e790746SPaolo Bonzini };
15606e790746SPaolo Bonzini 
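/*
 * Sanity-check the iothread-vq-mapping list: every referenced IOThread must
 * exist and appear only once, either all entries or none may carry an explicit
 * vq list, vq indices must be below num_queues and must not be assigned twice,
 * and when explicit lists are used every vq must be covered.
 */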
15611f995a47SStefan Hajnoczi static bool
validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList * list,uint16_t num_queues,Error ** errp)15621f995a47SStefan Hajnoczi validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
15631f995a47SStefan Hajnoczi         uint16_t num_queues, Error **errp)
15641f995a47SStefan Hajnoczi {
15651f995a47SStefan Hajnoczi     g_autofree unsigned long *vqs = bitmap_new(num_queues);
15661f995a47SStefan Hajnoczi     g_autoptr(GHashTable) iothreads =
15671f995a47SStefan Hajnoczi         g_hash_table_new(g_str_hash, g_str_equal);
15681f995a47SStefan Hajnoczi 
15691f995a47SStefan Hajnoczi     for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
15701f995a47SStefan Hajnoczi         const char *name = node->value->iothread;
15711f995a47SStefan Hajnoczi         uint16List *vq;
15721f995a47SStefan Hajnoczi 
15731f995a47SStefan Hajnoczi         if (!iothread_by_id(name)) {
15741f995a47SStefan Hajnoczi             error_setg(errp, "IOThread \"%s\" object does not exist", name);
15751f995a47SStefan Hajnoczi             return false;
15761f995a47SStefan Hajnoczi         }
15771f995a47SStefan Hajnoczi 
15781f995a47SStefan Hajnoczi         if (!g_hash_table_add(iothreads, (gpointer)name)) {
15791f995a47SStefan Hajnoczi             error_setg(errp,
15801f995a47SStefan Hajnoczi                     "duplicate IOThread name \"%s\" in iothread-vq-mapping",
15811f995a47SStefan Hajnoczi                     name);
15821f995a47SStefan Hajnoczi             return false;
15831f995a47SStefan Hajnoczi         }
15841f995a47SStefan Hajnoczi 
15851f995a47SStefan Hajnoczi         if (node != list) {
15861f995a47SStefan Hajnoczi             if (!!node->value->vqs != !!list->value->vqs) {
15871f995a47SStefan Hajnoczi                 error_setg(errp, "either all items in iothread-vq-mapping "
15881f995a47SStefan Hajnoczi                                  "must have vqs or none of them must have it");
15891f995a47SStefan Hajnoczi                                  "must have vqs or none of them may have them");
15901f995a47SStefan Hajnoczi             }
15911f995a47SStefan Hajnoczi         }
15921f995a47SStefan Hajnoczi 
15931f995a47SStefan Hajnoczi         for (vq = node->value->vqs; vq; vq = vq->next) {
15941f995a47SStefan Hajnoczi             if (vq->value >= num_queues) {
15951f995a47SStefan Hajnoczi                 error_setg(errp, "vq index %u for IOThread \"%s\" must be "
15961f995a47SStefan Hajnoczi                         "less than num_queues %u in iothread-vq-mapping",
15971f995a47SStefan Hajnoczi                         vq->value, name, num_queues);
15981f995a47SStefan Hajnoczi                 return false;
15991f995a47SStefan Hajnoczi             }
16001f995a47SStefan Hajnoczi 
16011f995a47SStefan Hajnoczi             if (test_and_set_bit(vq->value, vqs)) {
16021f995a47SStefan Hajnoczi                 error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
16031f995a47SStefan Hajnoczi                         "because it is already assigned", vq->value, name);
16041f995a47SStefan Hajnoczi                 return false;
16051f995a47SStefan Hajnoczi             }
16061f995a47SStefan Hajnoczi         }
16071f995a47SStefan Hajnoczi     }
16081f995a47SStefan Hajnoczi 
16091f995a47SStefan Hajnoczi     if (list->value->vqs) {
16101f995a47SStefan Hajnoczi         for (uint16_t i = 0; i < num_queues; i++) {
16111f995a47SStefan Hajnoczi             if (!test_bit(i, vqs)) {
16121f995a47SStefan Hajnoczi                 error_setg(errp,
16131f995a47SStefan Hajnoczi                         "missing vq %u IOThread assignment in iothread-vq-mapping",
16141f995a47SStefan Hajnoczi                         i);
16151f995a47SStefan Hajnoczi                 return false;
16161f995a47SStefan Hajnoczi             }
16171f995a47SStefan Hajnoczi         }
16181f995a47SStefan Hajnoczi     }
16191f995a47SStefan Hajnoczi 
16201f995a47SStefan Hajnoczi     return true;
16211f995a47SStefan Hajnoczi }
16221f995a47SStefan Hajnoczi 
16231f995a47SStefan Hajnoczi /**
16241f995a47SStefan Hajnoczi  * apply_iothread_vq_mapping:
16251f995a47SStefan Hajnoczi  * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
16261f995a47SStefan Hajnoczi  * @vq_aio_context: The array of AioContext pointers to fill in.
16271f995a47SStefan Hajnoczi  * @num_queues: The length of @vq_aio_context.
16281f995a47SStefan Hajnoczi  * @errp: If an error occurs, a pointer to the area to store the error.
16291f995a47SStefan Hajnoczi  *
16301f995a47SStefan Hajnoczi  * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
16311f995a47SStefan Hajnoczi  * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
16321f995a47SStefan Hajnoczi  *
16331f995a47SStefan Hajnoczi  * Returns: %true on success, %false on failure.
16341f995a47SStefan Hajnoczi  **/
apply_iothread_vq_mapping(IOThreadVirtQueueMappingList * iothread_vq_mapping_list,AioContext ** vq_aio_context,uint16_t num_queues,Error ** errp)16351f995a47SStefan Hajnoczi static bool apply_iothread_vq_mapping(
16361f995a47SStefan Hajnoczi         IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
16371f995a47SStefan Hajnoczi         AioContext **vq_aio_context,
16381f995a47SStefan Hajnoczi         uint16_t num_queues,
16391f995a47SStefan Hajnoczi         Error **errp)
16403bcc17f0SStefan Hajnoczi {
16413bcc17f0SStefan Hajnoczi     IOThreadVirtQueueMappingList *node;
16423bcc17f0SStefan Hajnoczi     size_t num_iothreads = 0;
16433bcc17f0SStefan Hajnoczi     size_t cur_iothread = 0;
16443bcc17f0SStefan Hajnoczi 
16451f995a47SStefan Hajnoczi     if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
16461f995a47SStefan Hajnoczi                                            num_queues, errp)) {
16471f995a47SStefan Hajnoczi         return false;
16481f995a47SStefan Hajnoczi     }
16491f995a47SStefan Hajnoczi 
16503bcc17f0SStefan Hajnoczi     for (node = iothread_vq_mapping_list; node; node = node->next) {
16513bcc17f0SStefan Hajnoczi         num_iothreads++;
16523bcc17f0SStefan Hajnoczi     }
16533bcc17f0SStefan Hajnoczi 
16543bcc17f0SStefan Hajnoczi     for (node = iothread_vq_mapping_list; node; node = node->next) {
16553bcc17f0SStefan Hajnoczi         IOThread *iothread = iothread_by_id(node->value->iothread);
16563bcc17f0SStefan Hajnoczi         AioContext *ctx = iothread_get_aio_context(iothread);
16573bcc17f0SStefan Hajnoczi 
165857bc2658SStefan Hajnoczi         /* Released in virtio_blk_vq_aio_context_cleanup() */
16593bcc17f0SStefan Hajnoczi         object_ref(OBJECT(iothread));
16603bcc17f0SStefan Hajnoczi 
16613bcc17f0SStefan Hajnoczi         if (node->value->vqs) {
16623bcc17f0SStefan Hajnoczi             uint16List *vq;
16633bcc17f0SStefan Hajnoczi 
16643bcc17f0SStefan Hajnoczi             /* Explicit vq:IOThread assignment */
16653bcc17f0SStefan Hajnoczi             for (vq = node->value->vqs; vq; vq = vq->next) {
16661f995a47SStefan Hajnoczi                 assert(vq->value < num_queues);
16673bcc17f0SStefan Hajnoczi                 vq_aio_context[vq->value] = ctx;
16683bcc17f0SStefan Hajnoczi             }
16693bcc17f0SStefan Hajnoczi         } else {
16703bcc17f0SStefan Hajnoczi             /* Round-robin vq:IOThread assignment */
16713bcc17f0SStefan Hajnoczi             for (unsigned i = cur_iothread; i < num_queues;
16723bcc17f0SStefan Hajnoczi                  i += num_iothreads) {
16733bcc17f0SStefan Hajnoczi                 vq_aio_context[i] = ctx;
16743bcc17f0SStefan Hajnoczi             }
16753bcc17f0SStefan Hajnoczi         }
16763bcc17f0SStefan Hajnoczi 
16773bcc17f0SStefan Hajnoczi         cur_iothread++;
16783bcc17f0SStefan Hajnoczi     }
16791f995a47SStefan Hajnoczi 
16801f995a47SStefan Hajnoczi     return true;
16813bcc17f0SStefan Hajnoczi }
16823bcc17f0SStefan Hajnoczi 
16833bcc17f0SStefan Hajnoczi /* Context: BQL held */
virtio_blk_vq_aio_context_init(VirtIOBlock * s,Error ** errp)168457bc2658SStefan Hajnoczi static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
16853bcc17f0SStefan Hajnoczi {
16860ea5f594SZhao Liu     ERRP_GUARD();
16873bcc17f0SStefan Hajnoczi     VirtIODevice *vdev = VIRTIO_DEVICE(s);
16883bcc17f0SStefan Hajnoczi     VirtIOBlkConf *conf = &s->conf;
16893bcc17f0SStefan Hajnoczi     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
16903bcc17f0SStefan Hajnoczi     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
16913bcc17f0SStefan Hajnoczi 
16921f995a47SStefan Hajnoczi     if (conf->iothread && conf->iothread_vq_mapping_list) {
16931f995a47SStefan Hajnoczi         error_setg(errp,
16941f995a47SStefan Hajnoczi                    "iothread and iothread-vq-mapping properties cannot be set "
16951f995a47SStefan Hajnoczi                    "at the same time");
16961f995a47SStefan Hajnoczi         return false;
16971f995a47SStefan Hajnoczi     }
16981f995a47SStefan Hajnoczi 
16993bcc17f0SStefan Hajnoczi     if (conf->iothread || conf->iothread_vq_mapping_list) {
17003bcc17f0SStefan Hajnoczi         if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
17013bcc17f0SStefan Hajnoczi             error_setg(errp,
17023bcc17f0SStefan Hajnoczi                        "device is incompatible with iothread "
17033bcc17f0SStefan Hajnoczi                        "(transport does not support notifiers)");
17043bcc17f0SStefan Hajnoczi             return false;
17053bcc17f0SStefan Hajnoczi         }
17063bcc17f0SStefan Hajnoczi         if (!virtio_device_ioeventfd_enabled(vdev)) {
17073bcc17f0SStefan Hajnoczi             error_setg(errp, "ioeventfd is required for iothread");
17083bcc17f0SStefan Hajnoczi             return false;
17093bcc17f0SStefan Hajnoczi         }
17103bcc17f0SStefan Hajnoczi 
17113bcc17f0SStefan Hajnoczi         /*
17123cdaf3ddSStefan Hajnoczi          * If ioeventfd is (re-)enabled while the guest is running there could
17133bcc17f0SStefan Hajnoczi          * be block jobs that can conflict.
17143bcc17f0SStefan Hajnoczi          */
17153bcc17f0SStefan Hajnoczi         if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
17163cdaf3ddSStefan Hajnoczi             error_prepend(errp, "cannot start virtio-blk ioeventfd: ");
17173bcc17f0SStefan Hajnoczi             return false;
17183bcc17f0SStefan Hajnoczi         }
17193bcc17f0SStefan Hajnoczi     }
17203bcc17f0SStefan Hajnoczi 
17213bcc17f0SStefan Hajnoczi     s->vq_aio_context = g_new(AioContext *, conf->num_queues);
17223bcc17f0SStefan Hajnoczi 
17233bcc17f0SStefan Hajnoczi     if (conf->iothread_vq_mapping_list) {
17241f995a47SStefan Hajnoczi         if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
17251f995a47SStefan Hajnoczi                                        s->vq_aio_context,
17261f995a47SStefan Hajnoczi                                        conf->num_queues,
17271f995a47SStefan Hajnoczi                                        errp)) {
17281f995a47SStefan Hajnoczi             g_free(s->vq_aio_context);
17291f995a47SStefan Hajnoczi             s->vq_aio_context = NULL;
17301f995a47SStefan Hajnoczi             return false;
17311f995a47SStefan Hajnoczi         }
17323bcc17f0SStefan Hajnoczi     } else if (conf->iothread) {
17333bcc17f0SStefan Hajnoczi         AioContext *ctx = iothread_get_aio_context(conf->iothread);
17343bcc17f0SStefan Hajnoczi         for (unsigned i = 0; i < conf->num_queues; i++) {
17353bcc17f0SStefan Hajnoczi             s->vq_aio_context[i] = ctx;
17363bcc17f0SStefan Hajnoczi         }
17373bcc17f0SStefan Hajnoczi 
173857bc2658SStefan Hajnoczi         /* Released in virtio_blk_vq_aio_context_cleanup() */
17393bcc17f0SStefan Hajnoczi         object_ref(OBJECT(conf->iothread));
17403bcc17f0SStefan Hajnoczi     } else {
17413bcc17f0SStefan Hajnoczi         AioContext *ctx = qemu_get_aio_context();
17423bcc17f0SStefan Hajnoczi         for (unsigned i = 0; i < conf->num_queues; i++) {
17433bcc17f0SStefan Hajnoczi             s->vq_aio_context[i] = ctx;
17443bcc17f0SStefan Hajnoczi         }
17453bcc17f0SStefan Hajnoczi     }
17463bcc17f0SStefan Hajnoczi 
17473bcc17f0SStefan Hajnoczi     return true;
17483bcc17f0SStefan Hajnoczi }
17493bcc17f0SStefan Hajnoczi 
17503bcc17f0SStefan Hajnoczi /* Context: BQL held */
virtio_blk_vq_aio_context_cleanup(VirtIOBlock * s)175157bc2658SStefan Hajnoczi static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s)
17523bcc17f0SStefan Hajnoczi {
17533bcc17f0SStefan Hajnoczi     VirtIOBlkConf *conf = &s->conf;
17543bcc17f0SStefan Hajnoczi 
17553cdaf3ddSStefan Hajnoczi     assert(!s->ioeventfd_started);
17563bcc17f0SStefan Hajnoczi 
17573bcc17f0SStefan Hajnoczi     if (conf->iothread_vq_mapping_list) {
17583bcc17f0SStefan Hajnoczi         IOThreadVirtQueueMappingList *node;
17593bcc17f0SStefan Hajnoczi 
17603bcc17f0SStefan Hajnoczi         for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
17613bcc17f0SStefan Hajnoczi             IOThread *iothread = iothread_by_id(node->value->iothread);
17623bcc17f0SStefan Hajnoczi             object_unref(OBJECT(iothread));
17633bcc17f0SStefan Hajnoczi         }
17643bcc17f0SStefan Hajnoczi     }
17653bcc17f0SStefan Hajnoczi 
17663bcc17f0SStefan Hajnoczi     if (conf->iothread) {
17673bcc17f0SStefan Hajnoczi         object_unref(OBJECT(conf->iothread));
17683bcc17f0SStefan Hajnoczi     }
17693bcc17f0SStefan Hajnoczi 
17703bcc17f0SStefan Hajnoczi     g_free(s->vq_aio_context);
17713bcc17f0SStefan Hajnoczi     s->vq_aio_context = NULL;
17723bcc17f0SStefan Hajnoczi }
17733bcc17f0SStefan Hajnoczi 
17743bcc17f0SStefan Hajnoczi /* Context: BQL held */
virtio_blk_start_ioeventfd(VirtIODevice * vdev)17753cdaf3ddSStefan Hajnoczi static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
17763bcc17f0SStefan Hajnoczi {
17773bcc17f0SStefan Hajnoczi     VirtIOBlock *s = VIRTIO_BLK(vdev);
17783bcc17f0SStefan Hajnoczi     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
17793bcc17f0SStefan Hajnoczi     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
17803bcc17f0SStefan Hajnoczi     unsigned i;
17813bcc17f0SStefan Hajnoczi     unsigned nvqs = s->conf.num_queues;
17823bcc17f0SStefan Hajnoczi     Error *local_err = NULL;
17833bcc17f0SStefan Hajnoczi     int r;
17843bcc17f0SStefan Hajnoczi 
17853cdaf3ddSStefan Hajnoczi     if (s->ioeventfd_started || s->ioeventfd_starting) {
17863bcc17f0SStefan Hajnoczi         return 0;
17873bcc17f0SStefan Hajnoczi     }
17883bcc17f0SStefan Hajnoczi 
17893cdaf3ddSStefan Hajnoczi     s->ioeventfd_starting = true;
17903bcc17f0SStefan Hajnoczi 
17913bcc17f0SStefan Hajnoczi     /* Set up guest notifier (irq) */
17923bcc17f0SStefan Hajnoczi     r = k->set_guest_notifiers(qbus->parent, nvqs, true);
17933bcc17f0SStefan Hajnoczi     if (r != 0) {
17943bcc17f0SStefan Hajnoczi         error_report("virtio-blk failed to set guest notifier (%d), "
17953bcc17f0SStefan Hajnoczi                      "ensure -accel kvm is set.", r);
17963bcc17f0SStefan Hajnoczi         goto fail_guest_notifiers;
17973bcc17f0SStefan Hajnoczi     }
17983bcc17f0SStefan Hajnoczi 
17993bcc17f0SStefan Hajnoczi     /*
18003bcc17f0SStefan Hajnoczi      * Batch all the host notifiers in a single transaction to avoid
18013bcc17f0SStefan Hajnoczi      * quadratic time complexity in address_space_update_ioeventfds().
18023bcc17f0SStefan Hajnoczi      */
18033bcc17f0SStefan Hajnoczi     memory_region_transaction_begin();
18043bcc17f0SStefan Hajnoczi 
18053bcc17f0SStefan Hajnoczi     /* Set up virtqueue notify */
18063bcc17f0SStefan Hajnoczi     for (i = 0; i < nvqs; i++) {
18073bcc17f0SStefan Hajnoczi         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
18083bcc17f0SStefan Hajnoczi         if (r != 0) {
18093bcc17f0SStefan Hajnoczi             int j = i;
18103bcc17f0SStefan Hajnoczi 
18113bcc17f0SStefan Hajnoczi             fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
18123bcc17f0SStefan Hajnoczi             while (i--) {
18133bcc17f0SStefan Hajnoczi                 virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
18143bcc17f0SStefan Hajnoczi             }
18153bcc17f0SStefan Hajnoczi 
18163bcc17f0SStefan Hajnoczi             /*
18173bcc17f0SStefan Hajnoczi              * The transaction expects the ioeventfds to be open when it
18183bcc17f0SStefan Hajnoczi              * commits. Do it now, before the cleanup loop.
18193bcc17f0SStefan Hajnoczi              */
18203bcc17f0SStefan Hajnoczi             memory_region_transaction_commit();
18213bcc17f0SStefan Hajnoczi 
18223bcc17f0SStefan Hajnoczi             while (j--) {
18233bcc17f0SStefan Hajnoczi                 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
18243bcc17f0SStefan Hajnoczi             }
18253bcc17f0SStefan Hajnoczi             goto fail_host_notifiers;
18263bcc17f0SStefan Hajnoczi         }
18273bcc17f0SStefan Hajnoczi     }
18283bcc17f0SStefan Hajnoczi 
18293bcc17f0SStefan Hajnoczi     memory_region_transaction_commit();
18303bcc17f0SStefan Hajnoczi 
1831ea0736d7SStefan Hajnoczi     /*
1832ea0736d7SStefan Hajnoczi      * Try to change the AioContext so that block jobs and other operations can
1833ea0736d7SStefan Hajnoczi      * co-locate their activity in the same AioContext. If it fails, never mind.
1834ea0736d7SStefan Hajnoczi      */
18355fbcbd50SStefan Hajnoczi     assert(nvqs > 0); /* enforced during ->realize() */
18363bcc17f0SStefan Hajnoczi     r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
18373bcc17f0SStefan Hajnoczi                             &local_err);
18383bcc17f0SStefan Hajnoczi     if (r < 0) {
1839ea0736d7SStefan Hajnoczi         warn_report_err(local_err);
18403bcc17f0SStefan Hajnoczi     }
18413bcc17f0SStefan Hajnoczi 
18423bcc17f0SStefan Hajnoczi     /*
18433bcc17f0SStefan Hajnoczi      * These fields must be visible to the IOThread when it processes the
18443cdaf3ddSStefan Hajnoczi      * virtqueue, otherwise it will think ioeventfd has not started yet.
18453bcc17f0SStefan Hajnoczi      *
18463cdaf3ddSStefan Hajnoczi      * Make sure ->ioeventfd_started is false when blk_set_aio_context() is
18473bcc17f0SStefan Hajnoczi      * called above so that draining does not cause the host notifier to be
18483bcc17f0SStefan Hajnoczi      * detached/attached prematurely.
18493bcc17f0SStefan Hajnoczi      */
18503cdaf3ddSStefan Hajnoczi     s->ioeventfd_starting = false;
18513cdaf3ddSStefan Hajnoczi     s->ioeventfd_started = true;
18523bcc17f0SStefan Hajnoczi     smp_wmb(); /* paired with aio_notify_accept() on the read side */
18533bcc17f0SStefan Hajnoczi 
185452bff01fSHanna Czenczek     /*
185552bff01fSHanna Czenczek      * Get this show started by hooking up our callbacks.  If drained now,
185652bff01fSHanna Czenczek      * virtio_blk_drained_end() will do this later.
185752bff01fSHanna Czenczek      * Attaching the notifier also kicks the virtqueues, processing any requests
185852bff01fSHanna Czenczek      * they may already have.
185952bff01fSHanna Czenczek      */
1860d3f6f294SStefan Hajnoczi     if (!blk_in_drain(s->conf.conf.blk)) {
186152bff01fSHanna Czenczek         virtio_blk_ioeventfd_attach(s);
18623bcc17f0SStefan Hajnoczi     }
18633bcc17f0SStefan Hajnoczi     return 0;
18643bcc17f0SStefan Hajnoczi 
18653bcc17f0SStefan Hajnoczi   fail_host_notifiers:
18663bcc17f0SStefan Hajnoczi     k->set_guest_notifiers(qbus->parent, nvqs, false);
18673bcc17f0SStefan Hajnoczi   fail_guest_notifiers:
18683cdaf3ddSStefan Hajnoczi     s->ioeventfd_disabled = true;
18693cdaf3ddSStefan Hajnoczi     s->ioeventfd_starting = false;
18703bcc17f0SStefan Hajnoczi     return -ENOSYS;
18713bcc17f0SStefan Hajnoczi }
18723bcc17f0SStefan Hajnoczi 
18733bcc17f0SStefan Hajnoczi /* Stop notifications for new requests from the guest.
18743bcc17f0SStefan Hajnoczi  *
18753bcc17f0SStefan Hajnoczi  * Context: BH in IOThread
18763bcc17f0SStefan Hajnoczi  */
virtio_blk_ioeventfd_stop_vq_bh(void * opaque)18773cdaf3ddSStefan Hajnoczi static void virtio_blk_ioeventfd_stop_vq_bh(void *opaque)
18783bcc17f0SStefan Hajnoczi {
18793bcc17f0SStefan Hajnoczi     VirtQueue *vq = opaque;
18803bcc17f0SStefan Hajnoczi     EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);
18813bcc17f0SStefan Hajnoczi 
18823bcc17f0SStefan Hajnoczi     virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());
18833bcc17f0SStefan Hajnoczi 
18843bcc17f0SStefan Hajnoczi     /*
18853bcc17f0SStefan Hajnoczi      * Test and clear notifier after disabling event, in case poll callback
18863bcc17f0SStefan Hajnoczi      * didn't have time to run.
18873bcc17f0SStefan Hajnoczi      */
18883bcc17f0SStefan Hajnoczi     virtio_queue_host_notifier_read(host_notifier);
18893bcc17f0SStefan Hajnoczi }
18903bcc17f0SStefan Hajnoczi 
18913bcc17f0SStefan Hajnoczi /* Context: BQL held */
virtio_blk_stop_ioeventfd(VirtIODevice * vdev)18923cdaf3ddSStefan Hajnoczi static void virtio_blk_stop_ioeventfd(VirtIODevice *vdev)
18933bcc17f0SStefan Hajnoczi {
18943bcc17f0SStefan Hajnoczi     VirtIOBlock *s = VIRTIO_BLK(vdev);
18953bcc17f0SStefan Hajnoczi     BusState *qbus = qdev_get_parent_bus(DEVICE(s));
18963bcc17f0SStefan Hajnoczi     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
18973bcc17f0SStefan Hajnoczi     unsigned i;
18983bcc17f0SStefan Hajnoczi     unsigned nvqs = s->conf.num_queues;
18993bcc17f0SStefan Hajnoczi 
19003cdaf3ddSStefan Hajnoczi     if (!s->ioeventfd_started || s->ioeventfd_stopping) {
19013bcc17f0SStefan Hajnoczi         return;
19023bcc17f0SStefan Hajnoczi     }
19033bcc17f0SStefan Hajnoczi 
19043bcc17f0SStefan Hajnoczi     /* Better luck next time. */
19053cdaf3ddSStefan Hajnoczi     if (s->ioeventfd_disabled) {
19063cdaf3ddSStefan Hajnoczi         s->ioeventfd_disabled = false;
19073cdaf3ddSStefan Hajnoczi         s->ioeventfd_started = false;
19083bcc17f0SStefan Hajnoczi         return;
19093bcc17f0SStefan Hajnoczi     }
19103cdaf3ddSStefan Hajnoczi     s->ioeventfd_stopping = true;
19113bcc17f0SStefan Hajnoczi 
19123bcc17f0SStefan Hajnoczi     if (!blk_in_drain(s->conf.conf.blk)) {
19133bcc17f0SStefan Hajnoczi         for (i = 0; i < nvqs; i++) {
19143bcc17f0SStefan Hajnoczi             VirtQueue *vq = virtio_get_queue(vdev, i);
19153bcc17f0SStefan Hajnoczi             AioContext *ctx = s->vq_aio_context[i];
19163bcc17f0SStefan Hajnoczi 
19173cdaf3ddSStefan Hajnoczi             aio_wait_bh_oneshot(ctx, virtio_blk_ioeventfd_stop_vq_bh, vq);
19183bcc17f0SStefan Hajnoczi         }
19193bcc17f0SStefan Hajnoczi     }
19203bcc17f0SStefan Hajnoczi 
19213bcc17f0SStefan Hajnoczi     /*
19223bcc17f0SStefan Hajnoczi      * Batch all the host notifiers in a single transaction to avoid
19233bcc17f0SStefan Hajnoczi      * quadratic time complexity in address_space_update_ioeventfds().
19243bcc17f0SStefan Hajnoczi      */
19253bcc17f0SStefan Hajnoczi     memory_region_transaction_begin();
19263bcc17f0SStefan Hajnoczi 
19273bcc17f0SStefan Hajnoczi     for (i = 0; i < nvqs; i++) {
19283bcc17f0SStefan Hajnoczi         virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
19293bcc17f0SStefan Hajnoczi     }
19303bcc17f0SStefan Hajnoczi 
19313bcc17f0SStefan Hajnoczi     /*
19323bcc17f0SStefan Hajnoczi      * The transaction expects the ioeventfds to be open when it
19333bcc17f0SStefan Hajnoczi      * commits. Do it now, before the cleanup loop.
19343bcc17f0SStefan Hajnoczi      */
19353bcc17f0SStefan Hajnoczi     memory_region_transaction_commit();
19363bcc17f0SStefan Hajnoczi 
19373bcc17f0SStefan Hajnoczi     for (i = 0; i < nvqs; i++) {
19383bcc17f0SStefan Hajnoczi         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
19393bcc17f0SStefan Hajnoczi     }
19403bcc17f0SStefan Hajnoczi 
19413bcc17f0SStefan Hajnoczi     /*
19423cdaf3ddSStefan Hajnoczi      * Set ->ioeventfd_started to false before draining so that host notifiers
19433bcc17f0SStefan Hajnoczi      * are not detached/attached anymore.
19443bcc17f0SStefan Hajnoczi      */
19453cdaf3ddSStefan Hajnoczi     s->ioeventfd_started = false;
19463bcc17f0SStefan Hajnoczi 
19473bcc17f0SStefan Hajnoczi     /* Wait for virtio_blk_dma_restart_bh() and in-flight I/O to complete. */
19483bcc17f0SStefan Hajnoczi     blk_drain(s->conf.conf.blk);
19493bcc17f0SStefan Hajnoczi 
19503bcc17f0SStefan Hajnoczi     /*
19513bcc17f0SStefan Hajnoczi      * Try to switch the BlockBackend back to the QEMU main loop. If other
19523bcc17f0SStefan Hajnoczi      * users keep it in the IOThread, that's OK.
19533bcc17f0SStefan Hajnoczi      */
19543bcc17f0SStefan Hajnoczi     blk_set_aio_context(s->conf.conf.blk, qemu_get_aio_context(), NULL);
19553bcc17f0SStefan Hajnoczi 
19563bcc17f0SStefan Hajnoczi     /* Clean up guest notifier (irq) */
19573bcc17f0SStefan Hajnoczi     k->set_guest_notifiers(qbus->parent, nvqs, false);
19583bcc17f0SStefan Hajnoczi 
19593cdaf3ddSStefan Hajnoczi     s->ioeventfd_stopping = false;
19603bcc17f0SStefan Hajnoczi }
19613bcc17f0SStefan Hajnoczi 
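/*
 * Realize the virtio-blk device: validate the drive and queue configuration,
 * adjust the zoned/discard/write-zeroes feature bits, initialize the virtio
 * transport state and virtqueues, and set up the per-queue AioContext
 * (ioeventfd/IOThread) infrastructure.
 */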
196275884afdSAndreas Färber static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
19636e790746SPaolo Bonzini {
196475884afdSAndreas Färber     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1965179b417eSAndreas Färber     VirtIOBlock *s = VIRTIO_BLK(dev);
19662a30307fSMarkus Armbruster     VirtIOBlkConf *conf = &s->conf;
1967b3d9bb9aSStefan Hajnoczi     BlockDriverState *bs;
19683ffeeef7SAndreas Färber     Error *err = NULL;
19692f270590SStefan Hajnoczi     unsigned i;
19706e790746SPaolo Bonzini 
19714be74634SMarkus Armbruster     if (!conf->conf.blk) {
197275884afdSAndreas Färber         error_setg(errp, "drive property not set");
197375884afdSAndreas Färber         return;
19746e790746SPaolo Bonzini     }
19754be74634SMarkus Armbruster     if (!blk_is_inserted(conf->conf.blk)) {
197675884afdSAndreas Färber         error_setg(errp, "Device needs media, but drive is empty");
197775884afdSAndreas Färber         return;
19786e790746SPaolo Bonzini     }
19799445e1e1SStefan Hajnoczi     if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
19809445e1e1SStefan Hajnoczi         conf->num_queues = 1;
19819445e1e1SStefan Hajnoczi     }
19822f270590SStefan Hajnoczi     if (!conf->num_queues) {
19832f270590SStefan Hajnoczi         error_setg(errp, "num-queues property must be larger than 0");
19842f270590SStefan Hajnoczi         return;
19852f270590SStefan Hajnoczi     }
19861bf8a989SDenis Plotnikov     if (conf->queue_size <= 2) {
19871bf8a989SDenis Plotnikov         error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
19881bf8a989SDenis Plotnikov                    "must be > 2", conf->queue_size);
19891bf8a989SDenis Plotnikov         return;
19901bf8a989SDenis Plotnikov     }
19916040aeddSMark Kanda     if (!is_power_of_2(conf->queue_size) ||
19926040aeddSMark Kanda         conf->queue_size > VIRTQUEUE_MAX_SIZE) {
19936040aeddSMark Kanda         error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
19946040aeddSMark Kanda                    "must be a power of 2 (max %d)",
19956040aeddSMark Kanda                    conf->queue_size, VIRTQUEUE_MAX_SIZE);
19966040aeddSMark Kanda         return;
19976040aeddSMark Kanda     }
19986e790746SPaolo Bonzini 
1999ceff3e1fSMao Zhongyi     if (!blkconf_apply_backend_options(&conf->conf,
200086b1cf32SKevin Wolf                                        !blk_supports_write_perm(conf->conf.blk),
200186b1cf32SKevin Wolf                                        true, errp)) {
2002a17c17a2SKevin Wolf         return;
2003a17c17a2SKevin Wolf     }
20044be74634SMarkus Armbruster     s->original_wce = blk_enable_write_cache(conf->conf.blk);
2005ceff3e1fSMao Zhongyi     if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
200675884afdSAndreas Färber         return;
20076e790746SPaolo Bonzini     }
2008ceff3e1fSMao Zhongyi 
2009c56ee92fSRoman Kagan     if (!blkconf_blocksizes(&conf->conf, errp)) {
20100a75b60cSMark Kanda         return;
20110a75b60cSMark Kanda     }
20120a75b60cSMark Kanda 
2013b3d9bb9aSStefan Hajnoczi     bs = blk_bs(conf->conf.blk);
20144f736650SSam Li     if (bs->bl.zoned != BLK_Z_NONE) {
20154f736650SSam Li         virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
20164f736650SSam Li         if (bs->bl.zoned == BLK_Z_HM) {
20174f736650SSam Li             virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
20184f736650SSam Li         }
20194f736650SSam Li     }
20204f736650SSam Li 
202137b06f8dSStefano Garzarella     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
202237b06f8dSStefano Garzarella         (!conf->max_discard_sectors ||
202337b06f8dSStefano Garzarella          conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
202437b06f8dSStefano Garzarella         error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
202537b06f8dSStefano Garzarella                    ", must be between 1 and %d",
202637b06f8dSStefano Garzarella                    conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
202737b06f8dSStefano Garzarella         return;
202837b06f8dSStefano Garzarella     }
202937b06f8dSStefano Garzarella 
203037b06f8dSStefano Garzarella     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
203137b06f8dSStefano Garzarella         (!conf->max_write_zeroes_sectors ||
203237b06f8dSStefano Garzarella          conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
203337b06f8dSStefano Garzarella         error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
203437b06f8dSStefano Garzarella                    "), must be between 1 and %d",
203537b06f8dSStefano Garzarella                    conf->max_write_zeroes_sectors,
203637b06f8dSStefano Garzarella                    (int)BDRV_REQUEST_MAX_SECTORS);
203737b06f8dSStefano Garzarella         return;
203837b06f8dSStefano Garzarella     }
203937b06f8dSStefano Garzarella 
2040d9cf55a8SDaniil Tatianin     s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
2041d74c30c8SDaniil Tatianin                                             s->host_features);
20423857cd5cSJonah Palmer     virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
20436e790746SPaolo Bonzini 
20449c67f33fSStefan Hajnoczi     qemu_mutex_init(&s->rq_lock);
20459c67f33fSStefan Hajnoczi 
20464be74634SMarkus Armbruster     s->blk = conf->conf.blk;
20476e790746SPaolo Bonzini     s->rq = NULL;
20482a30307fSMarkus Armbruster     s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
20496e790746SPaolo Bonzini 
20502f270590SStefan Hajnoczi     for (i = 0; i < conf->num_queues; i++) {
20516040aeddSMark Kanda         virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
20522f270590SStefan Hajnoczi     }
205398e3ab35SKevin Wolf     qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);
205457bc2658SStefan Hajnoczi 
20553cdaf3ddSStefan Hajnoczi     /* Don't start ioeventfd if the transport does not support notifiers. */
205657bc2658SStefan Hajnoczi     if (!virtio_device_ioeventfd_enabled(vdev)) {
20573cdaf3ddSStefan Hajnoczi         s->ioeventfd_disabled = true;
205857bc2658SStefan Hajnoczi     }
205957bc2658SStefan Hajnoczi 
206057bc2658SStefan Hajnoczi     virtio_blk_vq_aio_context_init(s, &err);
20613ffeeef7SAndreas Färber     if (err != NULL) {
206275884afdSAndreas Färber         error_propagate(errp, err);
2063cfaf757eSPan Nengyuan         for (i = 0; i < conf->num_queues; i++) {
2064cfaf757eSPan Nengyuan             virtio_del_queue(vdev, i);
2065cfaf757eSPan Nengyuan         }
20666a1a8cc7SKONRAD Frederic         virtio_cleanup(vdev);
206775884afdSAndreas Färber         return;
20686e790746SPaolo Bonzini     }
20696e790746SPaolo Bonzini 
2070a937f8e8SStefan Hajnoczi     /*
2071a937f8e8SStefan Hajnoczi      * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
2072a937f8e8SStefan Hajnoczi      * called after ->start_ioeventfd() has already set blk's AioContext.
2073a937f8e8SStefan Hajnoczi      */
2074a937f8e8SStefan Hajnoczi     s->change =
2075a937f8e8SStefan Hajnoczi         qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);
2076a937f8e8SStefan Hajnoczi 
2077baf42268SStefan Hajnoczi     blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
20784be74634SMarkus Armbruster     blk_set_dev_ops(s->blk, &virtio_block_ops, s);
20796e790746SPaolo Bonzini 
20804be74634SMarkus Armbruster     blk_iostatus_enable(s->blk);
208171f571a2SSam Eiderman 
208271f571a2SSam Eiderman     add_boot_device_lchs(dev, "/disk@0,0",
208371f571a2SSam Eiderman                          conf->conf.lcyls,
208471f571a2SSam Eiderman                          conf->conf.lheads,
208571f571a2SSam Eiderman                          conf->conf.lsecs);
20866e790746SPaolo Bonzini }
20876e790746SPaolo Bonzini 
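/*
 * Unrealize: drain outstanding I/O, then undo realize roughly in reverse
 * order (boot device entry, per-queue AioContext state, virtqueues, coroutine
 * pool sizing, locks and registrars) before releasing the virtio transport
 * state.
 */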
2088b69c3c21SMarkus Armbruster static void virtio_blk_device_unrealize(DeviceState *dev)
20896e790746SPaolo Bonzini {
2090306ec6c3SAndreas Färber     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2091306ec6c3SAndreas Färber     VirtIOBlock *s = VIRTIO_BLK(dev);
20924a0117cfSEugenio Pérez     VirtIOBlkConf *conf = &s->conf;
20934a0117cfSEugenio Pérez     unsigned i;
2094306ec6c3SAndreas Färber 
20957bfde688SJulia Suvorova     blk_drain(s->blk);
209671f571a2SSam Eiderman     del_boot_device_lchs(dev, "/disk@0,0");
209757bc2658SStefan Hajnoczi     virtio_blk_vq_aio_context_cleanup(s);
20984a0117cfSEugenio Pérez     for (i = 0; i < conf->num_queues; i++) {
20994a0117cfSEugenio Pérez         virtio_del_queue(vdev, i);
21004a0117cfSEugenio Pérez     }
210198e3ab35SKevin Wolf     qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
21029c67f33fSStefan Hajnoczi     qemu_mutex_destroy(&s->rq_lock);
2103baf42268SStefan Hajnoczi     blk_ram_registrar_destroy(&s->blk_ram_registrar);
21046e790746SPaolo Bonzini     qemu_del_vm_change_state_handler(s->change);
21054be74634SMarkus Armbruster     blockdev_mark_auto_del(s->blk);
21066a1a8cc7SKONRAD Frederic     virtio_cleanup(vdev);
21076e790746SPaolo Bonzini }
21086e790746SPaolo Bonzini 
2109467b3f33SStefan Hajnoczi static void virtio_blk_instance_init(Object *obj)
2110467b3f33SStefan Hajnoczi {
2111467b3f33SStefan Hajnoczi     VirtIOBlock *s = VIRTIO_BLK(obj);
2112467b3f33SStefan Hajnoczi 
21132a30307fSMarkus Armbruster     device_add_bootindex_property(obj, &s->conf.conf.bootindex,
21143342ec32SGonglei                                   "bootindex", "/disk@0,0",
211540c2281cSMarkus Armbruster                                   DEVICE(obj));
2116467b3f33SStefan Hajnoczi }
2117467b3f33SStefan Hajnoczi 
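/*
 * Only the common virtio state is described here; device-specific state is
 * saved and loaded through the virtio_blk_save_device()/virtio_blk_load_device()
 * hooks registered in virtio_blk_class_init() below.
 */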
2118977a117fSHalil Pasic static const VMStateDescription vmstate_virtio_blk = {
2119977a117fSHalil Pasic     .name = "virtio-blk",
2120977a117fSHalil Pasic     .minimum_version_id = 2,
2121977a117fSHalil Pasic     .version_id = 2,
21227d5dc0a3SRichard Henderson     .fields = (const VMStateField[]) {
2123977a117fSHalil Pasic         VMSTATE_VIRTIO_DEVICE,
2124977a117fSHalil Pasic         VMSTATE_END_OF_LIST()
2125977a117fSHalil Pasic     },
2126977a117fSHalil Pasic };
2127bbded32cSDr. David Alan Gilbert 
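/*
 * User-visible device properties. As an illustrative sketch only (object ids,
 * node names and the file name below are hypothetical), a disk served from an
 * IOThread might be configured via the PCI transport roughly like this:
 *
 *   -object iothread,id=iothread0 \
 *   -blockdev node-name=disk0,driver=qcow2,file.driver=file,file.filename=disk.qcow2 \
 *   -device virtio-blk-pci,drive=disk0,num-queues=4,queue-size=256,iothread=iothread0
 */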
21286e790746SPaolo Bonzini static Property virtio_blk_properties[] = {
21292a30307fSMarkus Armbruster     DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
21308c398252SKevin Wolf     DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
21312a30307fSMarkus Armbruster     DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
21322a30307fSMarkus Armbruster     DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
2133bbe8bd4dSStefano Garzarella     DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
2134bbe8bd4dSStefano Garzarella                       VIRTIO_BLK_F_CONFIG_WCE, true),
213532a877e4SStefan Hajnoczi #ifdef __linux__
2136bbe8bd4dSStefano Garzarella     DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features,
2137bbe8bd4dSStefano Garzarella                       VIRTIO_BLK_F_SCSI, false),
213832a877e4SStefan Hajnoczi #endif
2139c99495acSPeter Lieven     DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
2140c99495acSPeter Lieven                     true),
21419445e1e1SStefan Hajnoczi     DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues,
21429445e1e1SStefan Hajnoczi                        VIRTIO_BLK_AUTO_NUM_QUEUES),
2143c9b7d9ecSDenis Plotnikov     DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
21441bf8a989SDenis Plotnikov     DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
2145d679ac09SFam Zheng     DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
2146d679ac09SFam Zheng                      IOThread *),
2147b6948ab0SStefan Hajnoczi     DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
2148b6948ab0SStefan Hajnoczi                                          conf.iothread_vq_mapping_list),
21495c81161fSStefano Garzarella     DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
21505c81161fSStefano Garzarella                       VIRTIO_BLK_F_DISCARD, true),
2151fb0b154cSAkihiko Odaki     DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
2152fb0b154cSAkihiko Odaki                      conf.report_discard_granularity, true),
21535c81161fSStefano Garzarella     DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
21545c81161fSStefano Garzarella                       VIRTIO_BLK_F_WRITE_ZEROES, true),
215537b06f8dSStefano Garzarella     DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
215637b06f8dSStefano Garzarella                        conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
215737b06f8dSStefano Garzarella     DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
215837b06f8dSStefano Garzarella                        conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
21595f258577SEvgeny Yakovlev     DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock,
21605f258577SEvgeny Yakovlev                      conf.x_enable_wce_if_config_wce, true),
21616e790746SPaolo Bonzini     DEFINE_PROP_END_OF_LIST(),
21626e790746SPaolo Bonzini };
21636e790746SPaolo Bonzini 
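/*
 * Wire up the DeviceClass/VirtioDeviceClass callbacks: realize/unrealize,
 * config space accessors, feature negotiation, reset, migration save/load,
 * and the ioeventfd start/stop entry points used by the virtio transport.
 */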
21646e790746SPaolo Bonzini static void virtio_blk_class_init(ObjectClass *klass, void *data)
21656e790746SPaolo Bonzini {
21666e790746SPaolo Bonzini     DeviceClass *dc = DEVICE_CLASS(klass);
21676e790746SPaolo Bonzini     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
216875884afdSAndreas Färber 
21694f67d30bSMarc-André Lureau     device_class_set_props(dc, virtio_blk_properties);
2170bbded32cSDr. David Alan Gilbert     dc->vmsd = &vmstate_virtio_blk;
2171125ee0edSMarcel Apfelbaum     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
217275884afdSAndreas Färber     vdc->realize = virtio_blk_device_realize;
2173306ec6c3SAndreas Färber     vdc->unrealize = virtio_blk_device_unrealize;
21746e790746SPaolo Bonzini     vdc->get_config = virtio_blk_update_config;
21756e790746SPaolo Bonzini     vdc->set_config = virtio_blk_set_config;
21766e790746SPaolo Bonzini     vdc->get_features = virtio_blk_get_features;
21776e790746SPaolo Bonzini     vdc->set_status = virtio_blk_set_status;
21786e790746SPaolo Bonzini     vdc->reset = virtio_blk_reset;
2179b2b295a7SGreg Kurz     vdc->save = virtio_blk_save_device;
2180b2b295a7SGreg Kurz     vdc->load = virtio_blk_load_device;
21813cdaf3ddSStefan Hajnoczi     vdc->start_ioeventfd = virtio_blk_start_ioeventfd;
21823cdaf3ddSStefan Hajnoczi     vdc->stop_ioeventfd = virtio_blk_stop_ioeventfd;
21836e790746SPaolo Bonzini }
21846e790746SPaolo Bonzini 
2185b5c7ceafSChanglong Xie static const TypeInfo virtio_blk_info = {
21866e790746SPaolo Bonzini     .name = TYPE_VIRTIO_BLK,
21876e790746SPaolo Bonzini     .parent = TYPE_VIRTIO_DEVICE,
21886e790746SPaolo Bonzini     .instance_size = sizeof(VirtIOBlock),
2189467b3f33SStefan Hajnoczi     .instance_init = virtio_blk_instance_init,
21906e790746SPaolo Bonzini     .class_init = virtio_blk_class_init,
21916e790746SPaolo Bonzini };
21926e790746SPaolo Bonzini 
21936e790746SPaolo Bonzini static void virtio_register_types(void)
21946e790746SPaolo Bonzini {
2195b5c7ceafSChanglong Xie     type_register_static(&virtio_blk_info);
21966e790746SPaolo Bonzini }
21976e790746SPaolo Bonzini 
21986e790746SPaolo Bonzini type_init(virtio_register_types)
2199