/* block/nvme.c (QEMU, revision 15a730e7) */
/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)
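
/*
 * Illustrative sketch (not part of the driver): the rings distinguish
 * "empty" from "full" only by keeping one slot unused, which is why
 * NVME_NUM_REQS is one less than NVME_QUEUE_SIZE. The helper names below
 * are hypothetical and exist purely to spell out that convention.
 */
static inline bool nvme_ring_empty_example(int head, int tail)
{
    return head == tail;
}

static inline bool nvme_ring_full_example(int head, int tail)
{
    /* Advancing tail onto head would make full indistinguishable from empty */
    return (tail + 1) % NVME_QUEUE_SIZE == head;
}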

typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    void *bar0_wo_map;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    unsigned queue_count;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;
    bool plugged;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

/* Returns true on success, false on failure. */
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            unsigned nentries, size_t entry_bytes, Error **errp)
{
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return false;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
        return false;
    }
    return true;
}
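
/*
 * Worked example (sketch): with NVME_QUEUE_SIZE == 128 and 4 KiB host pages,
 * a submission queue needs 128 * 64 == 8192 bytes (already page aligned),
 * while a completion queue needs 128 * 16 == 2048 bytes, rounded up to one
 * 4096-byte page before being DMA-mapped for the controller.
 */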

static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             unsigned idx, size_t size,
                                             Error **errp)
{
    int i, r;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;
    size_t bytes;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
                          qemu_real_host_page_size);
    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
    if (!q->prp_list_pages) {
        goto fail;
    }
    memset(q->prp_list_pages, 0, bytes);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}

/* With q->lock */
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

/* Find a free request element if any, otherwise:
 * a) if in coroutine context, try to wait for one to become available;
 * b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    NVMeRequest *req;

    qemu_mutex_lock(&q->lock);

    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q->s, q->index);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;

    qemu_mutex_unlock(&q->lock);
    return req;
}

/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}
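
/*
 * Usage sketch (illustrative only, not part of the driver): a submitter
 * grabs a free request slot, fills in a command, and the completion path
 * returns the slot to the freelist; nvme_submit_command() below does this.
 *
 *     NVMeRequest *req = nvme_get_free_req(q);   // may block in coroutine
 *     if (req) {
 *         nvme_submit_command(q, req, &cmd, my_cb, my_opaque);
 *     }
 *
 * "my_cb" and "my_opaque" are hypothetical placeholders.
 */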

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
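
/*
 * Worked example (sketch): bit 0 of the CQE status halfword is the phase
 * tag and bits 8:1 hold the Status Code, so a raw status of 0x0005 decodes
 * to phase == 1 and SC == 0x02 ("Invalid Field in Command"), which the
 * switch above turns into -EINVAL.
 */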

/* With q->lock */
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (s->plugged) {
        trace_nvme_process_completion_queue_plugged(s, q->index);
        return false;
    }

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", "
                        "queue size: %u", cid, NVME_QUEUE_SIZE);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}
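
/*
 * Phase-tag example (sketch): q->cq_phase starts at 0 and the zeroed CQ
 * ring starts with all phase bits at 0, so the controller's first pass
 * posts entries with the phase bit set to 1. An entry whose phase bit
 * still equals q->cq_phase is therefore stale and ends the scan above;
 * each wrap of cq.head flips the expected phase to match the controller's
 * next pass over the ring.
 */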

static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because an nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions,
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(q);
    nvme_process_completion(q);
    qemu_mutex_unlock(&q->lock);
}

static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q = s->queues[INDEX_ADMIN];
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}

/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    bool ret = false;
    union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };
    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size);

    id = qemu_try_memalign(qemu_real_host_page_size, id_size);
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }

    memset(id, 0, id_size);
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);
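    /*
     * Worked example (sketch): with 4 KiB pages, one PRP list page holds
     * 4096 / 8 == 512 entries, so a single command can address at most
     * 512 * 4 KiB == 2 MiB, whatever MDTS advertises.
     */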

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, id_size);
    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
                    NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    ret = true;
    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);
    qemu_vfree(id);

    return ret;
}

static bool nvme_poll_queue(NVMeQueuePair *q)
{
    bool progress = false;

    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return false;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep polling */
        progress = true;
    }
    qemu_mutex_unlock(&q->lock);

    return progress;
}

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->queue_count; i++) {
        if (nvme_poll_queue(s->queues[i])) {
            progress = true;
        }
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    unsigned n = s->queue_count;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    unsigned queue_size = NVME_QUEUE_SIZE;

    assert(n <= UINT16_MAX);
    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%u]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%u]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->queue_count++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}
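
/*
 * Worked example (sketch): creating I/O queue pair n == 1 with
 * NVME_QUEUE_SIZE == 128 encodes cdw10 as ((128 - 1) << 16) | 1, i.e.
 * 0x007f0001: bits 31:16 carry the zero-based queue size (127) and
 * bits 15:0 the queue identifier.
 */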

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    return nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint32_t ver;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    volatile NvmeBar *regs = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                 PROT_READ | PROT_WRITE, errp);
    if (!regs) {
        ret = -EINVAL;
        goto out;
    }
    /* Perform the initialization sequence described in NVMe spec section
     * "7.6.1 Initialization". */

    cap = le64_to_cpu(regs->cap);
    trace_nvme_controller_capability_raw(cap);
    trace_nvme_controller_capability("Maximum Queue Entries Supported",
                                     1 + NVME_CAP_MQES(cap));
    trace_nvme_controller_capability("Contiguous Queues Required",
                                     NVME_CAP_CQR(cap));
    trace_nvme_controller_capability("Doorbell Stride",
                                     1 << (2 + NVME_CAP_DSTRD(cap)));
    trace_nvme_controller_capability("Subsystem Reset Supported",
                                     NVME_CAP_NSSRS(cap));
    trace_nvme_controller_capability("Memory Page Size Minimum",
                                     1 << (12 + NVME_CAP_MPSMIN(cap)));
    trace_nvme_controller_capability("Memory Page Size Maximum",
                                     1 << (12 + NVME_CAP_MPSMAX(cap)));
    if (!NVME_CAP_CSS(cap)) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
    s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
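    /*
     * Worked example (sketch): with CAP.DSTRD == 0 the doorbell stride is
     * 4 << 0 == 4 bytes, so doorbell_scale == 1 and queue pair n uses
     * s->doorbells[n].sq_tail and s->doorbells[n].cq_head; DSTRD == 1
     * doubles the stride and doorbell_scale becomes 2.
     */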
    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);

    ver = le32_to_cpu(regs->vs);
    trace_nvme_controller_spec_version(extract32(ver, 16, 16),
                                       extract32(ver, 8, 8),
                                       extract32(ver, 0, 8));

    /* Reset device to get a clean state. */
    regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
    while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0,
                                           sizeof(NvmeBar) + NVME_DOORBELL_SIZE,
                                           PROT_WRITE, errp);
    s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar));
    if (!s->doorbells) {
        ret = -EINVAL;
        goto out;
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);
    if (!q) {
        ret = -EINVAL;
        goto out;
    }
    s->queues[INDEX_ADMIN] = q;
    s->queue_count = 1;
    QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
    regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
                            ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
    regs->asq = cpu_to_le64(q->sq.iova);
    regs->acq = cpu_to_le64(q->cq.iova);

    /* After setting up all control registers we can enable device now. */
    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
                           (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
                           CC_EN_MASK);
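    /*
     * Worked example (sketch): ctz32(NVME_CQ_ENTRY_BYTES) == 4 and
     * ctz32(NVME_SQ_ENTRY_BYTES) == 6, so CC.IOCQES/CC.IOSQES carry the
     * log2 of the 16-byte CQE and 64-byte SQE sizes, and CC.EN starts
     * the controller.
     */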
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * SCALE_MS;
    while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    if (!nvme_identify(bs, namespace, errp)) {
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    if (regs) {
        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
    }

    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}
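
/*
 * Example (sketch): parsing "nvme://0000:44:00.0/1" above yields the option
 * dict { "device": "0000:44:00.0", "namespace": "1" }; a bare
 * "nvme://0000:44:00.0" defaults the namespace to "1".
 */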

static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_admin_cmd_sync(bs, &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
                            0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
        } else {
            ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                                  errp);
        }
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}
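
/*
 * Worked example (sketch): a namespace formatted with lbaf->ds == 9 has
 * 512-byte blocks, so nvme_getlength() returns nsze << 9 bytes and both
 * bsz->phys and bsz->log report 512.
 */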

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
        size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
                                   qemu_real_host_page_size);
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              len, true, &iova);
1033*15a730e7SPhilippe Mathieu-Daudé         if (r == -ENOSPC) {
            /*
             * Since Linux kernel commit 492855939bdb ("vfio/type1: Limit
             * DMA mappings per container", April 2019, see CVE-2019-3882),
             * the VFIO_IOMMU_MAP_DMA ioctl returns -ENOSPC, in addition to
             * -ENOMEM, to signal that the caller has exhausted the DMA
             * mappings available for a container.
             *
             * This block driver already handles the -ENOMEM error path, so
             * we directly replace -ENOSPC with -ENOMEM. Besides, -ENOSPC
             * has a specific meaning for blockdev coroutines: it triggers
             * BLOCKDEV_ON_ERROR_ENOSPC and BLOCK_ERROR_ACTION_STOP, which
             * stop the VM and ask the operator to add more storage to the
             * blockdev. Not something we can do easily with an IOMMU :)
             */
            r = -ENOMEM;
        }
        if (r == -ENOMEM && retry) {
            /*
             * We exhausted the DMA mappings available for our container:
             * recycle the volatile IOVA mappings.
             */
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

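    /*
     * Fill the command's data pointer with PRP entries. Per the NVMe spec,
     * a one-page transfer uses PRP1 alone, a two-page transfer uses PRP1
     * and PRP2 directly, and anything larger points PRP2 at a PRP list.
     * PRP1 consumes pagelist[0], so the list handed to the device starts
     * at prp_list_iova + sizeof(uint64_t).
     */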
    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /*
     * No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary.
     */
    return r;
}

typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

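/*
 * Completion callback for submitted commands. Record the result; if the
 * request coroutine has already yielded, schedule a bottom half in its
 * AioContext to re-enter it.
 */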
static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}

static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

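    /*
     * cdw12 carries NLB, the zero-based number of logical blocks, in its
     * low 16 bits; bit 30 is the Force Unit Access flag. cdw10/cdw11 below
     * hold the low and high halves of the 64-bit starting LBA.
     */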
    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                       (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

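    /*
     * If the command completes before data.co is set, nvme_rw_cb() merely
     * records the result; the loop below then sees data.ret updated and
     * never yields.
     */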
    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

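/*
 * Check whether every iovec element is aligned to the host page size, as
 * required for mapping it directly with qemu_vfio_dma_map(); unaligned
 * requests fall back to a bounce buffer in nvme_co_prw().
 */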
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
                                 qemu_real_host_page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

static coroutine_fn int nvme_co_prw(BlockDriverState *bs,
                                    uint64_t offset, uint64_t bytes,
                                    QEMUIOVector *qiov, bool is_write,
                                    int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        s->stats.aligned_accesses++;
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    s->stats.unaligned_accesses++;
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
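    /* Unaligned request: bounce it through a host-page-aligned buffer. */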
    buf = qemu_try_memalign(qemu_real_host_page_size, len);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}

static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset,
                                              int bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

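    /* cdw12 bit 25 is the Deallocate hint; bit 30 is Force Unit Access. */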
    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}

static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset,
                                         int bytes)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeDsmRange *buf;
    QEMUIOVector local_qiov;
    int ret;

    NvmeCmd cmd = {
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0), /* number of ranges, 0-based */
        .cdw11 = cpu_to_le32(1 << 2), /* deallocate bit */
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (!s->supports_discard) {
        return -ENOTSUP;
    }

    assert(s->queue_count > 1);

    buf = qemu_try_memalign(s->page_size, s->page_size);
    if (!buf) {
        return -ENOMEM;
    }
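    /* Build a single DSM range descriptor covering [offset, offset + bytes). */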
    memset(buf, 0, s->page_size);
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);
    buf->cattr = 0;

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, s->page_size);

    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        nvme_put_free_req_and_wake(ioq, req);
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        goto out;
    }

    ret = data.ret;
    trace_nvme_dsm_done(s, offset, bytes, ret);
out:
    qemu_iovec_destroy(&local_qiov);
    qemu_vfree(buf);
    return ret;
}

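/*
 * NVMe namespaces cannot be resized here: only a no-op truncation to the
 * current length (or an inexact request that does not grow the device)
 * succeeds.
 */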
static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
                                         bool exact, PreallocMode prealloc,
                                         BdrvRequestFlags flags, Error **errp)
{
    int64_t cur_length;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    cur_length = nvme_getlength(bs);
    if (offset != cur_length && exact) {
        error_setg(errp, "Cannot resize NVMe devices");
        return -ENOTSUP;
    } else if (offset > cur_length) {
        error_setg(errp, "Cannot grow NVMe devices");
        return -EINVAL;
    }

    return 0;
}

static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}

static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}

static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        qemu_bh_delete(q->completion_bh);
        q->completion_bh = NULL;
    }

    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
}

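/*
 * Re-arm the shared IRQ notifier in the new AioContext and recreate each
 * queue pair's completion bottom half there.
 */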
static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        q->completion_bh =
            aio_bh_new(new_context, nvme_process_completion_bh, q);
    }
}

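/*
 * While plugged, doorbell writes are deferred in nvme_kick(); unplugging
 * kicks each I/O queue once, so back-to-back submissions are batched into
 * a single doorbell update per queue.
 */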
static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);
    s->plugged = true;
}

static void nvme_aio_unplug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    s->plugged = false;
    for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_kick(q);
        nvme_process_completion(q);
        qemu_mutex_unlock(&q->lock);
    }
}

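/*
 * Pin a caller-provided buffer with a fixed (non-temporary) DMA mapping so
 * that subsequent I/O to it avoids the per-request map/unmap cycle.
 */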
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /*
         * FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because qemu_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings.
         */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
{
    BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
    BDRVNVMeState *s = bs->opaque;

    stats->driver = BLOCKDEV_DRIVER_NVME;
    stats->u.nvme = (BlockStatsSpecificNvme) {
        .completion_errors = s->stats.completion_errors,
        .aligned_accesses = s->stats.aligned_accesses,
        .unaligned_accesses = s->stats.unaligned_accesses,
    };

    return stats;
}

static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};

static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
    .create_opts              = &bdrv_create_opts_simple,

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,
    .bdrv_co_truncate         = nvme_co_truncate,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard         = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,
    .bdrv_get_specific_stats  = nvme_get_specific_stats,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);