/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT = 1
};

typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;
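/*
 * Free requests are kept on an intrusive freelist threaded through reqs[]
 * by array index: free_req_head names the first free slot, each request's
 * free_req_next names the following one, and -1 terminates the list.
 * A pop is simply:
 *
 *     req = &q->reqs[q->free_req_head];
 *     q->free_req_head = req->free_req_next;
 *
 * Indices rather than pointers keep NVMeRequest trivially copyable, which
 * nvme_process_completion() relies on when it snapshots a request before
 * dropping q->lock.
 */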
typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;
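/*
 * Doorbell layout: the BAR0 doorbell region starts at offset 0x1000 and
 * holds, per queue pair, the submission queue tail doorbell followed by the
 * completion queue head doorbell, with a stride of 4 << CAP.DSTRD bytes
 * between doorbells. For the common DSTRD == 0 case, queue "idx" uses:
 *
 *     SQ tail doorbell: 0x1000 + idx * 8
 *     CQ head doorbell: 0x1000 + idx * 8 + 4
 *
 * which is exactly &doorbells[idx * doorbell_scale].sq_tail/.cq_head below,
 * with a doorbell_scale of 1 (the two-uint32_t struct is 8 bytes wide).
 */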
struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    int nr_queues;
    size_t page_size;
    /* How many uint32_t elements each doorbell entry takes. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;
    bool plugged;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
{
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(s->page_size, bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
    }
}
static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    Error *local_err = NULL;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    q->prp_list_pages = qemu_try_memalign(s->page_size,
                                          s->page_size * NVME_NUM_REQS);
    if (!q->prp_list_pages) {
        goto fail;
    }
    memset(q->prp_list_pages, 0, s->page_size * NVME_NUM_REQS);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_NUM_REQS,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}
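/*
 * Submission doorbell protocol: SQEs are copied into the sq ring first and
 * only then is the new tail written to the SQ tail doorbell, with smp_wmb()
 * ordering the two stores so the device never observes a tail that points
 * past initialized entries. need_kick counts SQEs queued since the last
 * doorbell write, so several submissions can be batched under one MMIO write.
 */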
/* With q->lock */
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

/* Find a free request element if any, otherwise:
 *   a) if in coroutine context, try to wait for one to become available;
 *   b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    NVMeRequest *req;

    qemu_mutex_lock(&q->lock);

    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q->s, q->index);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;

    qemu_mutex_unlock(&q->lock);
    return req;
}

/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}
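/*
 * CQE status field layout (16 bits): bit 0 is the phase tag, bits 15:1 hold
 * the status code and status code type. The translation below keeps only
 * the low 8 bits after stripping the phase tag, so 0 is success, 1 (Invalid
 * Command Opcode) maps to -ENOSYS, 2 (Invalid Field in Command) to -EINVAL,
 * and anything else collapses to -EIO.
 */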
static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         status);
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
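/*
 * Completion ring reaping relies on the NVMe phase tag: the controller
 * inverts the phase bit it writes into CQEs on every pass over the ring,
 * so an entry whose phase bit still equals q->cq_phase is stale and the
 * scan must stop. Whenever the head index wraps to 0, q->cq_phase is
 * flipped to match the controller's next pass. The CID the device echoes
 * back is the reqs[] slot number plus one (cid == 0 is reserved), hence
 * the reqs[cid - 1] lookup.
 */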
/* With q->lock */
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (s->plugged) {
        trace_nvme_process_completion_queue_plugged(s, q->index);
        return false;
    }

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", "
                        "queue size: %u", cid, NVME_QUEUE_SIZE);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}

static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}
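/*
 * Submission slots are NVME_SQ_ENTRY_BYTES (64 bytes) apart, so the SQE for
 * tail index t is memcpy'd to sq.queue + t * 64 and the tail then advances
 * modulo NVME_QUEUE_SIZE. nvme_submit_command() below does exactly that
 * under q->lock, then kicks the doorbell and opportunistically reaps any
 * completions that are already pending.
 */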
static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(q);
    nvme_process_completion(q);
    qemu_mutex_unlock(&q->lock);
}

static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}
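/*
 * Identify uses the CNS field in cdw10 to select the returned data
 * structure: 0x1 asks for the controller data structure, 0x0 for the
 * namespace data structure of the namespace in cmd.nsid. Both responses
 * are 4KiB, which is why a single DMA-mapped buffer (a union of the two
 * layouts) can be reused for the two commands issued below.
 */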
static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    id = qemu_try_memalign(s->page_size, sizeof(*id));
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }

    memset(id, 0, sizeof(*id));
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, sizeof(*id));
    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);
    qemu_vfree(id);
}
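/*
 * Worked example for the transfer-size clamp in nvme_identify() above: with
 * a 4KiB page, one PRP list page holds 4096 / 8 = 512 entries, each naming
 * one 4KiB data page, so a single command is capped at 512 * 4KiB = 2MiB
 * even when the controller advertises a larger MDTS.
 */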
static bool nvme_poll_queue(NVMeQueuePair *q)
{
    bool progress = false;

    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return false;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep polling */
        progress = true;
    }
    qemu_mutex_unlock(&q->lock);

    return progress;
}

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        if (nvme_poll_queue(s->queues[i])) {
            progress = true;
        }
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}
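/*
 * I/O queue creation is two admin commands, and the completion queue must
 * exist before the submission queue that points at it. For both commands
 * cdw10 packs (queue size - 1) in the high half and the queue ID in the
 * low half. Create CQ's cdw11 of 0x3 sets PC (physically contiguous) and
 * IEN (interrupts enabled); Create SQ's cdw11 sets PC and names the paired
 * CQID in its high half.
 */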
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%d]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%d]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    return nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;
    volatile NvmeBar *regs = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                 PROT_READ | PROT_WRITE, errp);
    if (!regs) {
        ret = -EINVAL;
        goto out;
    }

spec "7.6.1 731bdd6a90aSFam Zheng * Initialization". */ 732bdd6a90aSFam Zheng 7339406e0d9SPhilippe Mathieu-Daudé cap = le64_to_cpu(regs->cap); 73415b2260bSPhilippe Mathieu-Daudé trace_nvme_controller_capability_raw(cap); 73515b2260bSPhilippe Mathieu-Daudé trace_nvme_controller_capability("Maximum Queue Entries Supported", 73615b2260bSPhilippe Mathieu-Daudé 1 + NVME_CAP_MQES(cap)); 73715b2260bSPhilippe Mathieu-Daudé trace_nvme_controller_capability("Contiguous Queues Required", 73815b2260bSPhilippe Mathieu-Daudé NVME_CAP_CQR(cap)); 73915b2260bSPhilippe Mathieu-Daudé trace_nvme_controller_capability("Doorbell Stride", 74015b2260bSPhilippe Mathieu-Daudé 2 << (2 + NVME_CAP_DSTRD(cap))); 74115b2260bSPhilippe Mathieu-Daudé trace_nvme_controller_capability("Subsystem Reset Supported", 74215b2260bSPhilippe Mathieu-Daudé NVME_CAP_NSSRS(cap)); 74315b2260bSPhilippe Mathieu-Daudé trace_nvme_controller_capability("Memory Page Size Minimum", 74415b2260bSPhilippe Mathieu-Daudé 1 << (12 + NVME_CAP_MPSMIN(cap))); 74515b2260bSPhilippe Mathieu-Daudé trace_nvme_controller_capability("Memory Page Size Maximum", 74615b2260bSPhilippe Mathieu-Daudé 1 << (12 + NVME_CAP_MPSMAX(cap))); 747fad1eb68SPhilippe Mathieu-Daudé if (!NVME_CAP_CSS(cap)) { 748bdd6a90aSFam Zheng error_setg(errp, "Device doesn't support NVMe command set"); 749bdd6a90aSFam Zheng ret = -EINVAL; 7509582f357SFam Zheng goto out; 751bdd6a90aSFam Zheng } 752bdd6a90aSFam Zheng 753fad1eb68SPhilippe Mathieu-Daudé s->page_size = MAX(4096, 1 << NVME_CAP_MPSMIN(cap)); 754fad1eb68SPhilippe Mathieu-Daudé s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t); 755bdd6a90aSFam Zheng bs->bl.opt_mem_alignment = s->page_size; 756fad1eb68SPhilippe Mathieu-Daudé timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000); 757bdd6a90aSFam Zheng 758bdd6a90aSFam Zheng /* Reset device to get a clean state. */ 7599406e0d9SPhilippe Mathieu-Daudé regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE); 760bdd6a90aSFam Zheng /* Wait for CSTS.RDY = 0. */ 761e4f310feSPhilippe Mathieu-Daudé deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS; 762fad1eb68SPhilippe Mathieu-Daudé while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) { 763bdd6a90aSFam Zheng if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) { 764bdd6a90aSFam Zheng error_setg(errp, "Timeout while waiting for device to reset (%" 765bdd6a90aSFam Zheng PRId64 " ms)", 766bdd6a90aSFam Zheng timeout_ms); 767bdd6a90aSFam Zheng ret = -ETIMEDOUT; 7689582f357SFam Zheng goto out; 769bdd6a90aSFam Zheng } 770bdd6a90aSFam Zheng } 771bdd6a90aSFam Zheng 772f6845323SPhilippe Mathieu-Daudé s->doorbells = qemu_vfio_pci_map_bar(s->vfio, 0, sizeof(NvmeBar), 773f6845323SPhilippe Mathieu-Daudé NVME_DOORBELL_SIZE, PROT_WRITE, errp); 774f6845323SPhilippe Mathieu-Daudé if (!s->doorbells) { 775f6845323SPhilippe Mathieu-Daudé ret = -EINVAL; 776f6845323SPhilippe Mathieu-Daudé goto out; 777f6845323SPhilippe Mathieu-Daudé } 778f6845323SPhilippe Mathieu-Daudé 779bdd6a90aSFam Zheng /* Set up admin queue. 
    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->queues[INDEX_ADMIN] = nvme_create_queue_pair(s, aio_context, 0,
                                                    NVME_QUEUE_SIZE,
                                                    errp);
    if (!s->queues[INDEX_ADMIN]) {
        ret = -EINVAL;
        goto out;
    }
    s->nr_queues = 1;
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << AQA_ACQS_SHIFT) |
                            (NVME_QUEUE_SIZE << AQA_ASQS_SHIFT));
    regs->asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
    regs->acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);

    /* After setting up all control registers we can enable device now. */
    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
                           (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
                           CC_EN_MASK);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * SCALE_MS;
    while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    if (regs) {
        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
    }

    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}
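/*
 * The volatile write cache is toggled with Set Features, feature ID 0x06
 * (Volatile Write Cache), where cdw11 bit 0 is the enable bit. This is only
 * attempted when Identify reported VWC support (see write_cache_supported).
 */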
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->doorbells,
                            sizeof(NvmeBar), NVME_DOORBELL_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
        } else {
            ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                                  errp);
        }
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}
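/*
 * PRP mapping rules, as applied below: every data page is recorded in
 * pagelist[]. A one-page transfer goes entirely in PRP1; a two-page
 * transfer uses PRP1 plus PRP2 as a second page pointer; anything larger
 * keeps the first page in PRP1 and points PRP2 at the per-request PRP list
 * (skipping pagelist[0], which already went into PRP1). Offsets within a
 * page never occur here because the caller guarantees page alignment.
 */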
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}

typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}
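/*
 * NVMe read/write commands address the namespace in logical blocks:
 * cdw10/cdw11 carry the low/high halves of the starting LBA
 * (offset >> blkshift) and cdw12 bits 15:0 carry the 0-based block count,
 * with bit 30 as Force Unit Access. For example, a 16KiB write at offset
 * 1MiB on a 512-byte-block namespace becomes SLBA 2048, NLB 31.
 */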
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}
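/*
 * I/O buffers that are not page-aligned cannot be handed to the device
 * directly, so nvme_co_prw() falls back to a bounce buffer: writes are
 * flattened into an aligned allocation first, reads land there and are
 * copied out afterwards. The stats counters make the cost of this slow
 * path observable.
 */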
1187f25e7ab2SPhilippe Mathieu-Daudé s->stats.unaligned_accesses++;
1188bdd6a90aSFam Zheng trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
118938e1f818SPhilippe Mathieu-Daudé buf = qemu_try_memalign(s->page_size, bytes);
1190bdd6a90aSFam Zheng 
1191bdd6a90aSFam Zheng if (!buf) {
1192bdd6a90aSFam Zheng return -ENOMEM;
1193bdd6a90aSFam Zheng }
1194bdd6a90aSFam Zheng qemu_iovec_init(&local_qiov, 1);
1195bdd6a90aSFam Zheng if (is_write) {
1196bdd6a90aSFam Zheng qemu_iovec_to_buf(qiov, 0, buf, bytes);
1197bdd6a90aSFam Zheng }
1198bdd6a90aSFam Zheng qemu_iovec_add(&local_qiov, buf, bytes);
1199bdd6a90aSFam Zheng r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
1200bdd6a90aSFam Zheng qemu_iovec_destroy(&local_qiov);
1201bdd6a90aSFam Zheng if (!r && !is_write) {
1202bdd6a90aSFam Zheng qemu_iovec_from_buf(qiov, 0, buf, bytes);
1203bdd6a90aSFam Zheng }
1204bdd6a90aSFam Zheng qemu_vfree(buf);
1205bdd6a90aSFam Zheng return r;
1206bdd6a90aSFam Zheng }
1207bdd6a90aSFam Zheng 
1208bdd6a90aSFam Zheng static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
1209bdd6a90aSFam Zheng uint64_t offset, uint64_t bytes,
1210bdd6a90aSFam Zheng QEMUIOVector *qiov, int flags)
1211bdd6a90aSFam Zheng {
1212bdd6a90aSFam Zheng return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
1213bdd6a90aSFam Zheng }
1214bdd6a90aSFam Zheng 
1215bdd6a90aSFam Zheng static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
1216bdd6a90aSFam Zheng uint64_t offset, uint64_t bytes,
1217bdd6a90aSFam Zheng QEMUIOVector *qiov, int flags)
1218bdd6a90aSFam Zheng {
1219bdd6a90aSFam Zheng return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
1220bdd6a90aSFam Zheng }
1221bdd6a90aSFam Zheng 
1222bdd6a90aSFam Zheng static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
1223bdd6a90aSFam Zheng {
1224bdd6a90aSFam Zheng BDRVNVMeState *s = bs->opaque;
122573159e52SPhilippe Mathieu-Daudé NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
1226bdd6a90aSFam Zheng NVMeRequest *req;
1227bdd6a90aSFam Zheng NvmeCmd cmd = {
1228bdd6a90aSFam Zheng .opcode = NVME_CMD_FLUSH,
1229bdd6a90aSFam Zheng .nsid = cpu_to_le32(s->nsid),
1230bdd6a90aSFam Zheng };
1231bdd6a90aSFam Zheng NVMeCoData data = {
1232bdd6a90aSFam Zheng .ctx = bdrv_get_aio_context(bs),
1233bdd6a90aSFam Zheng .ret = -EINPROGRESS,
1234bdd6a90aSFam Zheng };
1235bdd6a90aSFam Zheng 
1236bdd6a90aSFam Zheng assert(s->nr_queues > 1);
1237bdd6a90aSFam Zheng req = nvme_get_free_req(ioq);
1238bdd6a90aSFam Zheng assert(req);
1239b75fd5f5SStefan Hajnoczi nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
1240bdd6a90aSFam Zheng 
1241bdd6a90aSFam Zheng data.co = qemu_coroutine_self();
1242bdd6a90aSFam Zheng if (data.ret == -EINPROGRESS) {
1243bdd6a90aSFam Zheng qemu_coroutine_yield();
1244bdd6a90aSFam Zheng }
1245bdd6a90aSFam Zheng 
1246bdd6a90aSFam Zheng return data.ret;
1247bdd6a90aSFam Zheng }
1248bdd6a90aSFam Zheng 
1249bdd6a90aSFam Zheng 
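/*
 * Write Zeroes: cdw10/cdw11 carry the 64-bit starting LBA, and cdw12
 * bits 15:0 hold the 0-based number of logical blocks; bit 25
 * (deallocate) and bit 30 (FUA) are filled in from the request flags
 * below.
 */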
1250e0dd95e3SMaxim Levitsky static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
1251e0dd95e3SMaxim Levitsky int64_t offset,
1252e0dd95e3SMaxim Levitsky int bytes,
1253e0dd95e3SMaxim Levitsky BdrvRequestFlags flags)
1254e0dd95e3SMaxim Levitsky {
1255e0dd95e3SMaxim Levitsky BDRVNVMeState *s = bs->opaque;
125673159e52SPhilippe Mathieu-Daudé NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
1257e0dd95e3SMaxim Levitsky NVMeRequest *req;
1258e0dd95e3SMaxim Levitsky 
1259e0dd95e3SMaxim Levitsky uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
1260e0dd95e3SMaxim Levitsky 
1261e0dd95e3SMaxim Levitsky if (!s->supports_write_zeroes) {
1262e0dd95e3SMaxim Levitsky return -ENOTSUP;
1263e0dd95e3SMaxim Levitsky }
1264e0dd95e3SMaxim Levitsky 
1265e0dd95e3SMaxim Levitsky NvmeCmd cmd = {
126669265150SKlaus Jensen .opcode = NVME_CMD_WRITE_ZEROES,
1267e0dd95e3SMaxim Levitsky .nsid = cpu_to_le32(s->nsid),
1268e0dd95e3SMaxim Levitsky .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
1269e0dd95e3SMaxim Levitsky .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
1270e0dd95e3SMaxim Levitsky };
1271e0dd95e3SMaxim Levitsky 
1272e0dd95e3SMaxim Levitsky NVMeCoData data = {
1273e0dd95e3SMaxim Levitsky .ctx = bdrv_get_aio_context(bs),
1274e0dd95e3SMaxim Levitsky .ret = -EINPROGRESS,
1275e0dd95e3SMaxim Levitsky };
1276e0dd95e3SMaxim Levitsky 
1277e0dd95e3SMaxim Levitsky if (flags & BDRV_REQ_MAY_UNMAP) {
1278e0dd95e3SMaxim Levitsky cdw12 |= (1 << 25);
1279e0dd95e3SMaxim Levitsky }
1280e0dd95e3SMaxim Levitsky 
1281e0dd95e3SMaxim Levitsky if (flags & BDRV_REQ_FUA) {
1282e0dd95e3SMaxim Levitsky cdw12 |= (1 << 30);
1283e0dd95e3SMaxim Levitsky }
1284e0dd95e3SMaxim Levitsky 
1285e0dd95e3SMaxim Levitsky cmd.cdw12 = cpu_to_le32(cdw12);
1286e0dd95e3SMaxim Levitsky 
1287e0dd95e3SMaxim Levitsky trace_nvme_write_zeroes(s, offset, bytes, flags);
1288e0dd95e3SMaxim Levitsky assert(s->nr_queues > 1);
1289e0dd95e3SMaxim Levitsky req = nvme_get_free_req(ioq);
1290e0dd95e3SMaxim Levitsky assert(req);
1291e0dd95e3SMaxim Levitsky 
1292b75fd5f5SStefan Hajnoczi nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
1293e0dd95e3SMaxim Levitsky 
1294e0dd95e3SMaxim Levitsky data.co = qemu_coroutine_self();
1295e0dd95e3SMaxim Levitsky while (data.ret == -EINPROGRESS) {
1296e0dd95e3SMaxim Levitsky qemu_coroutine_yield();
1297e0dd95e3SMaxim Levitsky }
1298e0dd95e3SMaxim Levitsky 
1299e0dd95e3SMaxim Levitsky trace_nvme_rw_done(s, true, offset, bytes, data.ret);
1300e0dd95e3SMaxim Levitsky return data.ret;
1301e0dd95e3SMaxim Levitsky }
1302e0dd95e3SMaxim Levitsky 
1303e0dd95e3SMaxim Levitsky 
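/*
 * Discard maps to the Dataset Management command: a single NvmeDsmRange
 * descriptor (slba/nlb pair) is passed in a page-aligned buffer, with
 * cdw10 = 0 (one range, 0-based count) and the deallocate attribute set
 * in cdw11.
 */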
1304e87a09d6SMaxim Levitsky static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
1305e87a09d6SMaxim Levitsky int64_t offset,
1306e87a09d6SMaxim Levitsky int bytes)
1307e87a09d6SMaxim Levitsky {
1308e87a09d6SMaxim Levitsky BDRVNVMeState *s = bs->opaque;
130973159e52SPhilippe Mathieu-Daudé NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
1310e87a09d6SMaxim Levitsky NVMeRequest *req;
1311e87a09d6SMaxim Levitsky NvmeDsmRange *buf;
1312e87a09d6SMaxim Levitsky QEMUIOVector local_qiov;
1313e87a09d6SMaxim Levitsky int ret;
1314e87a09d6SMaxim Levitsky 
1315e87a09d6SMaxim Levitsky NvmeCmd cmd = {
1316e87a09d6SMaxim Levitsky .opcode = NVME_CMD_DSM,
1317e87a09d6SMaxim Levitsky .nsid = cpu_to_le32(s->nsid),
1318e87a09d6SMaxim Levitsky .cdw10 = cpu_to_le32(0), /* number of ranges - 0 based */
1319e87a09d6SMaxim Levitsky .cdw11 = cpu_to_le32(1 << 2), /* deallocate bit */
1320e87a09d6SMaxim Levitsky };
1321e87a09d6SMaxim Levitsky 
1322e87a09d6SMaxim Levitsky NVMeCoData data = {
1323e87a09d6SMaxim Levitsky .ctx = bdrv_get_aio_context(bs),
1324e87a09d6SMaxim Levitsky .ret = -EINPROGRESS,
1325e87a09d6SMaxim Levitsky };
1326e87a09d6SMaxim Levitsky 
1327e87a09d6SMaxim Levitsky if (!s->supports_discard) {
1328e87a09d6SMaxim Levitsky return -ENOTSUP;
1329e87a09d6SMaxim Levitsky }
1330e87a09d6SMaxim Levitsky 
1331e87a09d6SMaxim Levitsky assert(s->nr_queues > 1);
1332e87a09d6SMaxim Levitsky 
133338e1f818SPhilippe Mathieu-Daudé buf = qemu_try_memalign(s->page_size, s->page_size);
1334e87a09d6SMaxim Levitsky if (!buf) {
1335e87a09d6SMaxim Levitsky return -ENOMEM;
1336e87a09d6SMaxim Levitsky }
13372ed84693SPhilippe Mathieu-Daudé memset(buf, 0, s->page_size);
1338e87a09d6SMaxim Levitsky buf->nlb = cpu_to_le32(bytes >> s->blkshift);
1339e87a09d6SMaxim Levitsky buf->slba = cpu_to_le64(offset >> s->blkshift);
1340e87a09d6SMaxim Levitsky buf->cattr = 0;
1341e87a09d6SMaxim Levitsky 
1342e87a09d6SMaxim Levitsky qemu_iovec_init(&local_qiov, 1);
1343e87a09d6SMaxim Levitsky qemu_iovec_add(&local_qiov, buf, 4096);
1344e87a09d6SMaxim Levitsky 
1345e87a09d6SMaxim Levitsky req = nvme_get_free_req(ioq);
1346e87a09d6SMaxim Levitsky assert(req);
1347e87a09d6SMaxim Levitsky 
1348e87a09d6SMaxim Levitsky qemu_co_mutex_lock(&s->dma_map_lock);
1349e87a09d6SMaxim Levitsky ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
1350e87a09d6SMaxim Levitsky qemu_co_mutex_unlock(&s->dma_map_lock);
1351e87a09d6SMaxim Levitsky 
1352e87a09d6SMaxim Levitsky if (ret) {
1353b75fd5f5SStefan Hajnoczi nvme_put_free_req_and_wake(ioq, req);
1354e87a09d6SMaxim Levitsky goto out;
1355e87a09d6SMaxim Levitsky }
1356e87a09d6SMaxim Levitsky 
1357e87a09d6SMaxim Levitsky trace_nvme_dsm(s, offset, bytes);
1358e87a09d6SMaxim Levitsky 
1359b75fd5f5SStefan Hajnoczi nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
1360e87a09d6SMaxim Levitsky 
1361e87a09d6SMaxim Levitsky data.co = qemu_coroutine_self();
1362e87a09d6SMaxim Levitsky while (data.ret == -EINPROGRESS) {
1363e87a09d6SMaxim Levitsky qemu_coroutine_yield();
1364e87a09d6SMaxim Levitsky }
1365e87a09d6SMaxim Levitsky 
1366e87a09d6SMaxim Levitsky qemu_co_mutex_lock(&s->dma_map_lock);
1367e87a09d6SMaxim Levitsky ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
1368e87a09d6SMaxim Levitsky qemu_co_mutex_unlock(&s->dma_map_lock);
1369e87a09d6SMaxim Levitsky 
1370e87a09d6SMaxim Levitsky if (ret) {
1371e87a09d6SMaxim Levitsky goto out;
1372e87a09d6SMaxim Levitsky }
1373e87a09d6SMaxim Levitsky 
1374e87a09d6SMaxim Levitsky ret = data.ret;
1375e87a09d6SMaxim Levitsky trace_nvme_dsm_done(s, offset, bytes, ret);
1376e87a09d6SMaxim Levitsky out:
1377e87a09d6SMaxim Levitsky qemu_iovec_destroy(&local_qiov);
1378e87a09d6SMaxim Levitsky qemu_vfree(buf);
1379e87a09d6SMaxim Levitsky return ret;
1380e87a09d6SMaxim Levitsky 
1381e87a09d6SMaxim Levitsky }
1382e87a09d6SMaxim Levitsky 
1383e87a09d6SMaxim Levitsky 
1384bdd6a90aSFam Zheng static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
1385bdd6a90aSFam Zheng BlockReopenQueue *queue, Error **errp)
1386bdd6a90aSFam Zheng {
1387bdd6a90aSFam Zheng return 0;
1388bdd6a90aSFam Zheng }
1389bdd6a90aSFam Zheng 
1390998b3a1eSMax Reitz static void nvme_refresh_filename(BlockDriverState *bs)
1391bdd6a90aSFam Zheng {
1392cc61b074SMax Reitz BDRVNVMeState *s = bs->opaque;
1393bdd6a90aSFam Zheng 
1394cc61b074SMax Reitz snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
1395cc61b074SMax Reitz s->device, s->nsid);
1396bdd6a90aSFam Zheng }
1397bdd6a90aSFam Zheng 
1398bdd6a90aSFam Zheng static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
1399bdd6a90aSFam Zheng {
1400bdd6a90aSFam Zheng BDRVNVMeState *s = bs->opaque;
1401bdd6a90aSFam Zheng 
1402bdd6a90aSFam Zheng bs->bl.opt_mem_alignment = s->page_size;
1403bdd6a90aSFam Zheng bs->bl.request_alignment = s->page_size;
1404bdd6a90aSFam Zheng bs->bl.max_transfer = s->max_transfer;
1405bdd6a90aSFam Zheng }
1406bdd6a90aSFam Zheng 
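/*
 * AioContext switching: each queue pair owns a completion bottom half
 * in the current context, and all queues share one MSIX interrupt event
 * notifier. Both are torn down on detach and recreated on attach.
 */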
1407bdd6a90aSFam Zheng static void nvme_detach_aio_context(BlockDriverState *bs)
1408bdd6a90aSFam Zheng {
1409bdd6a90aSFam Zheng BDRVNVMeState *s = bs->opaque;
1410bdd6a90aSFam Zheng 
14117838c67fSStefan Hajnoczi for (int i = 0; i < s->nr_queues; i++) {
14127838c67fSStefan Hajnoczi NVMeQueuePair *q = s->queues[i];
14137838c67fSStefan Hajnoczi 
14147838c67fSStefan Hajnoczi qemu_bh_delete(q->completion_bh);
14157838c67fSStefan Hajnoczi q->completion_bh = NULL;
14167838c67fSStefan Hajnoczi }
14177838c67fSStefan Hajnoczi 
1418b111b3fcSPhilippe Mathieu-Daudé aio_set_event_notifier(bdrv_get_aio_context(bs),
1419b111b3fcSPhilippe Mathieu-Daudé &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
1420bdd6a90aSFam Zheng false, NULL, NULL);
1421bdd6a90aSFam Zheng }
1422bdd6a90aSFam Zheng 
1423bdd6a90aSFam Zheng static void nvme_attach_aio_context(BlockDriverState *bs,
1424bdd6a90aSFam Zheng AioContext *new_context)
1425bdd6a90aSFam Zheng {
1426bdd6a90aSFam Zheng BDRVNVMeState *s = bs->opaque;
1427bdd6a90aSFam Zheng 
1428bdd6a90aSFam Zheng s->aio_context = new_context;
1429b111b3fcSPhilippe Mathieu-Daudé aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
1430bdd6a90aSFam Zheng false, nvme_handle_event, nvme_poll_cb);
14317838c67fSStefan Hajnoczi 
14327838c67fSStefan Hajnoczi for (int i = 0; i < s->nr_queues; i++) {
14337838c67fSStefan Hajnoczi NVMeQueuePair *q = s->queues[i];
14347838c67fSStefan Hajnoczi 
14357838c67fSStefan Hajnoczi q->completion_bh =
14367838c67fSStefan Hajnoczi aio_bh_new(new_context, nvme_process_completion_bh, q);
14377838c67fSStefan Hajnoczi }
1438bdd6a90aSFam Zheng }
1439bdd6a90aSFam Zheng 
1440bdd6a90aSFam Zheng static void nvme_aio_plug(BlockDriverState *bs)
1441bdd6a90aSFam Zheng {
1442bdd6a90aSFam Zheng BDRVNVMeState *s = bs->opaque;
14432f0d8947SPaolo Bonzini assert(!s->plugged);
14442f0d8947SPaolo Bonzini s->plugged = true;
1445bdd6a90aSFam Zheng }
1446bdd6a90aSFam Zheng 
1447bdd6a90aSFam Zheng static void nvme_aio_unplug(BlockDriverState *bs)
1448bdd6a90aSFam Zheng {
1449bdd6a90aSFam Zheng int i;
1450bdd6a90aSFam Zheng BDRVNVMeState *s = bs->opaque;
1451bdd6a90aSFam Zheng assert(s->plugged);
14522f0d8947SPaolo Bonzini s->plugged = false;
145373159e52SPhilippe Mathieu-Daudé for (i = INDEX_IO(0); i < s->nr_queues; i++) {
1454bdd6a90aSFam Zheng NVMeQueuePair *q = s->queues[i];
1455bdd6a90aSFam Zheng qemu_mutex_lock(&q->lock);
1456b75fd5f5SStefan Hajnoczi nvme_kick(q);
1457b75fd5f5SStefan Hajnoczi nvme_process_completion(q);
1458bdd6a90aSFam Zheng qemu_mutex_unlock(&q->lock);
1459bdd6a90aSFam Zheng }
1460bdd6a90aSFam Zheng }
1461bdd6a90aSFam Zheng 
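/*
 * Pre-map a host buffer with the vfio IOMMU so that later I/O to it
 * does not pay the qemu_vfio_dma_map() cost on every request. Failure
 * is not fatal: unregistered buffers are simply mapped on demand.
 */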
14629ed61612SFam Zheng static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
14639ed61612SFam Zheng {
14649ed61612SFam Zheng int ret;
14659ed61612SFam Zheng BDRVNVMeState *s = bs->opaque;
14669ed61612SFam Zheng 
14679ed61612SFam Zheng ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
14689ed61612SFam Zheng if (ret) {
14699ed61612SFam Zheng /* FIXME: we may run out of IOVA addresses after repeated
14709ed61612SFam Zheng * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
14719ed61612SFam Zheng * doesn't reclaim addresses for fixed mappings. */
14729ed61612SFam Zheng error_report("nvme_register_buf failed: %s", strerror(-ret));
14739ed61612SFam Zheng }
14749ed61612SFam Zheng }
14759ed61612SFam Zheng 
14769ed61612SFam Zheng static void nvme_unregister_buf(BlockDriverState *bs, void *host)
14779ed61612SFam Zheng {
14789ed61612SFam Zheng BDRVNVMeState *s = bs->opaque;
14799ed61612SFam Zheng 
14809ed61612SFam Zheng qemu_vfio_dma_unmap(s->vfio, host);
14819ed61612SFam Zheng }
14829ed61612SFam Zheng 
1483f25e7ab2SPhilippe Mathieu-Daudé static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
1484f25e7ab2SPhilippe Mathieu-Daudé {
1485f25e7ab2SPhilippe Mathieu-Daudé BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
1486f25e7ab2SPhilippe Mathieu-Daudé BDRVNVMeState *s = bs->opaque;
1487f25e7ab2SPhilippe Mathieu-Daudé 
1488f25e7ab2SPhilippe Mathieu-Daudé stats->driver = BLOCKDEV_DRIVER_NVME;
1489f25e7ab2SPhilippe Mathieu-Daudé stats->u.nvme = (BlockStatsSpecificNvme) {
1490f25e7ab2SPhilippe Mathieu-Daudé .completion_errors = s->stats.completion_errors,
1491f25e7ab2SPhilippe Mathieu-Daudé .aligned_accesses = s->stats.aligned_accesses,
1492f25e7ab2SPhilippe Mathieu-Daudé .unaligned_accesses = s->stats.unaligned_accesses,
1493f25e7ab2SPhilippe Mathieu-Daudé };
1494f25e7ab2SPhilippe Mathieu-Daudé 
1495f25e7ab2SPhilippe Mathieu-Daudé return stats;
1496f25e7ab2SPhilippe Mathieu-Daudé }
1497f25e7ab2SPhilippe Mathieu-Daudé 
14982654267cSMax Reitz static const char *const nvme_strong_runtime_opts[] = {
14992654267cSMax Reitz NVME_BLOCK_OPT_DEVICE,
15002654267cSMax Reitz NVME_BLOCK_OPT_NAMESPACE,
15012654267cSMax Reitz 
15022654267cSMax Reitz NULL
15032654267cSMax Reitz };
15042654267cSMax Reitz 
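/*
 * "nvme" is a protocol driver (not a format), opened via
 * nvme_file_open() from filenames of the form nvme://DEVICE/NAMESPACE.
 */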
1505bdd6a90aSFam Zheng static BlockDriver bdrv_nvme = {
1506bdd6a90aSFam Zheng .format_name = "nvme",
1507bdd6a90aSFam Zheng .protocol_name = "nvme",
1508bdd6a90aSFam Zheng .instance_size = sizeof(BDRVNVMeState),
1509bdd6a90aSFam Zheng 
15105a5e7f8cSMaxim Levitsky .bdrv_co_create_opts = bdrv_co_create_opts_simple,
15115a5e7f8cSMaxim Levitsky .create_opts = &bdrv_create_opts_simple,
15125a5e7f8cSMaxim Levitsky 
1513bdd6a90aSFam Zheng .bdrv_parse_filename = nvme_parse_filename,
1514bdd6a90aSFam Zheng .bdrv_file_open = nvme_file_open,
1515bdd6a90aSFam Zheng .bdrv_close = nvme_close,
1516bdd6a90aSFam Zheng .bdrv_getlength = nvme_getlength,
1517118d1b6aSMaxim Levitsky .bdrv_probe_blocksizes = nvme_probe_blocksizes,
1518bdd6a90aSFam Zheng 
1519bdd6a90aSFam Zheng .bdrv_co_preadv = nvme_co_preadv,
1520bdd6a90aSFam Zheng .bdrv_co_pwritev = nvme_co_pwritev,
1521e0dd95e3SMaxim Levitsky 
1522e0dd95e3SMaxim Levitsky .bdrv_co_pwrite_zeroes = nvme_co_pwrite_zeroes,
1523e87a09d6SMaxim Levitsky .bdrv_co_pdiscard = nvme_co_pdiscard,
1524e0dd95e3SMaxim Levitsky 
1525bdd6a90aSFam Zheng .bdrv_co_flush_to_disk = nvme_co_flush,
1526bdd6a90aSFam Zheng .bdrv_reopen_prepare = nvme_reopen_prepare,
1527bdd6a90aSFam Zheng 
1528bdd6a90aSFam Zheng .bdrv_refresh_filename = nvme_refresh_filename,
1529bdd6a90aSFam Zheng .bdrv_refresh_limits = nvme_refresh_limits,
15302654267cSMax Reitz .strong_runtime_opts = nvme_strong_runtime_opts,
1531f25e7ab2SPhilippe Mathieu-Daudé .bdrv_get_specific_stats = nvme_get_specific_stats,
1532bdd6a90aSFam Zheng 
1533bdd6a90aSFam Zheng .bdrv_detach_aio_context = nvme_detach_aio_context,
1534bdd6a90aSFam Zheng .bdrv_attach_aio_context = nvme_attach_aio_context,
1535bdd6a90aSFam Zheng 
1536bdd6a90aSFam Zheng .bdrv_io_plug = nvme_aio_plug,
1537bdd6a90aSFam Zheng .bdrv_io_unplug = nvme_aio_unplug,
15389ed61612SFam Zheng 
15399ed61612SFam Zheng .bdrv_register_buf = nvme_register_buf,
15409ed61612SFam Zheng .bdrv_unregister_buf = nvme_unregister_buf,
1541bdd6a90aSFam Zheng };
1542bdd6a90aSFam Zheng 
1543bdd6a90aSFam Zheng static void bdrv_nvme_init(void)
1544bdd6a90aSFam Zheng {
1545bdd6a90aSFam Zheng bdrv_register(&bdrv_nvme);
1546bdd6a90aSFam Zheng }
1547bdd6a90aSFam Zheng 
1548bdd6a90aSFam Zheng block_init(bdrv_nvme_init);
1549
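/*
 * Illustrative invocation (the PCI address 0000:01:00.0 and namespace 1
 * are placeholders; the controller must first be bound to vfio-pci):
 *
 *   qemu-system-x86_64 ... -drive file=nvme://0000:01:00.0/1,if=none,id=drive0
 */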