/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

typedef struct BDRVNVMeState BDRVNVMeState;

typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;
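
/*
 * Worked example of the sizing above: with NVME_QUEUE_SIZE = 128, ring
 * indices run 0..127 and wrap modulo 128.  head == tail means "empty", so
 * "full" must be signalled one element early (head == tail + 1).  Hence at
 * most NVME_NUM_REQS = 127 requests can ever be in flight per queue pair,
 * and reqs[] is sized accordingly.
 */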

/* Memory mapped registers */
typedef volatile struct {
    uint64_t cap;
    uint32_t vs;
    uint32_t intms;
    uint32_t intmc;
    uint32_t cc;
    uint32_t reserved0;
    uint32_t csts;
    uint32_t nssr;
    uint32_t aqa;
    uint64_t asq;
    uint64_t acq;
    uint32_t cmbloc;
    uint32_t cmbsz;
    uint8_t  reserved1[0xec0];
    uint8_t  cmd_set_specific[0x100];
    uint32_t doorbells[];
} NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    NVMeRegs *regs;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    int nr_queues;
    size_t page_size;
    /* How many uint32_t elements each doorbell entry takes. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier;

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
*/ 1241120407bSMax Reitz int blkshift; 125118d1b6aSMaxim Levitsky 126bdd6a90aSFam Zheng uint64_t max_transfer; 1272f0d8947SPaolo Bonzini bool plugged; 128bdd6a90aSFam Zheng 129e0dd95e3SMaxim Levitsky bool supports_write_zeroes; 130e87a09d6SMaxim Levitsky bool supports_discard; 131e0dd95e3SMaxim Levitsky 132bdd6a90aSFam Zheng CoMutex dma_map_lock; 133bdd6a90aSFam Zheng CoQueue dma_flush_queue; 134bdd6a90aSFam Zheng 135bdd6a90aSFam Zheng /* Total size of mapped qiov, accessed under dma_map_lock */ 136bdd6a90aSFam Zheng int dma_map_count; 137cc61b074SMax Reitz 138cc61b074SMax Reitz /* PCI address (required for nvme_refresh_filename()) */ 139cc61b074SMax Reitz char *device; 140b75fd5f5SStefan Hajnoczi }; 141bdd6a90aSFam Zheng 142bdd6a90aSFam Zheng #define NVME_BLOCK_OPT_DEVICE "device" 143bdd6a90aSFam Zheng #define NVME_BLOCK_OPT_NAMESPACE "namespace" 144bdd6a90aSFam Zheng 1457838c67fSStefan Hajnoczi static void nvme_process_completion_bh(void *opaque); 1467838c67fSStefan Hajnoczi 147bdd6a90aSFam Zheng static QemuOptsList runtime_opts = { 148bdd6a90aSFam Zheng .name = "nvme", 149bdd6a90aSFam Zheng .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head), 150bdd6a90aSFam Zheng .desc = { 151bdd6a90aSFam Zheng { 152bdd6a90aSFam Zheng .name = NVME_BLOCK_OPT_DEVICE, 153bdd6a90aSFam Zheng .type = QEMU_OPT_STRING, 154bdd6a90aSFam Zheng .help = "NVMe PCI device address", 155bdd6a90aSFam Zheng }, 156bdd6a90aSFam Zheng { 157bdd6a90aSFam Zheng .name = NVME_BLOCK_OPT_NAMESPACE, 158bdd6a90aSFam Zheng .type = QEMU_OPT_NUMBER, 159bdd6a90aSFam Zheng .help = "NVMe namespace", 160bdd6a90aSFam Zheng }, 161bdd6a90aSFam Zheng { /* end of list */ } 162bdd6a90aSFam Zheng }, 163bdd6a90aSFam Zheng }; 164bdd6a90aSFam Zheng 165bdd6a90aSFam Zheng static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q, 166bdd6a90aSFam Zheng int nentries, int entry_bytes, Error **errp) 167bdd6a90aSFam Zheng { 168bdd6a90aSFam Zheng BDRVNVMeState *s = bs->opaque; 169bdd6a90aSFam Zheng size_t bytes; 170bdd6a90aSFam Zheng int r; 171bdd6a90aSFam Zheng 172bdd6a90aSFam Zheng bytes = ROUND_UP(nentries * entry_bytes, s->page_size); 173bdd6a90aSFam Zheng q->head = q->tail = 0; 174bdd6a90aSFam Zheng q->queue = qemu_try_blockalign0(bs, bytes); 175bdd6a90aSFam Zheng 176bdd6a90aSFam Zheng if (!q->queue) { 177bdd6a90aSFam Zheng error_setg(errp, "Cannot allocate queue"); 178bdd6a90aSFam Zheng return; 179bdd6a90aSFam Zheng } 180bdd6a90aSFam Zheng r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova); 181bdd6a90aSFam Zheng if (r) { 182bdd6a90aSFam Zheng error_setg(errp, "Cannot map queue"); 183bdd6a90aSFam Zheng } 184bdd6a90aSFam Zheng } 185bdd6a90aSFam Zheng 186b75fd5f5SStefan Hajnoczi static void nvme_free_queue_pair(NVMeQueuePair *q) 187bdd6a90aSFam Zheng { 1887838c67fSStefan Hajnoczi if (q->completion_bh) { 1897838c67fSStefan Hajnoczi qemu_bh_delete(q->completion_bh); 1907838c67fSStefan Hajnoczi } 191bdd6a90aSFam Zheng qemu_vfree(q->prp_list_pages); 192bdd6a90aSFam Zheng qemu_vfree(q->sq.queue); 193bdd6a90aSFam Zheng qemu_vfree(q->cq.queue); 194bdd6a90aSFam Zheng qemu_mutex_destroy(&q->lock); 195bdd6a90aSFam Zheng g_free(q); 196bdd6a90aSFam Zheng } 197bdd6a90aSFam Zheng 198bdd6a90aSFam Zheng static void nvme_free_req_queue_cb(void *opaque) 199bdd6a90aSFam Zheng { 200bdd6a90aSFam Zheng NVMeQueuePair *q = opaque; 201bdd6a90aSFam Zheng 202bdd6a90aSFam Zheng qemu_mutex_lock(&q->lock); 203bdd6a90aSFam Zheng while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) { 204bdd6a90aSFam Zheng /* Retry all pending requests */ 205bdd6a90aSFam Zheng } 

static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
    uint64_t prp_list_iova;

    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_NUM_REQS);
    q->completion_bh = aio_bh_new(bdrv_get_aio_context(bs),
                                  nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_NUM_REQS,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}
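
/*
 * Doorbell layout sketch for the index math above: doorbells[] is an array
 * of uint32_t starting at BAR0 offset 0x1000, and doorbell_scale stretches
 * the stride when CAP.DSTRD > 0.  With the common 4-byte stride
 * (doorbell_scale == 1), queue pair idx uses doorbells[2 * idx] as the SQ
 * tail doorbell and doorbells[2 * idx + 1] as the CQ head doorbell.
 */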

/* With q->lock */
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

/* Find a free request element if any, otherwise:
 *   a) if in coroutine context, try to wait for one to become available;
 *   b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    NVMeRequest *req;

    qemu_mutex_lock(&q->lock);

    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;

    qemu_mutex_unlock(&q->lock);
    return req;
}

/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}
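
/*
 * The free list above is an index-linked list threaded through reqs[]
 * itself: free_req_head holds the index of the first free slot (-1 when
 * exhausted) and each slot's free_req_next points at the next one.  A
 * sketch with three requests, slot 1 in flight:
 *
 *     free_req_head = 2 -> reqs[2].free_req_next = 0
 *                          reqs[0].free_req_next = -1
 *
 * Popping and pushing are O(1) and need no extra allocation.
 */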

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         status);
    }
    switch (status) {
    case 0:
        return 0;
    case 1: /* Invalid Command Opcode */
        return -ENOSYS;
    case 2: /* Invalid Field in Command */
        return -EINVAL;
    default:
        return -EIO;
    }
}
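
/*
 * Completion detection relies on the NVMe phase tag: bit 0 of each CQ
 * entry's status field.  The controller flips the tag every time it wraps
 * around the ring, so an entry is new exactly when its phase bit differs
 * from q->cq_phase.  nvme_process_completion() below consumes entries until
 * it sees a stale tag, flipping q->cq_phase whenever cq.head wraps.
 */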

/* With q->lock */
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (s->plugged) {
        trace_nvme_process_completion_queue_plugged(s, q->index);
        return false;
    }

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %" PRId16 "\n",
                    cid);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}

static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(q);
    nvme_process_completion(q);
    qemu_mutex_unlock(&q->lock);
}

static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);

    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
    return ret;
}
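
/*
 * nvme_cmd_sync() is meant for setup-time admin commands: it spins the
 * event loop via BDRV_POLL_WHILE() until nvme_cmd_sync_cb() overwrites the
 * -EINPROGRESS sentinel with the translated completion status.  A typical
 * call, mirroring nvme_identify() below:
 *
 *     NvmeCmd cmd = {
 *         .opcode = NVME_ADM_CMD_IDENTIFY,
 *         .cdw10  = cpu_to_le32(0x1),    // CNS 0x1: identify controller
 *     };
 *     if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
 *         ...report failure...
 *     }
 */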

static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NvmeIdCtrl *idctrl;
    NvmeIdNs *idns;
    NvmeLBAF *lbaf;
    uint8_t *resp;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
    if (!resp) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    idctrl = (NvmeIdCtrl *)resp;
    idns = (NvmeIdNs *)resp;
    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }
    cmd.dptr.prp1 = cpu_to_le64(iova);

    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(idctrl->nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(idctrl->oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROS);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(resp, 0, 4096);

    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(idns->nsze);
    lbaf = &idns->lbaf[NVME_ID_NS_FLBAS_INDEX(idns->flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(idns->dlfeat) &&
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR(idns->dlfeat) ==
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, resp);
    qemu_vfree(resp);
}
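
/*
 * Worked example for the transfer cap computed above, assuming a 4 KiB
 * page: a controller reporting MDTS = 5 allows (1 << 5) * 4 KiB = 128 KiB
 * per command, while the single-page PRP list allows 4096 / 8 = 512
 * entries, i.e. 2 MiB.  MIN_NON_ZERO() picks 128 KiB; with MDTS = 0
 * ("no limit") only the 2 MiB PRP cap applies.
 */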

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

        /*
         * Do an early check for completions. q->lock isn't needed because
         * nvme_process_completion() only runs in the event loop thread and
         * cannot race with itself.
         */
        if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
            continue;
        }

        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(q)) {
            /* Keep polling */
            progress = true;
        }
        qemu_mutex_unlock(&q->lock);
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(q);
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(q);
        return false;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
}
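
/*
 * Decoding the admin command dwords above (per the NVMe Create I/O Queue
 * commands): CDW10 packs the 0-based queue size into bits 31:16 and the
 * queue identifier into bits 15:0.  For CREATE_CQ, CDW11 = 0x3 sets
 * "physically contiguous" (bit 0) and "interrupts enabled" (bit 1); for
 * CREATE_SQ, CDW11 = 0x1 | (n << 16) sets "physically contiguous" and
 * binds the SQ to completion queue n.
 */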

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);

    trace_nvme_poll_cb(s);
    return nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    if (!s->regs) {
        ret = -EINVAL;
        goto out;
    }

    /* Perform the initialization sequence as described in NVMe spec
     * "7.6.1 Initialization". */

    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
    if (!s->queues[0]) {
        ret = -EINVAL;
        goto out;
    }
    s->nr_queues = 1;
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);
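
    /*
     * CC encoding used below, per the NVMe controller configuration
     * register layout: IOSQES (bits 19:16) and IOCQES (bits 23:20) carry
     * log2 of the queue entry sizes, so ctz32(64) = 6 and ctz32(16) = 4,
     * and bit 0 is EN.  Writing EN = 1 only after AQA/ASQ/ACQ are
     * programmed is what the "7.6.1 Initialization" sequence requires.
     */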
    /* After setting up all control registers we can enable the device now. */
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                              0x1);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}
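
/*
 * Illustrative command line (PCI address and node name hypothetical):
 * besides the filename syntax parsed below, the driver can be selected
 * with explicit blockdev options, e.g.
 *
 *     -blockdev driver=nvme,node-name=nvme0,device=0000:44:00.0,namespace=1
 *
 * "device" and "namespace" are the two options declared in runtime_opts.
 */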
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}
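
/*
 * Feature ID 0x06 in CDW10 below is the NVMe "Volatile Write Cache"
 * feature; CDW11 bit 0 turns the cache on or off.  This is how the block
 * layer's cache configuration is pushed down to the controller.
 */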
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
        } else {
            ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                                  errp);
        }
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}
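
/*
 * Size bookkeeping example for the helpers above and below: a namespace
 * formatted with 512-byte blocks reports lbaf->ds = 9, so blkshift = 9 and
 * a 10 GiB namespace has nsze = 20971520 blocks; nvme_getlength() converts
 * back to bytes with nsze << blkshift.
 */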
static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}
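
/*
 * PRP recap for the mapping code below: each command addresses guest
 * memory through at most two Physical Region Page pointers.  One page of
 * data needs only PRP1; exactly two pages use PRP1 + PRP2 directly; longer
 * transfers make PRP2 point at a per-request PRP list page (allocated in
 * nvme_create_queue_pair()) holding up to page_size / 8 further entries.
 */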

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}

typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}
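
/*
 * Read/write command encoding used below: CDW10/CDW11 carry the 64-bit
 * starting LBA (offset >> blkshift) split low/high, and CDW12 bits 15:0
 * carry the 0-based block count, so e.g. a 128 KiB write at byte offset
 * 1 MiB on a 512-byte-block namespace becomes SLBA = 2048, NLB = 255.
 * Bit 30 of CDW12 is the FUA (force unit access) flag.
 */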

static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_blockalign(bs, bytes);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}
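
/*
 * WRITE ZEROES flag bits used below: CDW12 bits 15:0 again hold the
 * 0-based block count (so at most 65536 blocks per command), bit 25 is the
 * deallocate hint mapped from BDRV_REQ_MAY_UNMAP, and bit 30 is FUA,
 * mirroring the regular write path.
 */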

static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset,
                                              int bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;

    uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROS,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}
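
/*
 * The discard path below issues a Dataset Management command with a single
 * NvmeDsmRange descriptor: CDW10 holds the 0-based range count (0 means
 * one range) and CDW11 bit 2 is the "attribute - deallocate" (AD) bit.
 * The descriptor itself travels through the usual PRP mapping machinery.
 */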
1242e87a09d6SMaxim Levitsky static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
1243e87a09d6SMaxim Levitsky                                          int64_t offset,
1244e87a09d6SMaxim Levitsky                                          int bytes)
1245e87a09d6SMaxim Levitsky {
1246e87a09d6SMaxim Levitsky     BDRVNVMeState *s = bs->opaque;
1247e87a09d6SMaxim Levitsky     NVMeQueuePair *ioq = s->queues[1];
1248e87a09d6SMaxim Levitsky     NVMeRequest *req;
1249e87a09d6SMaxim Levitsky     NvmeDsmRange *buf;
1250e87a09d6SMaxim Levitsky     QEMUIOVector local_qiov;
1251e87a09d6SMaxim Levitsky     int ret;
1252e87a09d6SMaxim Levitsky
1253e87a09d6SMaxim Levitsky     NvmeCmd cmd = {
1254e87a09d6SMaxim Levitsky         .opcode = NVME_CMD_DSM,
1255e87a09d6SMaxim Levitsky         .nsid = cpu_to_le32(s->nsid),
1256e87a09d6SMaxim Levitsky         .cdw10 = cpu_to_le32(0), /* number of ranges, 0-based */
1257e87a09d6SMaxim Levitsky         .cdw11 = cpu_to_le32(1 << 2), /* Attribute - Deallocate (AD) bit */
1258e87a09d6SMaxim Levitsky     };
1259e87a09d6SMaxim Levitsky
1260e87a09d6SMaxim Levitsky     NVMeCoData data = {
1261e87a09d6SMaxim Levitsky         .ctx = bdrv_get_aio_context(bs),
1262e87a09d6SMaxim Levitsky         .ret = -EINPROGRESS,
1263e87a09d6SMaxim Levitsky     };
1264e87a09d6SMaxim Levitsky
1265e87a09d6SMaxim Levitsky     if (!s->supports_discard) {
1266e87a09d6SMaxim Levitsky         return -ENOTSUP;
1267e87a09d6SMaxim Levitsky     }
1268e87a09d6SMaxim Levitsky
1269e87a09d6SMaxim Levitsky     assert(s->nr_queues > 1);
1270e87a09d6SMaxim Levitsky
1271e87a09d6SMaxim Levitsky     buf = qemu_try_blockalign0(bs, s->page_size);
1272e87a09d6SMaxim Levitsky     if (!buf) {
1273e87a09d6SMaxim Levitsky         return -ENOMEM;
1274e87a09d6SMaxim Levitsky     }
1275e87a09d6SMaxim Levitsky
1276e87a09d6SMaxim Levitsky     buf->nlb = cpu_to_le32(bytes >> s->blkshift);
1277e87a09d6SMaxim Levitsky     buf->slba = cpu_to_le64(offset >> s->blkshift);
1278e87a09d6SMaxim Levitsky     buf->cattr = 0;
1279e87a09d6SMaxim Levitsky
1280e87a09d6SMaxim Levitsky     qemu_iovec_init(&local_qiov, 1);
1281e87a09d6SMaxim Levitsky     qemu_iovec_add(&local_qiov, buf, 4096); /* one 16-byte range; NVMe pages are >= 4 KiB */
1282e87a09d6SMaxim Levitsky
1283e87a09d6SMaxim Levitsky     req = nvme_get_free_req(ioq);
1284e87a09d6SMaxim Levitsky     assert(req);
1285e87a09d6SMaxim Levitsky
1286e87a09d6SMaxim Levitsky     qemu_co_mutex_lock(&s->dma_map_lock);
1287e87a09d6SMaxim Levitsky     ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
1288e87a09d6SMaxim Levitsky     qemu_co_mutex_unlock(&s->dma_map_lock);
1289e87a09d6SMaxim Levitsky
1290e87a09d6SMaxim Levitsky     if (ret) {
1291b75fd5f5SStefan Hajnoczi         nvme_put_free_req_and_wake(ioq, req);
1292e87a09d6SMaxim Levitsky         goto out;
1293e87a09d6SMaxim Levitsky     }
1294e87a09d6SMaxim Levitsky
1295e87a09d6SMaxim Levitsky     trace_nvme_dsm(s, offset, bytes);
1296e87a09d6SMaxim Levitsky
1297b75fd5f5SStefan Hajnoczi     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
1298e87a09d6SMaxim Levitsky
1299e87a09d6SMaxim Levitsky     data.co = qemu_coroutine_self();
1300e87a09d6SMaxim Levitsky     while (data.ret == -EINPROGRESS) {
1301e87a09d6SMaxim Levitsky         qemu_coroutine_yield();
1302e87a09d6SMaxim Levitsky     }
1303e87a09d6SMaxim Levitsky
1304e87a09d6SMaxim Levitsky     qemu_co_mutex_lock(&s->dma_map_lock);
1305e87a09d6SMaxim Levitsky     ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
1306e87a09d6SMaxim Levitsky     qemu_co_mutex_unlock(&s->dma_map_lock);
1307e87a09d6SMaxim Levitsky
1308e87a09d6SMaxim Levitsky     if (ret) {
1309e87a09d6SMaxim Levitsky         goto out;
1310e87a09d6SMaxim Levitsky     }
1311e87a09d6SMaxim Levitsky
1312e87a09d6SMaxim Levitsky     ret = data.ret;
1313e87a09d6SMaxim Levitsky     trace_nvme_dsm_done(s, offset, bytes, ret);
1314e87a09d6SMaxim Levitsky out:
1315e87a09d6SMaxim Levitsky     qemu_iovec_destroy(&local_qiov);
1316e87a09d6SMaxim Levitsky     qemu_vfree(buf);
1317e87a09d6SMaxim Levitsky     return ret;
1318e87a09d6SMaxim Levitsky
1319e87a09d6SMaxim Levitsky }
1320e87a09d6SMaxim Levitsky
1321e87a09d6SMaxim Levitsky
1322bdd6a90aSFam Zheng static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
1323bdd6a90aSFam Zheng                                BlockReopenQueue *queue, Error **errp)
1324bdd6a90aSFam Zheng {
1325bdd6a90aSFam Zheng     return 0;
1326bdd6a90aSFam Zheng }
1327bdd6a90aSFam Zheng
1328998b3a1eSMax Reitz static void nvme_refresh_filename(BlockDriverState *bs)
1329bdd6a90aSFam Zheng {
1330cc61b074SMax Reitz     BDRVNVMeState *s = bs->opaque;
1331bdd6a90aSFam Zheng
1332cc61b074SMax Reitz     snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
1333cc61b074SMax Reitz              s->device, s->nsid);
1334bdd6a90aSFam Zheng }
1335bdd6a90aSFam Zheng
1336bdd6a90aSFam Zheng static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
1337bdd6a90aSFam Zheng {
1338bdd6a90aSFam Zheng     BDRVNVMeState *s = bs->opaque;
1339bdd6a90aSFam Zheng
1340bdd6a90aSFam Zheng     bs->bl.opt_mem_alignment = s->page_size;
1341bdd6a90aSFam Zheng     bs->bl.request_alignment = s->page_size;
1342bdd6a90aSFam Zheng     bs->bl.max_transfer = s->max_transfer;
1343bdd6a90aSFam Zheng }
1344bdd6a90aSFam Zheng
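/*
 * Both alignments above are the controller page size rather than the
 * logical block size: the block layer pads requests to
 * request_alignment, and buffers that do not satisfy opt_mem_alignment
 * cannot be DMA-mapped directly and are assumed to take the
 * qemu_try_blockalign() bounce path near the top of this section, which
 * adds a copy in each direction.
 */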
1345bdd6a90aSFam Zheng static void nvme_detach_aio_context(BlockDriverState *bs)
1346bdd6a90aSFam Zheng {
1347bdd6a90aSFam Zheng     BDRVNVMeState *s = bs->opaque;
1348bdd6a90aSFam Zheng
13497838c67fSStefan Hajnoczi     for (int i = 0; i < s->nr_queues; i++) {
13507838c67fSStefan Hajnoczi         NVMeQueuePair *q = s->queues[i];
13517838c67fSStefan Hajnoczi
13527838c67fSStefan Hajnoczi         qemu_bh_delete(q->completion_bh);
13537838c67fSStefan Hajnoczi         q->completion_bh = NULL;
13547838c67fSStefan Hajnoczi     }
13557838c67fSStefan Hajnoczi
1356bdd6a90aSFam Zheng     aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
1357bdd6a90aSFam Zheng                            false, NULL, NULL);
1358bdd6a90aSFam Zheng }
1359bdd6a90aSFam Zheng
1360bdd6a90aSFam Zheng static void nvme_attach_aio_context(BlockDriverState *bs,
1361bdd6a90aSFam Zheng                                     AioContext *new_context)
1362bdd6a90aSFam Zheng {
1363bdd6a90aSFam Zheng     BDRVNVMeState *s = bs->opaque;
1364bdd6a90aSFam Zheng
1365bdd6a90aSFam Zheng     s->aio_context = new_context;
1366bdd6a90aSFam Zheng     aio_set_event_notifier(new_context, &s->irq_notifier,
1367bdd6a90aSFam Zheng                            false, nvme_handle_event, nvme_poll_cb);
13687838c67fSStefan Hajnoczi
13697838c67fSStefan Hajnoczi     for (int i = 0; i < s->nr_queues; i++) {
13707838c67fSStefan Hajnoczi         NVMeQueuePair *q = s->queues[i];
13717838c67fSStefan Hajnoczi
13727838c67fSStefan Hajnoczi         q->completion_bh =
13737838c67fSStefan Hajnoczi             aio_bh_new(new_context, nvme_process_completion_bh, q);
13747838c67fSStefan Hajnoczi     }
1375bdd6a90aSFam Zheng }
1376bdd6a90aSFam Zheng
1377bdd6a90aSFam Zheng static void nvme_aio_plug(BlockDriverState *bs)
1378bdd6a90aSFam Zheng {
1379bdd6a90aSFam Zheng     BDRVNVMeState *s = bs->opaque;
13802f0d8947SPaolo Bonzini     assert(!s->plugged);
13812f0d8947SPaolo Bonzini     s->plugged = true;
1382bdd6a90aSFam Zheng }
1383bdd6a90aSFam Zheng
1384bdd6a90aSFam Zheng static void nvme_aio_unplug(BlockDriverState *bs)
1385bdd6a90aSFam Zheng {
1386bdd6a90aSFam Zheng     int i;
1387bdd6a90aSFam Zheng     BDRVNVMeState *s = bs->opaque;
1388bdd6a90aSFam Zheng     assert(s->plugged);
13892f0d8947SPaolo Bonzini     s->plugged = false;
1390bdd6a90aSFam Zheng     for (i = 1; i < s->nr_queues; i++) { /* queues[0] is the admin queue */
1391bdd6a90aSFam Zheng         NVMeQueuePair *q = s->queues[i];
1392bdd6a90aSFam Zheng         qemu_mutex_lock(&q->lock);
1393b75fd5f5SStefan Hajnoczi         nvme_kick(q);
1394b75fd5f5SStefan Hajnoczi         nvme_process_completion(q);
1395bdd6a90aSFam Zheng         qemu_mutex_unlock(&q->lock);
1396bdd6a90aSFam Zheng     }
1397bdd6a90aSFam Zheng }
1398bdd6a90aSFam Zheng
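/*
 * Batching model behind plug/unplug: while s->plugged is set, command
 * submission is assumed to only advance sq.tail and bump q->need_kick
 * instead of writing the submission-queue tail doorbell; nvme_aio_unplug()
 * above then rings each I/O queue's doorbell once via nvme_kick(), so a
 * burst of requests costs one MMIO write per queue rather than one per
 * command.
 */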
13999ed61612SFam Zheng static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
14009ed61612SFam Zheng {
14019ed61612SFam Zheng     int ret;
14029ed61612SFam Zheng     BDRVNVMeState *s = bs->opaque;
14039ed61612SFam Zheng
14049ed61612SFam Zheng     ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
14059ed61612SFam Zheng     if (ret) {
14069ed61612SFam Zheng         /* FIXME: we may run out of IOVA addresses after repeated
14079ed61612SFam Zheng          * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
14089ed61612SFam Zheng          * doesn't reclaim addresses for fixed mappings. */
14099ed61612SFam Zheng         error_report("nvme_register_buf failed: %s", strerror(-ret));
14109ed61612SFam Zheng     }
14119ed61612SFam Zheng }
14129ed61612SFam Zheng
14139ed61612SFam Zheng static void nvme_unregister_buf(BlockDriverState *bs, void *host)
14149ed61612SFam Zheng {
14159ed61612SFam Zheng     BDRVNVMeState *s = bs->opaque;
14169ed61612SFam Zheng
14179ed61612SFam Zheng     qemu_vfio_dma_unmap(s->vfio, host);
14189ed61612SFam Zheng }
14199ed61612SFam Zheng
14202654267cSMax Reitz static const char *const nvme_strong_runtime_opts[] = {
14212654267cSMax Reitz     NVME_BLOCK_OPT_DEVICE,
14222654267cSMax Reitz     NVME_BLOCK_OPT_NAMESPACE,
14232654267cSMax Reitz
14242654267cSMax Reitz     NULL
14252654267cSMax Reitz };
14262654267cSMax Reitz
1427bdd6a90aSFam Zheng static BlockDriver bdrv_nvme = {
1428bdd6a90aSFam Zheng     .format_name              = "nvme",
1429bdd6a90aSFam Zheng     .protocol_name            = "nvme",
1430bdd6a90aSFam Zheng     .instance_size            = sizeof(BDRVNVMeState),
1431bdd6a90aSFam Zheng
14325a5e7f8cSMaxim Levitsky     .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
14335a5e7f8cSMaxim Levitsky     .create_opts              = &bdrv_create_opts_simple,
14345a5e7f8cSMaxim Levitsky
1435bdd6a90aSFam Zheng     .bdrv_parse_filename      = nvme_parse_filename,
1436bdd6a90aSFam Zheng     .bdrv_file_open           = nvme_file_open,
1437bdd6a90aSFam Zheng     .bdrv_close               = nvme_close,
1438bdd6a90aSFam Zheng     .bdrv_getlength           = nvme_getlength,
1439118d1b6aSMaxim Levitsky     .bdrv_probe_blocksizes    = nvme_probe_blocksizes,
1440bdd6a90aSFam Zheng
1441bdd6a90aSFam Zheng     .bdrv_co_preadv           = nvme_co_preadv,
1442bdd6a90aSFam Zheng     .bdrv_co_pwritev          = nvme_co_pwritev,
1443e0dd95e3SMaxim Levitsky
1444e0dd95e3SMaxim Levitsky     .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
1445e87a09d6SMaxim Levitsky     .bdrv_co_pdiscard         = nvme_co_pdiscard,
1446e0dd95e3SMaxim Levitsky
1447bdd6a90aSFam Zheng     .bdrv_co_flush_to_disk    = nvme_co_flush,
1448bdd6a90aSFam Zheng     .bdrv_reopen_prepare      = nvme_reopen_prepare,
1449bdd6a90aSFam Zheng
1450bdd6a90aSFam Zheng     .bdrv_refresh_filename    = nvme_refresh_filename,
1451bdd6a90aSFam Zheng     .bdrv_refresh_limits      = nvme_refresh_limits,
14522654267cSMax Reitz     .strong_runtime_opts      = nvme_strong_runtime_opts,
1453bdd6a90aSFam Zheng
1454bdd6a90aSFam Zheng     .bdrv_detach_aio_context  = nvme_detach_aio_context,
1455bdd6a90aSFam Zheng     .bdrv_attach_aio_context  = nvme_attach_aio_context,
1456bdd6a90aSFam Zheng
1457bdd6a90aSFam Zheng     .bdrv_io_plug             = nvme_aio_plug,
1458bdd6a90aSFam Zheng     .bdrv_io_unplug           = nvme_aio_unplug,
14599ed61612SFam Zheng
14609ed61612SFam Zheng     .bdrv_register_buf        = nvme_register_buf,
14619ed61612SFam Zheng     .bdrv_unregister_buf      = nvme_unregister_buf,
1462bdd6a90aSFam Zheng };
1463bdd6a90aSFam Zheng
1464bdd6a90aSFam Zheng static void bdrv_nvme_init(void)
1465bdd6a90aSFam Zheng {
1466bdd6a90aSFam Zheng     bdrv_register(&bdrv_nvme);
1467bdd6a90aSFam Zheng }
1468bdd6a90aSFam Zheng
1469bdd6a90aSFam Zheng block_init(bdrv_nvme_init);
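/*
 * Example invocations (illustrative values: the PCI address and namespace
 * below are placeholders, and the device must first be unbound from the
 * host NVMe driver and bound to vfio-pci):
 *
 *     qemu-img info nvme://0000:01:00.0/1
 *
 *     qemu-system-x86_64 ... \
 *         -drive driver=nvme,device=0000:01:00.0,namespace=1,if=none,id=nvme0 \
 *         -device virtio-blk-pci,drive=nvme0
 *
 * Both forms resolve to the same options: "nvme://<bdf>/<namespace>" is
 * split by nvme_parse_filename() into the device and namespace keys listed
 * in nvme_strong_runtime_opts[] above.
 */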