/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192

typedef struct {
    int32_t head, tail;
    uint8_t *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    bool busy;
} NVMeRequest;

typedef struct {
    CoQueue free_req_queue;
    QemuMutex lock;

    /* Fields protected by BQL */
    int index;
    uint8_t *prp_list_pages;

    /* Fields protected by @lock */
    NVMeQueue sq, cq;
    int cq_phase;
    NVMeRequest reqs[NVME_QUEUE_SIZE];
    bool busy;
    int need_kick;
    int inflight;
} NVMeQueuePair;

/* Memory mapped registers */
typedef volatile struct {
    uint64_t cap;
    uint32_t vs;
    uint32_t intms;
    uint32_t intmc;
    uint32_t cc;
    uint32_t reserved0;
    uint32_t csts;
    uint32_t nssr;
    uint32_t aqa;
    uint64_t asq;
    uint64_t acq;
    uint32_t cmbloc;
    uint32_t cmbsz;
    uint8_t reserved1[0xec0];
    uint8_t cmd_set_specific[0x100];
    uint32_t doorbells[];
} QEMU_PACKED NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
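
/*
 * Doorbell layout (per the NVMe spec): the doorbell array lives at BAR
 * offset 0x1000 and each doorbell is 2^(2 + CAP.DSTRD) bytes wide.  Queue
 * pair i rings doorbells[i * 2 * doorbell_scale] for its SQ tail and
 * doorbells[i * 2 * doorbell_scale + 1] for its CQ head, where
 * doorbell_scale (set up in nvme_init() below) converts the stride into
 * uint32_t elements; e.g. DSTRD == 0 means a 4-byte stride and a scale of 1.
 */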
typedef struct {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    NVMeRegs *regs;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    int nr_queues;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier;
    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    uint64_t max_transfer;
    int plugged;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;
} BDRVNVMeState;

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_blockalign0(bs, bytes);

    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return;
    }
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
    }
}

static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
{
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}
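
/*
 * Bottom half scheduled from nvme_process_completion() after request slots
 * have been freed: wake every coroutine queued in nvme_get_free_req() so
 * it can retry.
 */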
static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
    uint64_t prp_list_iova;

    qemu_mutex_init(&q->lock);
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_QUEUE_SIZE);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_QUEUE_SIZE,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }
    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale + 1];

    return q;
fail:
    nvme_free_queue_pair(bs, q);
    return NULL;
}

/* With q->lock */
static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
{
    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the
     * device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}
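
/*
 * Note: one submission queue entry is always left unused so that a full
 * ring (head == tail + 1) can be told apart from an empty one, which caps
 * each queue pair at NVME_QUEUE_SIZE - 1 outstanding commands.
 */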

/* Find a free request element if any, otherwise:
 *   a) if in coroutine context, try to wait for one to become available;
 *   b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    int i;
    NVMeRequest *req = NULL;

    qemu_mutex_lock(&q->lock);
    while (q->inflight + q->need_kick > NVME_QUEUE_SIZE - 2) {
        /* We have to leave one slot empty as that is the full queue case (head
         * == tail + 1). */
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        if (!q->reqs[i].busy) {
            q->reqs[i].busy = true;
            req = &q->reqs[i];
            break;
        }
    }
    /* We have checked inflight and need_kick while holding q->lock, so one
     * free req must be available. */
    assert(req);
    qemu_mutex_unlock(&q->lock);
    return req;
}

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         status);
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}

/* With q->lock */
static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
{
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (q->busy || s->plugged) {
        trace_nvme_process_completion_queue_busy(s, q->index);
        return false;
    }
    q->busy = true;
    assert(q->inflight >= 0);
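    /*
     * A completion entry belongs to the host while its Phase Tag (bit 0 of
     * the status field) differs from q->cq_phase; the expected phase flips
     * each time the head wraps around the queue.
     */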
    while (q->inflight) {
        uint16_t cid;
        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if (!c->cid || (le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %" PRIu16 "\n",
                    cid);
            continue;
        }
        assert(cid <= NVME_QUEUE_SIZE);
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        preq->busy = false;
        preq->cb = preq->opaque = NULL;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, nvme_translate_error(c));
        qemu_mutex_lock(&q->lock);
        c->cid = cpu_to_le16(0);
        q->inflight--;
        /* Flip Phase Tag bit. */
        c->status = cpu_to_le16(le16_to_cpu(c->status) ^ 0x1);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        if (!qemu_co_queue_empty(&q->free_req_queue)) {
            aio_bh_schedule_oneshot(s->aio_context, nvme_free_req_queue_cb, q);
        }
    }
    q->busy = false;
    return progress;
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
                                NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(s, q);
    nvme_process_completion(s, q);
    qemu_mutex_unlock(&q->lock);
}
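
/*
 * Synchronous command helper: nvme_cmd_sync() submits a command with
 * nvme_cmd_sync_cb() as completion callback, then polls the AioContext
 * until the callback replaces the -EINPROGRESS marker with the command's
 * status.
 */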
static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    NVMeRequest *req;
    BDRVNVMeState *s = bs->opaque;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);

    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
    return ret;
}
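
/*
 * Probe the controller with Identify Controller (CNS 0x1) and the target
 * namespace with Identify Namespace (CNS 0x0).  Both responses are read
 * into the same 4096-byte DMA buffer, one after the other.
 */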
static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NvmeIdCtrl *idctrl;
    NvmeIdNs *idns;
    uint8_t *resp;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
    if (!resp) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    idctrl = (NvmeIdCtrl *)resp;
    idns = (NvmeIdNs *)resp;
    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }
    cmd.prp1 = cpu_to_le64(iova);

    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(idctrl->nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);

    memset(resp, 0, 4096);

    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(idns->nsze);

out:
    qemu_vfio_dma_unmap(s->vfio, resp);
    qemu_vfree(resp);
}

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(s, q)) {
            /* Keep polling */
            progress = true;
        }
        qemu_mutex_unlock(&q->lock);
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    aio_context_acquire(s->aio_context);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
    aio_context_release(s->aio_context);
}
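
/*
 * Create one I/O queue pair via the admin queue.  In both commands cdw10
 * carries the 0's-based queue size (bits 31:16) and the queue id (bits
 * 15:0); the CQ's cdw11 = 0x3 asks for a physically contiguous queue with
 * interrupts enabled, and the SQ's cdw11 binds the SQ to the CQ with the
 * same id (bits 31:16) and likewise requests contiguous memory (bit 0).
 */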
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
}

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
    bool progress = false;

    trace_nvme_poll_cb(s);
    progress = nvme_poll_queues(s);
    return progress;
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto fail;
    }

    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    if (!s->regs) {
        ret = -EINVAL;
        goto fail;
    }

    /* Perform the initialization sequence described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto fail;
    }

    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);
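
    /*
     * CAP fields decoded above: CSS bit 37 advertises the NVM command set,
     * MPSMIN (bits 51:48) gives the minimum page size as 2^(12 + MPSMIN),
     * DSTRD (bits 35:32) gives the doorbell stride as 2^(2 + DSTRD) bytes,
     * and TO (bits 31:24) is the worst-case enable/disable latency in
     * 500 ms units, capped here at 30 seconds.
     */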

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRIu64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto fail;
        }
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->nr_queues = 1;
    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
    if (!s->queues[0]) {
        ret = -EINVAL;
        goto fail;
    }
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);

    /* After setting up all control registers we can enable device now. */
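    /*
     * CC.IOCQES (bits 23:20) and CC.IOSQES (bits 19:16) are the log2 of
     * the I/O completion and submission queue entry sizes; CC.EN (bit 0)
     * enables the controller.
     */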
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                              0x1);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRIu64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto fail_queue;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto fail_queue;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto fail_handler;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
        goto fail_handler;
    }
    return 0;

fail_handler:
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
fail_queue:
    nvme_free_queue_pair(bs, s->queues[0]);
fail:
    g_free(s->queues);
    if (s->regs) {
        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    }
    if (s->vfio) {
        qemu_vfio_close(s->vfio);
    }
    event_notifier_cleanup(&s->irq_notifier);
    return ret;
}

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put(options, NVME_BLOCK_OPT_DEVICE,
                      qstring_from_str(tmp));
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put(options, NVME_BLOCK_OPT_DEVICE, qstring_from_str(device));
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put(options, NVME_BLOCK_OPT_NAMESPACE,
                  qstring_from_str(*namespace ? namespace : "1"));
    }
}
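
/*
 * Toggle the controller's volatile write cache with Set Features
 * (FID 0x06 in cdw10); cdw11 bit 0 is the Write Cache Enable flag.
 */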
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(bs, s->queues[i]);
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
        } else {
            ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                                  errp);
        }
        if (ret) {
            goto fail;
        }
    }
    bs->supported_write_flags = BDRV_REQ_FUA;
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    return s->nsze << BDRV_SECTOR_BITS;
}
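
/*
 * Temporary VFIO DMA mappings of guest buffers can only be reclaimed in
 * bulk (qemu_vfio_dma_reset_temporary), so the driver tracks the number of
 * mapped bytes in dma_map_count and resets the IOVA space once it drops to
 * zero while other requests are waiting for room.
 */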

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = iova + j * s->page_size;
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = 0;
        break;
    case 2:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = cpu_to_le64(pagelist[1]);
        break;
    default:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = cpu_to_le64(req->prp_list_iova);
        for (i = 0; i < entries - 1; ++i) {
            pagelist[i] = cpu_to_le64(pagelist[i + 1]);
        }
        pagelist[entries - 1] = 0;
        break;
    }
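
    /*
     * Example: three mapped data pages A, B and C yield prp1 = A and
     * prp2 = the IOVA of this request's PRP list page, whose first two
     * entries point at B and C; with one or two pages the addresses are
     * stored inline in prp1/prp2 instead.
     */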
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}

typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    aio_bh_schedule_oneshot(data->ctx, nvme_rw_cb_bh, data);
}
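
/*
 * Aligned read/write: map the qiov into PRP entries under dma_map_lock,
 * submit, then yield until nvme_rw_cb() reports completion.  If the
 * completion arrives before the coroutine yields, data.ret is already set
 * and the -EINPROGRESS loop below falls through without yielding.
 */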
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    uint32_t cdw12 = (((bytes >> BDRV_SECTOR_BITS) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> BDRV_SECTOR_BITS) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> BDRV_SECTOR_BITS) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        req->busy = false;
        return r;
    }
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}
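
/*
 * Vectors that are not page-aligned cannot be mapped for DMA directly, so
 * fall back to one page-aligned bounce buffer and copy data in (for
 * writes) or out (for reads) around the aligned request.
 */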
static coroutine_fn int nvme_co_prw(BlockDriverState *bs,
                                    uint64_t offset, uint64_t bytes,
                                    QEMUIOVector *qiov, bool is_write,
                                    int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_blockalign(bs, bytes);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}


static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
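
/*
 * The namespace is a flat array of sectors with no allocation tracking, so
 * every range is reported as allocated and maps 1:1 to the device offset.
 */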
static int64_t coroutine_fn nvme_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    *pnum = nb_sectors;
    *file = bs;

    return BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_OFFSET_VALID |
           (sector_num << BDRV_SECTOR_BITS);
}

static void nvme_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    QINCREF(opts);
    qdict_del(opts, "filename");

    if (!qdict_size(opts)) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename), "%s://",
                 bs->drv->format_name);
    }

    qdict_put(opts, "driver", qstring_from_str(bs->drv->format_name));
    bs->full_open_options = opts;
}

static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}

static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
}

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);
}

static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    s->plugged++;
}

static void nvme_aio_unplug(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    if (!--s->plugged) {
        for (i = 1; i < s->nr_queues; i++) {
            NVMeQueuePair *q = s->queues[i];
            qemu_mutex_lock(&q->lock);
            nvme_kick(s, q);
            nvme_process_completion(s, q);
            qemu_mutex_unlock(&q->lock);
        }
    }
}
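
/*
 * Buffers registered via bdrv_register_buf() get a fixed (not temporary)
 * VFIO DMA mapping up front, so I/O from them skips the per-request
 * map/unmap work in nvme_cmd_map_qiov().
 */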
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,
    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_co_get_block_status = nvme_co_get_block_status,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);