1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (C) 2012-2016 Intel Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "opt_nvme.h"
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/ioccom.h>
37 #include <sys/proc.h>
38 #include <sys/smp.h>
39 #include <sys/uio.h>
40 #include <sys/sbuf.h>
41 #include <sys/endian.h>
42 #include <machine/stdarg.h>
43 #include <vm/vm.h>
44
45 #include "nvme_private.h"
46 #include "nvme_linux.h"
47
48 #define B4_CHK_RDY_DELAY_MS 2300 /* work around controller bug */
49
50 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
51 struct nvme_async_event_request *aer);
52
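/* Issue a bus-space barrier covering the controller's entire register window. */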
53 static void
54 nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags)
55 {
56 bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags);
57 }
58
59 static void
60 nvme_ctrlr_devctl_va(struct nvme_controller *ctrlr, const char *type,
61 const char *msg, va_list ap)
62 {
63 struct sbuf sb;
64 int error;
65
66 if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
67 return;
68 sbuf_printf(&sb, "name=\"%s\" ", device_get_nameunit(ctrlr->dev));
69 sbuf_vprintf(&sb, msg, ap);
70 error = sbuf_finish(&sb);
71 if (error == 0)
72 devctl_notify("nvme", "controller", type, sbuf_data(&sb));
73 sbuf_delete(&sb);
74 }
75
76 static void
77 nvme_ctrlr_devctl(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
78 {
79 va_list ap;
80
81 va_start(ap, msg);
82 nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
83 va_end(ap);
84 }
85
86 static void
87 nvme_ctrlr_devctl_log(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
88 {
89 struct sbuf sb;
90 va_list ap;
91 int error;
92
93 if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
94 return;
95 sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));
96 va_start(ap, msg);
97 sbuf_vprintf(&sb, msg, ap);
98 va_end(ap);
99 error = sbuf_finish(&sb);
100 if (error == 0)
101 printf("%s\n", sbuf_data(&sb));
102 sbuf_delete(&sb);
103 va_start(ap, msg);
104 nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
105 va_end(ap);
106 }
107
108 static int
109 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
110 {
111 struct nvme_qpair *qpair;
112 uint32_t num_entries;
113 int error;
114
115 qpair = &ctrlr->adminq;
116 qpair->id = 0;
117 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
118 qpair->domain = ctrlr->domain;
119
120 num_entries = NVME_ADMIN_ENTRIES;
121 TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
122 /*
123 * If admin_entries was overridden to an invalid value, revert it
124 * back to our default value.
125 */
126 if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
127 num_entries > NVME_MAX_ADMIN_ENTRIES) {
128 nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
129 "specified\n", num_entries);
130 num_entries = NVME_ADMIN_ENTRIES;
131 }
132
133 /*
134 * The admin queue's max xfer size is treated differently than the
135 * max I/O xfer size. 16KB is sufficient here - maybe even less?
136 */
137 error = nvme_qpair_construct(qpair, num_entries, NVME_ADMIN_TRACKERS,
138 ctrlr);
139 return (error);
140 }
141
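/* Map a CPU number to the index of the I/O queue that services it. */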
142 #define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus)
143
144 static int
145 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
146 {
147 struct nvme_qpair *qpair;
148 uint32_t cap_lo;
149 uint16_t mqes;
150 int c, error, i, n;
151 int num_entries, num_trackers, max_entries;
152
153 /*
154 * NVMe spec sets a hard limit of 64K max entries, but devices may
155 * specify a smaller limit, so we need to check the MQES field in the
156 * capabilities register. We also have to cap the number of entries to
157 * what the current stride allows for in BAR 0/1, otherwise the remaining
158 * entries are inaccessible. MQES should reflect this, and this is just a
159 * fail-safe.
160 */
161 max_entries =
162 (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
163 (1 << (ctrlr->dstrd + 1));
164 num_entries = NVME_IO_ENTRIES;
165 TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
166 cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
167 mqes = NVME_CAP_LO_MQES(cap_lo);
168 num_entries = min(num_entries, mqes + 1);
169 num_entries = min(num_entries, max_entries);
170
171 num_trackers = NVME_IO_TRACKERS;
172 TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
173
174 num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
175 num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
176 /*
177 * No need to have more trackers than entries in the submit queue. Note
178 * also that for a queue size of N, we can only have (N-1) commands
179 * outstanding, hence the "-1" here.
180 */
181 num_trackers = min(num_trackers, (num_entries-1));
182
183 /*
184 * Our best estimate for the maximum number of I/Os that we should
185 * normally have in flight at one time. This should be viewed as a hint,
186 * not a hard limit, and will need to be revisited when the upper layers
187 * of the storage system grow multi-queue support.
188 */
189 ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
190
191 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
192 M_NVME, M_ZERO | M_WAITOK);
193
194 for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
195 qpair = &ctrlr->ioq[i];
196
197 /*
198 * Admin queue has ID=0. IO queues start at ID=1 -
199 * hence the 'i+1' here.
200 */
201 qpair->id = i + 1;
202 if (ctrlr->num_io_queues > 1) {
203 /* Find number of CPUs served by this queue. */
204 for (n = 1; QP(ctrlr, c + n) == i; n++)
205 ;
206 /* Shuffle multiple NVMe devices between CPUs. */
207 qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
208 qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
209 } else {
210 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
211 qpair->domain = ctrlr->domain;
212 }
213
214 /*
215 * For I/O queues, use the controller-wide max_xfer_size
216 * calculated in nvme_attach().
217 */
218 error = nvme_qpair_construct(qpair, num_entries, num_trackers,
219 ctrlr);
220 if (error)
221 return (error);
222
223 /*
224 * Do not bother binding interrupts if we only have one I/O
225 * interrupt thread for this controller.
226 */
227 if (ctrlr->num_io_queues > 1)
228 bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
229 }
230
231 return (0);
232 }
233
234 static void
235 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
236 {
237 int i;
238
239 /*
240 * No need to disable queues before failing them. Failing is a superset
241 * of disabling (though pedantically we'd abort the AERs silently with
242 * a different error; when we fail, that hardly matters).
243 */
244 ctrlr->is_failed = true;
245 nvme_qpair_fail(&ctrlr->adminq);
246 if (ctrlr->ioq != NULL) {
247 for (i = 0; i < ctrlr->num_io_queues; i++) {
248 nvme_qpair_fail(&ctrlr->ioq[i]);
249 }
250 }
251 nvme_notify_fail_consumers(ctrlr);
252 }
253
254 /*
255 * Wait for RDY to change.
256 *
257 * Starts sleeping for 1us and geometrically increases it the longer we wait,
258 * capped at 1ms.
259 */
260 static int
261 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
262 {
263 int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms);
264 sbintime_t delta_t = SBT_1US;
265 uint32_t csts;
266
267 while (1) {
268 csts = nvme_mmio_read_4(ctrlr, csts);
269 if (csts == NVME_GONE) /* Hot unplug. */
270 return (ENXIO);
271 if (NVMEV(NVME_CSTS_REG_RDY, csts) == desired_val)
272 break;
273 if (timeout - ticks < 0) {
274 nvme_printf(ctrlr, "controller ready did not become %d "
275 "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
276 return (ENXIO);
277 }
278
279 pause_sbt("nvmerdy", delta_t, 0, C_PREL(1));
280 delta_t = min(SBT_1MS, delta_t * 3 / 2);
281 }
282
283 return (0);
284 }
285
286 static int
287 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
288 {
289 uint32_t cc;
290 uint32_t csts;
291 uint8_t en, rdy;
292 int err;
293
294 cc = nvme_mmio_read_4(ctrlr, cc);
295 csts = nvme_mmio_read_4(ctrlr, csts);
296
297 en = NVMEV(NVME_CC_REG_EN, cc);
298 rdy = NVMEV(NVME_CSTS_REG_RDY, csts);
299
300 /*
301 * Per 3.1.5 in NVME 1.3 spec, transitioning CC.EN from 0 to 1
302 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
303 * CSTS.RDY is 0 "has undefined results". So make sure that CSTS.RDY
304 * isn't the desired value. Short circuit if we're already disabled.
305 */
306 if (en == 0) {
307 /* Wait for RDY == 0 or timeout & fail */
308 if (rdy == 0)
309 return (0);
310 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
311 }
312 if (rdy == 0) {
313 /* EN == 1, wait for RDY == 1 or timeout & fail */
314 err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
315 if (err != 0)
316 return (err);
317 }
318
319 cc &= ~NVMEM(NVME_CC_REG_EN);
320 nvme_mmio_write_4(ctrlr, cc, cc);
321
322 /*
323 * A few drives have firmware bugs that freeze the drive if we access
324 * the mmio too soon after we disable.
325 */
326 if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
327 pause("nvmeR", MSEC_2_TICKS(B4_CHK_RDY_DELAY_MS));
328 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
329 }
330
331 static int
332 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
333 {
334 uint32_t cc;
335 uint32_t csts;
336 uint32_t aqa;
337 uint32_t qsize;
338 uint8_t en, rdy;
339 int err;
340
341 cc = nvme_mmio_read_4(ctrlr, cc);
342 csts = nvme_mmio_read_4(ctrlr, csts);
343
344 en = NVMEV(NVME_CC_REG_EN, cc);
345 rdy = NVMEV(NVME_CSTS_REG_RDY, csts);
346
347 /*
348 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
349 */
350 if (en == 1) {
351 if (rdy == 1)
352 return (0);
353 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
354 }
355
356 /* EN == 0 already, wait for RDY == 0 or timeout & fail */
357 err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
358 if (err != 0)
359 return (err);
360
361 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
362 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
363
364 /* acqs and asqs are 0-based. */
365 qsize = ctrlr->adminq.num_entries - 1;
366
367 aqa = 0;
368 aqa |= NVMEF(NVME_AQA_REG_ACQS, qsize);
369 aqa |= NVMEF(NVME_AQA_REG_ASQS, qsize);
370 nvme_mmio_write_4(ctrlr, aqa, aqa);
371
372 /* Initialization values for CC */
373 cc = 0;
374 cc |= NVMEF(NVME_CC_REG_EN, 1);
375 cc |= NVMEF(NVME_CC_REG_CSS, 0);
376 cc |= NVMEF(NVME_CC_REG_AMS, 0);
377 cc |= NVMEF(NVME_CC_REG_SHN, 0);
378 cc |= NVMEF(NVME_CC_REG_IOSQES, 6); /* SQ entry size == 64 == 2^6 */
379 cc |= NVMEF(NVME_CC_REG_IOCQES, 4); /* CQ entry size == 16 == 2^4 */
380
381 /*
382 * Use the Memory Page Size selected during device initialization. Note
383 * that the value stored in mps is suitable to use here without adjusting by
384 * NVME_MPS_SHIFT.
385 */
386 cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps);
387
388 nvme_ctrlr_barrier(ctrlr, BUS_SPACE_BARRIER_WRITE);
389 nvme_mmio_write_4(ctrlr, cc, cc);
390
391 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
392 }
393
394 static void
395 nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
396 {
397 int i;
398
399 nvme_admin_qpair_disable(&ctrlr->adminq);
400 /*
401 * I/O queues are not allocated before the initial HW
402 * reset, so do not try to disable them. Use is_initialized
403 * to determine if this is the initial HW reset.
404 */
405 if (ctrlr->is_initialized) {
406 for (i = 0; i < ctrlr->num_io_queues; i++)
407 nvme_io_qpair_disable(&ctrlr->ioq[i]);
408 }
409 }
410
411 static int
412 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
413 {
414 int err;
415
416 TSENTER();
417
418 nvme_ctrlr_disable_qpairs(ctrlr);
419
420 err = nvme_ctrlr_disable(ctrlr);
421 if (err != 0)
422 goto out;
423
424 err = nvme_ctrlr_enable(ctrlr);
425 out:
426
427 TSEXIT();
428 return (err);
429 }
430
431 void
432 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
433 {
434 int cmpset;
435
436 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
437
438 if (cmpset == 0 || ctrlr->is_failed)
439 /*
440 * Controller is already resetting or has failed. Return
441 * immediately since there is no need to kick off another
442 * reset in these cases.
443 */
444 return;
445
446 if (!ctrlr->is_dying)
447 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
448 }
449
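/*
 * Fetch the controller's IDENTIFY data, convert it to host endianness, and
 * clamp max_xfer_size to the MDTS limit the controller reports.
 */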
450 static int
451 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
452 {
453 struct nvme_completion_poll_status status;
454
455 status.done = 0;
456 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
457 nvme_completion_poll_cb, &status);
458 nvme_completion_poll(&status);
459 if (nvme_completion_is_error(&status.cpl)) {
460 nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
461 return (ENXIO);
462 }
463
464 /* Convert data to host endian */
465 nvme_controller_data_swapbytes(&ctrlr->cdata);
466
467 /*
468 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
469 * controller supports.
470 */
471 if (ctrlr->cdata.mdts > 0)
472 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
473 1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT +
474 NVME_CAP_HI_MPSMIN(ctrlr->cap_hi)));
475
476 return (0);
477 }
478
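/*
 * Ask the controller how many I/O queue pairs it will provide (Set Features,
 * Number of Queues) and trim num_io_queues to what was actually granted.
 */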
479 static int
480 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
481 {
482 struct nvme_completion_poll_status status;
483 int cq_allocated, sq_allocated;
484
485 status.done = 0;
486 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
487 nvme_completion_poll_cb, &status);
488 nvme_completion_poll(&status);
489 if (nvme_completion_is_error(&status.cpl)) {
490 nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
491 return (ENXIO);
492 }
493
494 /*
495 * Data in cdw0 is 0-based.
496 * Lower 16-bits indicate number of submission queues allocated.
497 * Upper 16-bits indicate number of completion queues allocated.
498 */
499 sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
500 cq_allocated = (status.cpl.cdw0 >> 16) + 1;
501
502 /*
503 * Controller may allocate more queues than we requested,
504 * so use the minimum of the number requested and what was
505 * actually allocated.
506 */
507 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
508 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
509 if (ctrlr->num_io_queues > vm_ndomains)
510 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;
511
512 return (0);
513 }
514
515 static int
516 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
517 {
518 struct nvme_completion_poll_status status;
519 struct nvme_qpair *qpair;
520 int i;
521
522 for (i = 0; i < ctrlr->num_io_queues; i++) {
523 qpair = &ctrlr->ioq[i];
524
525 status.done = 0;
526 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
527 nvme_completion_poll_cb, &status);
528 nvme_completion_poll(&status);
529 if (nvme_completion_is_error(&status.cpl)) {
530 nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
531 return (ENXIO);
532 }
533
534 status.done = 0;
535 nvme_ctrlr_cmd_create_io_sq(ctrlr, qpair,
536 nvme_completion_poll_cb, &status);
537 nvme_completion_poll(&status);
538 if (nvme_completion_is_error(&status.cpl)) {
539 nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
540 return (ENXIO);
541 }
542 }
543
544 return (0);
545 }
546
547 static int
548 nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
549 {
550 struct nvme_completion_poll_status status;
551 struct nvme_qpair *qpair;
552
553 for (int i = 0; i < ctrlr->num_io_queues; i++) {
554 qpair = &ctrlr->ioq[i];
555
556 status.done = 0;
557 nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
558 nvme_completion_poll_cb, &status);
559 nvme_completion_poll(&status);
560 if (nvme_completion_is_error(&status.cpl)) {
561 nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
562 return (ENXIO);
563 }
564
565 status.done = 0;
566 nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
567 nvme_completion_poll_cb, &status);
568 nvme_completion_poll(&status);
569 if (nvme_completion_is_error(&status.cpl)) {
570 nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
571 return (ENXIO);
572 }
573 }
574
575 return (0);
576 }
577
578 static int
579 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
580 {
581 struct nvme_namespace *ns;
582 uint32_t i;
583
584 for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
585 ns = &ctrlr->ns[i];
586 nvme_ns_construct(ns, i+1, ctrlr);
587 }
588
589 return (0);
590 }
591
592 static bool
593 is_log_page_id_valid(uint8_t page_id)
594 {
595
596 switch (page_id) {
597 case NVME_LOG_ERROR:
598 case NVME_LOG_HEALTH_INFORMATION:
599 case NVME_LOG_FIRMWARE_SLOT:
600 case NVME_LOG_CHANGED_NAMESPACE:
601 case NVME_LOG_COMMAND_EFFECT:
602 case NVME_LOG_RES_NOTIFICATION:
603 case NVME_LOG_SANITIZE_STATUS:
604 return (true);
605 }
606
607 return (false);
608 }
609
610 static uint32_t
611 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
612 {
613 uint32_t log_page_size;
614
615 switch (page_id) {
616 case NVME_LOG_ERROR:
617 log_page_size = min(
618 sizeof(struct nvme_error_information_entry) *
619 (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
620 break;
621 case NVME_LOG_HEALTH_INFORMATION:
622 log_page_size = sizeof(struct nvme_health_information_page);
623 break;
624 case NVME_LOG_FIRMWARE_SLOT:
625 log_page_size = sizeof(struct nvme_firmware_page);
626 break;
627 case NVME_LOG_CHANGED_NAMESPACE:
628 log_page_size = sizeof(struct nvme_ns_list);
629 break;
630 case NVME_LOG_COMMAND_EFFECT:
631 log_page_size = sizeof(struct nvme_command_effects_page);
632 break;
633 case NVME_LOG_RES_NOTIFICATION:
634 log_page_size = sizeof(struct nvme_res_notification_page);
635 break;
636 case NVME_LOG_SANITIZE_STATUS:
637 log_page_size = sizeof(struct nvme_sanitize_status_page);
638 break;
639 default:
640 log_page_size = 0;
641 break;
642 }
643
644 return (log_page_size);
645 }
646
647 static void
648 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
649 uint8_t state)
650 {
651
652 if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
653 nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n");
654
655 if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
656 nvme_printf(ctrlr, "SMART WARNING: temperature above threshold\n");
657
658 if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
659 nvme_printf(ctrlr, "SMART WARNING: device reliability degraded\n");
660
661 if (state & NVME_CRIT_WARN_ST_READ_ONLY)
662 nvme_printf(ctrlr, "SMART WARNING: media placed in read only mode\n");
663
664 if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
665 nvme_printf(ctrlr, "SMART WARNING: volatile memory backup device failed\n");
666
667 if (state & NVME_CRIT_WARN_ST_PERSISTENT_MEMORY_REGION)
668 nvme_printf(ctrlr, "SMART WARNING: persistent memory read only or unreliable\n");
669
670 if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
671 nvme_printf(ctrlr, "SMART WARNING: unknown critical warning(s): state = 0x%02x\n",
672 state & NVME_CRIT_WARN_ST_RESERVED_MASK);
673
674 nvme_ctrlr_devctl(ctrlr, "critical", "SMART_ERROR", "state=0x%02x", state);
675 }
676
677 static void
678 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
679 {
680 struct nvme_async_event_request *aer = arg;
681 struct nvme_health_information_page *health_info;
682 struct nvme_ns_list *nsl;
683 struct nvme_error_information_entry *err;
684 int i;
685
686 /*
687 * If the log page fetch for some reason completed with an error,
688 * don't pass log page data to the consumers. In practice, this case
689 * should never happen.
690 */
691 if (nvme_completion_is_error(cpl))
692 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
693 aer->log_page_id, NULL, 0);
694 else {
695 /* Convert data to host endian */
696 switch (aer->log_page_id) {
697 case NVME_LOG_ERROR:
698 err = (struct nvme_error_information_entry *)aer->log_page_buffer;
699 for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
700 nvme_error_information_entry_swapbytes(err++);
701 break;
702 case NVME_LOG_HEALTH_INFORMATION:
703 nvme_health_information_page_swapbytes(
704 (struct nvme_health_information_page *)aer->log_page_buffer);
705 break;
706 case NVME_LOG_CHANGED_NAMESPACE:
707 nvme_ns_list_swapbytes(
708 (struct nvme_ns_list *)aer->log_page_buffer);
709 break;
710 case NVME_LOG_COMMAND_EFFECT:
711 nvme_command_effects_page_swapbytes(
712 (struct nvme_command_effects_page *)aer->log_page_buffer);
713 break;
714 case NVME_LOG_RES_NOTIFICATION:
715 nvme_res_notification_page_swapbytes(
716 (struct nvme_res_notification_page *)aer->log_page_buffer);
717 break;
718 case NVME_LOG_SANITIZE_STATUS:
719 nvme_sanitize_status_page_swapbytes(
720 (struct nvme_sanitize_status_page *)aer->log_page_buffer);
721 break;
722 default:
723 break;
724 }
725
726 if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
727 health_info = (struct nvme_health_information_page *)
728 aer->log_page_buffer;
729 nvme_ctrlr_log_critical_warnings(aer->ctrlr,
730 health_info->critical_warning);
731 /*
732 * Critical warnings reported through the
733 * SMART/health log page are persistent, so
734 * clear the associated bits in the async event
735 * config so that we do not receive repeated
736 * notifications for the same event.
737 */
738 aer->ctrlr->async_event_config &=
739 ~health_info->critical_warning;
740 nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
741 aer->ctrlr->async_event_config, NULL, NULL);
742 } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
743 !nvme_use_nvd) {
744 nsl = (struct nvme_ns_list *)aer->log_page_buffer;
745 for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
746 if (nsl->ns[i] > NVME_MAX_NAMESPACES)
747 break;
748 nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
749 }
750 }
751
752 /*
753 * Pass the cpl data from the original async event completion,
754 * not the log page fetch.
755 */
756 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
757 aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
758 }
759
760 /*
761 * Repost another asynchronous event request to replace the one
762 * that just completed.
763 */
764 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
765 }
766
767 static void
768 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
769 {
770 struct nvme_async_event_request *aer = arg;
771
772 if (nvme_completion_is_error(cpl)) {
773 /*
774 * Do not retry failed async event requests. This avoids
775 * infinite loops where a new async event request is submitted
776 * to replace the one just failed, only to fail again and
777 * perpetuate the loop.
778 */
779 return;
780 }
781
782 /* Associated log page is in bits 23:16 of completion entry dw0. */
783 aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0);
784
785 nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
786 " page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0),
787 NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0),
788 aer->log_page_id);
789
790 if (is_log_page_id_valid(aer->log_page_id)) {
791 aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
792 aer->log_page_id);
793 memcpy(&aer->cpl, cpl, sizeof(*cpl));
794 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
795 NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
796 aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
797 aer);
798 /* Wait to notify consumers until after log page is fetched. */
799 } else {
800 nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
801 NULL, 0);
802
803 /*
804 * Repost another asynchronous event request to replace the one
805 * that just completed.
806 */
807 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
808 }
809 }
810
811 static void
812 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
813 struct nvme_async_event_request *aer)
814 {
815 struct nvme_request *req;
816
817 aer->ctrlr = ctrlr;
818 req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
819 aer->req = req;
820
821 /*
822 * Disable timeout here, since asynchronous event requests should by
823 * nature never be timed out.
824 */
825 req->timeout = false;
826 req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
827 nvme_ctrlr_submit_admin_request(ctrlr, req);
828 }
829
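/*
 * Select which asynchronous events we want to be notified about and post the
 * initial Asynchronous Event Request commands to the admin queue.
 */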
830 static void
831 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
832 {
833 struct nvme_completion_poll_status status;
834 struct nvme_async_event_request *aer;
835 uint32_t i;
836
837 ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
838 NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
839 NVME_CRIT_WARN_ST_READ_ONLY |
840 NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
841 if (ctrlr->cdata.ver >= NVME_REV(1, 2))
842 ctrlr->async_event_config |=
843 ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE |
844 NVME_ASYNC_EVENT_FW_ACTIVATE);
845
846 status.done = 0;
847 nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
848 0, NULL, 0, nvme_completion_poll_cb, &status);
849 nvme_completion_poll(&status);
850 if (nvme_completion_is_error(&status.cpl) ||
851 (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
852 (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
853 nvme_printf(ctrlr, "temperature threshold not supported\n");
854 } else
855 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
856
857 nvme_ctrlr_cmd_set_async_event_config(ctrlr,
858 ctrlr->async_event_config, NULL, NULL);
859
860 /* aerl is a zero-based value, so we need to add 1 here. */
861 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
862
863 for (i = 0; i < ctrlr->num_aers; i++) {
864 aer = &ctrlr->aer[i];
865 nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
866 }
867 }
868
869 static void
870 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
871 {
872
873 ctrlr->int_coal_time = 0;
874 TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
875 &ctrlr->int_coal_time);
876
877 ctrlr->int_coal_threshold = 0;
878 TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
879 &ctrlr->int_coal_threshold);
880
881 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
882 ctrlr->int_coal_threshold, NULL, NULL);
883 }
884
885 static void
886 nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr)
887 {
888 struct nvme_hmb_chunk *hmbc;
889 int i;
890
891 if (ctrlr->hmb_desc_paddr) {
892 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
893 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
894 ctrlr->hmb_desc_map);
895 ctrlr->hmb_desc_paddr = 0;
896 }
897 if (ctrlr->hmb_desc_tag) {
898 bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
899 ctrlr->hmb_desc_tag = NULL;
900 }
901 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
902 hmbc = &ctrlr->hmb_chunks[i];
903 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
904 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
905 hmbc->hmbc_map);
906 }
907 ctrlr->hmb_nchunks = 0;
908 if (ctrlr->hmb_tag) {
909 bus_dma_tag_destroy(ctrlr->hmb_tag);
910 ctrlr->hmb_tag = NULL;
911 }
912 if (ctrlr->hmb_chunks) {
913 free(ctrlr->hmb_chunks, M_NVME);
914 ctrlr->hmb_chunks = NULL;
915 }
916 }
917
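/*
 * Allocate the Host Memory Buffer: carve out DMA-able chunks (bounded by the
 * hw.nvme.hmb_max tunable, 5% of RAM by default) and build the descriptor
 * list that will be handed to the controller.
 */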
918 static void
919 nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr)
920 {
921 struct nvme_hmb_chunk *hmbc;
922 size_t pref, min, minc, size;
923 int err, i;
924 uint64_t max;
925
926 /* Limit HMB to 5% of RAM size per device by default. */
927 max = (uint64_t)physmem * PAGE_SIZE / 20;
928 TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max);
929
930 /*
931 * Host Memory Buffer sizes in the Identify data are always expressed in
932 * 4KB units.
933 */
934 min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS;
935 if (max == 0 || max < min)
936 return;
937 pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max);
938 minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size);
939 if (min > 0 && ctrlr->cdata.hmmaxd > 0)
940 minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
941 ctrlr->hmb_chunk = pref;
942
943 again:
944 /*
945 * However, the chunk sizes, number of chunks, and alignment of chunks
946 * are all based on the current MPS (ctrlr->page_size).
947 */
948 ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size);
949 ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
950 if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
951 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
952 ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
953 ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
954 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
955 ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
956 ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
957 if (err != 0) {
958 nvme_printf(ctrlr, "HMB tag create failed %d\n", err);
959 nvme_ctrlr_hmb_free(ctrlr);
960 return;
961 }
962
963 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
964 hmbc = &ctrlr->hmb_chunks[i];
965 if (bus_dmamem_alloc(ctrlr->hmb_tag,
966 (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
967 &hmbc->hmbc_map)) {
968 nvme_printf(ctrlr, "failed to alloc HMB\n");
969 break;
970 }
971 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
972 hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
973 &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
974 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
975 hmbc->hmbc_map);
976 nvme_printf(ctrlr, "failed to load HMB\n");
977 break;
978 }
979 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
980 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
981 }
982
983 if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
984 ctrlr->hmb_chunk / 2 >= minc) {
985 ctrlr->hmb_nchunks = i;
986 nvme_ctrlr_hmb_free(ctrlr);
987 ctrlr->hmb_chunk /= 2;
988 goto again;
989 }
990 ctrlr->hmb_nchunks = i;
991 if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
992 nvme_ctrlr_hmb_free(ctrlr);
993 return;
994 }
995
996 size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
997 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
998 16, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
999 size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
1000 if (err != 0) {
1001 nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err);
1002 nvme_ctrlr_hmb_free(ctrlr);
1003 return;
1004 }
1005 if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
1006 (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
1007 &ctrlr->hmb_desc_map)) {
1008 nvme_printf(ctrlr, "failed to alloc HMB desc\n");
1009 nvme_ctrlr_hmb_free(ctrlr);
1010 return;
1011 }
1012 if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1013 ctrlr->hmb_desc_vaddr, size, nvme_single_map,
1014 &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
1015 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
1016 ctrlr->hmb_desc_map);
1017 nvme_printf(ctrlr, "failed to load HMB desc\n");
1018 nvme_ctrlr_hmb_free(ctrlr);
1019 return;
1020 }
1021
1022 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
1023 memset(&ctrlr->hmb_desc_vaddr[i], 0,
1024 sizeof(struct nvme_hmb_desc));
1025 ctrlr->hmb_desc_vaddr[i].addr =
1026 htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
1027 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size);
1028 }
1029 bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1030 BUS_DMASYNC_PREWRITE);
1031
1032 nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n",
1033 (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
1034 / 1024 / 1024);
1035 }
1036
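/*
 * Enable or disable the Host Memory Buffer via Set Features; 'memret' sets
 * the Memory Return bit, telling the controller it is getting back the same
 * buffer it was given before.
 */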
1037 static void
1038 nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret)
1039 {
1040 struct nvme_completion_poll_status status;
1041 uint32_t cdw11;
1042
1043 cdw11 = 0;
1044 if (enable)
1045 cdw11 |= 1;
1046 if (memret)
1047 cdw11 |= 2;
1048 status.done = 0;
1049 nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11,
1050 ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size,
1051 ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32,
1052 ctrlr->hmb_nchunks, NULL, 0,
1053 nvme_completion_poll_cb, &status);
1054 nvme_completion_poll(&status);
1055 if (nvme_completion_is_error(&status.cpl))
1056 nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n");
1057 }
1058
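/*
 * Bring the controller the rest of the way up, either at attach time or after
 * a reset: reset and enable the queue pairs, re-identify the controller, set
 * up the Host Memory Buffer, create the I/O queues on the device, construct
 * namespaces, configure AERs and interrupt coalescing, and enable I/O queues.
 */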
1059 static void
1060 nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
1061 {
1062 struct nvme_controller *ctrlr = ctrlr_arg;
1063 uint32_t old_num_io_queues;
1064 int i;
1065
1066 TSENTER();
1067
1068 /*
1069 * Only reset adminq here when we are restarting the
1070 * controller after a reset. During initialization,
1071 * we have already submitted admin commands to get
1072 * the number of I/O queues supported, so cannot reset
1073 * the adminq again here.
1074 */
1075 if (resetting) {
1076 nvme_qpair_reset(&ctrlr->adminq);
1077 nvme_admin_qpair_enable(&ctrlr->adminq);
1078 }
1079
1080 if (ctrlr->ioq != NULL) {
1081 for (i = 0; i < ctrlr->num_io_queues; i++)
1082 nvme_qpair_reset(&ctrlr->ioq[i]);
1083 }
1084
1085 /*
1086 * If this reset was due to a command timeout during initialization,
1087 * just return here, letting the initialization code fail gracefully.
1088 */
1089 if (resetting && !ctrlr->is_initialized)
1090 return;
1091
1092 if (resetting && nvme_ctrlr_identify(ctrlr) != 0) {
1093 nvme_ctrlr_fail(ctrlr);
1094 return;
1095 }
1096
1097 /*
1098 * The number of qpairs is determined during controller initialization,
1099 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
1100 * HW limit. We call SET_FEATURES again here so that it gets called
1101 * after any reset for controllers that depend on the driver to
1102 * explicitly specify how many queues it will use. This value should
1103 * never change between resets, so panic if somehow that does happen.
1104 */
1105 if (resetting) {
1106 old_num_io_queues = ctrlr->num_io_queues;
1107 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
1108 nvme_ctrlr_fail(ctrlr);
1109 return;
1110 }
1111
1112 if (old_num_io_queues != ctrlr->num_io_queues) {
1113 panic("num_io_queues changed from %u to %u",
1114 old_num_io_queues, ctrlr->num_io_queues);
1115 }
1116 }
1117
1118 if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
1119 nvme_ctrlr_hmb_alloc(ctrlr);
1120 if (ctrlr->hmb_nchunks > 0)
1121 nvme_ctrlr_hmb_enable(ctrlr, true, false);
1122 } else if (ctrlr->hmb_nchunks > 0)
1123 nvme_ctrlr_hmb_enable(ctrlr, true, true);
1124
1125 if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
1126 nvme_ctrlr_fail(ctrlr);
1127 return;
1128 }
1129
1130 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
1131 nvme_ctrlr_fail(ctrlr);
1132 return;
1133 }
1134
1135 nvme_ctrlr_configure_aer(ctrlr);
1136 nvme_ctrlr_configure_int_coalescing(ctrlr);
1137
1138 for (i = 0; i < ctrlr->num_io_queues; i++)
1139 nvme_io_qpair_enable(&ctrlr->ioq[i]);
1140 TSEXIT();
1141 }
1142
1143 void
1144 nvme_ctrlr_start_config_hook(void *arg)
1145 {
1146 struct nvme_controller *ctrlr = arg;
1147
1148 TSENTER();
1149
1150 if (nvme_ctrlr_hw_reset(ctrlr) != 0) {
1151 fail:
1152 nvme_ctrlr_fail(ctrlr);
1153 config_intrhook_disestablish(&ctrlr->config_hook);
1154 return;
1155 }
1156
1157 nvme_qpair_reset(&ctrlr->adminq);
1158 nvme_admin_qpair_enable(&ctrlr->adminq);
1159
1160 if (nvme_ctrlr_identify(ctrlr) == 0 &&
1161 nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
1162 nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
1163 nvme_ctrlr_start(ctrlr, false);
1164 else
1165 goto fail;
1166
1167 nvme_sysctl_initialize_ctrlr(ctrlr);
1168 config_intrhook_disestablish(&ctrlr->config_hook);
1169
1170 ctrlr->is_initialized = 1;
1171 nvme_notify_new_controller(ctrlr);
1172 TSEXIT();
1173 }
1174
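/*
 * Taskqueue handler for controller resets: log the event, perform the
 * hardware reset, then either restart or fail the controller, and finally
 * clear is_resetting.
 */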
1175 static void
1176 nvme_ctrlr_reset_task(void *arg, int pending)
1177 {
1178 struct nvme_controller *ctrlr = arg;
1179 int status;
1180
1181 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"start\"");
1182 status = nvme_ctrlr_hw_reset(ctrlr);
1183 if (status == 0) {
1184 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"success\"");
1185 nvme_ctrlr_start(ctrlr, true);
1186 } else {
1187 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"timed_out\"");
1188 nvme_ctrlr_fail(ctrlr);
1189 }
1190
1191 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1192 }
1193
1194 /*
1195 * Poll all the queues enabled on the device for completion.
1196 */
1197 void
1198 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
1199 {
1200 int i;
1201
1202 nvme_qpair_process_completions(&ctrlr->adminq);
1203
1204 for (i = 0; i < ctrlr->num_io_queues; i++)
1205 if (ctrlr->ioq && ctrlr->ioq[i].cpl)
1206 nvme_qpair_process_completions(&ctrlr->ioq[i]);
1207 }
1208
1209 /*
1210 * Poll the single-vector interrupt case: num_io_queues will be 1 and
1211 * there's only a single vector. While we're polling, we mask further
1212 * interrupts in the controller.
1213 */
1214 void
1215 nvme_ctrlr_shared_handler(void *arg)
1216 {
1217 struct nvme_controller *ctrlr = arg;
1218
1219 nvme_mmio_write_4(ctrlr, intms, 1);
1220 nvme_ctrlr_poll(ctrlr);
1221 nvme_mmio_write_4(ctrlr, intmc, 1);
1222 }
1223
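/*
 * Completion callback for pass-through commands: copy the completion status
 * back to the caller (clearing the phase bit) and wake the sleeping ioctl.
 */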
1224 static void
1225 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
1226 {
1227 struct nvme_pt_command *pt = arg;
1228 struct mtx *mtx = pt->driver_lock;
1229 uint16_t status;
1230
1231 bzero(&pt->cpl, sizeof(pt->cpl));
1232 pt->cpl.cdw0 = cpl->cdw0;
1233
1234 status = cpl->status;
1235 status &= ~NVMEM(NVME_STATUS_P);
1236 pt->cpl.status = status;
1237
1238 mtx_lock(mtx);
1239 pt->driver_lock = NULL;
1240 wakeup(pt);
1241 mtx_unlock(mtx);
1242 }
1243
1244 int
1245 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1246 struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
1247 int is_admin_cmd)
1248 {
1249 struct nvme_request *req;
1250 struct mtx *mtx;
1251 struct buf *buf = NULL;
1252 int ret = 0;
1253
1254 if (pt->len > 0) {
1255 if (pt->len > ctrlr->max_xfer_size) {
1256 nvme_printf(ctrlr, "pt->len (%d) "
1257 "exceeds max_xfer_size (%d)\n", pt->len,
1258 ctrlr->max_xfer_size);
1259 return EIO;
1260 }
1261 if (is_user_buffer) {
1262 /*
1263 * Ensure the user buffer is wired for the duration of
1264 * this pass-through command.
1265 */
1266 PHOLD(curproc);
1267 buf = uma_zalloc(pbuf_zone, M_WAITOK);
1268 buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1269 if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
1270 ret = EFAULT;
1271 goto err;
1272 }
1273 req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
1274 nvme_pt_done, pt);
1275 } else
1276 req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1277 nvme_pt_done, pt);
1278 } else
1279 req = nvme_allocate_request_null(nvme_pt_done, pt);
1280
1281 /* Assume user space already converted to little-endian */
1282 req->cmd.opc = pt->cmd.opc;
1283 req->cmd.fuse = pt->cmd.fuse;
1284 req->cmd.rsvd2 = pt->cmd.rsvd2;
1285 req->cmd.rsvd3 = pt->cmd.rsvd3;
1286 req->cmd.cdw10 = pt->cmd.cdw10;
1287 req->cmd.cdw11 = pt->cmd.cdw11;
1288 req->cmd.cdw12 = pt->cmd.cdw12;
1289 req->cmd.cdw13 = pt->cmd.cdw13;
1290 req->cmd.cdw14 = pt->cmd.cdw14;
1291 req->cmd.cdw15 = pt->cmd.cdw15;
1292
1293 req->cmd.nsid = htole32(nsid);
1294
1295 mtx = mtx_pool_find(mtxpool_sleep, pt);
1296 pt->driver_lock = mtx;
1297
1298 if (is_admin_cmd)
1299 nvme_ctrlr_submit_admin_request(ctrlr, req);
1300 else
1301 nvme_ctrlr_submit_io_request(ctrlr, req);
1302
1303 mtx_lock(mtx);
1304 while (pt->driver_lock != NULL)
1305 mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1306 mtx_unlock(mtx);
1307
1308 if (buf != NULL) {
1309 vunmapbuf(buf);
1310 err:
1311 uma_zfree(pbuf_zone, buf);
1312 PRELE(curproc);
1313 }
1314
1315 return (ret);
1316 }
1317
1318 static void
1319 nvme_npc_done(void *arg, const struct nvme_completion *cpl)
1320 {
1321 struct nvme_passthru_cmd *npc = arg;
1322 struct mtx *mtx = (void *)(uintptr_t)npc->metadata;
1323
1324 npc->result = cpl->cdw0; /* cpl in host order by now */
1325 mtx_lock(mtx);
1326 npc->metadata = 0;
1327 wakeup(npc);
1328 mtx_unlock(mtx);
1329 }
1330
1331 /* XXX refactor? */
1332
1333 int
1334 nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
1335 struct nvme_passthru_cmd *npc, uint32_t nsid, bool is_user, bool is_admin)
1336 {
1337 struct nvme_request *req;
1338 struct mtx *mtx;
1339 struct buf *buf = NULL;
1340 int ret = 0;
1341
1342 /*
1343 * We don't support metadata.
1344 */
1345 if (npc->metadata != 0 || npc->metadata_len != 0)
1346 return (EIO);
1347
1348 if (npc->data_len > 0 && npc->addr != 0) {
1349 if (npc->data_len > ctrlr->max_xfer_size) {
1350 nvme_printf(ctrlr,
1351 "npc->data_len (%d) exceeds max_xfer_size (%d)\n",
1352 npc->data_len, ctrlr->max_xfer_size);
1353 return (EIO);
1354 }
1355 /* We only support data out or data in commands, but not both at once. */
1356 if ((npc->opcode & 0x3) == 0 || (npc->opcode & 0x3) == 3)
1357 return (EINVAL);
1358 if (is_user) {
1359 /*
1360 * Ensure the user buffer is wired for the duration of
1361 * this pass-through command.
1362 */
1363 PHOLD(curproc);
1364 buf = uma_zalloc(pbuf_zone, M_WAITOK);
1365 buf->b_iocmd = npc->opcode & 1 ? BIO_WRITE : BIO_READ;
1366 if (vmapbuf(buf, (void *)npc->addr, npc->data_len, 1) < 0) {
1367 ret = EFAULT;
1368 goto err;
1369 }
1370 req = nvme_allocate_request_vaddr(buf->b_data, npc->data_len,
1371 nvme_npc_done, npc);
1372 } else
1373 req = nvme_allocate_request_vaddr((void *)npc->addr, npc->data_len,
1374 nvme_npc_done, npc);
1375 } else
1376 req = nvme_allocate_request_null(nvme_npc_done, npc);
1377
1378 req->cmd.opc = npc->opcode;
1379 req->cmd.fuse = npc->flags;
1380 req->cmd.rsvd2 = htole16(npc->cdw2);
1381 req->cmd.rsvd3 = htole16(npc->cdw3);
1382 req->cmd.cdw10 = htole32(npc->cdw10);
1383 req->cmd.cdw11 = htole32(npc->cdw11);
1384 req->cmd.cdw12 = htole32(npc->cdw12);
1385 req->cmd.cdw13 = htole32(npc->cdw13);
1386 req->cmd.cdw14 = htole32(npc->cdw14);
1387 req->cmd.cdw15 = htole32(npc->cdw15);
1388
1389 req->cmd.nsid = htole32(nsid);
1390
1391 mtx = mtx_pool_find(mtxpool_sleep, npc);
1392 npc->metadata = (uintptr_t) mtx;
1393
1394 /* XXX no timeout passed down */
1395 if (is_admin)
1396 nvme_ctrlr_submit_admin_request(ctrlr, req);
1397 else
1398 nvme_ctrlr_submit_io_request(ctrlr, req);
1399
1400 mtx_lock(mtx);
1401 while (npc->metadata != 0)
1402 mtx_sleep(npc, mtx, PRIBIO, "nvme_npc", 0);
1403 mtx_unlock(mtx);
1404
1405 if (buf != NULL) {
1406 vunmapbuf(buf);
1407 err:
1408 uma_zfree(pbuf_zone, buf);
1409 PRELE(curproc);
1410 }
1411
1412 return (ret);
1413 }
1414
1415 static int
1416 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1417 struct thread *td)
1418 {
1419 struct nvme_controller *ctrlr;
1420 struct nvme_pt_command *pt;
1421
1422 ctrlr = cdev->si_drv1;
1423
1424 switch (cmd) {
1425 case NVME_IOCTL_RESET: /* Linux compat */
1426 case NVME_RESET_CONTROLLER:
1427 nvme_ctrlr_reset(ctrlr);
1428 break;
1429 case NVME_PASSTHROUGH_CMD:
1430 pt = (struct nvme_pt_command *)arg;
1431 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1432 1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1433 case NVME_GET_NSID:
1434 {
1435 struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
1436 strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1437 sizeof(gnsid->cdev));
1438 gnsid->nsid = 0;
1439 break;
1440 }
1441 case NVME_GET_MAX_XFER_SIZE:
1442 *(uint64_t *)arg = ctrlr->max_xfer_size;
1443 break;
1444 /* Linux Compatible (see nvme_linux.h) */
1445 case NVME_IOCTL_ID:
1446 td->td_retval[0] = 0xfffffffful;
1447 return (0);
1448
1449 case NVME_IOCTL_ADMIN_CMD:
1450 case NVME_IOCTL_IO_CMD: {
1451 struct nvme_passthru_cmd *npc = (struct nvme_passthru_cmd *)arg;
1452
1453 return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, npc->nsid, true,
1454 cmd == NVME_IOCTL_ADMIN_CMD));
1455 }
1456
1457 default:
1458 return (ENOTTY);
1459 }
1460
1461 return (0);
1462 }
1463
1464 static struct cdevsw nvme_ctrlr_cdevsw = {
1465 .d_version = D_VERSION,
1466 .d_flags = 0,
1467 .d_ioctl = nvme_ctrlr_ioctl
1468 };
1469
1470 int
1471 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1472 {
1473 struct make_dev_args md_args;
1474 uint32_t cap_lo;
1475 uint32_t cap_hi;
1476 uint32_t to, vs, pmrcap;
1477 int status, timeout_period;
1478
1479 ctrlr->dev = dev;
1480
1481 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1482 if (bus_get_domain(dev, &ctrlr->domain) != 0)
1483 ctrlr->domain = 0;
1484
1485 ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1486 if (bootverbose) {
1487 device_printf(dev, "CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u\n",
1488 cap_lo, NVME_CAP_LO_MQES(cap_lo),
1489 NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "",
1490 NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "",
1491 (NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "",
1492 (NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "",
1493 NVME_CAP_LO_TO(cap_lo));
1494 }
1495 ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1496 if (bootverbose) {
1497 device_printf(dev, "CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, "
1498 "CPS %x, MPSMIN %u, MPSMAX %u%s%s%s%s%s\n", cap_hi,
1499 NVME_CAP_HI_DSTRD(cap_hi),
1500 NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "",
1501 NVME_CAP_HI_CSS(cap_hi),
1502 NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "",
1503 NVME_CAP_HI_CPS(cap_hi),
1504 NVME_CAP_HI_MPSMIN(cap_hi),
1505 NVME_CAP_HI_MPSMAX(cap_hi),
1506 NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "",
1507 NVME_CAP_HI_CMBS(cap_hi) ? ", CMBS" : "",
1508 NVME_CAP_HI_NSSS(cap_hi) ? ", NSSS" : "",
1509 NVME_CAP_HI_CRWMS(cap_hi) ? ", CRWMS" : "",
1510 NVME_CAP_HI_CRIMS(cap_hi) ? ", CRIMS" : "");
1511 }
1512 if (bootverbose) {
1513 vs = nvme_mmio_read_4(ctrlr, vs);
1514 device_printf(dev, "Version: 0x%08x: %d.%d\n", vs,
1515 NVME_MAJOR(vs), NVME_MINOR(vs));
1516 }
1517 if (bootverbose && NVME_CAP_HI_PMRS(cap_hi)) {
1518 pmrcap = nvme_mmio_read_4(ctrlr, pmrcap);
1519 device_printf(dev, "PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, "
1520 "PMRWBM %x, PMRTO %u%s\n", pmrcap,
1521 NVME_PMRCAP_BIR(pmrcap),
1522 NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "",
1523 NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "",
1524 NVME_PMRCAP_PMRTU(pmrcap),
1525 NVME_PMRCAP_PMRWBM(pmrcap),
1526 NVME_PMRCAP_PMRTO(pmrcap),
1527 NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : "");
1528 }
1529
1530 ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
1531
1532 ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi);
1533 ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps);
1534
1535 /* Get ready timeout value from controller, in units of 500ms. */
1536 to = NVME_CAP_LO_TO(cap_lo) + 1;
1537 ctrlr->ready_timeout_in_ms = to * 500;
1538
1539 timeout_period = NVME_ADMIN_TIMEOUT_PERIOD;
1540 TUNABLE_INT_FETCH("hw.nvme.admin_timeout_period", &timeout_period);
1541 timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1542 timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1543 ctrlr->admin_timeout_period = timeout_period;
1544
1545 timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1546 TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1547 timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1548 timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1549 ctrlr->timeout_period = timeout_period;
1550
1551 nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1552 TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1553
1554 ctrlr->enable_aborts = 0;
1555 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1556
1557 ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK);
1558
1559 /* Cap transfers by the maximum addressable by page-sized PRP (4KB pages -> 2MB). */
1560 ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size));
1561 if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1562 return (ENXIO);
1563
1564 /*
1565 * Create 2 threads for the taskqueue. The reset thread will block when
1566 * it detects that the controller has failed until all I/O has been
1567 * failed up the stack. The fail_req task needs to be able to run in
1568 * this case to finish the request failure for some cases.
1569 *
1570 * We could partially solve this race by draining the failed request
1571 * queue before proceeding to free the sim, though nothing would stop
1572 * new I/O from coming in after we do that drain, but before we reach
1573 * cam_sim_free, so this big hammer is used instead.
1574 */
1575 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1576 taskqueue_thread_enqueue, &ctrlr->taskqueue);
1577 taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");
1578
1579 ctrlr->is_resetting = 0;
1580 ctrlr->is_initialized = 0;
1581 ctrlr->notification_sent = 0;
1582 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1583 STAILQ_INIT(&ctrlr->fail_req);
1584 ctrlr->is_failed = false;
1585
1586 make_dev_args_init(&md_args);
1587 md_args.mda_devsw = &nvme_ctrlr_cdevsw;
1588 md_args.mda_uid = UID_ROOT;
1589 md_args.mda_gid = GID_WHEEL;
1590 md_args.mda_mode = 0600;
1591 md_args.mda_unit = device_get_unit(dev);
1592 md_args.mda_si_drv1 = (void *)ctrlr;
1593 status = make_dev_s(&md_args, &ctrlr->cdev, "%s",
1594 device_get_nameunit(dev));
1595 if (status != 0)
1596 return (ENXIO);
1597
1598 return (0);
1599 }
1600
1601 void
1602 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1603 {
1604 int gone, i;
1605
1606 ctrlr->is_dying = true;
1607
1608 if (ctrlr->resource == NULL)
1609 goto nores;
1610 if (!mtx_initialized(&ctrlr->adminq.lock))
1611 goto noadminq;
1612
1613 /*
1614 * Check whether it is a hot unplug or a clean driver detach.
1615 * If device is not there any more, skip any shutdown commands.
1616 */
1617 gone = (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE);
1618 if (gone)
1619 nvme_ctrlr_fail(ctrlr);
1620 else
1621 nvme_notify_fail_consumers(ctrlr);
1622
1623 for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1624 nvme_ns_destruct(&ctrlr->ns[i]);
1625
1626 if (ctrlr->cdev)
1627 destroy_dev(ctrlr->cdev);
1628
1629 if (ctrlr->is_initialized) {
1630 if (!gone) {
1631 if (ctrlr->hmb_nchunks > 0)
1632 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1633 nvme_ctrlr_delete_qpairs(ctrlr);
1634 }
1635 nvme_ctrlr_hmb_free(ctrlr);
1636 }
1637 if (ctrlr->ioq != NULL) {
1638 for (i = 0; i < ctrlr->num_io_queues; i++)
1639 nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1640 free(ctrlr->ioq, M_NVME);
1641 }
1642 nvme_admin_qpair_destroy(&ctrlr->adminq);
1643
1644 /*
1645 * Notify the controller of a shutdown, even though this is due to
1646 * a driver unload, not a system shutdown (this path is not invoked
1647 * during shutdown). This ensures the controller receives a
1648 * shutdown notification in case the system is shut down before
1649 * reloading the driver.
1650 */
1651 if (!gone)
1652 nvme_ctrlr_shutdown(ctrlr);
1653
1654 if (!gone)
1655 nvme_ctrlr_disable(ctrlr);
1656
1657 noadminq:
1658 if (ctrlr->taskqueue)
1659 taskqueue_free(ctrlr->taskqueue);
1660
1661 if (ctrlr->tag)
1662 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1663
1664 if (ctrlr->res)
1665 bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1666 rman_get_rid(ctrlr->res), ctrlr->res);
1667
1668 if (ctrlr->bar4_resource != NULL) {
1669 bus_release_resource(dev, SYS_RES_MEMORY,
1670 ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1671 }
1672
1673 bus_release_resource(dev, SYS_RES_MEMORY,
1674 ctrlr->resource_id, ctrlr->resource);
1675
1676 nores:
1677 if (ctrlr->alignment_splits)
1678 counter_u64_free(ctrlr->alignment_splits);
1679
1680 mtx_destroy(&ctrlr->lock);
1681 }
1682
1683 void
1684 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1685 {
1686 uint32_t cc;
1687 uint32_t csts;
1688 int timeout;
1689
1690 cc = nvme_mmio_read_4(ctrlr, cc);
1691 cc &= ~NVMEM(NVME_CC_REG_SHN);
1692 cc |= NVMEF(NVME_CC_REG_SHN, NVME_SHN_NORMAL);
1693 nvme_mmio_write_4(ctrlr, cc, cc);
1694
1695 timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz :
1696 ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000);
1697 while (1) {
1698 csts = nvme_mmio_read_4(ctrlr, csts);
1699 if (csts == NVME_GONE) /* Hot unplug. */
1700 break;
1701 if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
1702 break;
1703 if (timeout - ticks < 0) {
1704 nvme_printf(ctrlr, "shutdown timeout\n");
1705 break;
1706 }
1707 pause("nvmeshut", 1);
1708 }
1709 }
1710
1711 void
1712 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1713 struct nvme_request *req)
1714 {
1715
1716 nvme_qpair_submit_request(&ctrlr->adminq, req);
1717 }
1718
1719 void
1720 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1721 struct nvme_request *req)
1722 {
1723 struct nvme_qpair *qpair;
1724
1725 qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
1726 nvme_qpair_submit_request(qpair, req);
1727 }
1728
1729 device_t
1730 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1731 {
1732
1733 return (ctrlr->dev);
1734 }
1735
1736 const struct nvme_controller_data *
1737 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1738 {
1739
1740 return (&ctrlr->cdata);
1741 }
1742
1743 int
1744 nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
1745 {
1746 int to = hz;
1747
1748 /*
1749 * Can't touch failed controllers, so it's already suspended.
1750 */
1751 if (ctrlr->is_failed)
1752 return (0);
1753
1754 /*
1755 * We don't want the reset taskqueue running, since it does similar
1756 * things, so prevent it from running after we start. Wait for any reset
1757 * that may have been started to complete. The reset process we follow
1758 * will ensure that any new I/O will queue and be given to the hardware
1759 * after we resume (though there should be none).
1760 */
1761 while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
1762 pause("nvmesusp", 1);
1763 if (to <= 0) {
1764 nvme_printf(ctrlr,
1765 "Competing reset task didn't finish. Try again later.\n");
1766 return (EWOULDBLOCK);
1767 }
1768
1769 if (ctrlr->hmb_nchunks > 0)
1770 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1771
1772 /*
1773 * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to
1774 * delete the hardware I/O queues, and then shut down. This properly
1775 * flushes any metadata the drive may have stored so it can survive
1776 * having its power removed and prevents the unsafe shutdown count from
1777 * incrementing. Once we delete the qpairs, we have to disable them
1778 * before shutting down.
1779 */
1780 nvme_ctrlr_delete_qpairs(ctrlr);
1781 nvme_ctrlr_disable_qpairs(ctrlr);
1782 nvme_ctrlr_shutdown(ctrlr);
1783
1784 return (0);
1785 }
1786
1787 int
1788 nvme_ctrlr_resume(struct nvme_controller *ctrlr)
1789 {
1790
1791 /*
1792 * Can't touch failed controllers, so nothing to do to resume.
1793 */
1794 if (ctrlr->is_failed)
1795 return (0);
1796
1797 if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1798 goto fail;
1799
1800 /*
1801 * Now that we've reset the hardware, we can restart the controller. Any
1802 * I/O that was pending is requeued. Any admin commands are aborted with
1803 * an error. Once we've restarted, take the controller out of reset.
1804 */
1805 nvme_ctrlr_start(ctrlr, true);
1806 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1807
1808 return (0);
1809 fail:
1810 /*
1811 * Since we can't bring the controller out of reset, announce and fail
1812 * the controller. However, we have to return success for the resume
1813 * itself, due to questionable APIs.
1814 */
1815 nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
1816 nvme_ctrlr_fail(ctrlr);
1817 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1818 return (0);
1819 }
1820