1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (C) 2012-2016 Intel Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "opt_nvme.h"
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/ioccom.h>
37 #include <sys/proc.h>
38 #include <sys/smp.h>
39 #include <sys/uio.h>
40 #include <sys/sbuf.h>
41 #include <sys/endian.h>
42 #include <machine/stdarg.h>
43 #include <vm/vm.h>
44
45 #include "nvme_private.h"
46
47 #define B4_CHK_RDY_DELAY_MS 2300 /* work around controller bug */
48
49 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
50 struct nvme_async_event_request *aer);
51
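/*
 * Issue a bus barrier over the controller's register BAR so MMIO accesses
 * are ordered as requested by flags (BUS_SPACE_BARRIER_READ/WRITE).
 */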
52 static void
53 nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags)
54 {
55 bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags);
56 }
57
58 static void
59 nvme_ctrlr_devctl_va(struct nvme_controller *ctrlr, const char *type,
60 const char *msg, va_list ap)
61 {
62 struct sbuf sb;
63 int error;
64
65 if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
66 return;
67 sbuf_printf(&sb, "name=\"%s\" ", device_get_nameunit(ctrlr->dev));
68 sbuf_vprintf(&sb, msg, ap);
69 error = sbuf_finish(&sb);
70 if (error == 0)
71 devctl_notify("nvme", "controller", type, sbuf_data(&sb));
72 sbuf_delete(&sb);
73 }
74
75 static void
76 nvme_ctrlr_devctl(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
77 {
78 va_list ap;
79
80 va_start(ap, msg);
81 nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
82 va_end(ap);
83 }
84
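/*
 * Log a message to the console and also post it as a devctl(4) notification.
 */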
85 static void
86 nvme_ctrlr_devctl_log(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
87 {
88 struct sbuf sb;
89 va_list ap;
90 int error;
91
92 if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
93 return;
94 sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));
95 va_start(ap, msg);
96 sbuf_vprintf(&sb, msg, ap);
97 va_end(ap);
98 error = sbuf_finish(&sb);
99 if (error == 0)
100 printf("%s\n", sbuf_data(&sb));
101 sbuf_delete(&sb);
102 va_start(ap, msg);
103 nvme_ctrlr_devctl_va(ctrlr, type, msg, ap);
104 va_end(ap);
105 }
106
107 static int
108 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
109 {
110 struct nvme_qpair *qpair;
111 uint32_t num_entries;
112 int error;
113
114 qpair = &ctrlr->adminq;
115 qpair->id = 0;
116 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
117 qpair->domain = ctrlr->domain;
118
119 num_entries = NVME_ADMIN_ENTRIES;
120 TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
121 /*
122 * If admin_entries was overridden to an invalid value, revert it
123 * back to our default value.
124 */
125 if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
126 num_entries > NVME_MAX_ADMIN_ENTRIES) {
127 nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
128 "specified\n", num_entries);
129 num_entries = NVME_ADMIN_ENTRIES;
130 }
131
132 /*
133 * The admin queue's max xfer size is treated differently than the
134 * max I/O xfer size. 16KB is sufficient here - maybe even less?
135 */
136 error = nvme_qpair_construct(qpair, num_entries, NVME_ADMIN_TRACKERS,
137 ctrlr);
138 return (error);
139 }
140
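/*
 * Map a CPU number to an I/O queue index by splitting the CPU space evenly
 * among the queues. For example, with mp_ncpus = 8 and num_io_queues = 4,
 * CPUs 0-1 map to queue 0, CPUs 2-3 to queue 1, and so on.
 */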
141 #define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus)
142
143 static int
144 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
145 {
146 struct nvme_qpair *qpair;
147 uint32_t cap_lo;
148 uint16_t mqes;
149 int c, error, i, n;
150 int num_entries, num_trackers, max_entries;
151
152 /*
153 * NVMe spec sets a hard limit of 64K max entries, but devices may
154 * specify a smaller limit, so we need to check the MQES field in the
155 * capabilities register. We also have to cap the number of entries to
156 * what the current doorbell stride allows for in BAR 0/1; otherwise the
157 * remaining entries are inaccessible. MQES should already reflect this,
158 * so this is just a fail-safe.
159 */
160 max_entries =
161 (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
162 (1 << (ctrlr->dstrd + 1));
163 num_entries = NVME_IO_ENTRIES;
164 TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
165 cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
166 mqes = NVME_CAP_LO_MQES(cap_lo);
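/* MQES is a 0-based value, hence the "+ 1". */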
167 num_entries = min(num_entries, mqes + 1);
168 num_entries = min(num_entries, max_entries);
169
170 num_trackers = NVME_IO_TRACKERS;
171 TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
172
173 num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
174 num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
175 /*
176 * No need to have more trackers than entries in the submit queue. Note
177 * also that for a queue size of N, we can only have (N-1) commands
178 * outstanding, hence the "-1" here.
179 */
180 num_trackers = min(num_trackers, (num_entries-1));
181
182 /*
183 * Our best estimate for the maximum number of I/Os that we should
184 * normally have in flight at one time. This should be viewed as a hint,
185 * not a hard limit, and will need to be revisited when the upper layers
186 * of the storage system grow multi-queue support.
187 */
188 ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
189
190 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
191 M_NVME, M_ZERO | M_WAITOK);
192
193 for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
194 qpair = &ctrlr->ioq[i];
195
196 /*
197 * Admin queue has ID=0. IO queues start at ID=1 -
198 * hence the 'i+1' here.
199 */
200 qpair->id = i + 1;
201 if (ctrlr->num_io_queues > 1) {
202 /* Find number of CPUs served by this queue. */
203 for (n = 1; QP(ctrlr, c + n) == i; n++)
204 ;
205 /* Shuffle multiple NVMe devices between CPUs. */
206 qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
207 qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
208 } else {
209 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
210 qpair->domain = ctrlr->domain;
211 }
212
213 /*
214 * For I/O queues, use the controller-wide max_xfer_size
215 * calculated in nvme_attach().
216 */
217 error = nvme_qpair_construct(qpair, num_entries, num_trackers,
218 ctrlr);
219 if (error)
220 return (error);
221
222 /*
223 * Do not bother binding interrupts if we only have one I/O
224 * interrupt thread for this controller.
225 */
226 if (ctrlr->num_io_queues > 1)
227 bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
228 }
229
230 return (0);
231 }
232
233 static void
234 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
235 {
236 int i;
237
238 /*
239 * No need to disable queues before failing them. Failing is a superset
240 * of disabling (pedantically, we'd abort the AERs silently with a
241 * different error, but when we fail, that hardly matters).
242 */
243 ctrlr->is_failed = true;
244 nvme_qpair_fail(&ctrlr->adminq);
245 if (ctrlr->ioq != NULL) {
246 for (i = 0; i < ctrlr->num_io_queues; i++) {
247 nvme_qpair_fail(&ctrlr->ioq[i]);
248 }
249 }
250 nvme_notify_fail_consumers(ctrlr);
251 }
252
253 /*
254 * Wait for RDY to reach the desired value.
255 *
256 * The sleep interval starts at 1us and grows geometrically the longer we
257 * wait, capped at 1ms.
258 */
259 static int
260 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
261 {
262 int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms);
263 sbintime_t delta_t = SBT_1US;
264 uint32_t csts;
265
266 while (1) {
267 csts = nvme_mmio_read_4(ctrlr, csts);
268 if (csts == NVME_GONE) /* Hot unplug. */
269 return (ENXIO);
270 if (NVMEV(NVME_CSTS_REG_RDY, csts) == desired_val)
271 break;
272 if (timeout - ticks < 0) {
273 nvme_printf(ctrlr, "controller ready did not become %d "
274 "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
275 return (ENXIO);
276 }
277
278 pause_sbt("nvmerdy", delta_t, 0, C_PREL(1));
279 delta_t = min(SBT_1MS, delta_t * 3 / 2);
280 }
281
282 return (0);
283 }
284
285 static int
286 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
287 {
288 uint32_t cc;
289 uint32_t csts;
290 uint8_t en, rdy;
291 int err;
292
293 cc = nvme_mmio_read_4(ctrlr, cc);
294 csts = nvme_mmio_read_4(ctrlr, csts);
295
296 en = NVMEV(NVME_CC_REG_EN, cc);
297 rdy = NVMEV(NVME_CSTS_REG_RDY, csts);
298
299 /*
300 * Per 3.1.5 in NVME 1.3 spec, transitioning CC.EN from 0 to 1
301 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
302 * CSTS.RDY is 0 "has undefined results". So make sure that CSTS.RDY
303 * isn't the desired value. Short circuit if we're already disabled.
304 */
305 if (en == 0) {
306 /* Wait for RDY == 0 or timeout & fail */
307 if (rdy == 0)
308 return (0);
309 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
310 }
311 if (rdy == 0) {
312 /* EN == 1, wait for RDY == 1 or timeout & fail */
313 err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
314 if (err != 0)
315 return (err);
316 }
317
318 cc &= ~NVMEM(NVME_CC_REG_EN);
319 nvme_mmio_write_4(ctrlr, cc, cc);
320
321 /*
322 * A few drives have firmware bugs that freeze the drive if we access
323 * the mmio too soon after we disable.
324 */
325 if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
326 pause("nvmeR", MSEC_2_TICKS(B4_CHK_RDY_DELAY_MS));
327 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
328 }
329
330 static int
331 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
332 {
333 uint32_t cc;
334 uint32_t csts;
335 uint32_t aqa;
336 uint32_t qsize;
337 uint8_t en, rdy;
338 int err;
339
340 cc = nvme_mmio_read_4(ctrlr, cc);
341 csts = nvme_mmio_read_4(ctrlr, csts);
342
343 en = NVMEV(NVME_CC_REG_EN, cc);
344 rdy = NVMEV(NVME_CSTS_REG_RDY, csts);
345
346 /*
347 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
348 */
349 if (en == 1) {
350 if (rdy == 1)
351 return (0);
352 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
353 }
354
355 /* EN == 0 already; wait for RDY == 0 or timeout & fail. */
356 err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
357 if (err != 0)
358 return (err);
359
360 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
361 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
362
363 /* acqs and asqs are 0-based. */
364 qsize = ctrlr->adminq.num_entries - 1;
365
366 aqa = 0;
367 aqa |= NVMEF(NVME_AQA_REG_ACQS, qsize);
368 aqa |= NVMEF(NVME_AQA_REG_ASQS, qsize);
369 nvme_mmio_write_4(ctrlr, aqa, aqa);
370
371 /* Initialization values for CC */
372 cc = 0;
373 cc |= NVMEF(NVME_CC_REG_EN, 1);
374 cc |= NVMEF(NVME_CC_REG_CSS, 0);
375 cc |= NVMEF(NVME_CC_REG_AMS, 0);
376 cc |= NVMEF(NVME_CC_REG_SHN, 0);
377 cc |= NVMEF(NVME_CC_REG_IOSQES, 6); /* SQ entry size == 64 == 2^6 */
378 cc |= NVMEF(NVME_CC_REG_IOCQES, 4); /* CQ entry size == 16 == 2^4 */
379
380 /*
381 * Use the Memory Page Size selected during device initialization. Note
382 * that value stored in mps is suitable to use here without adjusting by
383 * NVME_MPS_SHIFT.
384 */
385 cc |= NVMEF(NVME_CC_REG_MPS, ctrlr->mps);
386
387 nvme_ctrlr_barrier(ctrlr, BUS_SPACE_BARRIER_WRITE);
388 nvme_mmio_write_4(ctrlr, cc, cc);
389
390 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
391 }
392
393 static void
394 nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
395 {
396 int i;
397
398 nvme_admin_qpair_disable(&ctrlr->adminq);
399 /*
400 * I/O queues are not allocated before the initial HW
401 * reset, so do not try to disable them. Use is_initialized
402 * to determine if this is the initial HW reset.
403 */
404 if (ctrlr->is_initialized) {
405 for (i = 0; i < ctrlr->num_io_queues; i++)
406 nvme_io_qpair_disable(&ctrlr->ioq[i]);
407 }
408 }
409
410 static int
411 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
412 {
413 int err;
414
415 TSENTER();
416
417 nvme_ctrlr_disable_qpairs(ctrlr);
418
419 err = nvme_ctrlr_disable(ctrlr);
420 if (err != 0)
421 goto out;
422
423 err = nvme_ctrlr_enable(ctrlr);
424 out:
425
426 TSEXIT();
427 return (err);
428 }
429
430 void
431 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
432 {
433 int cmpset;
434
435 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
436
437 if (cmpset == 0 || ctrlr->is_failed)
438 /*
439 * Controller is already resetting or has failed. Return
440 * immediately since there is no need to kick off another
441 * reset in these cases.
442 */
443 return;
444
445 if (!ctrlr->is_dying)
446 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
447 }
448
449 static int
450 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
451 {
452 struct nvme_completion_poll_status status;
453
454 status.done = 0;
455 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
456 nvme_completion_poll_cb, &status);
457 nvme_completion_poll(&status);
458 if (nvme_completion_is_error(&status.cpl)) {
459 nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
460 return (ENXIO);
461 }
462
463 /* Convert data to host endian */
464 nvme_controller_data_swapbytes(&ctrlr->cdata);
465
466 /*
467 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
468 * controller supports.
469 */
470 if (ctrlr->cdata.mdts > 0)
471 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
472 1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT +
473 NVME_CAP_HI_MPSMIN(ctrlr->cap_hi)));
474
475 return (0);
476 }
477
478 static int
479 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
480 {
481 struct nvme_completion_poll_status status;
482 int cq_allocated, sq_allocated;
483
484 status.done = 0;
485 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
486 nvme_completion_poll_cb, &status);
487 nvme_completion_poll(&status);
488 if (nvme_completion_is_error(&status.cpl)) {
489 nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
490 return (ENXIO);
491 }
492
493 /*
494 * Data in cdw0 is 0-based.
495 * Lower 16-bits indicate number of submission queues allocated.
496 * Upper 16-bits indicate number of completion queues allocated.
497 */
498 sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
499 cq_allocated = (status.cpl.cdw0 >> 16) + 1;
500
501 /*
502 * Controller may allocate more queues than we requested,
503 * so use the minimum of the number requested and what was
504 * actually allocated.
505 */
506 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
507 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
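/*
 * If we ended up with more queues than NUMA domains, trim the count to a
 * multiple of the domain count so queues spread evenly across domains.
 */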
508 if (ctrlr->num_io_queues > vm_ndomains)
509 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;
510
511 return (0);
512 }
513
514 static int
515 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
516 {
517 struct nvme_completion_poll_status status;
518 struct nvme_qpair *qpair;
519 int i;
520
521 for (i = 0; i < ctrlr->num_io_queues; i++) {
522 qpair = &ctrlr->ioq[i];
523
524 status.done = 0;
525 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
526 nvme_completion_poll_cb, &status);
527 nvme_completion_poll(&status);
528 if (nvme_completion_is_error(&status.cpl)) {
529 nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
530 return (ENXIO);
531 }
532
533 status.done = 0;
534 nvme_ctrlr_cmd_create_io_sq(ctrlr, qpair,
535 nvme_completion_poll_cb, &status);
536 nvme_completion_poll(&status);
537 if (nvme_completion_is_error(&status.cpl)) {
538 nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
539 return (ENXIO);
540 }
541 }
542
543 return (0);
544 }
545
546 static int
547 nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
548 {
549 struct nvme_completion_poll_status status;
550 struct nvme_qpair *qpair;
551
552 for (int i = 0; i < ctrlr->num_io_queues; i++) {
553 qpair = &ctrlr->ioq[i];
554
555 status.done = 0;
556 nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
557 nvme_completion_poll_cb, &status);
558 nvme_completion_poll(&status);
559 if (nvme_completion_is_error(&status.cpl)) {
560 nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
561 return (ENXIO);
562 }
563
564 status.done = 0;
565 nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
566 nvme_completion_poll_cb, &status);
567 nvme_completion_poll(&status);
568 if (nvme_completion_is_error(&status.cpl)) {
569 nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
570 return (ENXIO);
571 }
572 }
573
574 return (0);
575 }
576
577 static int
578 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
579 {
580 struct nvme_namespace *ns;
581 uint32_t i;
582
583 for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
584 ns = &ctrlr->ns[i];
585 nvme_ns_construct(ns, i+1, ctrlr);
586 }
587
588 return (0);
589 }
590
591 static bool
592 is_log_page_id_valid(uint8_t page_id)
593 {
594
595 switch (page_id) {
596 case NVME_LOG_ERROR:
597 case NVME_LOG_HEALTH_INFORMATION:
598 case NVME_LOG_FIRMWARE_SLOT:
599 case NVME_LOG_CHANGED_NAMESPACE:
600 case NVME_LOG_COMMAND_EFFECT:
601 case NVME_LOG_RES_NOTIFICATION:
602 case NVME_LOG_SANITIZE_STATUS:
603 return (true);
604 }
605
606 return (false);
607 }
608
609 static uint32_t
610 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
611 {
612 uint32_t log_page_size;
613
614 switch (page_id) {
615 case NVME_LOG_ERROR:
616 log_page_size = min(
617 sizeof(struct nvme_error_information_entry) *
618 (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
619 break;
620 case NVME_LOG_HEALTH_INFORMATION:
621 log_page_size = sizeof(struct nvme_health_information_page);
622 break;
623 case NVME_LOG_FIRMWARE_SLOT:
624 log_page_size = sizeof(struct nvme_firmware_page);
625 break;
626 case NVME_LOG_CHANGED_NAMESPACE:
627 log_page_size = sizeof(struct nvme_ns_list);
628 break;
629 case NVME_LOG_COMMAND_EFFECT:
630 log_page_size = sizeof(struct nvme_command_effects_page);
631 break;
632 case NVME_LOG_RES_NOTIFICATION:
633 log_page_size = sizeof(struct nvme_res_notification_page);
634 break;
635 case NVME_LOG_SANITIZE_STATUS:
636 log_page_size = sizeof(struct nvme_sanitize_status_page);
637 break;
638 default:
639 log_page_size = 0;
640 break;
641 }
642
643 return (log_page_size);
644 }
645
646 static void
647 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
648 uint8_t state)
649 {
650
651 if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
652 nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n");
653
654 if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
655 nvme_printf(ctrlr, "SMART WARNING: temperature above threshold\n");
656
657 if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
658 nvme_printf(ctrlr, "SMART WARNING: device reliability degraded\n");
659
660 if (state & NVME_CRIT_WARN_ST_READ_ONLY)
661 nvme_printf(ctrlr, "SMART WARNING: media placed in read only mode\n");
662
663 if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
664 nvme_printf(ctrlr, "SMART WARNING: volatile memory backup device failed\n");
665
666 if (state & NVME_CRIT_WARN_ST_PERSISTENT_MEMORY_REGION)
667 nvme_printf(ctrlr, "SMART WARNING: persistent memory read only or unreliable\n");
668
669 if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
670 nvme_printf(ctrlr, "SMART WARNING: unknown critical warning(s): state = 0x%02x\n",
671 state & NVME_CRIT_WARN_ST_RESERVED_MASK);
672
673 nvme_ctrlr_devctl(ctrlr, "critical", "SMART_ERROR", "state=0x%02x", state);
674 }
675
676 static void
677 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
678 {
679 struct nvme_async_event_request *aer = arg;
680 struct nvme_health_information_page *health_info;
681 struct nvme_ns_list *nsl;
682 struct nvme_error_information_entry *err;
683 int i;
684
685 /*
686 * If the log page fetch for some reason completed with an error,
687 * don't pass log page data to the consumers. In practice, this case
688 * should never happen.
689 */
690 if (nvme_completion_is_error(cpl))
691 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
692 aer->log_page_id, NULL, 0);
693 else {
694 /* Convert data to host endian */
695 switch (aer->log_page_id) {
696 case NVME_LOG_ERROR:
697 err = (struct nvme_error_information_entry *)aer->log_page_buffer;
698 for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
699 nvme_error_information_entry_swapbytes(err++);
700 break;
701 case NVME_LOG_HEALTH_INFORMATION:
702 nvme_health_information_page_swapbytes(
703 (struct nvme_health_information_page *)aer->log_page_buffer);
704 break;
705 case NVME_LOG_CHANGED_NAMESPACE:
706 nvme_ns_list_swapbytes(
707 (struct nvme_ns_list *)aer->log_page_buffer);
708 break;
709 case NVME_LOG_COMMAND_EFFECT:
710 nvme_command_effects_page_swapbytes(
711 (struct nvme_command_effects_page *)aer->log_page_buffer);
712 break;
713 case NVME_LOG_RES_NOTIFICATION:
714 nvme_res_notification_page_swapbytes(
715 (struct nvme_res_notification_page *)aer->log_page_buffer);
716 break;
717 case NVME_LOG_SANITIZE_STATUS:
718 nvme_sanitize_status_page_swapbytes(
719 (struct nvme_sanitize_status_page *)aer->log_page_buffer);
720 break;
721 default:
722 break;
723 }
724
725 if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
726 health_info = (struct nvme_health_information_page *)
727 aer->log_page_buffer;
728 nvme_ctrlr_log_critical_warnings(aer->ctrlr,
729 health_info->critical_warning);
730 /*
731 * Critical warnings reported through the
732 * SMART/health log page are persistent, so
733 * clear the associated bits in the async event
734 * config so that we do not receive repeated
735 * notifications for the same event.
736 */
737 aer->ctrlr->async_event_config &=
738 ~health_info->critical_warning;
739 nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
740 aer->ctrlr->async_event_config, NULL, NULL);
741 } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
742 !nvme_use_nvd) {
743 nsl = (struct nvme_ns_list *)aer->log_page_buffer;
744 for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
745 if (nsl->ns[i] > NVME_MAX_NAMESPACES)
746 break;
747 nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
748 }
749 }
750
751 /*
752 * Pass the cpl data from the original async event completion,
753 * not the log page fetch.
754 */
755 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
756 aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
757 }
758
759 /*
760 * Repost another asynchronous event request to replace the one
761 * that just completed.
762 */
763 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
764 }
765
766 static void
767 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
768 {
769 struct nvme_async_event_request *aer = arg;
770
771 if (nvme_completion_is_error(cpl)) {
772 /*
773 * Do not retry failed async event requests. This avoids
774 * infinite loops where a new async event request is submitted
775 * to replace the one just failed, only to fail again and
776 * perpetuate the loop.
777 */
778 return;
779 }
780
781 /* Associated log page is in bits 23:16 of completion entry dw0. */
782 aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0);
783
784 nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
785 " page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0),
786 NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0),
787 aer->log_page_id);
788
789 if (is_log_page_id_valid(aer->log_page_id)) {
790 aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
791 aer->log_page_id);
792 memcpy(&aer->cpl, cpl, sizeof(*cpl));
793 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
794 NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
795 aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
796 aer);
797 /* Wait to notify consumers until after log page is fetched. */
798 } else {
799 nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
800 NULL, 0);
801
802 /*
803 * Repost another asynchronous event request to replace the one
804 * that just completed.
805 */
806 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
807 }
808 }
809
810 static void
811 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
812 struct nvme_async_event_request *aer)
813 {
814 struct nvme_request *req;
815
816 aer->ctrlr = ctrlr;
817 req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
818 aer->req = req;
819
820 /*
821 * Disable timeout here, since asynchronous event requests should by
822 * nature never be timed out.
823 */
824 req->timeout = false;
825 req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
826 nvme_ctrlr_submit_admin_request(ctrlr, req);
827 }
828
829 static void
830 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
831 {
832 struct nvme_completion_poll_status status;
833 struct nvme_async_event_request *aer;
834 uint32_t i;
835
836 ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
837 NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
838 NVME_CRIT_WARN_ST_READ_ONLY |
839 NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
840 if (ctrlr->cdata.ver >= NVME_REV(1, 2))
841 ctrlr->async_event_config |=
842 ctrlr->cdata.oaes & (NVME_ASYNC_EVENT_NS_ATTRIBUTE |
843 NVME_ASYNC_EVENT_FW_ACTIVATE);
844
845 status.done = 0;
846 nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
847 0, NULL, 0, nvme_completion_poll_cb, &status);
848 nvme_completion_poll(&status);
849 if (nvme_completion_is_error(&status.cpl) ||
850 (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
851 (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
852 nvme_printf(ctrlr, "temperature threshold not supported\n");
853 } else
854 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
855
856 nvme_ctrlr_cmd_set_async_event_config(ctrlr,
857 ctrlr->async_event_config, NULL, NULL);
858
859 /* aerl is a zero-based value, so we need to add 1 here. */
860 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
861
862 for (i = 0; i < ctrlr->num_aers; i++) {
863 aer = &ctrlr->aer[i];
864 nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
865 }
866 }
867
868 static void
869 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
870 {
871
872 ctrlr->int_coal_time = 0;
873 TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
874 &ctrlr->int_coal_time);
875
876 ctrlr->int_coal_threshold = 0;
877 TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
878 &ctrlr->int_coal_threshold);
879
880 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
881 ctrlr->int_coal_threshold, NULL, NULL);
882 }
883
884 static void
885 nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr)
886 {
887 struct nvme_hmb_chunk *hmbc;
888 int i;
889
890 if (ctrlr->hmb_desc_paddr) {
891 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
892 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
893 ctrlr->hmb_desc_map);
894 ctrlr->hmb_desc_paddr = 0;
895 }
896 if (ctrlr->hmb_desc_tag) {
897 bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
898 ctrlr->hmb_desc_tag = NULL;
899 }
900 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
901 hmbc = &ctrlr->hmb_chunks[i];
902 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
903 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
904 hmbc->hmbc_map);
905 }
906 ctrlr->hmb_nchunks = 0;
907 if (ctrlr->hmb_tag) {
908 bus_dma_tag_destroy(ctrlr->hmb_tag);
909 ctrlr->hmb_tag = NULL;
910 }
911 if (ctrlr->hmb_chunks) {
912 free(ctrlr->hmb_chunks, M_NVME);
913 ctrlr->hmb_chunks = NULL;
914 }
915 }
916
917 static void
918 nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr)
919 {
920 struct nvme_hmb_chunk *hmbc;
921 size_t pref, min, minc, size;
922 int err, i;
923 uint64_t max;
924
925 /* Limit HMB to 5% of RAM size per device by default. */
926 max = (uint64_t)physmem * PAGE_SIZE / 20;
927 TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max);
928
929 /*
930 * The Host Memory Buffer sizes in the Identify data are always expressed
931 * in units of 4KB.
932 */
933 min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS;
934 if (max == 0 || max < min)
935 return;
936 pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max);
937 minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size);
938 if (min > 0 && ctrlr->cdata.hmmaxd > 0)
939 minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
940 ctrlr->hmb_chunk = pref;
941
942 again:
943 /*
944 * However, the chunk sizes, number of chunks, and alignment of chunks
945 * are all based on the current MPS (ctrlr->page_size).
946 */
947 ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size);
948 ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
949 if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
950 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
951 ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
952 ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
953 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
954 ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
955 ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
956 if (err != 0) {
957 nvme_printf(ctrlr, "HMB tag create failed %d\n", err);
958 nvme_ctrlr_hmb_free(ctrlr);
959 return;
960 }
961
962 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
963 hmbc = &ctrlr->hmb_chunks[i];
964 if (bus_dmamem_alloc(ctrlr->hmb_tag,
965 (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
966 &hmbc->hmbc_map)) {
967 nvme_printf(ctrlr, "failed to alloc HMB\n");
968 break;
969 }
970 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
971 hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
972 &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
973 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
974 hmbc->hmbc_map);
975 nvme_printf(ctrlr, "failed to load HMB\n");
976 break;
977 }
978 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
979 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
980 }
981
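/*
 * If we could not reach the controller's minimum HMB size, retry with half
 * the chunk size (as long as it stays above the minimum chunk size) so that
 * more, smaller chunks can be attempted.
 */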
982 if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
983 ctrlr->hmb_chunk / 2 >= minc) {
984 ctrlr->hmb_nchunks = i;
985 nvme_ctrlr_hmb_free(ctrlr);
986 ctrlr->hmb_chunk /= 2;
987 goto again;
988 }
989 ctrlr->hmb_nchunks = i;
990 if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
991 nvme_ctrlr_hmb_free(ctrlr);
992 return;
993 }
994
995 size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
996 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
997 16, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
998 size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
999 if (err != 0) {
1000 nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err);
1001 nvme_ctrlr_hmb_free(ctrlr);
1002 return;
1003 }
1004 if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
1005 (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
1006 &ctrlr->hmb_desc_map)) {
1007 nvme_printf(ctrlr, "failed to alloc HMB desc\n");
1008 nvme_ctrlr_hmb_free(ctrlr);
1009 return;
1010 }
1011 if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1012 ctrlr->hmb_desc_vaddr, size, nvme_single_map,
1013 &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
1014 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
1015 ctrlr->hmb_desc_map);
1016 nvme_printf(ctrlr, "failed to load HMB desc\n");
1017 nvme_ctrlr_hmb_free(ctrlr);
1018 return;
1019 }
1020
1021 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
1022 memset(&ctrlr->hmb_desc_vaddr[i], 0,
1023 sizeof(struct nvme_hmb_desc));
1024 ctrlr->hmb_desc_vaddr[i].addr =
1025 htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
1026 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size);
1027 }
1028 bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1029 BUS_DMASYNC_PREWRITE);
1030
1031 nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n",
1032 (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
1033 / 1024 / 1024);
1034 }
1035
1036 static void
1037 nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret)
1038 {
1039 struct nvme_completion_poll_status status;
1040 uint32_t cdw11;
1041
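/*
 * Set Features / Host Memory Buffer: cdw11 bit 0 is EHM (enable host memory)
 * and bit 1 is MR (memory return, i.e. the buffer being handed back was
 * previously in use by the controller).
 */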
1042 cdw11 = 0;
1043 if (enable)
1044 cdw11 |= 1;
1045 if (memret)
1046 cdw11 |= 2;
1047 status.done = 0;
1048 nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11,
1049 ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size,
1050 ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32,
1051 ctrlr->hmb_nchunks, NULL, 0,
1052 nvme_completion_poll_cb, &status);
1053 nvme_completion_poll(&status);
1054 if (nvme_completion_is_error(&status.cpl))
1055 nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n");
1056 }
1057
1058 static void
1059 nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
1060 {
1061 struct nvme_controller *ctrlr = ctrlr_arg;
1062 uint32_t old_num_io_queues;
1063 int i;
1064
1065 TSENTER();
1066
1067 /*
1068 * Only reset adminq here when we are restarting the
1069 * controller after a reset. During initialization,
1070 * we have already submitted admin commands to get
1071 * the number of I/O queues supported, so cannot reset
1072 * the adminq again here.
1073 */
1074 if (resetting) {
1075 nvme_qpair_reset(&ctrlr->adminq);
1076 nvme_admin_qpair_enable(&ctrlr->adminq);
1077 }
1078
1079 if (ctrlr->ioq != NULL) {
1080 for (i = 0; i < ctrlr->num_io_queues; i++)
1081 nvme_qpair_reset(&ctrlr->ioq[i]);
1082 }
1083
1084 /*
1085 * If this was a reset due to a command timeout during initialization,
1086 * just return here and let the initialization code fail gracefully.
1087 */
1088 if (resetting && !ctrlr->is_initialized)
1089 return;
1090
1091 if (resetting && nvme_ctrlr_identify(ctrlr) != 0) {
1092 nvme_ctrlr_fail(ctrlr);
1093 return;
1094 }
1095
1096 /*
1097 * The number of qpairs is determined during controller initialization,
1098 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
1099 * HW limit. We call SET_FEATURES again here so that it gets called
1100 * after any reset for controllers that depend on the driver to
1101 * explicitly specify how many queues it will use. This value should
1102 * never change between resets, so panic if somehow that does happen.
1103 */
1104 if (resetting) {
1105 old_num_io_queues = ctrlr->num_io_queues;
1106 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
1107 nvme_ctrlr_fail(ctrlr);
1108 return;
1109 }
1110
1111 if (old_num_io_queues != ctrlr->num_io_queues) {
1112 panic("num_io_queues changed from %u to %u",
1113 old_num_io_queues, ctrlr->num_io_queues);
1114 }
1115 }
1116
1117 if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
1118 nvme_ctrlr_hmb_alloc(ctrlr);
1119 if (ctrlr->hmb_nchunks > 0)
1120 nvme_ctrlr_hmb_enable(ctrlr, true, false);
1121 } else if (ctrlr->hmb_nchunks > 0)
1122 nvme_ctrlr_hmb_enable(ctrlr, true, true);
1123
1124 if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
1125 nvme_ctrlr_fail(ctrlr);
1126 return;
1127 }
1128
1129 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
1130 nvme_ctrlr_fail(ctrlr);
1131 return;
1132 }
1133
1134 nvme_ctrlr_configure_aer(ctrlr);
1135 nvme_ctrlr_configure_int_coalescing(ctrlr);
1136
1137 for (i = 0; i < ctrlr->num_io_queues; i++)
1138 nvme_io_qpair_enable(&ctrlr->ioq[i]);
1139 TSEXIT();
1140 }
1141
1142 void
1143 nvme_ctrlr_start_config_hook(void *arg)
1144 {
1145 struct nvme_controller *ctrlr = arg;
1146
1147 TSENTER();
1148
1149 if (nvme_ctrlr_hw_reset(ctrlr) != 0) {
1150 fail:
1151 nvme_ctrlr_fail(ctrlr);
1152 config_intrhook_disestablish(&ctrlr->config_hook);
1153 return;
1154 }
1155
1156 nvme_qpair_reset(&ctrlr->adminq);
1157 nvme_admin_qpair_enable(&ctrlr->adminq);
1158
1159 if (nvme_ctrlr_identify(ctrlr) == 0 &&
1160 nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
1161 nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
1162 nvme_ctrlr_start(ctrlr, false);
1163 else
1164 goto fail;
1165
1166 nvme_sysctl_initialize_ctrlr(ctrlr);
1167 config_intrhook_disestablish(&ctrlr->config_hook);
1168
1169 ctrlr->is_initialized = 1;
1170 nvme_notify_new_controller(ctrlr);
1171 TSEXIT();
1172 }
1173
1174 static void
1175 nvme_ctrlr_reset_task(void *arg, int pending)
1176 {
1177 struct nvme_controller *ctrlr = arg;
1178 int status;
1179
1180 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"start\"");
1181 status = nvme_ctrlr_hw_reset(ctrlr);
1182 if (status == 0) {
1183 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"success\"");
1184 nvme_ctrlr_start(ctrlr, true);
1185 } else {
1186 nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"timed_out\"");
1187 nvme_ctrlr_fail(ctrlr);
1188 }
1189
1190 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1191 }
1192
1193 /*
1194 * Poll all the queues enabled on the device for completion.
1195 */
1196 void
1197 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
1198 {
1199 int i;
1200
1201 nvme_qpair_process_completions(&ctrlr->adminq);
1202
1203 for (i = 0; i < ctrlr->num_io_queues; i++)
1204 if (ctrlr->ioq && ctrlr->ioq[i].cpl)
1205 nvme_qpair_process_completions(&ctrlr->ioq[i]);
1206 }
1207
1208 /*
1209 * Poll the single-vector interrupt case: num_io_queues will be 1 and
1210 * there's only a single vector. While we're polling, we mask further
1211 * interrupts in the controller.
1212 */
1213 void
1214 nvme_ctrlr_shared_handler(void *arg)
1215 {
1216 struct nvme_controller *ctrlr = arg;
1217
1218 nvme_mmio_write_4(ctrlr, intms, 1);
1219 nvme_ctrlr_poll(ctrlr);
1220 nvme_mmio_write_4(ctrlr, intmc, 1);
1221 }
1222
1223 static void
1224 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
1225 {
1226 struct nvme_pt_command *pt = arg;
1227 struct mtx *mtx = pt->driver_lock;
1228 uint16_t status;
1229
1230 bzero(&pt->cpl, sizeof(pt->cpl));
1231 pt->cpl.cdw0 = cpl->cdw0;
1232
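/* Copy the completion status back to the caller, clearing the phase tag bit. */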
1233 status = cpl->status;
1234 status &= ~NVMEM(NVME_STATUS_P);
1235 pt->cpl.status = status;
1236
1237 mtx_lock(mtx);
1238 pt->driver_lock = NULL;
1239 wakeup(pt);
1240 mtx_unlock(mtx);
1241 }
1242
1243 int
1244 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1245 struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
1246 int is_admin_cmd)
1247 {
1248 struct nvme_request *req;
1249 struct mtx *mtx;
1250 struct buf *buf = NULL;
1251 int ret = 0;
1252
1253 if (pt->len > 0) {
1254 if (pt->len > ctrlr->max_xfer_size) {
1255 nvme_printf(ctrlr, "pt->len (%d) "
1256 "exceeds max_xfer_size (%d)\n", pt->len,
1257 ctrlr->max_xfer_size);
1258 return EIO;
1259 }
1260 if (is_user_buffer) {
1261 /*
1262 * Ensure the user buffer is wired for the duration of
1263 * this pass-through command.
1264 */
1265 PHOLD(curproc);
1266 buf = uma_zalloc(pbuf_zone, M_WAITOK);
1267 buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1268 if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
1269 ret = EFAULT;
1270 goto err;
1271 }
1272 req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
1273 nvme_pt_done, pt);
1274 } else
1275 req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1276 nvme_pt_done, pt);
1277 } else
1278 req = nvme_allocate_request_null(nvme_pt_done, pt);
1279
1280 /* Assume user space already converted to little-endian */
1281 req->cmd.opc = pt->cmd.opc;
1282 req->cmd.fuse = pt->cmd.fuse;
1283 req->cmd.rsvd2 = pt->cmd.rsvd2;
1284 req->cmd.rsvd3 = pt->cmd.rsvd3;
1285 req->cmd.cdw10 = pt->cmd.cdw10;
1286 req->cmd.cdw11 = pt->cmd.cdw11;
1287 req->cmd.cdw12 = pt->cmd.cdw12;
1288 req->cmd.cdw13 = pt->cmd.cdw13;
1289 req->cmd.cdw14 = pt->cmd.cdw14;
1290 req->cmd.cdw15 = pt->cmd.cdw15;
1291
1292 req->cmd.nsid = htole32(nsid);
1293
1294 mtx = mtx_pool_find(mtxpool_sleep, pt);
1295 pt->driver_lock = mtx;
1296
1297 if (is_admin_cmd)
1298 nvme_ctrlr_submit_admin_request(ctrlr, req);
1299 else
1300 nvme_ctrlr_submit_io_request(ctrlr, req);
1301
1302 mtx_lock(mtx);
1303 while (pt->driver_lock != NULL)
1304 mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1305 mtx_unlock(mtx);
1306
1307 if (buf != NULL) {
1308 vunmapbuf(buf);
1309 err:
1310 uma_zfree(pbuf_zone, buf);
1311 PRELE(curproc);
1312 }
1313
1314 return (ret);
1315 }
1316
1317 static int
1318 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1319 struct thread *td)
1320 {
1321 struct nvme_controller *ctrlr;
1322 struct nvme_pt_command *pt;
1323
1324 ctrlr = cdev->si_drv1;
1325
1326 switch (cmd) {
1327 case NVME_RESET_CONTROLLER:
1328 nvme_ctrlr_reset(ctrlr);
1329 break;
1330 case NVME_PASSTHROUGH_CMD:
1331 pt = (struct nvme_pt_command *)arg;
1332 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1333 1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1334 case NVME_GET_NSID:
1335 {
1336 struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
1337 strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1338 sizeof(gnsid->cdev));
1339 gnsid->nsid = 0;
1340 break;
1341 }
1342 case NVME_GET_MAX_XFER_SIZE:
1343 *(uint64_t *)arg = ctrlr->max_xfer_size;
1344 break;
1345 default:
1346 return (ENOTTY);
1347 }
1348
1349 return (0);
1350 }
1351
1352 static struct cdevsw nvme_ctrlr_cdevsw = {
1353 .d_version = D_VERSION,
1354 .d_flags = 0,
1355 .d_ioctl = nvme_ctrlr_ioctl
1356 };
1357
1358 int
1359 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1360 {
1361 struct make_dev_args md_args;
1362 uint32_t cap_lo;
1363 uint32_t cap_hi;
1364 uint32_t to, vs, pmrcap;
1365 int status, timeout_period;
1366
1367 ctrlr->dev = dev;
1368
1369 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1370 if (bus_get_domain(dev, &ctrlr->domain) != 0)
1371 ctrlr->domain = 0;
1372
1373 ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1374 if (bootverbose) {
1375 device_printf(dev, "CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u\n",
1376 cap_lo, NVME_CAP_LO_MQES(cap_lo),
1377 NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "",
1378 NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "",
1379 (NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "",
1380 (NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "",
1381 NVME_CAP_LO_TO(cap_lo));
1382 }
1383 ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1384 if (bootverbose) {
1385 device_printf(dev, "CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, "
1386 "CPS %x, MPSMIN %u, MPSMAX %u%s%s%s%s%s\n", cap_hi,
1387 NVME_CAP_HI_DSTRD(cap_hi),
1388 NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "",
1389 NVME_CAP_HI_CSS(cap_hi),
1390 NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "",
1391 NVME_CAP_HI_CPS(cap_hi),
1392 NVME_CAP_HI_MPSMIN(cap_hi),
1393 NVME_CAP_HI_MPSMAX(cap_hi),
1394 NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "",
1395 NVME_CAP_HI_CMBS(cap_hi) ? ", CMBS" : "",
1396 NVME_CAP_HI_NSSS(cap_hi) ? ", NSSS" : "",
1397 NVME_CAP_HI_CRWMS(cap_hi) ? ", CRWMS" : "",
1398 NVME_CAP_HI_CRIMS(cap_hi) ? ", CRIMS" : "");
1399 }
1400 if (bootverbose) {
1401 vs = nvme_mmio_read_4(ctrlr, vs);
1402 device_printf(dev, "Version: 0x%08x: %d.%d\n", vs,
1403 NVME_MAJOR(vs), NVME_MINOR(vs));
1404 }
1405 if (bootverbose && NVME_CAP_HI_PMRS(cap_hi)) {
1406 pmrcap = nvme_mmio_read_4(ctrlr, pmrcap);
1407 device_printf(dev, "PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, "
1408 "PMRWBM %x, PMRTO %u%s\n", pmrcap,
1409 NVME_PMRCAP_BIR(pmrcap),
1410 NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "",
1411 NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "",
1412 NVME_PMRCAP_PMRTU(pmrcap),
1413 NVME_PMRCAP_PMRWBM(pmrcap),
1414 NVME_PMRCAP_PMRTO(pmrcap),
1415 NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : "");
1416 }
1417
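/*
 * The doorbell stride is 2^(2 + CAP.DSTRD) bytes; store the exponent so the
 * stride in bytes is simply (1 << ctrlr->dstrd).
 */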
1418 ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
1419
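/*
 * Use the smallest memory page size the controller supports (MPSMIN); MPS is
 * an exponent relative to 4KB, so the page size is 2^(12 + MPS) bytes.
 */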
1420 ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi);
1421 ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps);
1422
1423 /* Get ready timeout value from controller, in units of 500ms. */
1424 to = NVME_CAP_LO_TO(cap_lo) + 1;
1425 ctrlr->ready_timeout_in_ms = to * 500;
1426
1427 timeout_period = NVME_ADMIN_TIMEOUT_PERIOD;
1428 TUNABLE_INT_FETCH("hw.nvme.admin_timeout_period", &timeout_period);
1429 timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1430 timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1431 ctrlr->admin_timeout_period = timeout_period;
1432
1433 timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1434 TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1435 timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1436 timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1437 ctrlr->timeout_period = timeout_period;
1438
1439 nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1440 TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1441
1442 ctrlr->enable_aborts = 0;
1443 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1444
1445 ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK);
1446
1447 /* Cap transfers by the maximum addressable by page-sized PRP (4KB pages -> 2MB). */
1448 ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size));
1449 if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1450 return (ENXIO);
1451
1452 /*
1453 * Create 2 threads for the taskqueue. The reset thread will block when
1454 * it detects that the controller has failed, until all I/O has been
1455 * failed up the stack. The fail_req task needs to be able to run in
1456 * this case to finish those request failures.
1457 *
1458 * We could partially solve this race by draining the failed request
1459 * queue before proceeding to free the sim, though nothing would stop
1460 * new I/O from coming in after we do that drain, but before we reach
1461 * cam_sim_free, so this big hammer is used instead.
1462 */
1463 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1464 taskqueue_thread_enqueue, &ctrlr->taskqueue);
1465 taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");
1466
1467 ctrlr->is_resetting = 0;
1468 ctrlr->is_initialized = 0;
1469 ctrlr->notification_sent = 0;
1470 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1471 STAILQ_INIT(&ctrlr->fail_req);
1472 ctrlr->is_failed = false;
1473
1474 make_dev_args_init(&md_args);
1475 md_args.mda_devsw = &nvme_ctrlr_cdevsw;
1476 md_args.mda_uid = UID_ROOT;
1477 md_args.mda_gid = GID_WHEEL;
1478 md_args.mda_mode = 0600;
1479 md_args.mda_unit = device_get_unit(dev);
1480 md_args.mda_si_drv1 = (void *)ctrlr;
1481 status = make_dev_s(&md_args, &ctrlr->cdev, "%s",
1482 device_get_nameunit(dev));
1483 if (status != 0)
1484 return (ENXIO);
1485
1486 return (0);
1487 }
1488
1489 void
1490 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1491 {
1492 int gone, i;
1493
1494 ctrlr->is_dying = true;
1495
1496 if (ctrlr->resource == NULL)
1497 goto nores;
1498 if (!mtx_initialized(&ctrlr->adminq.lock))
1499 goto noadminq;
1500
1501 /*
1502 * Check whether it is a hot unplug or a clean driver detach.
1503 * If device is not there any more, skip any shutdown commands.
1504 */
1505 gone = (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE);
1506 if (gone)
1507 nvme_ctrlr_fail(ctrlr);
1508 else
1509 nvme_notify_fail_consumers(ctrlr);
1510
1511 for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1512 nvme_ns_destruct(&ctrlr->ns[i]);
1513
1514 if (ctrlr->cdev)
1515 destroy_dev(ctrlr->cdev);
1516
1517 if (ctrlr->is_initialized) {
1518 if (!gone) {
1519 if (ctrlr->hmb_nchunks > 0)
1520 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1521 nvme_ctrlr_delete_qpairs(ctrlr);
1522 }
1523 nvme_ctrlr_hmb_free(ctrlr);
1524 }
1525 if (ctrlr->ioq != NULL) {
1526 for (i = 0; i < ctrlr->num_io_queues; i++)
1527 nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1528 free(ctrlr->ioq, M_NVME);
1529 }
1530 nvme_admin_qpair_destroy(&ctrlr->adminq);
1531
1532 /*
1533 * Notify the controller of a shutdown, even though this is due to
1534 * a driver unload, not a system shutdown (this path is not invoked
1535 * during shutdown). This ensures the controller receives a
1536 * shutdown notification in case the system is shut down before
1537 * reloading the driver.
1538 */
1539 if (!gone)
1540 nvme_ctrlr_shutdown(ctrlr);
1541
1542 if (!gone)
1543 nvme_ctrlr_disable(ctrlr);
1544
1545 noadminq:
1546 if (ctrlr->taskqueue)
1547 taskqueue_free(ctrlr->taskqueue);
1548
1549 if (ctrlr->tag)
1550 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1551
1552 if (ctrlr->res)
1553 bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1554 rman_get_rid(ctrlr->res), ctrlr->res);
1555
1556 if (ctrlr->bar4_resource != NULL) {
1557 bus_release_resource(dev, SYS_RES_MEMORY,
1558 ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1559 }
1560
1561 bus_release_resource(dev, SYS_RES_MEMORY,
1562 ctrlr->resource_id, ctrlr->resource);
1563
1564 nores:
1565 if (ctrlr->alignment_splits)
1566 counter_u64_free(ctrlr->alignment_splits);
1567
1568 mtx_destroy(&ctrlr->lock);
1569 }
1570
1571 void
1572 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1573 {
1574 uint32_t cc;
1575 uint32_t csts;
1576 int timeout;
1577
1578 cc = nvme_mmio_read_4(ctrlr, cc);
1579 cc &= ~NVMEM(NVME_CC_REG_SHN);
1580 cc |= NVMEF(NVME_CC_REG_SHN, NVME_SHN_NORMAL);
1581 nvme_mmio_write_4(ctrlr, cc, cc);
1582
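/*
 * RTD3E is reported by the controller in microseconds; convert to ticks,
 * rounding up, and fall back to 5 seconds when it is not reported.
 */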
1583 timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz :
1584 ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000);
1585 while (1) {
1586 csts = nvme_mmio_read_4(ctrlr, csts);
1587 if (csts == NVME_GONE) /* Hot unplug. */
1588 break;
1589 if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
1590 break;
1591 if (timeout - ticks < 0) {
1592 nvme_printf(ctrlr, "shutdown timeout\n");
1593 break;
1594 }
1595 pause("nvmeshut", 1);
1596 }
1597 }
1598
1599 void
1600 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1601 struct nvme_request *req)
1602 {
1603
1604 nvme_qpair_submit_request(&ctrlr->adminq, req);
1605 }
1606
1607 void
1608 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1609 struct nvme_request *req)
1610 {
1611 struct nvme_qpair *qpair;
1612
1613 qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
1614 nvme_qpair_submit_request(qpair, req);
1615 }
1616
1617 device_t
1618 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1619 {
1620
1621 return (ctrlr->dev);
1622 }
1623
1624 const struct nvme_controller_data *
1625 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1626 {
1627
1628 return (&ctrlr->cdata);
1629 }
1630
1631 int
1632 nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
1633 {
1634 int to = hz;
1635
1636 /*
1637 * Can't touch failed controllers, so it's already suspended.
1638 */
1639 if (ctrlr->is_failed)
1640 return (0);
1641
1642 /*
1643 * We don't want the reset taskqueue running, since it does similar
1644 * things, so prevent it from running after we start. Wait for any reset
1645 * that may have been started to complete. The reset process we follow
1646 * will ensure that any new I/O will queue and be given to the hardware
1647 * after we resume (though there should be none).
1648 */
1649 while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
1650 pause("nvmesusp", 1);
1651 if (to <= 0) {
1652 nvme_printf(ctrlr,
1653 "Competing reset task didn't finish. Try again later.\n");
1654 return (EWOULDBLOCK);
1655 }
1656
1657 if (ctrlr->hmb_nchunks > 0)
1658 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1659
1660 /*
1661 * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to
1662 * delete the hardware I/O queues and then shut down. This properly
1663 * flushes any metadata the drive may have stored so it can survive
1664 * having its power removed and prevents the unsafe shutdown count from
1665 * incrementing. Once we delete the qpairs, we have to disable them
1666 * before shutting down.
1667 */
1668 nvme_ctrlr_delete_qpairs(ctrlr);
1669 nvme_ctrlr_disable_qpairs(ctrlr);
1670 nvme_ctrlr_shutdown(ctrlr);
1671
1672 return (0);
1673 }
1674
1675 int
1676 nvme_ctrlr_resume(struct nvme_controller *ctrlr)
1677 {
1678
1679 /*
1680 * Can't touch failed controllers, so nothing to do to resume.
1681 */
1682 if (ctrlr->is_failed)
1683 return (0);
1684
1685 if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1686 goto fail;
1687
1688 /*
1689 * Now that we've reset the hardware, we can restart the controller. Any
1690 * I/O that was pending is requeued. Any admin commands are aborted with
1691 * an error. Once we've restarted, take the controller out of reset.
1692 */
1693 nvme_ctrlr_start(ctrlr, true);
1694 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1695
1696 return (0);
1697 fail:
1698 /*
1699 * Since we can't bring the controller out of reset, announce and fail
1700 * the controller. However, we have to return success for the resume
1701 * itself, due to questionable APIs.
1702 */
1703 nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
1704 nvme_ctrlr_fail(ctrlr);
1705 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1706 return (0);
1707 }
1708