xref: /dragonfly/sys/dev/disk/nvme/nvme_admin.c (revision 235fb4ac)
1 /*
2  * Copyright (c) 2016 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * Administration thread
36  *
37  * - Handles resetting, features, iteration of namespaces, and disk
38  *   attachments.  Most admin operations are serialized by the admin thread.
39  *
40  * - Ioctls and any BIOs which require more sophisticated processing
41  *   are also handed to this thread.
42  *
43  * - Can freeze/resume other queues for various purposes.
44  */
45 
46 #include "nvme.h"
47 
48 static void nvme_admin_thread(void *arg);
49 static int nvme_admin_state_identify_ctlr(nvme_softc_t *sc);
50 static int nvme_admin_state_make_queues(nvme_softc_t *sc);
51 static int nvme_admin_state_identify_ns(nvme_softc_t *sc);
52 static int nvme_admin_state_operating(nvme_softc_t *sc);
53 static int nvme_admin_state_failed(nvme_softc_t *sc);
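
/*
 * The admin thread runs a simple state machine via sc->admin_func:
 * identify_ctlr -> make_queues -> identify_ns -> operating, or
 * failed if the I/O queues cannot be set up.  A state function
 * returns non-zero when it has advanced the state so the main loop
 * calls the next state immediately instead of sleeping.
 */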
54 
55 /*
56  * Start the admin thread and block until it says it is running.
57  */
58 int
59 nvme_start_admin_thread(nvme_softc_t *sc)
60 {
61 	int error, intr_flags;
62 
63 	lockinit(&sc->admin_lk, "admlk", 0, 0);
64 	lockinit(&sc->ioctl_lk, "nvioc", 0, 0);
65 	sc->admin_signal = 0;
66 
67 	intr_flags = INTR_MPSAFE;
68 	if (sc->nirqs == 1) {
69 		/* This interrupt processes data CQs too */
70 		intr_flags |= INTR_HIFREQ;
71 	}
72 
73 	error = bus_setup_intr(sc->dev, sc->irq[0], intr_flags,
74 			       nvme_intr, &sc->comqueues[0],
75 			       &sc->irq_handle[0], NULL);
76 	if (error) {
77 		device_printf(sc->dev, "unable to install interrupt\n");
78 		return error;
79 	}
80 	lockmgr(&sc->admin_lk, LK_EXCLUSIVE);
81 	kthread_create(nvme_admin_thread, sc, &sc->admintd, "nvme_admin");
82 	while ((sc->admin_signal & ADMIN_SIG_RUNNING) == 0)
83 		lksleep(&sc->admin_signal, &sc->admin_lk, 0, "nvwbeg", 0);
84 	lockmgr(&sc->admin_lk, LK_RELEASE);
85 
86 	return 0;
87 }
88 
89 /*
90  * Stop the admin thread and block until it says it is done.
91  */
92 void
93 nvme_stop_admin_thread(nvme_softc_t *sc)
94 {
95 	uint32_t i;
96 
97 	atomic_set_int(&sc->admin_signal, ADMIN_SIG_STOP);
98 
99 	/*
100 	 * We have to wait for the admin thread to finish its probe
101 	 * before shutting it down.  Break out if the admin thread
102 	 * never managed to even start.
103 	 */
104 	lockmgr(&sc->admin_lk, LK_EXCLUSIVE);
105 	while ((sc->admin_signal & ADMIN_SIG_PROBED) == 0) {
106 		if ((sc->admin_signal & ADMIN_SIG_RUNNING) == 0)
107 			break;
108 		lksleep(&sc->admin_signal, &sc->admin_lk, 0, "nvwend", 0);
109 	}
110 	lockmgr(&sc->admin_lk, LK_RELEASE);
111 
112 	/*
113 	 * Disconnect our disks while the admin thread is still running,
114 	 * ensuring that the poll works even if interrupts are broken.
115 	 * Otherwise we could deadlock in the devfs core.
116 	 */
117 	for (i = 0; i < NVME_MAX_NAMESPACES; ++i) {
118 		nvme_softns_t *nsc;
119 
120 		if ((nsc = sc->nscary[i]) != NULL) {
121 			nvme_disk_detach(nsc);
122 
123 			kfree(nsc, M_NVME);
124 			sc->nscary[i] = NULL;
125 		}
126 	}
127 
128 	/*
129 	 * Ask the admin thread to shut-down.
130 	 */
131 	lockmgr(&sc->admin_lk, LK_EXCLUSIVE);
132 	wakeup(&sc->admin_signal);
133 	while (sc->admin_signal & ADMIN_SIG_RUNNING)
134 		lksleep(&sc->admin_signal, &sc->admin_lk, 0, "nvwend", 0);
135 	lockmgr(&sc->admin_lk, LK_RELEASE);
136 	if (sc->irq_handle[0]) {
137 		bus_teardown_intr(sc->dev, sc->irq[0], sc->irq_handle[0]);
138 		sc->irq_handle[0] = NULL;
139 	}
140 	lockuninit(&sc->ioctl_lk);
141 	lockuninit(&sc->admin_lk);
142 
143 	/*
144 	 * The thread might be running on another cpu.  Give it time to
145 	 * actually exit before returning, in case the caller is about to
146 	 * unload the module; otherwise this delay is not strictly needed.
147 	 */
148 	nvme_os_sleep(1);
149 }
150 
151 static
152 void
153 nvme_admin_thread(void *arg)
154 {
155 	nvme_softc_t *sc = arg;
156 	uint32_t i;
157 
158 	lockmgr(&sc->admin_lk, LK_EXCLUSIVE);
159 	atomic_set_int(&sc->admin_signal, ADMIN_SIG_RUNNING);
160 	wakeup(&sc->admin_signal);
161 
162 	sc->admin_func = nvme_admin_state_identify_ctlr;
163 
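	/*
	 * Main loop: manually poll all configured completion queues on
	 * each pass (so requests complete even if interrupts are not
	 * working), requeue any deferred disk BIOs, then run the current
	 * state function.  If the state function is idle and no work has
	 * been signalled, sleep for up to a second before polling again.
	 */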
164 	while ((sc->admin_signal & ADMIN_SIG_STOP) == 0) {
165 		for (i = 0; i <= sc->niocomqs; ++i) {
166 			nvme_comqueue_t *comq = &sc->comqueues[i];
167 
168 			if (comq->nqe == 0)	/* not configured */
169 				continue;
170 
171 			lockmgr(&comq->lk, LK_EXCLUSIVE);
172 			nvme_poll_completions(comq, &comq->lk);
173 			lockmgr(&comq->lk, LK_RELEASE);
174 		}
175 		if (sc->admin_signal & ADMIN_SIG_REQUEUE) {
176 			atomic_clear_int(&sc->admin_signal, ADMIN_SIG_REQUEUE);
177 			nvme_disk_requeues(sc);
178 		}
179 		if (sc->admin_func(sc) == 0 &&
180 		    (sc->admin_signal & ADMIN_SIG_RUN_MASK) == 0) {
181 			lksleep(&sc->admin_signal, &sc->admin_lk, 0,
182 				"nvidle", hz);
183 		}
184 	}
185 
186 	/*
187 	 * Cleanup state.
188 	 *
189 	 * Note that we actually issue delete queue commands here.  The NVME
190 	 * spec says that for a normal shutdown the I/O queues should be
191 	 * deleted prior to issuing the shutdown in the CONFIG register.
192 	 */
193 	for (i = 1; i <= sc->niosubqs; ++i) {
194 		nvme_delete_subqueue(sc, i);
195 		nvme_free_subqueue(sc, i);
196 	}
197 	for (i = 1; i <= sc->niocomqs; ++i) {
198 		nvme_delete_comqueue(sc, i);
199 		nvme_free_comqueue(sc, i);
200 	}
201 
202 	/*
203 	 * Signal that we are done.
204 	 */
205 	atomic_clear_int(&sc->admin_signal, ADMIN_SIG_RUNNING);
206 	wakeup(&sc->admin_signal);
207 	lockmgr(&sc->admin_lk, LK_RELEASE);
208 }
209 
210 /*
211  * Identify the controller
212  */
213 static
214 int
215 nvme_admin_state_identify_ctlr(nvme_softc_t *sc)
216 {
217 	nvme_request_t *req;
218 	nvme_ident_ctlr_data_t *rp;
219 	int status;
220 	uint64_t mempgsize;
221 	char serial[20+16];
222 	char model[40+16];
223 
224 	/*
225 	 * Identify Controller
226 	 */
227 	mempgsize = NVME_CAP_MEMPG_MIN_GET(sc->cap);
228 
229 	req = nvme_get_admin_request(sc, NVME_OP_IDENTIFY);
230 	req->cmd.identify.cns = NVME_CNS_CTLR;
231 	req->cmd.identify.cntid = 0;
232 	bzero(req->info, sizeof(*req->info));
233 	nvme_submit_request(req);
234 	status = nvme_wait_request(req, hz);
235 	/* XXX handle status */
236 
237 	sc->idctlr = req->info->idctlr;
238 	nvme_put_request(req);
239 
240 	rp = &sc->idctlr;
241 
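	/*
	 * The serial and model numbers in the identify data are fixed
	 * width, space padded ASCII fields with no NUL terminator, so
	 * copy them into larger zeroed buffers and clean them up before
	 * printing.
	 */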
242 	KKASSERT(sizeof(sc->idctlr.serialno) == 20);
243 	KKASSERT(sizeof(sc->idctlr.modelno) == 40);
244 	bzero(serial, sizeof(serial));
245 	bzero(model, sizeof(model));
246 	bcopy(rp->serialno, serial, sizeof(rp->serialno));
247 	bcopy(rp->modelno, model, sizeof(rp->modelno));
248 	string_cleanup(serial, 0);
249 	string_cleanup(model, 0);
250 
251 	device_printf(sc->dev, "Model %s BaseSerial %s nscount=%d\n",
252 		      model, serial, rp->ns_count);
253 
254 	sc->admin_func = nvme_admin_state_make_queues;
255 
256 	return 1;
257 }
258 
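/*
 * COMQFIXUP() folds a 1-based submission queue / MSI-X vector number
 * into the I/O completion queue range 1..ncomqs, skipping completion
 * queue 0 (the admin queue).  e.g. COMQFIXUP(5, 4) == 1.
 */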
259 #define COMQFIXUP(msix, ncomqs)	((((msix) - 1) % (ncomqs)) + 1)
260 
261 /*
262  * Request and create the I/O queues.  Figure out CPU mapping optimizations.
263  */
264 static
265 int
266 nvme_admin_state_make_queues(nvme_softc_t *sc)
267 {
268 	nvme_request_t *req;
269 	uint16_t niosubqs;
270 	uint16_t niocomqs;
271 	uint32_t i;
272 	uint16_t qno;
273 	int status;
274 	int error;
275 
276 	/*
277 	 * Calculate how many I/O queues (non-inclusive of admin queue)
278 	 * we want to have, up to 65535.  dw0 in the response returns the
279 	 * number of queues the controller gives us.  Submission and
280 	 * Completion queues are specified separately.
281 	 *
282 	 * This driver runs optimally with two submission queues (one for
283 	 * reads, one for writes) and one completion queue per cpu.
284 	 *
285 	 * +1 for dumps			XXX future
286 	 * +1 for async events		XXX future
287 	 */
288 	req = nvme_get_admin_request(sc, NVME_OP_SET_FEATURES);
289 
290 	niosubqs = ncpus * 2 + 0;
291 	niocomqs = ncpus + 0;
292 	if (niosubqs > NVME_MAX_QUEUES)
293 		niosubqs = NVME_MAX_QUEUES;
294 	if (niocomqs > NVME_MAX_QUEUES)
295 		niocomqs = NVME_MAX_QUEUES;
296 	device_printf(sc->dev, "Request %u/%u queues, ", niosubqs, niocomqs);
297 
298 	req->cmd.setfeat.flags = NVME_FID_NUMQUEUES;
299 	req->cmd.setfeat.numqs.nsqr = niosubqs - 1;	/* 0's based 0=1 */
300 	req->cmd.setfeat.numqs.ncqr = niocomqs - 1;	/* 0's based 0=1 */
301 
302 	nvme_submit_request(req);
303 
304 	/*
305 	 * Get response and set our operations mode.
306 	 */
307 	status = nvme_wait_request(req, hz);
308 	/* XXX handle status */
309 
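	/*
	 * On success dw0 reports the allocated queue counts as 0's based
	 * values: submission queues in the low 16 bits, completion queues
	 * in the high 16 bits.
	 */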
310 	if (status == 0) {
311 		sc->niosubqs = 1 + (req->res.setfeat.dw0 & 0xFFFFU);
312 		sc->niocomqs = 1 + ((req->res.setfeat.dw0 >> 16) & 0xFFFFU);
313 	} else {
314 		sc->niosubqs = 0;
315 		sc->niocomqs = 0;
316 	}
317 	kprintf("Returns %u/%u queues, ", sc->niosubqs, sc->niocomqs);
318 
319 	nvme_put_request(req);
320 
321 	sc->dumpqno = 0;
322 	sc->eventqno = 0;
323 
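	/*
	 * Fall back through progressively smaller queue layouts depending
	 * on how many queues the controller actually granted:
	 *
	 *	- two submission queues plus one completion queue per cpu
	 *	- one submission and one completion queue per cpu
	 *	- two shared submission queues, two completion queues
	 *	- two shared submission queues, one completion queue
	 *	- a single shared submission/completion queue pair
	 */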
324 	if (sc->niosubqs >= ncpus * 2 + 0 && sc->niocomqs >= ncpus + 0) {
325 		/*
326 		 * If we got all the queues we wanted do a full-bore setup of
327 		 * qmap[cpu][type].
328 		 *
329 		 * Remember that subq 0 / comq 0 is the admin queue.
330 		 */
331 		kprintf("optimal map\n");
332 		qno = 1;
333 		for (i = 0; i < ncpus; ++i) {
334 			int cpuqno = COMQFIXUP(sc->cputovect[i], ncpus);
335 
336 			KKASSERT(cpuqno != 0);
337 			sc->qmap[i][0] = qno + 0;
338 			sc->qmap[i][1] = qno + 1;
339 			sc->subqueues[qno + 0].comqid = cpuqno;
340 			sc->subqueues[qno + 1].comqid = cpuqno;
341 			qno += 2;
342 		}
343 		sc->niosubqs = ncpus * 2 + 0;
344 		sc->niocomqs = ncpus + 0;
345 	} else if (sc->niosubqs >= ncpus && sc->niocomqs >= ncpus) {
346 		/*
347 		 * We have enough to give each cpu its own submission
348 		 * and completion queue.
349 		 *
350 		 * leave dumpqno and eventqno set to the admin queue.
351 		 */
352 		kprintf("nominal map 1:1 cpu\n");
353 		for (i = 0; i < ncpus; ++i) {
354 			qno = sc->cputovect[i];
355 			KKASSERT(qno != 0);
356 			sc->qmap[i][0] = qno;
357 			sc->qmap[i][1] = qno;
358 			sc->subqueues[qno].comqid = COMQFIXUP(qno, ncpus);
359 		}
360 		sc->niosubqs = ncpus;
361 		sc->niocomqs = ncpus;
362 	} else if (sc->niosubqs >= 2 && sc->niocomqs >= 2) {
363 		/*
364 		 * We have enough queues to separate and prioritize reads
365 		 * and writes, but all cpus have to share the same submission
366 		 * queues.  There aren't enough queues to give each cpu its
367 		 * own submission/completion queue pair.
368 		 *
369 		 * leave dumpqno and eventqno set to the admin queue.
370 		 */
371 		kprintf("rw-sep map\n");
372 		qno = 1;
373 		for (i = 0; i < ncpus; ++i) {
374 			int cpuqno = COMQFIXUP(sc->cputovect[i], 2);
375 
376 			KKASSERT(qno != 0);
377 			sc->qmap[i][0] = qno + 0;	/* read */
378 			sc->qmap[i][1] = qno + 1;	/* write */
379 			if (i <= 0)
380 				sc->subqueues[qno + 0].comqid = cpuqno;
381 			if (i <= 1)
382 				sc->subqueues[qno + 1].comqid = cpuqno;
383 			/* do not increment qno */
384 		}
385 		sc->niosubqs = 2;
386 		sc->niocomqs = 2;
387 	} else if (sc->niosubqs >= 2) {
388 		/*
389 		 * We have enough to have separate read and write queues.
390 		 */
391 		kprintf("basic map\n");
392 		qno = 1;
393 		for (i = 0; i < ncpus; ++i) {
394 			int cpuqno = COMQFIXUP(sc->cputovect[i], 1);
395 
396 			KKASSERT(qno != 0);
397 			sc->qmap[i][0] = qno + 0;	/* read */
398 			sc->qmap[i][1] = qno + 1;	/* write */
399 			if (i <= 0)
400 				sc->subqueues[qno + 0].comqid = cpuqno;
401 			if (i <= 1)
402 				sc->subqueues[qno + 1].comqid = cpuqno;
403 		}
404 		sc->niosubqs = 2;
405 		sc->niocomqs = 1;
406 	} else {
407 		/*
408 		 * Minimal configuration, all cpus and I/O types use the
409 		 * same queue.  Sad day.
410 		 */
411 		kprintf("minimal map\n");
412 		sc->dumpqno = 0;
413 		sc->eventqno = 0;
414 		for (i = 0; i < ncpus; ++i) {
415 			sc->qmap[i][0] = 1;
416 			sc->qmap[i][1] = 1;
417 		}
418 		sc->subqueues[1].comqid = 1;
419 		sc->niosubqs = 1;
420 		sc->niocomqs = 1;
421 	}
422 
423 	/*
424 	 * Create all I/O submission and completion queues.  The I/O
425 	 * queues start at 1 and are inclusive of niosubqs and niocomqs.
426 	 *
427 	 * NOTE: Completion queues must be created before submission queues.
428 	 *	 That is, the completion queue specified when creating a
429 	 *	 submission queue must already exist.
430 	 */
431 	error = 0;
432 	for (i = 1; i <= sc->niocomqs; ++i) {
433 		error += nvme_alloc_comqueue(sc, i);
434 		if (error) {
435 			device_printf(sc->dev, "Unable to allocate comqs\n");
436 			break;
437 		}
438 		error += nvme_create_comqueue(sc, i);
439 	}
440 	for (i = 1; i <= sc->niosubqs; ++i) {
441 		error += nvme_alloc_subqueue(sc, i);
442 		if (error) {
443 			device_printf(sc->dev, "Unable to allocate subqs\n");
444 			break;
445 		}
446 		error += nvme_create_subqueue(sc, i);
447 	}
448 
449 	if (error) {
450 		device_printf(sc->dev, "Failed to initialize device!\n");
451 		sc->admin_func = nvme_admin_state_failed;
452 	} else {
453 		sc->admin_func = nvme_admin_state_identify_ns;
454 	}
455 
456 	/*
457 	 * Basically interrupt coalescing is worthless if we care about
458 	 * performance, at least on the Intel 750.  Setting the threshold
459 	 * has no effect if time is set to 0.  The smallest time that can
460 	 * be set is a value of 1 (== 100uS), which is much too long.  That
461 	 * is only 10,000 interrupts/sec/cpu and on the Intel 750 it totally
462 	 * destroys sequential performance.
463 	 */
464 	req = nvme_get_admin_request(sc, NVME_OP_SET_FEATURES);
465 
466 	device_printf(sc->dev, "Interrupt Coalesce: disabled\n");
467 
468 	req->cmd.setfeat.flags = NVME_FID_INTCOALESCE;
469 	req->cmd.setfeat.intcoal.thr = 0;
470 	req->cmd.setfeat.intcoal.time = 0;
471 
472 	nvme_submit_request(req);
473 	status = nvme_wait_request(req, hz);
474 	if (status) {
475 		device_printf(sc->dev,
476 			      "Interrupt coalesce failed status=%d\n",
477 			      status);
478 	}
479 	nvme_put_request(req);
480 
481 	return 1;
482 }
483 
484 /*
485  * Identify available namespaces, iterate, and attach to disks.
486  */
487 static
488 int
489 nvme_admin_state_identify_ns(nvme_softc_t *sc)
490 {
491 	nvme_request_t *req;
492 	nvme_ident_ns_list_t *rp;
493 	int status;
494 	uint32_t i;
495 	int j;				/* signed: nscary[] scan below can reach -1 */
496 
497 	if (bootverbose) {
498 		if (sc->idctlr.admin_cap & NVME_ADMIN_NSMANAGE)
499 			device_printf(sc->dev,
500 				      "Namespace management supported\n");
501 		else
502 			device_printf(sc->dev,
503 				      "Namespace management not supported\n");
504 	}
505 #if 0
506 	/*
507 	 * Identify Controllers		TODO TODO TODO
508 	 */
509 	if (sc->idctlr.admin_cap & NVME_ADMIN_NSMANAGE) {
510 		req = nvme_get_admin_request(sc, NVME_OP_IDENTIFY);
511 		req->cmd.identify.cns = NVME_CNS_ANY_CTLR_LIST;
512 		req->cmd.identify.cntid = 0;
513 		bzero(req->info, sizeof(*req->info));
514 		nvme_submit_request(req);
515 		status = nvme_wait_request(req, hz);
516 		kprintf("nsquery status %08x\n", status);
517 
518 #if 0
519 		for (i = 0; i < req->info->ctlrlist.idcount; ++i) {
520 			kprintf("CTLR %04x\n", req->info->ctlrlist.ctlrids[i]);
521 		}
522 #endif
523 		nvme_put_request(req);
524 	}
525 #endif
526 
527 	rp = kmalloc(sizeof(*rp), M_NVME, M_WAITOK | M_ZERO);
528 	if (sc->idctlr.admin_cap & NVME_ADMIN_NSMANAGE) {
529 		/*
530 		 * Namespace management supported, query active namespaces.
531 		 */
532 		req = nvme_get_admin_request(sc, NVME_OP_IDENTIFY);
533 		req->cmd.identify.cns = NVME_CNS_ACT_NSLIST;
534 		req->cmd.identify.cntid = 0;
535 		bzero(req->info, sizeof(*req->info));
536 		nvme_submit_request(req);
537 		status = nvme_wait_request(req, hz);
538 		kprintf("nsquery status %08x\n", status);
539 		/* XXX handle status */
540 
541 		cpu_lfence();
542 		*rp = req->info->nslist;
543 		nvme_put_request(req);
544 	} else {
545 		/*
546 		 * Namespace management not supported, assume nsids 1..N.
547 		 */
548 		for (i = 1; i <= sc->idctlr.ns_count && i <= 1024; ++i)
549 			rp->nsids[i-1] = i;
550 	}
551 
552 	/*
553 	 * Identify each Namespace
554 	 */
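	/*
	 * The namespace id list is one 4K identify page, so it holds at
	 * most 1024 32-bit nsids; unused entries are zero.
	 */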
555 	for (i = 0; i < 1024; ++i) {
556 		nvme_softns_t *nsc;
557 		nvme_lba_fmt_data_t *lbafmt;
558 
559 		if (rp->nsids[i] == 0)
560 			continue;
561 		req = nvme_get_admin_request(sc, NVME_OP_IDENTIFY);
562 		req->cmd.identify.cns = NVME_CNS_ACT_NS;
563 		req->cmd.identify.cntid = 0;
564 		req->cmd.identify.head.nsid = rp->nsids[i];
565 		bzero(req->info, sizeof(*req->info));
566 		nvme_submit_request(req);
567 		status = nvme_wait_request(req, hz);
568 		if (status != 0) {
569 			kprintf("NS FAILED %08x\n", status);
570 			continue;
571 		}
572 
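		/*
		 * Find a slot in nscary[] for this namespace: reuse an
		 * existing entry with a matching nsid if present, otherwise
		 * prefer index i, otherwise scan backwards for any free
		 * slot.  j only ends up negative (caught below) if the
		 * array is completely full.
		 */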
573 		for (j = 0; j < NVME_MAX_NAMESPACES; ++j) {
574 			if (sc->nscary[j] &&
575 			    sc->nscary[j]->nsid == rp->nsids[i])
576 				break;
577 		}
578 		if (j == NVME_MAX_NAMESPACES) {
579 			j = i;
580 			if (sc->nscary[j] != NULL) {
581 				for (j = NVME_MAX_NAMESPACES - 1; j >= 0; --j) {
582 					if (sc->nscary[j] == NULL)
583 						break;
584 				}
585 			}
586 		}
587 		if (j < 0) {
588 			device_printf(sc->dev, "not enough room in nscary for "
589 					       "namespace %08x\n", rp->nsids[i]);
590 			nvme_put_request(req);
591 			continue;
592 		}
593 		nsc = sc->nscary[j];
594 		if (nsc == NULL) {
595 			nsc = kmalloc(sizeof(*nsc), M_NVME, M_WAITOK | M_ZERO);
596 			nsc->unit = nvme_alloc_disk_unit();
597 			sc->nscary[j] = nsc;
598 		}
599 		if (sc->nscmax <= j)
600 			sc->nscmax = j + 1;
601 		nsc->sc = sc;
602 		nsc->nsid = rp->nsids[i];
603 		nsc->state = NVME_NSC_STATE_UNATTACHED;
604 		nsc->idns = req->info->idns;
605 		bioq_init(&nsc->bioq);
606 		lockinit(&nsc->lk, "nvnsc", 0, 0);
607 
608 		nvme_put_request(req);
609 
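		/*
		 * FLBAS selects which of the namespace's LBA formats is in
		 * use; sect_size is the log2 of the sector size, hence the
		 * shift to compute blksize.
		 */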
610 		j = NVME_FLBAS_SEL_GET(nsc->idns.flbas);
611 		lbafmt = &nsc->idns.lba_fmt[j];
612 		nsc->blksize = 1 << lbafmt->sect_size;
613 
614 		/*
615 		 * Attach the namespace
616 		 */
617 		nvme_disk_attach(nsc);
618 	}
619 	kfree(rp, M_NVME);
620 
621 	sc->admin_func = nvme_admin_state_operating;
622 	return 1;
623 }
624 
625 static
626 int
627 nvme_admin_state_operating(nvme_softc_t *sc)
628 {
629 	if ((sc->admin_signal & ADMIN_SIG_PROBED) == 0) {
630 		atomic_set_int(&sc->admin_signal, ADMIN_SIG_PROBED);
631 		wakeup(&sc->admin_signal);
632 	}
633 
634 	return 0;
635 }
636 
637 static
638 int
639 nvme_admin_state_failed(nvme_softc_t *sc)
640 {
641 	if ((sc->admin_signal & ADMIN_SIG_PROBED) == 0) {
642 		atomic_set_int(&sc->admin_signal, ADMIN_SIG_PROBED);
643 		wakeup(&sc->admin_signal);
644 	}
645 
646 	return 0;
647 }
648