/*
 * Copyright (c) 2016 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "nvme.h"

static void nvme_disk_callback(nvme_request_t *req, struct lock *lk);
static int nvme_strategy_core(nvme_softns_t *nsc, struct bio *bio, int delay);
static const char *nvme_status_string(nvme_status_buf_t *buf,
			int type, int code);

static d_open_t nvme_open;
static d_close_t nvme_close;
static d_ioctl_t nvme_ioctl;
static d_strategy_t nvme_strategy;
static d_dump_t nvme_dump;

static struct dev_ops nvme_ops = {
	{ "nvme", 0, D_DISK | D_MPSAFE | D_CANFREE | D_TRACKCLOSE | D_KVABIO },
	.d_open =       nvme_open,
	.d_close =      nvme_close,
	.d_read =       physread,
	.d_dump =       nvme_dump,
	.d_write =      physwrite,
	.d_ioctl =      nvme_ioctl,
	.d_strategy =   nvme_strategy,
};

static struct krate krate_nvmeio = { .freq = 1 };

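/*
 * When non-zero, nvme_strategy() spins for this many microseconds after
 * submitting a request, polling for completion before falling back to
 * the normal asynchronous callback path.
 */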
__read_mostly static int nvme_sync_delay = 0;
SYSCTL_INT(_debug, OID_AUTO, nvme_sync_delay, CTLFLAG_RW, &nvme_sync_delay, 0,
	   "Synchronous delay/completion-check, in microseconds (0 disables)");

/*
 * Attach a namespace as a disk, making the disk available to the system.
 */
void
nvme_disk_attach(nvme_softns_t *nsc)
{
	nvme_softc_t *sc;
	struct disk_info info;
	char serial[20+16];
	size_t len;
	uint64_t cap_gb;

	sc = nsc->sc;
	devstat_add_entry(&nsc->stats, "nvme", nsc->unit, nsc->blksize,
			  DEVSTAT_NO_ORDERED_TAGS,
			  DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_OTHER,
			  DEVSTAT_PRIORITY_OTHER);
	nsc->cdev = disk_create(nsc->unit, &nsc->disk, &nvme_ops);
	nsc->cdev->si_drv1 = nsc;
	nsc->cdev->si_iosize_max = MAXPHYS;	/* XXX */
	disk_setdisktype(&nsc->disk, "ssd");

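	/*
	 * NVMe namespaces have no physical geometry; fabricate dummy
	 * CHS values for the benefit of legacy partitioning code.
	 */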
	bzero(&info, sizeof(info));
	info.d_media_blksize = nsc->blksize;
	info.d_media_blocks = nsc->idns.size;
	info.d_secpertrack = 1024;
	info.d_nheads = 1;
	info.d_secpercyl = info.d_secpertrack * info.d_nheads;
	info.d_ncylinders = (u_int)(info.d_media_blocks / info.d_secpercyl);

	KKASSERT(sizeof(sc->idctlr.serialno) == 20);
	bzero(serial, sizeof(serial));
	bcopy(sc->idctlr.serialno, serial, sizeof(sc->idctlr.serialno));
	len = string_cleanup(serial, 1);

	ksnprintf(serial + len, sizeof(serial) - len, "-%u", nsc->nsid);

	info.d_serialno = serial;

	cap_gb = nsc->idns.size / (1024 * 1024 * 1024 / nsc->blksize);
	device_printf(sc->dev,
		"Disk nvme%d ns=%u "
		"blksize=%u lbacnt=%ju cap=%juGB serno=%s\n",
		nsc->unit, nsc->nsid,
		nsc->blksize, nsc->idns.size, cap_gb, serial);

	disk_setdiskinfo(&nsc->disk, &info);
	/* serial is copied and does not have to be persistent */
}

void
nvme_disk_detach(nvme_softns_t *nsc)
{
	if (nsc->cdev) {
		disk_destroy(&nsc->disk);
		devstat_remove_entry(&nsc->stats);
	}
}

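/*
 * Opens are refused once the controller begins unloading; the open
 * count lets the unload path determine whether the disk is still in
 * use.
 */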
static
int
nvme_open(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;
	nvme_softc_t *sc = nsc->sc;

	if (sc->flags & NVME_SC_UNLOADING)
		return ENXIO;

	atomic_add_long(&sc->opencnt, 1);

	return 0;
}

static
int
nvme_close(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;
	nvme_softc_t *sc = nsc->sc;

	atomic_add_long(&sc->opencnt, -1);

	return 0;
}

static int
nvme_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;
	nvme_softc_t *sc = nsc->sc;
	int error;

	switch(ap->a_cmd) {
	case NVMEIOCGETLOG:
		error = nvme_getlog_ioctl(sc, (void *)ap->a_data);
		break;
	default:
		error = ENOIOCTL;
		break;
	}
	return error;
}

static int
nvme_strategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;

	nvme_strategy_core(nsc, ap->a_bio, nvme_sync_delay);

	return 0;
}

/*
 * Called from the admin thread to requeue BIOs.  We must call
 * nvme_strategy_core() with delay = 0 to disable the synchronous
 * optimizations and avoid deadlocking the admin thread.
 */
void
nvme_disk_requeues(nvme_softc_t *sc)
{
	nvme_softns_t *nsc;
	struct bio *bio;
	int i;

	for (i = 0; i < sc->nscmax; ++i) {
		nsc = sc->nscary[i];
		if (nsc == NULL || nsc->sc == NULL)
			continue;
		if (bioq_first(&nsc->bioq)) {
			lockmgr(&nsc->lk, LK_EXCLUSIVE);
			while ((bio = bioq_first(&nsc->bioq)) != NULL) {
				bioq_remove(&nsc->bioq, bio);
				lockmgr(&nsc->lk, LK_RELEASE);
				if (nvme_strategy_core(nsc, bio, 0))
					goto next;
				lockmgr(&nsc->lk, LK_EXCLUSIVE);
			}
			lockmgr(&nsc->lk, LK_RELEASE);
		}
next:
		;
	}
}

/*
 * Returns non-zero if no requests are available.
 *
 * WARNING! We are using the KVABIO API and must not access memory
 *	    through bp->b_data without first calling bkvasync(bp).
 */
static int
nvme_strategy_core(nvme_softns_t *nsc, struct bio *bio, int delay)
{
	nvme_softc_t *sc = nsc->sc;
	struct buf *bp = bio->bio_buf;
	uint64_t nlba;
	uint64_t secno;
	nvme_subqueue_t *subq;
	nvme_request_t *req;
	int nobytes;

	/*
	 * Calculate sector/extent
	 */
	secno = bio->bio_offset / nsc->blksize;
	nlba = bp->b_bcount / nsc->blksize;

	devstat_start_transaction(&nsc->stats);

	subq = NULL;
	req = NULL;
	nobytes = 0;

	/*
	 * Convert bio to low-level request
	 */
	switch (bp->b_cmd) {
	case BUF_CMD_READ:
		if (nlba == 0) {
			nobytes = 1;
			break;
		}
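		/*
		 * qmap[] maps (cpu, access type) to a submission queue,
		 * distributing I/O across the available queues.
		 */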
		subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_RD]];
		/* get_request does not need the subq lock */
		req = nvme_get_request(subq, NVME_IOCMD_READ,
				       bp->b_data, nlba * nsc->blksize);
		if (req == NULL)
			goto requeue;

		req->cmd.read.head.nsid = nsc->nsid;
		req->cmd.read.start_lba = secno;
		req->cmd.read.count_lba = nlba - 1;	/* 0's based */
		req->cmd.read.ioflags = 0; /* NVME_IOFLG_LR, NVME_IOFLG_FUA */
		req->cmd.read.dsm = 0;	   /* NVME_DSM_INCOMPRESSIBLE */
					   /* NVME_DSM_SEQREQ */
		break;
	case BUF_CMD_WRITE:
		if (nlba == 0) {
			nobytes = 1;
			break;
		}
		subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_WR]];
		/* get_request does not need the subq lock */
		req = nvme_get_request(subq, NVME_IOCMD_WRITE,
				       bp->b_data, nlba * nsc->blksize);
		if (req == NULL)
			goto requeue;
		req->cmd.write.head.nsid = nsc->nsid;
		req->cmd.write.start_lba = secno;
		req->cmd.write.count_lba = nlba - 1;	/* 0's based */
		break;
	case BUF_CMD_FREEBLKS:
		if (nlba == 0) {
			nobytes = 1;
			break;
		}
		if (nlba > 65536) {
			/* will cause INVAL error */
			break;
		}
		subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_WR]];
		/* get_request does not need the subq lock */
		req = nvme_get_request(subq, NVME_IOCMD_WRITEZ, NULL, 0);
		if (req == NULL)
			goto requeue;
		req->cmd.writez.head.nsid = nsc->nsid;
		req->cmd.writez.start_lba = secno;
		req->cmd.writez.count_lba = nlba - 1;	/* 0's based */
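		/*
		 * ioflags/dsm are cleared through the union's read view
		 * here, mirroring the READ case above; the dwords alias,
		 * so zeroing them is harmless for the WRITEZ command.
		 */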
		req->cmd.read.ioflags = 0; /* NVME_IOFLG_LR, NVME_IOFLG_FUA */
		req->cmd.read.dsm = 0;	   /* NVME_DSM_INCOMPRESSIBLE */
					   /* NVME_DSM_SEQREQ */
		break;
	case BUF_CMD_FLUSH:
		subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_WR]];
		/* get_request does not need the subq lock */
		req = nvme_get_request(subq, NVME_IOCMD_FLUSH, NULL, 0);
		if (req == NULL)
			goto requeue;
		req->cmd.flush.head.nsid = nsc->nsid;
		break;
	default:
		break;
	}

	/*
	 * Submit the request
	 */
	if (req) {
		nvme_comqueue_t *comq;

		/* HACK OPTIMIZATIONS - TODO NEEDS WORK */

		/*
		 * Prevent callback from occurring if the synchronous
		 * delay optimization is enabled.
		 *
		 * NOTE: subq lock does not protect the I/O (completion
		 *	 only needs the comq lock).
		 */
		if (delay == 0)
			req->callback = nvme_disk_callback;
		req->nsc = nsc;
		req->bio = bio;
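		/*
		 * BUF_KERNPROC() hands buffer lock ownership to the
		 * kernel so the I/O can be completed from another
		 * thread after we return.
		 */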
		BUF_KERNPROC(bp);		/* do before submit */
		lockmgr(&subq->lk, LK_EXCLUSIVE);
		nvme_submit_request(req);	/* needs subq lock */
		lockmgr(&subq->lk, LK_RELEASE);
		if (delay) {
			comq = req->comq;
			DELAY(delay);		/* XXX */
			lockmgr(&comq->lk, LK_EXCLUSIVE);
			nvme_poll_completions(comq, &comq->lk);
			if (req->state == NVME_REQ_SUBMITTED) {
				/*
				 * Didn't finish, do it the slow way
				 * (restore async completion).
				 */
				req->callback = nvme_disk_callback;
				lockmgr(&comq->lk, LK_RELEASE);
			} else {
				/*
				 * Jeeze, that was fast.
				 */
				nvme_disk_callback(req, &comq->lk);
				lockmgr(&comq->lk, LK_RELEASE);
			}
		} /* else async completion */
	} else if (nobytes) {
		devstat_end_transaction_buf(&nsc->stats, bp);
		biodone(bio);
	} else {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		devstat_end_transaction_buf(&nsc->stats, bp);
		biodone(bio);
	}
	return 0;

	/*
	 * No requests were available, so requeue the bio.
	 *
	 * The nvme_get_request() call armed the requeue signal, but
	 * it is possible that it was picked up too quickly.  If it
	 * was, signal the admin thread ourselves.  This case occurs
	 * relatively rarely and only under heavy I/O conditions, so we
	 * don't have to be entirely efficient about dealing with it.
	 */
requeue:
	BUF_KERNPROC(bp);
	lockmgr(&nsc->lk, LK_EXCLUSIVE);
	bioqdisksort(&nsc->bioq, bio);
	lockmgr(&nsc->lk, LK_RELEASE);
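	/*
	 * If the prior value was 0 the armed signal was already
	 * consumed, possibly before our bio was queued, so disarm it
	 * and wake the admin thread ourselves.
	 */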
	if (atomic_swap_int(&subq->signal_requeue, 1) == 0) {
		atomic_swap_int(&subq->signal_requeue, 0);
		atomic_set_int(&subq->sc->admin_signal, ADMIN_SIG_REQUEUE);
		wakeup(&subq->sc->admin_signal);
	}
	return 1;
}

static
void
nvme_disk_callback(nvme_request_t *req, struct lock *lk)
{
	nvme_softns_t *nsc = req->nsc;
	struct bio *bio;
	struct buf *bp;
	int code;
	int type;

	code = NVME_COMQ_STATUS_CODE_GET(req->res.tail.status);
	type = NVME_COMQ_STATUS_TYPE_GET(req->res.tail.status);
	bio = req->bio;
	bp = bio->bio_buf;

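	/*
	 * Drop the completion-queue lock (if held) so it is not held
	 * across nvme_put_request() and biodone(); it is reacquired
	 * before returning to the caller's polling loop.
	 */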
	if (lk)					/* comq lock */
		lockmgr(lk, LK_RELEASE);
	nvme_put_request(req);			/* does not need subq lock */
	devstat_end_transaction_buf(&nsc->stats, bp);

	if (code) {
		nvme_status_buf_t sb;

		krateprintf(&krate_nvmeio,
			    "%s%d: %s error nvme-code %s\n",
			    device_get_name(nsc->sc->dev),
			    device_get_unit(nsc->sc->dev),
			    buf_cmd_name(bp),
			    nvme_status_string(&sb, type, code));
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	} else {
		bp->b_resid = 0;
		biodone(bio);
	}
	if (lk)					/* comq lock */
		lockmgr(lk, LK_EXCLUSIVE);
}

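/*
 * Allocate the next globally-unique disk unit number.
 */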
int
nvme_alloc_disk_unit(void)
{
	static int unit_counter = 0;
	int unit;

	unit = atomic_fetchadd_int(&unit_counter, 1);

	return unit;
}

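/*
 * Kernel crash dump entry point.  A non-zero length writes dump data;
 * the final call is made with a length of zero, which flushes and then
 * shuts down the controller.
 */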
static int
nvme_dump(struct dev_dump_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;
	nvme_softc_t *sc = nsc->sc;
	uint64_t nlba;
	uint64_t secno;
	nvme_subqueue_t *subq;
	nvme_request_t *req;
	int didlock;

	/*
	 * Calculate sector/extent
	 */
	secno = ap->a_offset / nsc->blksize;
	nlba = ap->a_length / nsc->blksize;

	subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_WR]];

	if (nlba) {
		/*
		 * Issue a WRITE
		 *
		 * get_request does not need the subq lock.
		 */
		req = nvme_get_dump_request(subq, NVME_IOCMD_WRITE,
				       ap->a_virtual, nlba * nsc->blksize);
		req->cmd.write.head.nsid = nsc->nsid;
		req->cmd.write.start_lba = secno;
		req->cmd.write.count_lba = nlba - 1;	/* 0's based */
	} else {
		/*
		 * Issue a FLUSH
		 *
		 * get_request does not need the subq lock.
		 */
		req = nvme_get_dump_request(subq, NVME_IOCMD_FLUSH, NULL, 0);
		req->cmd.flush.head.nsid = nsc->nsid;
	}

	/*
	 * Prevent the callback from occurring; we poll for completion
	 * synchronously below instead.
	 */
	req->callback = NULL;
	req->nsc = nsc;

	/*
	 * 500 x 1uS poll wait on lock.  We might be the idle thread, so
	 * we can't safely block during a dump.
	 */
	didlock = 500;
	while (lockmgr(&subq->lk, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		if (--didlock == 0)
			break;
		tsc_delay(1000);	/* 1uS */
		lwkt_switch();
	}
	nvme_submit_request(req);	/* needs subq lock */
	if (didlock)
		lockmgr(&subq->lk, LK_RELEASE);

	nvme_poll_request(req);
	nvme_put_dump_request(req);		/* does not need subq lock */

	/*
	 * Shut the nvme controller down nicely when we finish the dump.
	 * We should do this whether we are in a panic or not because,
	 * frankly, the dump has been overwriting swap space and the
	 * system is probably not stable.
	 */
	if (nlba == 0)
		nvme_issue_shutdown(sc, 1);
	return 0;
}

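/*
 * Format a human-readable representation of an NVMe completion status
 * type and code into the caller-supplied buffer.
 */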
static
const char *
nvme_status_string(nvme_status_buf_t *sb, int type, int code)
{
	const char *cstr = NULL;

	switch(type) {
	case NVME_STATUS_TYPE_GENERIC:
		switch(code) {
		case NVME_CODE_SUCCESS:
			cstr = "success";
			break;
		case NVME_CODE_BADOP:
			cstr = "badop";
			break;
		case NVME_CODE_BADFIELD:
			cstr = "badfield";
			break;
		case NVME_CODE_IDCONFLICT:
			cstr = "idconflict";
			break;
		case NVME_CODE_BADXFER:
			cstr = "badxfer";
			break;
		case NVME_CODE_ABORTED_PWRLOSS:
			cstr = "aborted-powerloss";
			break;
		case NVME_CODE_INTERNAL:
			cstr = "internal";
			break;
		case NVME_CODE_ABORTED_ONREQ:
			cstr = "aborted-onreq";
			break;
		case NVME_CODE_ABORTED_SQDEL:
			cstr = "aborted-sqdel";
			break;
		case NVME_CODE_ABORTED_FUSEFAIL:
			cstr = "aborted-fusefail";
			break;
		case NVME_CODE_ABORTED_FUSEMISSING:
			cstr = "aborted-fusemissing";
			break;
		case NVME_CODE_BADNAMESPACE:
			cstr = "badnamespace";
			break;
		case NVME_CODE_SEQERROR:
			cstr = "seqerror";
			break;
		case NVME_CODE_BADSGLSEG:
			cstr = "badsgl-seg";
			break;
		case NVME_CODE_BADSGLCNT:
			cstr = "badsgl-cnt";
			break;
		case NVME_CODE_BADSGLLEN:
			cstr = "badsgl-len";
			break;
		case NVME_CODE_BADSGLMLEN:
			cstr = "badsgl-mlen";
			break;
		case NVME_CODE_BADSGLTYPE:
			cstr = "badsgl-type";
			break;
		case NVME_CODE_BADMEMBUFUSE:
			cstr = "badmem-bufuse";
			break;
		case NVME_CODE_BADPRPOFF:
			cstr = "bad-prpoff";
			break;

		case NVME_CODE_ATOMICWUOVFL:
			cstr = "atomic-wuovfl";
			break;
		case NVME_CODE_LBA_RANGE:
			cstr = "lba-range";
			break;
		case NVME_CODE_CAP_EXCEEDED:
			cstr = "cap-exceeded";
			break;
		case NVME_CODE_NAM_NOT_READY:
			cstr = "nam-not-ready";
			break;
		case NVME_CODE_RSV_CONFLICT:
			cstr = "rsv-conflict";
			break;
		case NVME_CODE_FMT_IN_PROG:
			cstr = "fmt-in-prog";
			break;
		default:
			cstr = "unknown";
			break;
		}
		ksnprintf(sb->buf, sizeof(sb->buf),
			  "type=generic code=%s(%04x)", cstr, code);
		break;
	case NVME_STATUS_TYPE_SPECIFIC:
		switch(code) {
		case NVME_CSSCODE_BADCOMQ:
			cstr = "bad-comq";
			break;
		case NVME_CSSCODE_BADQID:
			cstr = "bad-qid";
			break;
		case NVME_CSSCODE_BADQSIZE:
			cstr = "bad-qsize";
			break;
		case NVME_CSSCODE_ABORTLIM:
			cstr = "abort-lim";
			break;
		case NVME_CSSCODE_RESERVED04:
			cstr = "unknown";
			break;
		case NVME_CSSCODE_ASYNCEVENTLIM:
			cstr = "async-event-lim";
			break;
		case NVME_CSSCODE_BADFWSLOT:
			cstr = "bad-fwslot";
			break;
		case NVME_CSSCODE_BADFWIMAGE:
			cstr = "bad-fwimage";
			break;
		case NVME_CSSCODE_BADINTRVECT:
			cstr = "bad-intrvect";
			break;
		case NVME_CSSCODE_BADLOGPAGE:
			cstr = "bad-logpage";
			break;
		case NVME_CSSCODE_BADFORMAT:
			cstr = "bad-format";
			break;
		case NVME_CSSCODE_FW_NEEDSCONVRESET:
			cstr = "needs-convreset";
			break;
		case NVME_CSSCODE_BADQDELETE:
			cstr = "bad-qdelete";
			break;
		case NVME_CSSCODE_FEAT_NOT_SAVEABLE:
			cstr = "feat-not-saveable";
			break;
		case NVME_CSSCODE_FEAT_NOT_CHGABLE:
			cstr = "feat-not-changeable";
			break;
		case NVME_CSSCODE_FEAT_NOT_NSSPEC:
			cstr = "feat-not-nsspec";
			break;
		case NVME_CSSCODE_FW_NEEDSSUBRESET:
			cstr = "fw-needs-subreset";
			break;
		case NVME_CSSCODE_FW_NEEDSRESET:
			cstr = "fw-needs-reset";
			break;
		case NVME_CSSCODE_FW_NEEDSMAXTVIOLATE:
			cstr = "fw-needsmaxviolate";
			break;
		case NVME_CSSCODE_FW_PROHIBITED:
			cstr = "fw-prohibited";
			break;
		case NVME_CSSCODE_RANGE_OVERLAP:
			cstr = "range-overlap";
			break;
		case NVME_CSSCODE_NAM_INSUFF_CAP:
			cstr = "name-insufficient-cap";
			break;
		case NVME_CSSCODE_NAM_ID_UNAVAIL:
			cstr = "name-id-unavail";
			break;
		case NVME_CSSCODE_RESERVED17:
			cstr = "unknown";
			break;
		case NVME_CSSCODE_NAM_ALREADY_ATT:
			cstr = "name-already-att";
			break;
		case NVME_CSSCODE_NAM_IS_PRIVATE:
			cstr = "name-is-private";
			break;
		case NVME_CSSCODE_NAM_NOT_ATT:
			cstr = "name-not-att";
			break;
		case NVME_CSSCODE_NO_THIN_PROVISION:
			cstr = "no-thin-provision";
			break;
		case NVME_CSSCODE_CTLR_LIST_INVALID:
			cstr = "controller-list-invalid";
			break;

		case NVME_CSSCODE_ATTR_CONFLICT:
			cstr = "attr-conflict";
			break;
		case NVME_CSSCODE_BADPROTINFO:
			cstr = "bad-prot-info";
			break;
		case NVME_CSSCODE_WRITE_TO_RDONLY:
			cstr = "write-to-readonly";
			break;
		default:
			cstr = "unknown";
			break;
		}
		ksnprintf(sb->buf, sizeof(sb->buf),
			  "type=specific code=%s(%04x)", cstr, code);
		break;
	case NVME_STATUS_TYPE_MEDIA:
		switch(code) {
		case NVME_MEDCODE_WRITE_FAULT:
			cstr = "write-fault";
			break;
		case NVME_MEDCODE_UNRECOV_READ_ERROR:
			cstr = "unrecoverable-read-error";
			break;
		case NVME_MEDCODE_ETOE_GUARD_CHK:
			cstr = "etoe-guard-check";
			break;
		case NVME_MEDCODE_ETOE_APPTAG_CHK:
			cstr = "etoe-apptag-check";
			break;
		case NVME_MEDCODE_ETOE_REFTAG_CHK:
			cstr = "etoe-reftag-check";
			break;
		case NVME_MEDCODE_COMPARE_FAILURE:
			cstr = "compare-failure";
			break;
		case NVME_MEDCODE_ACCESS_DENIED:
			cstr = "access-denied";
			break;
		case NVME_MEDCODE_UNALLOCATED:
			cstr = "unallocated";
			break;
		default:
			cstr = "unknown";
			break;
		}
		ksnprintf(sb->buf, sizeof(sb->buf),
			  "type=media code=%s(%04x)", cstr, code);
		break;
	case NVME_STATUS_TYPE_VENDOR:
		ksnprintf(sb->buf, sizeof(sb->buf),
			  "type=vendor code=%04x", code);
		break;
	default:
		ksnprintf(sb->buf, sizeof(sb->buf),
			  "type=%02x code=%04x", type, code);
		break;
	}
	return sb->buf;
}