xref: /dragonfly/sys/dev/disk/nvme/nvme_disk.c (revision 0c941bc8)
/*
 * Copyright (c) 2016 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "nvme.h"

static void nvme_disk_callback(nvme_request_t *req, struct lock *lk);
static int nvme_strategy_core(nvme_softns_t *nsc, struct bio *bio, int delay);

static d_open_t nvme_open;
static d_close_t nvme_close;
static d_ioctl_t nvme_ioctl;
static d_strategy_t nvme_strategy;
static d_dump_t nvme_dump;

static struct dev_ops nvme_ops = {
	{ "nvme", 0, D_DISK | D_MPSAFE | D_CANFREE | D_TRACKCLOSE},
	.d_open =       nvme_open,
	.d_close =      nvme_close,
	.d_read =       physread,
	.d_dump =       nvme_dump,
	.d_write =      physwrite,
	.d_ioctl =      nvme_ioctl,
	.d_strategy =   nvme_strategy,
};

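/*
 * When nonzero, the strategy code spins for this many microseconds after
 * submitting an I/O and polls the completion queue directly, avoiding the
 * interrupt/callback round-trip for requests that complete very quickly.
 */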
static int nvme_sync_delay = 0;
SYSCTL_INT(_debug, OID_AUTO, nvme_sync_delay, CTLFLAG_RW, &nvme_sync_delay, 0,
	   "Synchronous delay/completion-check time in microseconds (0 disables)");

/*
 * Attach a namespace as a disk, making the disk available to the system.
 */
void
nvme_disk_attach(nvme_softns_t *nsc)
{
	nvme_softc_t *sc;
	struct disk_info info;
	char serial[20+16];
	size_t len;
	uint64_t cap_gb;

	sc = nsc->sc;
	devstat_add_entry(&nsc->stats, "nvme", nsc->unit, nsc->blksize,
			  DEVSTAT_NO_ORDERED_TAGS,
			  DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_OTHER,
			  DEVSTAT_PRIORITY_OTHER);
	nsc->cdev = disk_create(nsc->unit, &nsc->disk, &nvme_ops);
	nsc->cdev->si_drv1 = nsc;
	nsc->cdev->si_iosize_max = MAXPHYS;	/* XXX */
	disk_setdisktype(&nsc->disk, "ssd");

	bzero(&info, sizeof(info));
	info.d_media_blksize = nsc->blksize;
	info.d_media_blocks = nsc->idns.size;
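	/* synthesize a fake CHS geometry; NVMe only exposes a flat LBA space */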
	info.d_secpertrack = 1024;
	info.d_nheads = 1;
	info.d_secpercyl = info.d_secpertrack * info.d_nheads;
	info.d_ncylinders = (u_int)(info.d_media_blocks / info.d_secpercyl);

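	/* build the serial string from the controller serial plus the nsid */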
	KKASSERT(sizeof(sc->idctlr.serialno) == 20);
	bzero(serial, sizeof(serial));
	bcopy(sc->idctlr.serialno, serial, sizeof(sc->idctlr.serialno));
	len = string_cleanup(serial, 1);

	ksnprintf(serial + len, sizeof(serial) - len, "-%u", nsc->nsid);

	info.d_serialno = serial;

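	/* whole-GB capacity: divide the block count by blocks-per-GB */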
	cap_gb = nsc->idns.size / (1024 * 1024 * 1024 / nsc->blksize);
	device_printf(sc->dev,
		"Disk nvme%d ns=%u "
		"blksize=%u lbacnt=%ju cap=%juGB serno=%s\n",
		nsc->unit, nsc->nsid,
		nsc->blksize, nsc->idns.size, cap_gb, serial);

	disk_setdiskinfo(&nsc->disk, &info);
	/* serial is copied and does not have to be persistent */
}

void
nvme_disk_detach(nvme_softns_t *nsc)
{
	if (nsc->cdev) {
		disk_destroy(&nsc->disk);
		devstat_remove_entry(&nsc->stats);
	}
}

static
int
nvme_open(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;
	nvme_softc_t *sc = nsc->sc;

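	/* refuse new opens while the controller is being unloaded */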
	if (sc->flags & NVME_SC_UNLOADING)
		return ENXIO;

	atomic_add_long(&sc->opencnt, 1);

	return 0;
}

static
int
nvme_close(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;
	nvme_softc_t *sc = nsc->sc;

	atomic_add_long(&sc->opencnt, -1);

	return 0;
}

static int
nvme_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;
	nvme_softc_t *sc = nsc->sc;
	int error;

	switch (ap->a_cmd) {
	case NVMEIOCGETLOG:
		error = nvme_getlog_ioctl(sc, (void *)ap->a_data);
		break;
	default:
		error = ENOIOCTL;
		break;
	}
	return error;
}

static int
nvme_strategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;

	nvme_strategy_core(nsc, ap->a_bio, nvme_sync_delay);

	return 0;
}

/*
 * Called from admin thread to requeue BIOs.  We must call
 * nvme_strategy_core() with delay = 0 to disable synchronous
 * optimizations to avoid deadlocking the admin thread.
 */
void
nvme_disk_requeues(nvme_softc_t *sc)
{
	nvme_softns_t *nsc;
	struct bio *bio;
	int i;

	for (i = 0; i < sc->nscmax; ++i) {
		nsc = sc->nscary[i];
		if (nsc == NULL || nsc->sc == NULL)
			continue;
		if (bioq_first(&nsc->bioq)) {
			lockmgr(&nsc->lk, LK_EXCLUSIVE);
			while ((bio = bioq_first(&nsc->bioq)) != NULL) {
				bioq_remove(&nsc->bioq, bio);
				lockmgr(&nsc->lk, LK_RELEASE);
				if (nvme_strategy_core(nsc, bio, 0))
					goto next;
				lockmgr(&nsc->lk, LK_EXCLUSIVE);
			}
			lockmgr(&nsc->lk, LK_RELEASE);
		}
next:
		;
	}
}

/*
 * Convert a bio into an NVMe request and submit it.  Returns non-zero if
 * no request structures were available and the bio had to be requeued.
 */
static int
nvme_strategy_core(nvme_softns_t *nsc, struct bio *bio, int delay)
{
	nvme_softc_t *sc = nsc->sc;
	struct buf *bp = bio->bio_buf;
	uint64_t nlba;
	uint64_t secno;
	nvme_subqueue_t *subq;
	nvme_request_t *req;
	int nobytes;

	/*
	 * Calculate sector/extent
	 */
	secno = bio->bio_offset / nsc->blksize;
	nlba = bp->b_bcount / nsc->blksize;

	devstat_start_transaction(&nsc->stats);

	subq = NULL;
	req = NULL;
	nobytes = 0;

	/*
	 * Convert bio to low-level request
	 */
	switch (bp->b_cmd) {
	case BUF_CMD_READ:
		if (nlba == 0) {
			nobytes = 1;
			break;
		}
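		/* select the per-cpu submission queue mapped for reads */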
		subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_RD]];
		/* get_request does not need the subq lock */
		req = nvme_get_request(subq, NVME_IOCMD_READ,
				       bp->b_data, nlba * nsc->blksize);
		if (req == NULL)
			goto requeue;

		req->cmd.read.head.nsid = nsc->nsid;
		req->cmd.read.start_lba = secno;
		req->cmd.read.count_lba = nlba - 1;	/* 0's based */
		req->cmd.read.ioflags = 0; /* NVME_IOFLG_LR, NVME_IOFLG_FUA */
		req->cmd.read.dsm = 0;	   /* NVME_DSM_INCOMPRESSIBLE */
					   /* NVME_DSM_SEQREQ */
		break;
	case BUF_CMD_WRITE:
		if (nlba == 0) {
			nobytes = 1;
			break;
		}
		subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_WR]];
		/* get_request does not need the subq lock */
		req = nvme_get_request(subq, NVME_IOCMD_WRITE,
				       bp->b_data, nlba * nsc->blksize);
		if (req == NULL)
			goto requeue;
		req->cmd.write.head.nsid = nsc->nsid;
		req->cmd.write.start_lba = secno;
		req->cmd.write.count_lba = nlba - 1;	/* 0's based */
		break;
	case BUF_CMD_FREEBLKS:
		if (nlba == 0) {
			nobytes = 1;
			break;
		}
		if (nlba > 65536) {
			/*
			 * Cannot be encoded in the 16-bit 0's based LBA
			 * count field; leave req NULL so the bio fails
			 * with EINVAL below.
			 */
			break;
		}
		subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_WR]];
		/* get_request does not need the subq lock */
		req = nvme_get_request(subq, NVME_IOCMD_WRITEZ, NULL, 0);
		if (req == NULL)
			goto requeue;
		req->cmd.writez.head.nsid = nsc->nsid;
		req->cmd.writez.start_lba = secno;
		req->cmd.writez.count_lba = nlba - 1;	/* 0's based */
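		/*
		 * Note: ioflags/dsm are cleared through the read view of
		 * the command union (presumably overlaying the same dwords
		 * of the write-zeroes command).
		 */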
		req->cmd.read.ioflags = 0; /* NVME_IOFLG_LR, NVME_IOFLG_FUA */
		req->cmd.read.dsm = 0;	   /* NVME_DSM_INCOMPRESSIBLE */
					   /* NVME_DSM_SEQREQ */
		break;
	case BUF_CMD_FLUSH:
		subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_WR]];
		/* get_request does not need the subq lock */
		req = nvme_get_request(subq, NVME_IOCMD_FLUSH, NULL, 0);
		if (req == NULL)
			goto requeue;
		req->cmd.flush.head.nsid = nsc->nsid;
		break;
	default:
		break;
	}

	/*
	 * Submit the request
	 */
	if (req) {
		nvme_comqueue_t *comq;

		/* HACK OPTIMIZATIONS - TODO NEEDS WORK */

		/*
		 * Prevent callback from occurring if the synchronous
		 * delay optimization is enabled.
		 *
		 * NOTE: subq lock does not protect the I/O (completion
		 *	 only needs the comq lock).
		 */
		if (delay == 0)
			req->callback = nvme_disk_callback;
		req->nsc = nsc;
		req->bio = bio;
		BUF_KERNPROC(bp);		/* do before submit */
		lockmgr(&subq->lk, LK_EXCLUSIVE);
		nvme_submit_request(req);	/* needs subq lock */
		lockmgr(&subq->lk, LK_RELEASE);
		if (delay) {
			comq = req->comq;
			DELAY(delay);		/* XXX */
			lockmgr(&comq->lk, LK_EXCLUSIVE);
			nvme_poll_completions(comq, &comq->lk);
			if (req->state == NVME_REQ_SUBMITTED) {
				/*
				 * Didn't finish, do it the slow way
				 * (restore async completion).
				 */
				req->callback = nvme_disk_callback;
				lockmgr(&comq->lk, LK_RELEASE);
			} else {
				/*
				 * Jeeze, that was fast.
				 */
				nvme_disk_callback(req, &comq->lk);
				lockmgr(&comq->lk, LK_RELEASE);
			}
		} /* else async completion */
	} else if (nobytes) {
		devstat_end_transaction_buf(&nsc->stats, bp);
		biodone(bio);
	} else {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		devstat_end_transaction_buf(&nsc->stats, bp);
		biodone(bio);
	}
	return 0;

	/*
	 * No requests were available, requeue the bio.
	 *
	 * The nvme_get_request() call armed the requeue signal but
	 * it is possible that it was picked up too quickly.  If it
	 * was, signal the admin thread ourselves.  This case will occur
	 * relatively rarely and only under heavy I/O conditions so we
	 * don't have to be entirely efficient about dealing with it.
	 */
requeue:
	BUF_KERNPROC(bp);
	lockmgr(&nsc->lk, LK_EXCLUSIVE);
	bioqdisksort(&nsc->bioq, bio);
	lockmgr(&nsc->lk, LK_RELEASE);
	if (atomic_swap_int(&subq->signal_requeue, 1) == 0) {
		atomic_swap_int(&subq->signal_requeue, 0);
		atomic_set_int(&subq->sc->admin_signal, ADMIN_SIG_REQUEUE);
		wakeup(&subq->sc->admin_signal);
	}
	return 1;
}

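/*
 * I/O completion callback.  When lk (the comq lock) is non-NULL it is
 * held on entry; it is released while the request is returned and the
 * bio completed, then re-acquired before returning.
 */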
static
void
nvme_disk_callback(nvme_request_t *req, struct lock *lk)
{
	nvme_softns_t *nsc = req->nsc;
	struct bio *bio;
	struct buf *bp;
	int status;

	status = NVME_COMQ_STATUS_CODE_GET(req->res.tail.status);
	bio = req->bio;
	bp = bio->bio_buf;

	if (lk)					/* comq lock */
		lockmgr(lk, LK_RELEASE);
	nvme_put_request(req);			/* does not need subq lock */
	devstat_end_transaction_buf(&nsc->stats, bp);
	if (status) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	} else {
		bp->b_resid = 0;
		biodone(bio);
	}
	if (lk)					/* comq lock */
		lockmgr(lk, LK_EXCLUSIVE);
}

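/*
 * Allocate the next disk unit number (nvme0, nvme1, ...).
 */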
int
nvme_alloc_disk_unit(void)
{
	static int unit_counter = 0;
	int unit;

	unit = atomic_fetchadd_int(&unit_counter, 1);

	return unit;
}

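/*
 * Kernel dump entry point.  Each call writes ap->a_length bytes of the
 * dump synchronously (submit, then wait for completion).  A final call
 * with a zero length issues a FLUSH and shuts the controller down.
 */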
static int
nvme_dump(struct dev_dump_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	nvme_softns_t *nsc = dev->si_drv1;
	nvme_softc_t *sc = nsc->sc;
	uint64_t nlba;
	uint64_t secno;
	nvme_subqueue_t *subq;
	nvme_comqueue_t *comq;
	nvme_request_t *req;

	/*
	 * Calculate sector/extent
	 */
	secno = ap->a_offset / nsc->blksize;
	nlba = ap->a_length / nsc->blksize;

	subq = &sc->subqueues[sc->qmap[mycpuid][NVME_QMAP_WR]];

	if (nlba) {
		/*
		 * Issue a WRITE
		 *
		 * get_request does not need the subq lock.
		 */
		req = nvme_get_request(subq, NVME_IOCMD_WRITE,
				       ap->a_virtual, nlba * nsc->blksize);
		req->cmd.write.head.nsid = nsc->nsid;
		req->cmd.write.start_lba = secno;
		req->cmd.write.count_lba = nlba - 1;	/* 0's based */
	} else {
		/*
		 * Issue a FLUSH
		 *
		 * get_request does not need the subq lock.
		 */
		req = nvme_get_request(subq, NVME_IOCMD_FLUSH, NULL, 0);
		req->cmd.flush.head.nsid = nsc->nsid;
	}

	/*
	 * No completion callback; we poll synchronously for the result
	 * below via nvme_wait_request().
	 */
	req->callback = NULL;
	req->nsc = nsc;
	lockmgr(&subq->lk, LK_EXCLUSIVE);
	nvme_submit_request(req);	/* needs subq lock */
	lockmgr(&subq->lk, LK_RELEASE);

	comq = req->comq;
	nvme_wait_request(req, 1);
	nvme_put_request(req);			/* does not need subq lock */

	/*
	 * Shut the nvme controller down nicely when we finish the dump.
	 */
	if (nlba == 0)
		nvme_issue_shutdown(sc);

	return 0;
}