/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <dev/pci/pcivar.h>

#include <geom/geom.h>

#include "nvme_private.h"

static void nvme_bio_child_inbed(struct bio *parent, int bio_error);
static void nvme_bio_child_done(void *arg,
    const struct nvme_completion *cpl);
static uint32_t nvme_get_num_segments(uint64_t addr, uint64_t size,
    uint32_t alignment);
static void nvme_free_child_bios(int num_bios,
    struct bio **child_bios);
static struct bio **nvme_allocate_child_bios(int num_bios);
static struct bio **nvme_construct_child_bios(struct bio *bp,
    uint32_t alignment, int *num_bios);
static int nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
    uint32_t alignment);

static int
nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_namespace *ns;
	struct nvme_controller *ctrlr;
	struct nvme_pt_command *pt;

	ns = cdev->si_drv1;
	ctrlr = ns->ctrlr;

	switch (cmd) {
	case NVME_IO_TEST:
	case NVME_BIO_TEST:
		nvme_ns_test(ns, cmd, arg);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id,
		    1 /* is_user_buffer */, 0 /* is_admin_cmd */));
	case NVME_GET_NSID:
	{
		struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
		strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
		    sizeof(gnsid->cdev));
		gnsid->nsid = ns->id;
		break;
	}
	case DIOCGMEDIASIZE:
		*(off_t *)arg = (off_t)nvme_ns_get_size(ns);
		break;
	case DIOCGSECTORSIZE:
		*(u_int *)arg = nvme_ns_get_sector_size(ns);
		break;
	default:
		return (ENOTTY);
	}

	return (0);
}

static int
nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{
	int error = 0;

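	/*
	 * Refuse opens for writing once the securelevel has been raised:
	 * securelevel_gt() returns EPERM if the securelevel is greater
	 * than 0, and 0 otherwise.
	 */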
	if (flags & FWRITE)
		error = securelevel_gt(td->td_ucred, 0);

	return (error);
}

static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{

	return (0);
}

static void
nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio *bp = arg;

	/*
	 * TODO: add more extensive translation of NVMe status codes
	 * to different bio error codes (e.g., EIO, EINVAL, etc.)
	 */
	if (nvme_completion_is_error(cpl)) {
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
	} else
		bp->bio_resid = 0;

	biodone(bp);
}

static void
nvme_ns_strategy(struct bio *bp)
{
	struct nvme_namespace *ns;
	int err;

	ns = bp->bio_dev->si_drv1;
	err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);

	if (err) {
		bp->bio_error = err;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
	}
}

static struct cdevsw nvme_ns_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_DISK,
	.d_read = physread,
	.d_write = physwrite,
	.d_open = nvme_ns_open,
	.d_close = nvme_ns_close,
	.d_strategy = nvme_ns_strategy,
	.d_ioctl = nvme_ns_ioctl
};

uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
{
	return ns->ctrlr->max_xfer_size;
}

uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
{
	uint8_t flbas_fmt, lbads;

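	/*
	 * FLBAS selects the LBA format in use; LBADS in that format is
	 * the log2 of the LBA data size, so the sector size is 1 << LBADS.
	 */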
	flbas_fmt = NVMEV(NVME_NS_DATA_FLBAS_FORMAT, ns->data.flbas);
	lbads = NVMEV(NVME_NS_DATA_LBAF_LBADS, ns->data.lbaf[flbas_fmt]);

	return (1 << lbads);
}

uint64_t
nvme_ns_get_num_sectors(struct nvme_namespace *ns)
{
	return (ns->data.nsze);
}

uint64_t
nvme_ns_get_size(struct nvme_namespace *ns)
{
	return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
}

uint32_t
nvme_ns_get_flags(struct nvme_namespace *ns)
{
	return (ns->flags);
}

const char *
nvme_ns_get_serial_number(struct nvme_namespace *ns)
{
	return ((const char *)ns->ctrlr->cdata.sn);
}

const char *
nvme_ns_get_model_number(struct nvme_namespace *ns)
{
	return ((const char *)ns->ctrlr->cdata.mn);
}

const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{

	return (&ns->data);
}

uint32_t
nvme_ns_get_stripesize(struct nvme_namespace *ns)
{
	uint32_t ss;

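	/*
	 * Prefer the namespace's preferred write alignment (NPWA) or,
	 * failing that, its preferred write granularity (NPWG) when the
	 * namespace reports them as valid; both are 0's-based sector
	 * counts, hence the "+ 1".
	 */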
	if (NVMEV(NVME_NS_DATA_NSFEAT_NPVALID, ns->data.nsfeat) != 0) {
		ss = nvme_ns_get_sector_size(ns);
		if (ns->data.npwa != 0)
			return ((ns->data.npwa + 1) * ss);
		else if (ns->data.npwg != 0)
			return ((ns->data.npwg + 1) * ss);
	}
	return (ns->boundary);
}

static void
nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
{
	struct bio *bp = arg;
	nvme_cb_fn_t bp_cb_fn;

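	/*
	 * bio_driver1 holds the caller's completion callback; bio_driver2,
	 * if set, points at the DSM range allocated for a BIO_DELETE and
	 * must be freed here.
	 */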
	bp_cb_fn = bp->bio_driver1;

	if (bp->bio_driver2)
		free(bp->bio_driver2, M_NVME);

	if (nvme_completion_is_error(status)) {
		bp->bio_flags |= BIO_ERROR;
		if (bp->bio_error == 0)
			bp->bio_error = EIO;
	}

	if ((bp->bio_flags & BIO_ERROR) == 0)
		bp->bio_resid = 0;
	else
		bp->bio_resid = bp->bio_bcount;

	bp_cb_fn(bp, status);
}

static void
nvme_bio_child_inbed(struct bio *parent, int bio_error)
{
	struct nvme_completion parent_cpl;
	int children, inbed;

	if (bio_error != 0) {
		parent->bio_flags |= BIO_ERROR;
		parent->bio_error = bio_error;
	}

	/*
	 * atomic_fetchadd returns the value before the add, so we still
	 * must add 1 to get the updated inbed number.  Save bio_children
	 * before incrementing to guard against race conditions when
	 * two child bios complete on different queues.
	 */
	children = atomic_load_acq_int(&parent->bio_children);
	inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1;
	if (inbed == children) {
		bzero(&parent_cpl, sizeof(parent_cpl));
		if (parent->bio_flags & BIO_ERROR) {
			parent_cpl.status &= ~NVMEM(NVME_STATUS_SC);
			parent_cpl.status |= NVMEF(NVME_STATUS_SC,
			    NVME_SC_DATA_TRANSFER_ERROR);
		}
		nvme_ns_bio_done(parent, &parent_cpl);
	}
}

static void
nvme_bio_child_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio *child = arg;
	struct bio *parent;
	int bio_error;

	parent = child->bio_parent;
	g_destroy_bio(child);
	bio_error = nvme_completion_is_error(cpl) ? EIO : 0;
	nvme_bio_child_inbed(parent, bio_error);
}

static uint32_t
nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t align)
{
	uint32_t num_segs, offset, remainder;

	if (align == 0)
		return (1);

	KASSERT((align & (align - 1)) == 0, ("alignment not power of 2\n"));

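	/*
	 * Count how many 'align'-sized windows [addr, addr + size)
	 * touches.  For example, addr = 0x1800 and size = 0x1000 with a
	 * 4KB alignment covers [0x1800, 0x2800), which straddles two
	 * windows, so 2 is returned.
	 */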
	num_segs = size / align;
	remainder = size & (align - 1);
	offset = addr & (align - 1);
	if (remainder > 0 || offset > 0)
		num_segs += 1 + (remainder + offset - 1) / align;
	return (num_segs);
}

static void
nvme_free_child_bios(int num_bios, struct bio **child_bios)
{
	int i;

	for (i = 0; i < num_bios; i++) {
		if (child_bios[i] != NULL)
			g_destroy_bio(child_bios[i]);
	}

	free(child_bios, M_NVME);
}

static struct bio **
nvme_allocate_child_bios(int num_bios)
{
	struct bio **child_bios;
	int err = 0, i;

	child_bios = malloc(num_bios * sizeof(struct bio *), M_NVME, M_NOWAIT);
	if (child_bios == NULL)
		return (NULL);

	for (i = 0; i < num_bios; i++) {
		child_bios[i] = g_new_bio();
		if (child_bios[i] == NULL)
			err = ENOMEM;
	}

	if (err == ENOMEM) {
		nvme_free_child_bios(num_bios, child_bios);
		return (NULL);
	}

	return (child_bios);
}

static struct bio **
nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios)
{
	struct bio **child_bios;
	struct bio *child;
	uint64_t cur_offset;
	caddr_t data;
	uint32_t rem_bcount;
	int i;
	struct vm_page **ma;
	uint32_t ma_offset;

	*num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount,
	    alignment);
	child_bios = nvme_allocate_child_bios(*num_bios);
	if (child_bios == NULL)
		return (NULL);

	bp->bio_children = *num_bios;
	bp->bio_inbed = 0;
	cur_offset = bp->bio_offset;
	rem_bcount = bp->bio_bcount;
	data = bp->bio_data;
	ma_offset = bp->bio_ma_offset;
	ma = bp->bio_ma;

	for (i = 0; i < *num_bios; i++) {
		child = child_bios[i];
		child->bio_parent = bp;
		child->bio_cmd = bp->bio_cmd;
		child->bio_offset = cur_offset;
		child->bio_bcount = min(rem_bcount,
		    alignment - (cur_offset & (alignment - 1)));
		child->bio_flags = bp->bio_flags;
		if (bp->bio_flags & BIO_UNMAPPED) {
			child->bio_ma_offset = ma_offset;
			child->bio_ma = ma;
			child->bio_ma_n =
			    nvme_get_num_segments(child->bio_ma_offset,
				child->bio_bcount, PAGE_SIZE);
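			/*
			 * Advance the page array to where the next child
			 * starts.  bio_ma_n counts the pages this child
			 * touches; if the child ends mid-page, the next
			 * child shares that last page, so step back one
			 * entry and carry the in-page offset forward.
			 */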
			ma_offset = (ma_offset + child->bio_bcount) &
			    PAGE_MASK;
			ma += child->bio_ma_n;
			if (ma_offset != 0)
				ma -= 1;
		} else {
			child->bio_data = data;
			data += child->bio_bcount;
		}
		cur_offset += child->bio_bcount;
		rem_bcount -= child->bio_bcount;
	}

	return (child_bios);
}

static int
nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
    uint32_t alignment)
{
	struct bio *child;
	struct bio **child_bios;
	int err, i, num_bios;

	child_bios = nvme_construct_child_bios(bp, alignment, &num_bios);
	if (child_bios == NULL)
		return (ENOMEM);

	for (i = 0; i < num_bios; i++) {
		child = child_bios[i];
		err = nvme_ns_bio_process(ns, child, nvme_bio_child_done);
		if (err != 0) {
			nvme_bio_child_inbed(bp, err);
			g_destroy_bio(child);
		}
	}

	free(child_bios, M_NVME);
	return (0);
}

int
nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn)
{
	struct nvme_dsm_range *dsm_range;
	uint32_t num_bios;
	int err;

	bp->bio_driver1 = cb_fn;

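	/*
	 * Reads and writes that cross the namespace's stripe boundary are
	 * split into child bios so that each child falls within a single
	 * boundary-aligned window.
	 */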
	if (ns->boundary > 0 &&
	    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
		num_bios = nvme_get_num_segments(bp->bio_offset,
		    bp->bio_bcount, ns->boundary);
		if (num_bios > 1)
			return (nvme_ns_split_bio(ns, bp, ns->boundary));
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
		break;
	case BIO_WRITE:
		err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
		break;
	case BIO_FLUSH:
		err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
		break;
	case BIO_DELETE:
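		/*
		 * Describe the request as a single DSM range and issue a
		 * Dataset Management deallocate (TRIM) for it.
		 */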
		dsm_range = malloc(sizeof(struct nvme_dsm_range), M_NVME,
		    M_ZERO | M_NOWAIT);
		if (dsm_range == NULL) {
			err = ENOMEM;
			break;
		}
		dsm_range->length =
		    htole32(bp->bio_bcount / nvme_ns_get_sector_size(ns));
		dsm_range->starting_lba =
		    htole64(bp->bio_offset / nvme_ns_get_sector_size(ns));
		bp->bio_driver2 = dsm_range;
		err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
		    nvme_ns_bio_done, bp);
		if (err != 0)
			free(dsm_range, M_NVME);
		break;
	default:
		err = EOPNOTSUPP;
		break;
	}

	return (err);
}

int
nvme_ns_ioctl_process(struct nvme_namespace *ns, u_long cmd, caddr_t arg,
    int flag, struct thread *td)
{
	return (nvme_ns_ioctl(ns->cdev, cmd, arg, flag, td));
}

int
nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
    struct nvme_controller *ctrlr)
{
	struct make_dev_args md_args;
	struct nvme_completion_poll_status status;
	int res;
	int unit;
	uint8_t flbas_fmt;
	uint8_t vwc_present;

	ns->ctrlr = ctrlr;
	ns->id = id;

	/*
	 * Namespaces are reconstructed after a controller reset, so check
	 * to make sure we only call mtx_init once on each mtx.
	 *
	 * TODO: Move this somewhere where it gets called at controller
	 * construction time, which is not invoked as part of each
	 * controller reset.
	 */
	if (!mtx_initialized(&ns->lock))
		mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);

	status.done = 0;
	nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
	    nvme_completion_poll_cb, &status);
	nvme_completion_poll(&status);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
		return (ENXIO);
	}

	/* Convert data to host endian */
	nvme_namespace_data_swapbytes(&ns->data);

	/*
	 * If the reported size is zero, chances are this isn't a valid
	 * namespace (e.g., one that's not been configured yet).  The
	 * standard says the entire identify data will be zeros, so this
	 * is a cheap way to test for that.
	 */
	if (ns->data.nsze == 0)
		return (ENXIO);

	flbas_fmt = NVMEV(NVME_NS_DATA_FLBAS_FORMAT, ns->data.flbas);

	/*
	 * Note: format is a 0-based value, so > is appropriate here,
	 * not >=.
	 */
	if (flbas_fmt > ns->data.nlbaf) {
		nvme_printf(ctrlr,
		    "lba format %d exceeds number supported (%d)\n",
		    flbas_fmt, ns->data.nlbaf + 1);
		return (ENXIO);
	}

	/*
	 * Older Intel devices (like the PC35xxx and P45xx series) advertise
	 * in vendor-specific space an alignment that improves performance.
	 * If present, use it for the stripe size.  NVMe 1.3 standardized
	 * this as NOIOB, and newer Intel drives use that.
	 */
	if ((ctrlr->quirks & QUIRK_INTEL_ALIGNMENT) != 0) {
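		/*
		 * vs[3] encodes the stripe size as a power-of-two multiple
		 * of the controller's minimum page size (MPSMIN).
		 */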
		if (ctrlr->cdata.vs[3] != 0)
			ns->boundary =
			    1 << (ctrlr->cdata.vs[3] + NVME_MPS_SHIFT +
				NVME_CAP_HI_MPSMIN(ctrlr->cap_hi));
		else
			ns->boundary = 0;
	} else {
		ns->boundary = ns->data.noiob * nvme_ns_get_sector_size(ns);
	}

	if (nvme_ctrlr_has_dataset_mgmt(&ctrlr->cdata))
		ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;

	vwc_present = NVMEV(NVME_CTRLR_DATA_VWC_PRESENT, ctrlr->cdata.vwc);
	if (vwc_present)
		ns->flags |= NVME_NS_FLUSH_SUPPORTED;

	/*
	 * cdev may have already been created, if we are reconstructing the
	 * namespace after a controller-level reset.
	 */
	if (ns->cdev != NULL)
		return (0);

	/*
	 * Namespace IDs start at 1, so we need to subtract 1 to create a
	 * correct unit number.
	 */
	unit = device_get_unit(ctrlr->dev) * NVME_MAX_NAMESPACES + ns->id - 1;

	make_dev_args_init(&md_args);
	md_args.mda_devsw = &nvme_ns_cdevsw;
	md_args.mda_unit = unit;
	md_args.mda_mode = 0600;
	md_args.mda_si_drv1 = ns;
	res = make_dev_s(&md_args, &ns->cdev, "nvme%dns%d",
	    device_get_unit(ctrlr->dev), ns->id);
	if (res != 0)
		return (ENXIO);

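	/*
	 * Advertise that this driver handles unmapped bios: I/O described
	 * by vm_page arrays rather than mapped kernel virtual addresses.
	 */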
	ns->cdev->si_flags |= SI_UNMAPPED;

	return (0);
}

void
nvme_ns_destruct(struct nvme_namespace *ns)
{

	if (ns->cdev != NULL)
		destroy_dev(ns->cdev);
}