xref: /illumos-gate/usr/src/uts/common/io/nvme/nvme.c (revision 50d757e7)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright (c) 2016 The MathWorks, Inc.  All rights reserved.
14  * Copyright 2019 Unix Software Ltd.
15  * Copyright 2020 Joyent, Inc.
16  * Copyright 2020 Racktop Systems.
17  * Copyright 2024 Oxide Computer Company.
18  * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
19  * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
20  */
21 
22 /*
23  * blkdev driver for NVMe compliant storage devices
24  *
25  * This driver targets and is designed to support all NVMe 1.x and NVMe 2.x
26  * devices. Features are added to the driver as we encounter devices that
27  * require them and as our needs dictate, so some commands or log pages may not
28  * take advantage of newer features that devices support at this time. When you
29  * encounter such a case, it is generally fine to add that support to the driver
30  * as long as you take care to ensure that the requisite device version is met
31  * before using it.
32  *
33  * The driver has only been tested on x86 systems and will not work on big-
34  * endian systems without changes to the code accessing registers and data
35  * structures used by the hardware.
36  *
37  *
38  * Interrupt Usage:
39  *
40  * The driver will use a single interrupt while configuring the device as the
41  * specification requires, but contrary to the specification it will try to use
42  * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
43  * will switch to multiple-message MSI(-X) if supported. The driver wants to
44  * have one interrupt vector per CPU, but it will work correctly if fewer are
45  * available. Interrupts can be shared by queues; the interrupt handler will
46  * iterate through the I/O queue array in steps of n_intr_cnt. Usually only
47  * the admin queue will share an interrupt with one I/O queue. The interrupt
48  * handler will retrieve completed commands from all queues sharing an interrupt
49  * vector and will post them to a taskq for completion processing.
50  *
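 * As a rough sketch (the n_ioq queue array and n_ioq_count names here are
 * used for illustration; n_intr_cnt is the per-controller interrupt count
 * mentioned above), the handler attached to interrupt vector 'inum' does
 * something like:
 *
 *   for (qnum = inum; qnum <= n_ioq_count; qnum += n_intr_cnt)
 *           retrieve completed commands from n_ioq[qnum] and post them to
 *           the completion taskq;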
51  *
52  * Command Processing:
53  *
54  * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up
55  * to 65536 I/O commands. The driver will configure one I/O queue pair per
56  * available interrupt vector, with the queue length usually much smaller than
57  * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
58  * interrupt vectors will be used.
59  *
60  * Additionally the hardware provides a single special admin queue pair that can
61  * hold up to 4096 admin commands.
62  *
63  * From the hardware perspective both queues of a queue pair are independent,
64  * but they share some driver state: the command array (holding pointers to
65  * commands currently being processed by the hardware) and the active command
66  * counter. Access to a submission queue and the shared state is protected by
67  * nq_mutex; the completion queue is protected by ncq_mutex.
68  *
69  * When a command is submitted to a queue pair the active command counter is
70  * incremented and a pointer to the command is stored in the command array. The
71  * array index is used as command identifier (CID) in the submission queue
72  * entry. Some commands may take a very long time to complete, and if the queue
73  * wraps around in that time a submission may find the next array slot to still
74  * be used by a long-running command. In this case the array is sequentially
75  * searched for the next free slot. The length of the command array is the same
76  * as the configured queue length. Queue overrun is prevented by the semaphore,
77  * so a command submission may block if the queue is full.
78  *
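 * A minimal sketch of that submission-side bookkeeping, using illustrative
 * names for the per-queue-pair fields, looks like:
 *
 *   sema_p(&qp->nq_sema);           <- may block while the queue is full
 *   mutex_enter(&qp->nq_mutex);
 *   while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
 *           qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 *   cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;      <- the array index is the CID
 *   qp->nq_cmd[qp->nq_next_cmd] = cmd;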
79  *
80  * Polled I/O Support:
81  *
82  * For kernel core dump support the driver can do polled I/O. As interrupts are
83  * turned off while dumping the driver will just submit a command in the regular
84  * way, and then repeatedly attempt a command retrieval until it gets the
85  * command back.
86  *
87  *
88  * Namespace Support:
89  *
90  * NVMe devices can have multiple namespaces, each being an independent data
91  * store. The driver supports multiple namespaces and creates a blkdev interface
92  * for each namespace found. Namespaces can have various attributes to support
93  * protection information. This driver does not support any of this and ignores
94  * namespaces that have these attributes.
95  *
96  * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
97  * (EUI64), and NVMe 1.2 introduced an additional 128bit Namespace Globally
98  * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64
99  * if present to generate the devid, and passes the EUI64 to blkdev to use it
100  * in the device node names.
101  *
102  * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
103  * single controller. This is an artificial limit imposed by the driver to be
104  * able to address a reasonable number of controllers and namespaces while
105  * fitting within the constraints of MAXMIN32, aka a 32-bit device number which
106  * only has 18 bits for the minor number. See the minor node section for more
107  * information.
108  *
109  *
110  * Minor nodes:
111  *
112  * For each NVMe device the driver exposes one minor node for the controller and
113  * one minor node for each namespace. The only operations supported by those
114  * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the
115  * primary control interface for the devices. The character device is a private
116  * interface and we attempt stability through libnvme and more so nvmeadm.
117  *
118  * The controller minor node is much more flexible than the namespace minor node
119  * and should be preferred. The controller node allows one to target any
120  * namespace that the device has, while the namespace is limited in what it can
121  * acquire. While the namespace minor exists, it should not be relied upon and
122  * is not relied upon by libnvme.
123  *
124  * The minor number space is split in two. We use the lower part to support the
125  * controller and namespaces as described above in the 'Namespace Support'
126  * section. The second set is used for cloning opens. We set aside one million
127  * minors for this purpose. We utilize a cloning open so that we can have
128  * per-file_t state. This is how we end up implementing and tracking locking
129  * state and related information.
130  *
131  * When we have this cloned open, we allocate a new nvme_minor_t which gets
132  * its minor number from the nvme_open_minors id_space_t and is stored in the
133  * nvme_open_minors_avl. While someone calls open on a controller or namespace
134  * minor, everything else occurs in the context of one of these ephemeral
135  * minors.
136  *
137  *
138  * ioctls, Errors, and Exclusive Access:
139  *
140  * All of the logical commands that one can issue are driven through the
141  * ioctl(9E) interface. All of our ioctls have a similar shape where they
142  * all include the 'nvme_ioctl_common_t' as their first member.
143  *
144  * This common ioctl structure is used to communicate the namespace that should
145  * be targeted. When the namespace is left as 0, that indicates that it
146  * should target whatever the default of the minor node is. For a namespace
147  * minor, that will be transparently rewritten to the namespace's namespace id.
148  *
149  * In addition, the nvme_ioctl_common_t structure also has a standard error
150  * return. Our goal in our ioctl path is to ensure that we have useful semantic
151  * errors as much as possible. EINVAL, EIO, etc. are all overloaded. Instead as
152  * long as we can copy in our structure, then we will set a semantic error. If
153  * we have an error from the controller, then that will be included there.
154  *
155  * Each command has a specific policy that controls whether or not it is allowed
156  * on the namespace or controller minor, whether the broadcast namespace is
157  * allowed, various settings around what kind of exclusive access is allowed,
158  * and more. Each of these is wrapped up in a bit of policy described by the
159  * 'nvme_ioctl_check_t' structure.
160  *
161  * The device provides exclusion in the form of both a
162  * controller-level and namespace-level read and write lock. Most operations do
163  * not require a lock (e.g. get log page, identify, etc.), but a few do (e.g.
164  * format nvm, firmware related activity, etc.). A read lock guarantees that you
165  * can complete your operation without interference, but read locks are not
166  * required. If you don't take a read lock and someone comes in with a write
167  * lock, then subsequent operations will fail with a semantic error indicating
168  * that you were blocked due to this.
169  *
170  * Here are some of the rules that govern our locks:
171  *
172  * 1. Writers starve readers. Any readers are allowed to finish when there is a
173  *    pending writer; however, all subsequent readers will be blocked upon that
174  *    writer.
175  * 2. A controller write lock takes priority over all other locks. Put
176  *    differently a controller writer not only starves subsequent controller
177  *    readers, but also all namespace read and write locks.
178  * 3. Each namespace lock is independent.
179  * 4. At most a single namespace lock may be owned.
180  * 5. If you own a namespace lock, you may not take a controller lock (to help
181  *    with lock ordering).
182  * 6. In a similar spirit, if you own a controller write lock, you may not take
183  *    any namespace lock. Someone with the controller write lock can perform any
184  *    operations that they need to. However, if you have a controller read lock
185  *    you may take any namespace lock.
186  * 7. There is no ability to upgrade a read lock to a write lock.
187  * 8. There is no recursive locking.
188  *
189  * While there's a lot there to keep track of, the goals of these are to
190  * constrain things so as to avoid deadlock. This is more complex than the
191  * original implementation in the driver which only allowed for an exclusive
192  * open that was tied to the thread. The first issue with tying this to the
193  * thread was that it didn't work well for software that utilized thread
194  * pools, like complex daemons. The second issue is that we want the ability for
195  * daemons, such as a FRU monitor, to be able to retain a file descriptor to the
196  * device without blocking others from taking action except during critical
197  * periods.
198  *
199  * In particular to enable something like libnvme, we didn't want someone to
200  * have to open and close the file descriptor to change what kind of exclusive
201  * access they desired.
202  *
203  * There are two different sets of data structures that we employ for tracking
204  * locking information:
205  *
206  * 1) The nvme_lock_t structure is contained in both the nvme_t and the
207  * nvme_namespace_t and tracks the current writer, readers, and pending writers
208  * and readers. Each of these lists, as well as the writer pointer, refers to
209  * our second data structure.
210  *
211  * When a lock is owned by a single writer, then the nl_writer field is set to a
212  * specific minor's lock data structure. If instead readers are present, then
213  * the nl_readers list_t is not empty. An invariant of the system is that if
214  * nl_writer is non-NULL, nl_readers must be empty and conversely, if nl_readers
215  * is not empty, nl_writer must be NULL.
216  *
217  * 2) The nvme_minor_lock_info_t exists in the nvme_minor_t. There is one
218  * information structure which represents the minor's controller lock and a
219  * second one that represents the minor's namespace lock. The members of this
220  * are broken into tracking what the current lock is and what it targets. It
221  * also has several members that are intended for debugging (nli_last_change,
222  * nli_acq_kthread, etc.).
223  *
224  * While the minor has two different lock information structures, our rules
225  * ensure that only one of the two can be pending and that they shouldn't result
226  * in a deadlock. When a lock is pending, the caller is sleeping on the minor's
227  * nm_cv member.
228  *
229  * These relationships are represented in the following image which shows a
230  * controller write lock being held with pending readers on the controller
231  * lock and pending writers on one of the controller's namespaces.
232  *
233  *  +---------+
234  *  | nvme_t  |
235  *  |         |
236  *  | n_lock -|-------+
237  *  | n_ns -+ |       |                          +-----------------------------+
238  *  +-------|-+   +-----------------+            | nvme_minor_t                |
239  *          |     | nvme_lock_t     |            |                             |
240  *          |     |                 |            |  +------------------------+ |
241  *          |     | writer        --|-------------->| nvme_minor_lock_info_t | |
242  *          |     | reader list     |            |  | nm_ctrl_lock           | |
243  *          |     | pending writers |            |  +------------------------+ |
244  *          |     | pending readers |------+     |  +------------------------+ |
245  *          |     +-----------------+      |     |  | nvme_minor_lock_info_t | |
246  *          |                              |     |  | nm_ns_lock             | |
247  *          |                              |     |  +------------------------+ |
248  *          |                              |     +-----------------------------+
249  *  +------------------+                   |                 +-----------------+
250  *  | nvme_namespace_t |                   |                 | nvme_minor_t    |
251  *  |                  |                   |                 |                 |
252  *  | ns_lock ---+     |                   |                 | +-------------+ |
253  *  +------------|-----+                   +-----------------|>|nm_ctrl_lock | |
254  *               |                                           | +-------------+ |
255  *               v                                           +-----------------+
256  *     +------------------+                                         ...
257  *     | nvme_lock_t      |                                  +-----------------+
258  *     |                  |                                  | nvme_minor_t    |
259  *     | writer           |                                  |                 |
260  *     | reader list      |                                  | +-------------+ |
261  *     | pending writers -|-----------------+                | |nm_ctrl_lock | |
262  *     | pending readers  |                 |                | +-------------+ |
263  *     +------------------+                 |                +-----------------+
264  *         +-----------------------------+  |  +-----------------------------+
265  *         | nvme_minor_t                |  |  | nvme_minor_t                |
266  *         |                             |  |  |                             |
267  *         |  +------------------------+ |  |  |  +------------------------+ |
268  *         |  | nvme_minor_lock_info_t | |  |  |  | nvme_minor_lock_info_t | |
269  *         |  | nm_ctrl_lock           | |  |  |  | nm_ctrl_lock           | |
270  *         |  +------------------------+ |  |  |  +------------------------+ |
271  *         |  +------------------------+ |  v  |  +------------------------+ |
272  *         |  | nvme_minor_lock_info_t |-|-----|->| nvme_minor_lock_info_t | |
273  *         |  | nm_ns_lock             | |     |  | nm_ns_lock             | |
274  *         |  +------------------------+ |     |  +------------------------+ |
275  *         +-----------------------------+     +-----------------------------+
276  *
277  * Blkdev Interface:
278  *
279  * This driver uses blkdev to do all the heavy lifting involved with presenting
280  * a disk device to the system. As a result, the processing of I/O requests is
281  * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
282  * setup, and splitting of transfers into manageable chunks.
283  *
284  * I/O requests coming in from blkdev are turned into NVM commands and posted to
285  * an I/O queue. The queue is selected by taking the CPU id modulo the number of
286  * queues. There is currently no timeout handling of I/O commands.
287  *
288  * Blkdev also supports querying device/media information and generating a
289  * devid. The driver reports the best block size as determined by the namespace
290  * format back to blkdev as physical block size to support partition and block
291  * alignment. The devid is either based on the namespace GUID or EUI64, if
292  * present, or composed using the device vendor ID, model number, serial number,
293  * and the namespace ID.
294  *
295  *
296  * Error Handling:
297  *
298  * Error handling is currently limited to detecting fatal hardware errors,
299  * either by asynchronous events, or synchronously through command status or
300  * admin command timeouts. In case of severe errors the device is fenced off and
301  * all further requests will return EIO. FMA is then called to fault the device.
302  *
303  * The hardware has a limit for outstanding asynchronous event requests. Before
304  * this limit is known the driver assumes it is at least 1 and posts a single
305  * asynchronous request. Later when the limit is known more asynchronous event
306  * requests are posted to allow quicker reception of error information. When an
307  * asynchronous event is posted by the hardware the driver will parse the error
308  * status fields and log information or fault the device, depending on the
309  * severity of the asynchronous event. The asynchronous event request is then
310  * reused and posted to the admin queue again.
311  *
312  * On command completion the command status is checked for errors. In case of
313  * errors indicating a driver bug the driver panics. Almost all other error
314  * status values just cause EIO to be returned.
315  *
316  * Command timeouts are currently detected for all admin commands except
317  * asynchronous event requests. If a command times out and the hardware appears
318  * to be healthy the driver attempts to abort the command. The original command
319  * timeout is also applied to the abort command. If the abort times out too the
320  * driver assumes the device to be dead, fences it off, and calls FMA to retire
321  * it. In all other cases the aborted command should return immediately with a
322  * status indicating it was aborted, and the driver will wait indefinitely for
323  * that to happen. No timeout handling of normal I/O commands is presently done.
324  *
325  * Any command that times out due to the controller dropping dead will be put on
326  * nvme_lost_cmds list if it references DMA memory. This will prevent the DMA
327  * memory being reused by the system and later be written to by a "dead" NVMe
328  * controller.
329  *
330  *
331  * Locking:
332  *
333  * Each queue pair has a nq_mutex and ncq_mutex. The nq_mutex must be held
334  * when accessing shared state and submission queue registers, ncq_mutex
335  * is held when accessing completion queue state and registers.
336  * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
337  * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
338  * mutexes themselves.
339  *
340  * Each command also has its own nc_mutex, which is associated with the
341  * condition variable nc_cv. It is only used on admin commands which are run
342  * synchronously. In that case it must be held across calls to
343  * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
344  * nvme_admin_cmd(). It must also be held whenever the completion state of the
345  * command is changed or while an admin command timeout is handled.
346  *
347  * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
348  * More than one nc_mutex may only be held when aborting commands. In this case,
349  * the nc_mutex of the command to be aborted must be held across the call to
350  * nvme_abort_cmd() to prevent the command from completing while the abort is in
351  * progress.
352  *
353  * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
354  * acquired first. More than one nq_mutex is never held by a single thread.
355  * The ncq_mutex is only held by nvme_retrieve_cmd() and
356  * nvme_process_iocq(). nvme_process_iocq() is only called from the
357  * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
358  * mutex is non-contentious but is required for implementation completeness
359  * and safety.
360  *
361  * There is one mutex n_minor_mutex which protects all open flags nm_open and
362  * exclusive-open thread pointers nm_oexcl of each minor node associated with a
363  * controller and its namespaces.
364  *
365  * In addition, there is a logical namespace management mutex which protects the
366  * data about namespaces. When interrogating the metadata of any namespace, this
367  * lock must be held. This gets tricky as we need to call into blkdev, which may
368  * issue callbacks into us which want this lock, and it is illegal to hold locks
369  * across those blkdev calls as otherwise they might lead to deadlock (blkdev
370  * leverages ndi_devi_enter()).
371  *
372  * The lock exposes two levels, one that we call 'NVME' and one 'BDRO' or blkdev
373  * read-only. The idea is that most callers will use the NVME level which says
374  * this is a full traditional mutex operation. The BDRO level is used by blkdev
375  * callback functions and is a promise to only read the data. When a blkdev
376  * operation starts, the lock holder will use nvme_mgmt_bd_start(). This
377  * strictly speaking drops the mutex, but records that the lock is logically
378  * held by the thread that did the start() operation.
379  *
380  * During this time, other threads (or even the same one) may end up calling
381  * into nvme_mgmt_lock(). Only one person may still hold the lock at any time;
382  * however, the BRDO level will be allowed to proceed during this time. This
383  * allows us to make consistent progress and honor the blkdev lock ordering
384  * requirements, albeit it is not as straightforward as a simple mutex.
385  *
386  * Quiesce / Fast Reboot:
387  *
388  * The driver currently does not support fast reboot. A quiesce(9E) entry point
389  * is still provided which is used to send a shutdown notification to the
390  * device.
391  *
392  *
393  * NVMe Hotplug:
394  *
395  * The driver supports hot removal. The driver uses the NDI event framework
396  * to register a callback, nvme_remove_callback, to clean up when a disk is
397  * removed. In particular, the driver will unqueue outstanding I/O commands and
398  * set n_dead on the softstate to true so that other operations, such as ioctls
399  * and command submissions, fail as well.
400  *
401  * While the callback registration relies on the NDI event framework, the
402  * removal event itself is kicked off in the PCIe hotplug framework, when the
403  * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
404  * device was removed from the slot.
405  *
406  * The NVMe driver instance itself will remain until the final close of the
407  * device.
408  *
409  *
410  * DDI UFM Support
411  *
412  * The driver supports the DDI UFM framework for reporting information about
413  * the device's firmware image and slot configuration. This data can be
414  * queried by userland software via ioctls to the ufm driver. For more
415  * information, see ddi_ufm(9E).
416  *
417  *
418  * Driver Configuration:
419  *
420  * The following driver properties can be changed to control some aspects of the
421  * driver's operation:
422  * - strict-version: can be set to 0 to allow devices conforming to newer
423  *   major versions to be used
424  * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
425  *   specific command status as a fatal error leading to device faulting
426  * - admin-queue-len: the maximum length of the admin queue (16-4096)
427  * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
428  * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
429  * - async-event-limit: the maximum number of asynchronous event requests to be
430  *   posted by the driver
431  * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
432  *   cache
433  * - min-phys-block-size: the minimum physical block size to report to blkdev,
434  *   which is among other things the basis for ZFS vdev ashift
435  * - max-submission-queues: the maximum number of I/O submission queues.
436  * - max-completion-queues: the maximum number of I/O completion queues,
437  *   can be less than max-submission-queues, in which case the completion
438  *   queues are shared.
439  *
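 * As a hypothetical example, an nvme.conf fragment setting two of the above
 * properties might look like:
 *
 *   volatile-write-cache-enable = 0;
 *   min-phys-block-size = 4096;
 *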
440  * In addition to the above properties, some device-specific tunables can be
441  * configured using the nvme-config-list global property. The value of this
442  * property is a list of triplets. The formal syntax is:
443  *
444  *   nvme-config-list ::= <triplet> [, <triplet>]* ;
445  *   <triplet>        ::= "<model>" , "<rev-list>" , "<tuple-list>"
446  *   <rev-list>       ::= [ <fwrev> [, <fwrev>]*]
447  *   <tuple-list>     ::= <tunable> [, <tunable>]*
448  *   <tunable>        ::= <name> : <value>
449  *
450  * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and
451  * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of <tuple-list>
452  * contains one or more tunables to apply to all controllers that match the
453  * specified model number and optionally firmware revision. Each <tunable> is a
454  * <name> : <value> pair.  Supported tunables are:
455  *
456  * - ignore-unknown-vendor-status:  can be set to "on" to not handle any vendor
457  *   specific command status as a fatal error leading to device faulting
458  *
459  * - min-phys-block-size: the minimum physical block size to report to blkdev,
460  *   which is among other things the basis for ZFS vdev ashift
461  *
462  * - volatile-write-cache: can be set to "on" or "off" to enable or disable the
463  *   volatile write cache, if present
464  *
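 * As a hypothetical example using made-up model and firmware revision strings,
 * an entry following the above grammar might look like:
 *
 *   nvme-config-list =
 *       "EXAMPLE MODEL 1234", "", "min-phys-block-size:4096",
 *       "EXAMPLE MODEL 5678", "FW1.0, FW1.1", "volatile-write-cache:off";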
465  *
466  * TODO:
467  * - figure out sane default for I/O queue depth reported to blkdev
468  * - FMA handling of media errors
469  * - support for devices supporting very large I/O requests using chained PRPs
470  * - support for configuring hardware parameters like interrupt coalescing
471  * - support for media formatting and hard partitioning into namespaces
472  * - support for big-endian systems
473  * - support for fast reboot
474  * - support for NVMe Subsystem Reset (1.1)
475  * - support for Scatter/Gather lists (1.1)
476  * - support for Reservations (1.1)
477  * - support for power management
478  */
479 
480 #include <sys/byteorder.h>
481 #ifdef _BIG_ENDIAN
482 #error nvme driver needs porting for big-endian platforms
483 #endif
484 
485 #include <sys/modctl.h>
486 #include <sys/conf.h>
487 #include <sys/devops.h>
488 #include <sys/ddi.h>
489 #include <sys/ddi_ufm.h>
490 #include <sys/sunddi.h>
491 #include <sys/sunndi.h>
492 #include <sys/bitmap.h>
493 #include <sys/sysmacros.h>
494 #include <sys/param.h>
495 #include <sys/varargs.h>
496 #include <sys/cpuvar.h>
497 #include <sys/disp.h>
498 #include <sys/blkdev.h>
499 #include <sys/atomic.h>
500 #include <sys/archsystm.h>
501 #include <sys/sata/sata_hba.h>
502 #include <sys/stat.h>
503 #include <sys/policy.h>
504 #include <sys/list.h>
505 #include <sys/dkio.h>
506 #include <sys/pci.h>
507 #include <sys/mkdev.h>
508 
509 #include <sys/nvme.h>
510 
511 #ifdef __x86
512 #include <sys/x86_archext.h>
513 #endif
514 
515 #include "nvme_reg.h"
516 #include "nvme_var.h"
517 
518 /*
519  * Assertions to make sure that we've properly captured various aspects of the
520  * packed structures and haven't broken them during updates.
521  */
522 CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE);
523 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
524 CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
525 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
526 CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
527 CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
528 CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
529 CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);
530 
531 CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE);
532 CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
533 CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
534 CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
535 CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
536 CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);
537 
538 CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE);
539 CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE);
540 
541 CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE);
542 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
543 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);
544 
545 CTASSERT(sizeof (nvme_nschange_list_t) == 4096);
546 
547 
548 /* NVMe spec version supported */
549 static const int nvme_version_major = 2;
550 
551 /* tunable for admin command timeout in seconds, default is 1s */
552 uint32_t nvme_admin_cmd_timeout = 1;
553 
554 /* tunable for FORMAT NVM command timeout in seconds, default is 600s */
555 uint32_t nvme_format_cmd_timeout = 600;
556 
557 /* tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
558 uint32_t nvme_commit_save_cmd_timeout = 15;
559 
560 /*
561  * tunable for the size of arbitrary vendor specific admin commands,
562  * default is 16MiB.
563  */
564 uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;
565 
566 /*
567  * tunable for the max timeout of arbitary vendor specific admin commands,
568  * default is 60s.
569  */
570 uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60;
571 
572 /*
573  * This ID space, AVL, and lock are used for keeping track of minor state across
574  * opens between different devices.
575  */
576 static id_space_t *nvme_open_minors;
577 static avl_tree_t nvme_open_minors_avl;
578 kmutex_t nvme_open_minors_mutex;
579 
580 /*
581  * Removal taskq used for n_dead callback processing.
582  */
583 taskq_t *nvme_dead_taskq;
584 
585 /*
586  * This enumeration is used in tandem with nvme_mgmt_lock() to describe which
587  * form of the lock is being taken. See the theory statement for more context.
588  */
589 typedef enum {
590 	/*
591 	 * This is the primary form of taking the management lock and indicates
592 	 * that the user intends to do a read/write of it. This should always be
593 	 * used for any ioctl paths or truly anything other than a blkdev
594 	 * information operation.
595 	 */
596 	NVME_MGMT_LOCK_NVME,
597 	/*
598 	 * This is a subordinate form of the lock whereby the user is in blkdev
599 	 * callback context and will only intend to read the namespace data.
600 	 */
601 	NVME_MGMT_LOCK_BDRO
602 } nvme_mgmt_lock_level_t;
603 
604 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
605 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
606 static int nvme_quiesce(dev_info_t *);
607 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
608 static int nvme_setup_interrupts(nvme_t *, int, int);
609 static void nvme_release_interrupts(nvme_t *);
610 static uint_t nvme_intr(caddr_t, caddr_t);
611 
612 static void nvme_shutdown(nvme_t *, boolean_t);
613 static boolean_t nvme_reset(nvme_t *, boolean_t);
614 static int nvme_init(nvme_t *);
615 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
616 static void nvme_free_cmd(nvme_cmd_t *);
617 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
618     bd_xfer_t *);
619 static void nvme_admin_cmd(nvme_cmd_t *, uint32_t);
620 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *);
621 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
622 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *);
623 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
624 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
625 static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
626 static void nvme_wakeup_cmd(void *);
627 static void nvme_async_event_task(void *);
628 
629 static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
630 static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
631 static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
632 static int nvme_check_specific_cmd_status(nvme_cmd_t *);
633 static int nvme_check_generic_cmd_status(nvme_cmd_t *);
634 static inline int nvme_check_cmd_status(nvme_cmd_t *);
635 static boolean_t nvme_check_cmd_status_ioctl(nvme_cmd_t *,
636     nvme_ioctl_common_t *);
637 
638 static int nvme_abort_cmd(nvme_cmd_t *, uint_t);
639 static void nvme_async_event(nvme_t *);
640 static boolean_t nvme_format_nvm(nvme_t *, nvme_ioctl_format_t *);
641 static boolean_t nvme_get_logpage_int(nvme_t *, boolean_t, void **, size_t *,
642     uint8_t);
643 static boolean_t nvme_identify(nvme_t *, boolean_t, nvme_ioctl_identify_t *,
644     void **);
645 static boolean_t nvme_identify_int(nvme_t *, uint32_t, uint8_t, void **);
646 static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
647     uint32_t *);
648 static int nvme_write_cache_set(nvme_t *, boolean_t);
649 static int nvme_set_nqueues(nvme_t *);
650 
651 static void nvme_free_dma(nvme_dma_t *);
652 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
653     nvme_dma_t **);
654 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
655     nvme_dma_t **);
656 static void nvme_free_qpair(nvme_qpair_t *);
657 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
658 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);
659 
660 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
661 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
662 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
663 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
664 
665 static boolean_t nvme_check_regs_hdl(nvme_t *);
666 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
667 
668 static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t);
669 
670 static void nvme_bd_xfer_done(void *);
671 static void nvme_bd_driveinfo(void *, bd_drive_t *);
672 static int nvme_bd_mediainfo(void *, bd_media_t *);
673 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
674 static int nvme_bd_read(void *, bd_xfer_t *);
675 static int nvme_bd_write(void *, bd_xfer_t *);
676 static int nvme_bd_sync(void *, bd_xfer_t *);
677 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
678 static int nvme_bd_free_space(void *, bd_xfer_t *);
679 
680 static int nvme_prp_dma_constructor(void *, void *, int);
681 static void nvme_prp_dma_destructor(void *, void *);
682 
683 static void nvme_prepare_devid(nvme_t *, uint32_t);
684 
685 /* DDI UFM callbacks */
686 static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
687     ddi_ufm_image_t *);
688 static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
689     ddi_ufm_slot_t *);
690 static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
691 
692 static int nvme_open(dev_t *, int, int, cred_t *);
693 static int nvme_close(dev_t, int, int, cred_t *);
694 static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
695 
696 static int nvme_init_ns(nvme_t *, uint32_t);
697 static boolean_t nvme_attach_ns(nvme_t *, nvme_ioctl_common_t *);
698 static boolean_t nvme_detach_ns(nvme_t *, nvme_ioctl_common_t *);
699 
700 static int nvme_minor_comparator(const void *, const void *);
701 
702 static ddi_ufm_ops_t nvme_ufm_ops = {
703 	NULL,
704 	nvme_ufm_fill_image,
705 	nvme_ufm_fill_slot,
706 	nvme_ufm_getcaps
707 };
708 
709 /*
710  * Minor numbers are split amongst those used for controllers and for device
711  * opens. The number of controller minors is limited based on MAXMIN32 per
712  * the theory statement. We allocate 1 million minors as a total guess at a
713  * number that'll probably be enough. The starting point of the open minors can
714  * be shifted to accommodate future expansion of the NVMe device minors.
715  */
716 #define	NVME_MINOR_INST_SHIFT	9
717 #define	NVME_MINOR(inst, nsid)	(((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
718 #define	NVME_MINOR_INST(minor)	((minor) >> NVME_MINOR_INST_SHIFT)
719 #define	NVME_MINOR_NSID(minor)	((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
720 #define	NVME_MINOR_MAX		(NVME_MINOR(1, 0) - 2)
721 #define	NVME_IS_VENDOR_SPECIFIC_CMD(x)	(((x) >= 0xC0) && ((x) <= 0xFF))
722 
723 #define	NVME_OPEN_NMINORS		(1024 * 1024)
724 #define	NVME_OPEN_MINOR_MIN		(MAXMIN32 + 1)
725 #define	NVME_OPEN_MINOR_MAX_EXCL	(NVME_OPEN_MINOR_MIN + \
726     NVME_OPEN_NMINORS)
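
/*
 * For illustration: with NVME_MINOR_INST_SHIFT of 9, a hypothetical controller
 * instance 2 and namespace 1 map to NVME_MINOR(2, 1) = (2 << 9) | 1 = 1025,
 * from which NVME_MINOR_INST() recovers 2 and NVME_MINOR_NSID() recovers 1.
 * Minors at or above NVME_OPEN_MINOR_MIN instead identify the ephemeral
 * cloning opens allocated from the nvme_open_minors id_space.
 */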
727 
728 static void *nvme_state;
729 static kmem_cache_t *nvme_cmd_cache;
730 
731 /*
732  * DMA attributes for queue DMA memory
733  *
734  * Queue DMA memory must be page aligned. The maximum length of a queue is
735  * 65536 entries, and an entry can be 64 bytes long.
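 * In the worst case that is 65536 entries * 64 bytes = 4 MiB of physically
 * contiguous memory per queue, which is what dma_attr_count_max and
 * dma_attr_maxxfer below reflect.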
736  */
737 static const ddi_dma_attr_t nvme_queue_dma_attr = {
738 	.dma_attr_version	= DMA_ATTR_V0,
739 	.dma_attr_addr_lo	= 0,
740 	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
741 	.dma_attr_count_max	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
742 	.dma_attr_align		= 0x1000,
743 	.dma_attr_burstsizes	= 0x7ff,
744 	.dma_attr_minxfer	= 0x1000,
745 	.dma_attr_maxxfer	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
746 	.dma_attr_seg		= 0xffffffffffffffffULL,
747 	.dma_attr_sgllen	= 1,
748 	.dma_attr_granular	= 1,
749 	.dma_attr_flags		= 0,
750 };
751 
752 /*
753  * DMA attributes for transfers using Physical Region Page (PRP) entries
754  *
755  * A PRP entry describes one page of DMA memory using the page size specified
756  * in the controller configuration's memory page size register (CC.MPS). It uses
757  * a 64bit base address aligned to this page size. There is no limitation on
758  * chaining PRPs together for arbitrarily large DMA transfers. These DMA
759  * attributes will be copied into the nvme_t during nvme_attach() and the
760  * dma_attr_maxxfer will be updated.
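 * For example, with a 4096 byte controller page size, a hypothetical 64 KiB
 * transfer starting on a page boundary is described by 16 PRP entries.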
761  */
762 static const ddi_dma_attr_t nvme_prp_dma_attr = {
763 	.dma_attr_version	= DMA_ATTR_V0,
764 	.dma_attr_addr_lo	= 0,
765 	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
766 	.dma_attr_count_max	= 0xfff,
767 	.dma_attr_align		= 0x1000,
768 	.dma_attr_burstsizes	= 0x7ff,
769 	.dma_attr_minxfer	= 0x1000,
770 	.dma_attr_maxxfer	= 0x1000,
771 	.dma_attr_seg		= 0xfff,
772 	.dma_attr_sgllen	= -1,
773 	.dma_attr_granular	= 1,
774 	.dma_attr_flags		= 0,
775 };
776 
777 /*
778  * DMA attributes for transfers using scatter/gather lists
779  *
780  * A SGL entry describes a chunk of DMA memory using a 64bit base address and a
781  * 32bit length field. SGL Segment and SGL Last Segment entries require the
782  * length to be a multiple of 16 bytes. While the SGL DMA attributes are copied
783  * into the nvme_t, they are not currently used for any I/O.
784  */
785 static const ddi_dma_attr_t nvme_sgl_dma_attr = {
786 	.dma_attr_version	= DMA_ATTR_V0,
787 	.dma_attr_addr_lo	= 0,
788 	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
789 	.dma_attr_count_max	= 0xffffffffUL,
790 	.dma_attr_align		= 1,
791 	.dma_attr_burstsizes	= 0x7ff,
792 	.dma_attr_minxfer	= 0x10,
793 	.dma_attr_maxxfer	= 0xfffffffffULL,
794 	.dma_attr_seg		= 0xffffffffffffffffULL,
795 	.dma_attr_sgllen	= -1,
796 	.dma_attr_granular	= 0x10,
797 	.dma_attr_flags		= 0
798 };
799 
800 static ddi_device_acc_attr_t nvme_reg_acc_attr = {
801 	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
802 	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
803 	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC
804 };
805 
806 /*
807  * ioctl validation policies. These are policies that determine which namespaces
808  * are allowed or disallowed for various operations. Note, all policy items
809  * should be explicitly listed here to help make it clear what our intent is.
810  * That is also why some of these are identical or repeated when they cover
811  * different ioctls.
812  */
813 
814 /*
815  * The controller information ioctl generally contains read-only information
816  * about the controller that is sourced from multiple different pieces of
817  * information. This does not operate on a namespace and none are accepted.
818  */
819 static const nvme_ioctl_check_t nvme_check_ctrl_info = {
820 	.nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
821 	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
822 	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
823 };
824 
825 /*
826  * The kernel namespace information requires a namespace ID to be specified. It
827  * does not allow for the broadcast ID to be specified.
828  */
829 static const nvme_ioctl_check_t nvme_check_ns_info = {
830 	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
831 	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
832 	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
833 };
834 
835 /*
836  * Identify commands are allowed to operate on a namespace minor. Unfortunately,
837  * the namespace field in identify commands is a bit weird. In particular, some
838  * commands need a valid namespace, while others are namespace listing
839  * operations, which means illegal namespaces like zero are allowed.
840  */
841 static const nvme_ioctl_check_t nvme_check_identify = {
842 	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
843 	.nck_skip_ctrl = B_TRUE, .nck_ctrl_rewrite = B_FALSE,
844 	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
845 };
846 
847 /*
848  * The get log page command requires the ability to specify namespaces. When
849  * targeting the controller, one must use the broadcast NSID.
850  */
851 static const nvme_ioctl_check_t nvme_check_get_logpage = {
852 	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
853 	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
854 	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
855 };
856 
857 /*
858  * When getting a feature, we do not want rewriting behavior as most features do
859  * not require a namespace to be specified. Specific instances are checked in
860  * nvme_validate_get_feature().
861  */
862 static const nvme_ioctl_check_t nvme_check_get_feature = {
863 	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
864 	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
865 	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
866 };
867 
868 /*
869  * Format commands must target a namespace. The broadcast namespace must be used
870  * when referring to the controller.
871  */
872 static const nvme_ioctl_check_t nvme_check_format = {
873 	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
874 	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
875 	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_WRITE
876 };
877 
878 /*
879  * Attach and detach must always target a minor. However, the broadcast
880  * namespace is not allowed. We still perform rewriting so that specifying
881  * the controller node with 0 will be caught.
882  */
883 static const nvme_ioctl_check_t nvme_check_attach_detach = {
884 	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
885 	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
886 	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
887 };
888 
889 /*
890  * Firmware operations must not target a namespace and are only allowed from the
891  * controller.
892  */
893 static const nvme_ioctl_check_t nvme_check_firmware = {
894 	.nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
895 	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
896 	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
897 };
898 
899 /*
900  * Passthru commands are an odd set. We only allow them from the primary
901  * controller; however, we allow a namespace to be specified in them and allow
902  * the broadcast namespace. We do not perform rewriting because we don't know
903  * what the semantics are. We explicitly exempt passthru commands from needing
904  * an exclusive lock and leave it up to them to tell us the impact of the
905  * command and semantics. As this is a privileged interface and the semantics
906  * are arbitrary, there's not much we can do without some assistance from the
907  * consumer.
908  */
909 static const nvme_ioctl_check_t nvme_check_passthru = {
910 	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
911 	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
912 	.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
913 };
914 
915 /*
916  * Lock operations are allowed to target a namespace, but must not be rewritten.
917  * There is no support for the broadcast namespace. This is the only ioctl that
918  * should skip exclusive checking as it's used to grant it.
919  */
920 static const nvme_ioctl_check_t nvme_check_locking = {
921 	.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
922 	.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
923 	.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_SKIP
924 };
925 
926 static struct cb_ops nvme_cb_ops = {
927 	.cb_open	= nvme_open,
928 	.cb_close	= nvme_close,
929 	.cb_strategy	= nodev,
930 	.cb_print	= nodev,
931 	.cb_dump	= nodev,
932 	.cb_read	= nodev,
933 	.cb_write	= nodev,
934 	.cb_ioctl	= nvme_ioctl,
935 	.cb_devmap	= nodev,
936 	.cb_mmap	= nodev,
937 	.cb_segmap	= nodev,
938 	.cb_chpoll	= nochpoll,
939 	.cb_prop_op	= ddi_prop_op,
940 	.cb_str		= 0,
941 	.cb_flag	= D_NEW | D_MP,
942 	.cb_rev		= CB_REV,
943 	.cb_aread	= nodev,
944 	.cb_awrite	= nodev
945 };
946 
947 static struct dev_ops nvme_dev_ops = {
948 	.devo_rev	= DEVO_REV,
949 	.devo_refcnt	= 0,
950 	.devo_getinfo	= ddi_no_info,
951 	.devo_identify	= nulldev,
952 	.devo_probe	= nulldev,
953 	.devo_attach	= nvme_attach,
954 	.devo_detach	= nvme_detach,
955 	.devo_reset	= nodev,
956 	.devo_cb_ops	= &nvme_cb_ops,
957 	.devo_bus_ops	= NULL,
958 	.devo_power	= NULL,
959 	.devo_quiesce	= nvme_quiesce,
960 };
961 
962 static struct modldrv nvme_modldrv = {
963 	.drv_modops	= &mod_driverops,
964 	.drv_linkinfo	= "NVMe driver",
965 	.drv_dev_ops	= &nvme_dev_ops
966 };
967 
968 static struct modlinkage nvme_modlinkage = {
969 	.ml_rev		= MODREV_1,
970 	.ml_linkage	= { &nvme_modldrv, NULL }
971 };
972 
973 static bd_ops_t nvme_bd_ops = {
974 	.o_version	= BD_OPS_CURRENT_VERSION,
975 	.o_drive_info	= nvme_bd_driveinfo,
976 	.o_media_info	= nvme_bd_mediainfo,
977 	.o_devid_init	= nvme_bd_devid,
978 	.o_sync_cache	= nvme_bd_sync,
979 	.o_read		= nvme_bd_read,
980 	.o_write	= nvme_bd_write,
981 	.o_free_space	= nvme_bd_free_space,
982 };
983 
984 /*
985  * This list will hold commands that have timed out and couldn't be aborted.
986  * As we don't know what the hardware may still do with the DMA memory we can't
987  * free them, so we'll keep them forever on this list where we can easily look
988  * at them with mdb.
989  */
990 static struct list nvme_lost_cmds;
991 static kmutex_t nvme_lc_mutex;
992 
993 int
994 _init(void)
995 {
996 	int error;
997 
998 	error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
999 	if (error != DDI_SUCCESS)
1000 		return (error);
1001 
1002 	if ((nvme_open_minors = id_space_create("nvme_open_minors",
1003 	    NVME_OPEN_MINOR_MIN, NVME_OPEN_MINOR_MAX_EXCL)) == NULL) {
1004 		ddi_soft_state_fini(&nvme_state);
1005 		return (ENOMEM);
1006 	}
1007 
1008 	nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
1009 	    sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1010 
1011 	mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
1012 	list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
1013 	    offsetof(nvme_cmd_t, nc_list));
1014 
1015 	mutex_init(&nvme_open_minors_mutex, NULL, MUTEX_DRIVER, NULL);
1016 	avl_create(&nvme_open_minors_avl, nvme_minor_comparator,
1017 	    sizeof (nvme_minor_t), offsetof(nvme_minor_t, nm_avl));
1018 
1019 	nvme_dead_taskq = taskq_create("nvme_dead_taskq", 1, minclsyspri, 1, 1,
1020 	    TASKQ_PREPOPULATE);
1021 
1022 	bd_mod_init(&nvme_dev_ops);
1023 
1024 	error = mod_install(&nvme_modlinkage);
1025 	if (error != DDI_SUCCESS) {
1026 		ddi_soft_state_fini(&nvme_state);
1027 		id_space_destroy(nvme_open_minors);
1028 		mutex_destroy(&nvme_lc_mutex);
1029 		list_destroy(&nvme_lost_cmds);
1030 		bd_mod_fini(&nvme_dev_ops);
1031 		mutex_destroy(&nvme_open_minors_mutex);
1032 		avl_destroy(&nvme_open_minors_avl);
1033 		taskq_destroy(nvme_dead_taskq);
1034 	}
1035 
1036 	return (error);
1037 }
1038 
1039 int
1040 _fini(void)
1041 {
1042 	int error;
1043 
1044 	if (!list_is_empty(&nvme_lost_cmds))
1045 		return (DDI_FAILURE);
1046 
1047 	error = mod_remove(&nvme_modlinkage);
1048 	if (error == DDI_SUCCESS) {
1049 		ddi_soft_state_fini(&nvme_state);
1050 		id_space_destroy(nvme_open_minors);
1051 		kmem_cache_destroy(nvme_cmd_cache);
1052 		mutex_destroy(&nvme_lc_mutex);
1053 		list_destroy(&nvme_lost_cmds);
1054 		bd_mod_fini(&nvme_dev_ops);
1055 		mutex_destroy(&nvme_open_minors_mutex);
1056 		avl_destroy(&nvme_open_minors_avl);
1057 		taskq_destroy(nvme_dead_taskq);
1058 	}
1059 
1060 	return (error);
1061 }
1062 
1063 int
1064 _info(struct modinfo *modinfop)
1065 {
1066 	return (mod_info(&nvme_modlinkage, modinfop));
1067 }
1068 
1069 static inline void
1070 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
1071 {
1072 	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1073 
1074 	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
1075 	ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
1076 }
1077 
1078 static inline void
1079 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
1080 {
1081 	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1082 
1083 	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
1084 	ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
1085 }
1086 
1087 static inline uint64_t
1088 nvme_get64(nvme_t *nvme, uintptr_t reg)
1089 {
1090 	uint64_t val;
1091 
1092 	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1093 
1094 	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
1095 	val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
1096 
1097 	return (val);
1098 }
1099 
1100 static inline uint32_t
1101 nvme_get32(nvme_t *nvme, uintptr_t reg)
1102 {
1103 	uint32_t val;
1104 
1105 	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1106 
1107 	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
1108 	val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
1109 
1110 	return (val);
1111 }
1112 
1113 static void
1114 nvme_mgmt_lock_fini(nvme_mgmt_lock_t *lock)
1115 {
1116 	ASSERT3U(lock->nml_bd_own, ==, 0);
1117 	mutex_destroy(&lock->nml_lock);
1118 	cv_destroy(&lock->nml_cv);
1119 }
1120 
1121 static void
1122 nvme_mgmt_lock_init(nvme_mgmt_lock_t *lock)
1123 {
1124 	mutex_init(&lock->nml_lock, NULL, MUTEX_DRIVER, NULL);
1125 	cv_init(&lock->nml_cv, NULL, CV_DRIVER, NULL);
1126 	lock->nml_bd_own = 0;
1127 }
1128 
1129 static void
1130 nvme_mgmt_unlock(nvme_t *nvme)
1131 {
1132 	nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1133 
1134 	cv_broadcast(&lock->nml_cv);
1135 	mutex_exit(&lock->nml_lock);
1136 }
1137 
1138 #ifdef	DEBUG
1139 static boolean_t
1140 nvme_mgmt_lock_held(nvme_t *nvme)
1141 {
1142 	return (MUTEX_HELD(&nvme->n_mgmt.nml_lock) != 0);
1143 }
1144 #endif	/* DEBUG */
1145 
1146 static void
1147 nvme_mgmt_lock(nvme_t *nvme, nvme_mgmt_lock_level_t level)
1148 {
1149 	nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1150 	mutex_enter(&lock->nml_lock);
1151 	while (lock->nml_bd_own != 0) {
1152 		if (level == NVME_MGMT_LOCK_BDRO)
1153 			break;
1154 		cv_wait(&lock->nml_cv, &lock->nml_lock);
1155 	}
1156 }
1157 
1158 /*
1159  * This and nvme_mgmt_bd_end() are used to indicate that the driver is going to
1160  * be calling into a re-entrant blkdev related function. We cannot hold the lock
1161  * across such an operation and therefore must indicate that this is logically
1162  * held, while allowing other operations to proceed. This nvme_mgmt_bd_end() may
1163  * only be called by a thread that already holds the nvme_mgmt_lock().
1164  */
1165 static void
1166 nvme_mgmt_bd_start(nvme_t *nvme)
1167 {
1168 	nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1169 
1170 	VERIFY(MUTEX_HELD(&lock->nml_lock));
1171 	VERIFY3U(lock->nml_bd_own, ==, 0);
1172 	lock->nml_bd_own = (uintptr_t)curthread;
1173 	mutex_exit(&lock->nml_lock);
1174 }
1175 
1176 static void
1177 nvme_mgmt_bd_end(nvme_t *nvme)
1178 {
1179 	nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1180 
1181 	mutex_enter(&lock->nml_lock);
1182 	VERIFY3U(lock->nml_bd_own, ==, (uintptr_t)curthread);
1183 	lock->nml_bd_own = 0;
1184 }
1185 
1186 /*
1187  * This is a central clearing house for marking an NVMe controller dead and/or
1188  * removed. This takes care of setting the flag, handling outstanding
1189  * blocked locks, and sending a DDI FMA impact. This is called from a precarious
1190  * place where locking is suspect. The only guarantee we have is that the nvme_t
1191  * is valid and won't disappear until we return.
1192  *
1193  * This should only be used after attach has been called.
1194  */
1195 static void
1196 nvme_ctrl_mark_dead(nvme_t *nvme, boolean_t removed)
1197 {
1198 	boolean_t was_dead;
1199 
1200 	/*
1201 	 * See if we win the race to set things up here. If someone beat us to
1202 	 * it, we do not do anything.
1203 	 */
1204 	was_dead = atomic_cas_32((volatile uint32_t *)&nvme->n_dead, B_FALSE,
1205 	    B_TRUE);
1206 	if (was_dead) {
1207 		return;
1208 	}
1209 
1210 	/*
1211 	 * If this was removed, there is no reason to change the service impact.
1212 	 * However, then we need to change our default return code that we use
1213 	 * here to indicate that it was gone versus that it is dead.
1214 	 */
1215 	if (removed) {
1216 		nvme->n_dead_status = NVME_IOCTL_E_CTRL_GONE;
1217 	} else {
1218 		ASSERT3U(nvme->n_dead_status, ==, NVME_IOCTL_E_CTRL_DEAD);
1219 		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1220 	}
1221 
1222 	taskq_dispatch_ent(nvme_dead_taskq, nvme_rwlock_ctrl_dead, nvme,
1223 	    TQ_NOSLEEP, &nvme->n_dead_tqent);
1224 }
1225 
1226 static boolean_t
1227 nvme_check_regs_hdl(nvme_t *nvme)
1228 {
1229 	ddi_fm_error_t error;
1230 
1231 	ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
1232 
1233 	if (error.fme_status != DDI_FM_OK)
1234 		return (B_TRUE);
1235 
1236 	return (B_FALSE);
1237 }
1238 
1239 static boolean_t
1240 nvme_check_dma_hdl(nvme_dma_t *dma)
1241 {
1242 	ddi_fm_error_t error;
1243 
1244 	if (dma == NULL)
1245 		return (B_FALSE);
1246 
1247 	ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
1248 
1249 	if (error.fme_status != DDI_FM_OK)
1250 		return (B_TRUE);
1251 
1252 	return (B_FALSE);
1253 }
1254 
1255 static void
1256 nvme_free_dma_common(nvme_dma_t *dma)
1257 {
1258 	if (dma->nd_dmah != NULL)
1259 		(void) ddi_dma_unbind_handle(dma->nd_dmah);
1260 	if (dma->nd_acch != NULL)
1261 		ddi_dma_mem_free(&dma->nd_acch);
1262 	if (dma->nd_dmah != NULL)
1263 		ddi_dma_free_handle(&dma->nd_dmah);
1264 }
1265 
1266 static void
1267 nvme_free_dma(nvme_dma_t *dma)
1268 {
1269 	nvme_free_dma_common(dma);
1270 	kmem_free(dma, sizeof (*dma));
1271 }
1272 
1273 /* ARGSUSED */
1274 static void
1275 nvme_prp_dma_destructor(void *buf, void *private)
1276 {
1277 	nvme_dma_t *dma = (nvme_dma_t *)buf;
1278 
1279 	nvme_free_dma_common(dma);
1280 }
1281 
1282 static int
1283 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
1284     size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
1285 {
1286 	if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
1287 	    &dma->nd_dmah) != DDI_SUCCESS) {
1288 		/*
1289 		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
1290 		 * the only other possible error is DDI_DMA_BADATTR which
1291 		 * indicates a driver bug which should cause a panic.
1292 		 */
1293 		dev_err(nvme->n_dip, CE_PANIC,
1294 		    "!failed to get DMA handle, check DMA attributes");
1295 		return (DDI_FAILURE);
1296 	}
1297 
1298 	/*
1299 	 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
1300 	 * or the flags are conflicting, which isn't the case here.
1301 	 */
1302 	(void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
1303 	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
1304 	    &dma->nd_len, &dma->nd_acch);
1305 
1306 	if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
1307 	    dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1308 	    &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
1309 		dev_err(nvme->n_dip, CE_WARN,
1310 		    "!failed to bind DMA memory");
1311 		atomic_inc_32(&nvme->n_dma_bind_err);
1312 		nvme_free_dma_common(dma);
1313 		return (DDI_FAILURE);
1314 	}
1315 
1316 	return (DDI_SUCCESS);
1317 }
1318 
1319 static int
1320 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
1321     ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
1322 {
1323 	nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
1324 
1325 	if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
1326 	    DDI_SUCCESS) {
1327 		*ret = NULL;
1328 		kmem_free(dma, sizeof (nvme_dma_t));
1329 		return (DDI_FAILURE);
1330 	}
1331 
1332 	bzero(dma->nd_memp, dma->nd_len);
1333 
1334 	*ret = dma;
1335 	return (DDI_SUCCESS);
1336 }
1337 
1338 /* ARGSUSED */
1339 static int
1340 nvme_prp_dma_constructor(void *buf, void *private, int flags)
1341 {
1342 	nvme_dma_t *dma = (nvme_dma_t *)buf;
1343 	nvme_t *nvme = (nvme_t *)private;
1344 
1345 	dma->nd_dmah = NULL;
1346 	dma->nd_acch = NULL;
1347 
1348 	if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
1349 	    DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
1350 		return (-1);
1351 	}
1352 
1353 	ASSERT(dma->nd_ncookie == 1);
1354 
1355 	dma->nd_cached = B_TRUE;
1356 
1357 	return (0);
1358 }
1359 
1360 static int
1361 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
1362     uint_t flags, nvme_dma_t **dma)
1363 {
1364 	uint32_t len = nentry * qe_len;
1365 	ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
1366 
1367 	len = roundup(len, nvme->n_pagesize);
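	/*
	 * As a sizing example, assuming the spec-defined 64-byte SQE and
	 * 16-byte CQE and a 4k page size, a 1024-entry submission queue needs
	 * 64k and a 1024-entry completion queue needs 16k; both are already
	 * multiples of the page size, so the roundup above is a no-op for
	 * them.
	 */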
1368 
1369 	if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
1370 	    != DDI_SUCCESS) {
1371 		dev_err(nvme->n_dip, CE_WARN,
1372 		    "!failed to get DMA memory for queue");
1373 		goto fail;
1374 	}
1375 
1376 	if ((*dma)->nd_ncookie != 1) {
1377 		dev_err(nvme->n_dip, CE_WARN,
1378 		    "!got too many cookies for queue DMA");
1379 		goto fail;
1380 	}
1381 
1382 	return (DDI_SUCCESS);
1383 
1384 fail:
1385 	if (*dma) {
1386 		nvme_free_dma(*dma);
1387 		*dma = NULL;
1388 	}
1389 
1390 	return (DDI_FAILURE);
1391 }
1392 
1393 static void
1394 nvme_free_cq(nvme_cq_t *cq)
1395 {
1396 	mutex_destroy(&cq->ncq_mutex);
1397 
1398 	if (cq->ncq_cmd_taskq != NULL)
1399 		taskq_destroy(cq->ncq_cmd_taskq);
1400 
1401 	if (cq->ncq_dma != NULL)
1402 		nvme_free_dma(cq->ncq_dma);
1403 
1404 	kmem_free(cq, sizeof (*cq));
1405 }
1406 
1407 static void
1408 nvme_free_qpair(nvme_qpair_t *qp)
1409 {
1410 	int i;
1411 
1412 	mutex_destroy(&qp->nq_mutex);
1413 	sema_destroy(&qp->nq_sema);
1414 
1415 	if (qp->nq_sqdma != NULL)
1416 		nvme_free_dma(qp->nq_sqdma);
1417 
1418 	if (qp->nq_active_cmds > 0)
1419 		for (i = 0; i != qp->nq_nentry; i++)
1420 			if (qp->nq_cmd[i] != NULL)
1421 				nvme_free_cmd(qp->nq_cmd[i]);
1422 
1423 	if (qp->nq_cmd != NULL)
1424 		kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);
1425 
1426 	kmem_free(qp, sizeof (nvme_qpair_t));
1427 }
1428 
1429 /*
1430  * Destroy the pre-allocated cq array, but only free individual completion
1431  * queues from the given starting index.
1432  */
1433 static void
1434 nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
1435 {
1436 	uint_t i;
1437 
1438 	for (i = start; i < nvme->n_cq_count; i++)
1439 		if (nvme->n_cq[i] != NULL)
1440 			nvme_free_cq(nvme->n_cq[i]);
1441 
1442 	kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
1443 }
1444 
1445 static int
1446 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
1447     uint_t nthr)
1448 {
1449 	nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
1450 	char name[64];		/* large enough for the taskq name */
1451 
1452 	mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
1453 	    DDI_INTR_PRI(nvme->n_intr_pri));
1454 
1455 	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
1456 	    DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
1457 		goto fail;
1458 
1459 	cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
1460 	cq->ncq_nentry = nentry;
1461 	cq->ncq_id = idx;
1462 	cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);
1463 
1464 	/*
1465 	 * Each completion queue has its own command taskq.
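	 * The generated name ends up looking like, e.g., "nvme1_cmd_taskq3"
	 * for driver instance 1 and completion queue 3.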
1466 	 */
1467 	(void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
1468 	    ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);
1469 
1470 	cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
1471 	    TASKQ_PREPOPULATE);
1472 
1473 	if (cq->ncq_cmd_taskq == NULL) {
1474 		dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
1475 		    "taskq for cq %u", idx);
1476 		goto fail;
1477 	}
1478 
1479 	*cqp = cq;
1480 	return (DDI_SUCCESS);
1481 
1482 fail:
1483 	nvme_free_cq(cq);
1484 	*cqp = NULL;
1485 
1486 	return (DDI_FAILURE);
1487 }
1488 
1489 /*
1490  * Create the n_cq array big enough to hold "ncq" completion queues.
1491  * If the array already exists it will be re-sized (but only larger).
1492  * The admin queue is included in this array, which boosts the
1493  * max number of entries to UINT16_MAX + 1.
1494  */
1495 static int
1496 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
1497 {
1498 	nvme_cq_t **cq;
1499 	uint_t i, cq_count;
1500 
1501 	ASSERT3U(ncq, >, nvme->n_cq_count);
1502 
1503 	cq = nvme->n_cq;
1504 	cq_count = nvme->n_cq_count;
1505 
1506 	nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
1507 	nvme->n_cq_count = ncq;
1508 
1509 	for (i = 0; i < cq_count; i++)
1510 		nvme->n_cq[i] = cq[i];
1511 
1512 	for (; i < nvme->n_cq_count; i++)
1513 		if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
1514 		    DDI_SUCCESS)
1515 			goto fail;
1516 
1517 	if (cq != NULL)
1518 		kmem_free(cq, sizeof (*cq) * cq_count);
1519 
1520 	return (DDI_SUCCESS);
1521 
1522 fail:
1523 	nvme_destroy_cq_array(nvme, cq_count);
1524 	/*
1525 	 * Restore the original array
1526 	 */
1527 	nvme->n_cq_count = cq_count;
1528 	nvme->n_cq = cq;
1529 
1530 	return (DDI_FAILURE);
1531 }
1532 
1533 static int
1534 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
1535     uint_t idx)
1536 {
1537 	nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
1538 	uint_t cq_idx;
1539 
1540 	mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
1541 	    DDI_INTR_PRI(nvme->n_intr_pri));
1542 
1543 	/*
1544 	 * The NVMe spec defines that a full queue has one empty (unused) slot;
1545 	 * initialize the semaphore accordingly.
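	 * For example, a 1024-entry submission queue allows at most 1023
	 * commands to be outstanding at any one time.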
1546 	 */
1547 	sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);
1548 
1549 	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
1550 	    DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
1551 		goto fail;
1552 
1553 	/*
1554 	 * idx == 0 is adminq, those above 0 are shared io completion queues.
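	 * For example, with n_cq_count == 5 (the admin cq plus four io cqs),
	 * io qpairs 1 through 8 map to completion queues 1, 2, 3, 4, 1, 2, 3,
	 * and 4 respectively.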
1555 	 */
1556 	cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
1557 	qp->nq_cq = nvme->n_cq[cq_idx];
1558 	qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
1559 	qp->nq_nentry = nentry;
1560 
1561 	qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
1562 
1563 	qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
1564 	qp->nq_next_cmd = 0;
1565 
1566 	*nqp = qp;
1567 	return (DDI_SUCCESS);
1568 
1569 fail:
1570 	nvme_free_qpair(qp);
1571 	*nqp = NULL;
1572 
1573 	return (DDI_FAILURE);
1574 }
1575 
1576 static nvme_cmd_t *
1577 nvme_alloc_cmd(nvme_t *nvme, int kmflag)
1578 {
1579 	nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
1580 
1581 	if (cmd == NULL)
1582 		return (cmd);
1583 
1584 	bzero(cmd, sizeof (nvme_cmd_t));
1585 
1586 	cmd->nc_nvme = nvme;
1587 
1588 	mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
1589 	    DDI_INTR_PRI(nvme->n_intr_pri));
1590 	cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
1591 
1592 	return (cmd);
1593 }
1594 
1595 static void
1596 nvme_free_cmd(nvme_cmd_t *cmd)
1597 {
1598 	/* Don't free commands on the lost commands list. */
1599 	if (list_link_active(&cmd->nc_list))
1600 		return;
1601 
1602 	if (cmd->nc_dma) {
1603 		nvme_free_dma(cmd->nc_dma);
1604 		cmd->nc_dma = NULL;
1605 	}
1606 
1607 	if (cmd->nc_prp) {
1608 		kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp);
1609 		cmd->nc_prp = NULL;
1610 	}
1611 
1612 	cv_destroy(&cmd->nc_cv);
1613 	mutex_destroy(&cmd->nc_mutex);
1614 
1615 	kmem_cache_free(nvme_cmd_cache, cmd);
1616 }
1617 
1618 static void
1619 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1620 {
1621 	sema_p(&qp->nq_sema);
1622 	nvme_submit_cmd_common(qp, cmd);
1623 }
1624 
1625 static int
1626 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1627 {
1628 	if (cmd->nc_nvme->n_dead) {
1629 		return (EIO);
1630 	}
1631 
1632 	if (sema_tryp(&qp->nq_sema) == 0)
1633 		return (EAGAIN);
1634 
1635 	nvme_submit_cmd_common(qp, cmd);
1636 	return (0);
1637 }
1638 
1639 static void
1640 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1641 {
1642 	nvme_reg_sqtdbl_t tail = { 0 };
1643 
1644 	mutex_enter(&qp->nq_mutex);
1645 	cmd->nc_completed = B_FALSE;
1646 
1647 	/*
1648 	 * Now that we hold the queue pair lock, we must check whether or not
1649 	 * the controller has been listed as dead (e.g. was removed due to
1650 	 * hotplug). This is necessary as otherwise we could race with
1651 	 * nvme_remove_callback(). Because this has not been enqueued, we don't
1652 	 * call nvme_unqueue_cmd(), which is why we must manually release the
1653 	 * semaphore here.
1654 	 */
1655 	if (cmd->nc_nvme->n_dead) {
1656 		taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback,
1657 		    cmd, TQ_NOSLEEP, &cmd->nc_tqent);
1658 		sema_v(&qp->nq_sema);
1659 		mutex_exit(&qp->nq_mutex);
1660 		return;
1661 	}
1662 
1663 	/*
1664 	 * Try to insert the cmd into the active cmd array at the nq_next_cmd
1665 	 * slot. If the slot is already occupied advance to the next slot and
1666 	 * try again. This can happen for long running commands like async event
1667 	 * requests.
1668 	 */
1669 	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
1670 		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1671 	qp->nq_cmd[qp->nq_next_cmd] = cmd;
1672 
1673 	qp->nq_active_cmds++;
1674 
1675 	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
1676 	bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
1677 	(void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
1678 	    sizeof (nvme_sqe_t) * qp->nq_sqtail,
1679 	    sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
1680 	qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1681 
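	/*
	 * Ring the doorbell: writing the updated tail index to the submission
	 * queue tail doorbell register is what tells the controller that a new
	 * command has been posted.
	 */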
1682 	tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
1683 	nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
1684 
1685 	mutex_exit(&qp->nq_mutex);
1686 }
1687 
1688 static nvme_cmd_t *
1689 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
1690 {
1691 	nvme_cmd_t *cmd;
1692 
1693 	ASSERT(mutex_owned(&qp->nq_mutex));
1694 	ASSERT3S(cid, <, qp->nq_nentry);
1695 
1696 	cmd = qp->nq_cmd[cid];
1697 	/*
1698 	 * Some controllers will erroneously add things to the completion queue
1699 	 * for which there is no matching outstanding command. If this happens,
1700 	 * it is almost certainly a controller firmware bug since nq_mutex
1701 	 * is held across command submission and ringing the queue doorbell,
1702 	 * and is also held in this function.
1703 	 *
1704 	 * If we see such an unexpected command, there is not much we can do.
1705 	 * These will be logged and counted in nvme_get_completed(), but
1706 	 * otherwise ignored.
1707 	 */
1708 	if (cmd == NULL)
1709 		return (NULL);
1710 	qp->nq_cmd[cid] = NULL;
1711 	ASSERT3U(qp->nq_active_cmds, >, 0);
1712 	qp->nq_active_cmds--;
1713 	sema_v(&qp->nq_sema);
1714 
1715 	ASSERT3P(cmd, !=, NULL);
1716 	ASSERT3P(cmd->nc_nvme, ==, nvme);
1717 	ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);
1718 
1719 	return (cmd);
1720 }
1721 
1722 /*
1723  * Get the command tied to the next completed cqe and bump along completion
1724  * queue head counter.
1725  */
1726 static nvme_cmd_t *
1727 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
1728 {
1729 	nvme_qpair_t *qp;
1730 	nvme_cqe_t *cqe;
1731 	nvme_cmd_t *cmd;
1732 
1733 	ASSERT(mutex_owned(&cq->ncq_mutex));
1734 
1735 retry:
1736 	cqe = &cq->ncq_cq[cq->ncq_head];
1737 
1738 	/* Check phase tag of CQE. Hardware inverts it for new entries. */
1739 	if (cqe->cqe_sf.sf_p == cq->ncq_phase)
1740 		return (NULL);
1741 
1742 	qp = nvme->n_ioq[cqe->cqe_sqid];
1743 
1744 	mutex_enter(&qp->nq_mutex);
1745 	cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
1746 	mutex_exit(&qp->nq_mutex);
1747 
1748 	qp->nq_sqhead = cqe->cqe_sqhd;
1749 	cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;
1750 
1751 	/* Toggle phase on wrap-around. */
1752 	if (cq->ncq_head == 0)
1753 		cq->ncq_phase = cq->ncq_phase != 0 ? 0 : 1;
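	/*
	 * A quick example of the phase protocol: ncq_phase starts out as 0
	 * (the cq is zeroed at allocation), so on the first pass through the
	 * queue the controller posts new entries with the phase bit set to 1,
	 * which differ from ncq_phase and are seen as new above. Once the head
	 * wraps, ncq_phase flips to 1, the pass-one entries now match it and
	 * read as stale, and the controller posts pass-two entries with the
	 * phase bit cleared.
	 */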
1754 
1755 	if (cmd == NULL) {
1756 		dev_err(nvme->n_dip, CE_WARN,
1757 		    "!received completion for unknown cid 0x%x", cqe->cqe_cid);
1758 		atomic_inc_32(&nvme->n_unknown_cid);
1759 		/*
1760 		 * We want to ignore this unexpected completion entry as it
1761 		 * is most likely a result of a bug in the controller firmware.
1762 		 * However, if we return NULL, then callers will assume there
1763 		 * are no more pending commands for this wakeup. Retry to keep
1764 		 * enumerating commands until the phase tag indicates there are
1765 		 * no more and we are really done.
1766 		 */
1767 		goto retry;
1768 	}
1769 
1770 	ASSERT3U(cmd->nc_sqid, ==, cqe->cqe_sqid);
1771 	bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));
1772 
1773 	return (cmd);
1774 }
1775 
1776 /*
1777  * Process all completed commands on the io completion queue.
1778  */
1779 static uint_t
1780 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
1781 {
1782 	nvme_reg_cqhdbl_t head = { 0 };
1783 	nvme_cmd_t *cmd;
1784 	uint_t completed = 0;
1785 
1786 	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
1787 	    DDI_SUCCESS)
1788 		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
1789 		    __func__);
1790 
1791 	mutex_enter(&cq->ncq_mutex);
1792 
1793 	while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
1794 		taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
1795 		    TQ_NOSLEEP, &cmd->nc_tqent);
1796 
1797 		completed++;
1798 	}
1799 
1800 	if (completed > 0) {
1801 		/*
1802 		 * Update the completion queue head doorbell.
1803 		 */
1804 		head.b.cqhdbl_cqh = cq->ncq_head;
1805 		nvme_put32(nvme, cq->ncq_hdbl, head.r);
1806 	}
1807 
1808 	mutex_exit(&cq->ncq_mutex);
1809 
1810 	return (completed);
1811 }
1812 
1813 static nvme_cmd_t *
1814 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
1815 {
1816 	nvme_cq_t *cq = qp->nq_cq;
1817 	nvme_reg_cqhdbl_t head = { 0 };
1818 	nvme_cmd_t *cmd;
1819 
1820 	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
1821 	    DDI_SUCCESS)
1822 		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
1823 		    __func__);
1824 
1825 	mutex_enter(&cq->ncq_mutex);
1826 
1827 	if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
1828 		head.b.cqhdbl_cqh = cq->ncq_head;
1829 		nvme_put32(nvme, cq->ncq_hdbl, head.r);
1830 	}
1831 
1832 	mutex_exit(&cq->ncq_mutex);
1833 
1834 	return (cmd);
1835 }
1836 
1837 static int
1838 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
1839 {
1840 	nvme_cqe_t *cqe = &cmd->nc_cqe;
1841 
1842 	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1843 	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
1844 	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
1845 	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
1846 	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
1847 
1848 	if (cmd->nc_xfer != NULL)
1849 		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1850 
1851 	if (cmd->nc_nvme->n_strict_version) {
1852 		nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
1853 	}
1854 
1855 	return (EIO);
1856 }
1857 
1858 static int
1859 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
1860 {
1861 	nvme_cqe_t *cqe = &cmd->nc_cqe;
1862 
1863 	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1864 	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
1865 	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
1866 	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
1867 	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
1868 	if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
1869 		nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
1870 	}
1871 
1872 	return (EIO);
1873 }
1874 
1875 static int
1876 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
1877 {
1878 	nvme_cqe_t *cqe = &cmd->nc_cqe;
1879 
1880 	switch (cqe->cqe_sf.sf_sc) {
1881 	case NVME_CQE_SC_INT_NVM_WRITE:
1882 		/* write fail */
1883 		/* TODO: post ereport */
1884 		if (cmd->nc_xfer != NULL)
1885 			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
1886 		return (EIO);
1887 
1888 	case NVME_CQE_SC_INT_NVM_READ:
1889 		/* read fail */
1890 		/* TODO: post ereport */
1891 		if (cmd->nc_xfer != NULL)
1892 			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
1893 		return (EIO);
1894 
1895 	default:
1896 		return (nvme_check_unknown_cmd_status(cmd));
1897 	}
1898 }
1899 
1900 static int
1901 nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
1902 {
1903 	nvme_cqe_t *cqe = &cmd->nc_cqe;
1904 
1905 	switch (cqe->cqe_sf.sf_sc) {
1906 	case NVME_CQE_SC_GEN_SUCCESS:
1907 		return (0);
1908 
1909 	/*
1910 	 * Errors indicating a bug in the driver should cause a panic.
1911 	 */
1912 	case NVME_CQE_SC_GEN_INV_OPC:
1913 		/* Invalid Command Opcode */
1914 		if (!cmd->nc_dontpanic)
1915 			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1916 			    "programming error: invalid opcode in cmd %p",
1917 			    (void *)cmd);
1918 		return (EINVAL);
1919 
1920 	case NVME_CQE_SC_GEN_INV_FLD:
1921 		/* Invalid Field in Command */
1922 		if (!cmd->nc_dontpanic)
1923 			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1924 			    "programming error: invalid field in cmd %p",
1925 			    (void *)cmd);
1926 		return (EIO);
1927 
1928 	case NVME_CQE_SC_GEN_ID_CNFL:
1929 		/* Command ID Conflict */
1930 		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1931 		    "cmd ID conflict in cmd %p", (void *)cmd);
1932 		return (0);
1933 
1934 	case NVME_CQE_SC_GEN_INV_NS:
1935 		/* Invalid Namespace or Format */
1936 		if (!cmd->nc_dontpanic)
1937 			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1938 			    "programming error: invalid NS/format in cmd %p",
1939 			    (void *)cmd);
1940 		return (EINVAL);
1941 
1942 	case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
1943 		/* LBA Out Of Range */
1944 		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1945 		    "LBA out of range in cmd %p", (void *)cmd);
1946 		return (0);
1947 
1948 	/*
1949 	 * Non-fatal errors, handle gracefully.
1950 	 */
1951 	case NVME_CQE_SC_GEN_DATA_XFR_ERR:
1952 		/* Data Transfer Error (DMA) */
1953 		/* TODO: post ereport */
1954 		atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
1955 		if (cmd->nc_xfer != NULL)
1956 			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
1957 		return (EIO);
1958 
1959 	case NVME_CQE_SC_GEN_INTERNAL_ERR:
1960 		/*
1961 		 * Internal Error. The spec (v1.0, section 4.5.1.2) says
1962 		 * detailed error information is returned as async event,
1963 		 * so we pretty much ignore the error here and handle it
1964 		 * in the async event handler.
1965 		 */
1966 		atomic_inc_32(&cmd->nc_nvme->n_internal_err);
1967 		if (cmd->nc_xfer != NULL)
1968 			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
1969 		return (EIO);
1970 
1971 	case NVME_CQE_SC_GEN_ABORT_REQUEST:
1972 		/*
1973 		 * Command Abort Requested. This normally happens only when a
1974 		 * command times out.
1975 		 */
1976 		/* TODO: post ereport or change blkdev to handle this? */
1977 		atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
1978 		return (ECANCELED);
1979 
1980 	case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
1981 		/* Command Aborted due to Power Loss Notification */
1982 		nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
1983 		return (EIO);
1984 
1985 	case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
1986 		/* Command Aborted due to SQ Deletion */
1987 		atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
1988 		return (EIO);
1989 
1990 	case NVME_CQE_SC_GEN_NVM_CAP_EXC:
1991 		/* Capacity Exceeded */
1992 		atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
1993 		if (cmd->nc_xfer != NULL)
1994 			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
1995 		return (EIO);
1996 
1997 	case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
1998 		/* Namespace Not Ready */
1999 		atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
2000 		if (cmd->nc_xfer != NULL)
2001 			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2002 		return (EIO);
2003 
2004 	case NVME_CQE_SC_GEN_NVM_FORMATTING:
2005 		/* Format in progress (1.2) */
2006 		if (!NVME_VERSION_ATLEAST(&cmd->nc_nvme->n_version, 1, 2))
2007 			return (nvme_check_unknown_cmd_status(cmd));
2008 		atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_formatting);
2009 		if (cmd->nc_xfer != NULL)
2010 			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2011 		return (EIO);
2012 
2013 	default:
2014 		return (nvme_check_unknown_cmd_status(cmd));
2015 	}
2016 }
2017 
2018 static int
2019 nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
2020 {
2021 	nvme_cqe_t *cqe = &cmd->nc_cqe;
2022 
2023 	switch (cqe->cqe_sf.sf_sc) {
2024 	case NVME_CQE_SC_SPC_INV_CQ:
2025 		/* Completion Queue Invalid */
2026 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
2027 		atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
2028 		return (EINVAL);
2029 
2030 	case NVME_CQE_SC_SPC_INV_QID:
2031 		/* Invalid Queue Identifier */
2032 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2033 		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
2034 		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
2035 		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2036 		atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
2037 		return (EINVAL);
2038 
2039 	case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
2040 		/* Max Queue Size Exceeded */
2041 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2042 		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2043 		atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
2044 		return (EINVAL);
2045 
2046 	case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
2047 		/* Abort Command Limit Exceeded */
2048 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
2049 		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2050 		    "abort command limit exceeded in cmd %p", (void *)cmd);
2051 		return (0);
2052 
2053 	case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
2054 		/* Async Event Request Limit Exceeded */
2055 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
2056 		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2057 		    "async event request limit exceeded in cmd %p",
2058 		    (void *)cmd);
2059 		return (0);
2060 
2061 	case NVME_CQE_SC_SPC_INV_INT_VECT:
2062 		/* Invalid Interrupt Vector */
2063 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2064 		atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
2065 		return (EINVAL);
2066 
2067 	case NVME_CQE_SC_SPC_INV_LOG_PAGE:
2068 		/* Invalid Log Page */
2069 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
2070 		atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
2071 		return (EINVAL);
2072 
2073 	case NVME_CQE_SC_SPC_INV_FORMAT:
2074 		/* Invalid Format */
2075 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
2076 		atomic_inc_32(&cmd->nc_nvme->n_inv_format);
2077 		if (cmd->nc_xfer != NULL)
2078 			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2079 		return (EINVAL);
2080 
2081 	case NVME_CQE_SC_SPC_INV_Q_DEL:
2082 		/* Invalid Queue Deletion */
2083 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2084 		atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
2085 		return (EINVAL);
2086 
2087 	case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
2088 		/* Conflicting Attributes */
2089 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
2090 		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2091 		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2092 		atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
2093 		if (cmd->nc_xfer != NULL)
2094 			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2095 		return (EINVAL);
2096 
2097 	case NVME_CQE_SC_SPC_NVM_INV_PROT:
2098 		/* Invalid Protection Information */
2099 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
2100 		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2101 		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2102 		atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
2103 		if (cmd->nc_xfer != NULL)
2104 			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2105 		return (EINVAL);
2106 
2107 	case NVME_CQE_SC_SPC_NVM_READONLY:
2108 		/* Write to Read Only Range */
2109 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2110 		atomic_inc_32(&cmd->nc_nvme->n_readonly);
2111 		if (cmd->nc_xfer != NULL)
2112 			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2113 		return (EROFS);
2114 
2115 	case NVME_CQE_SC_SPC_INV_FW_SLOT:
2116 		/* Invalid Firmware Slot */
2117 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2118 		return (EINVAL);
2119 
2120 	case NVME_CQE_SC_SPC_INV_FW_IMG:
2121 		/* Invalid Firmware Image */
2122 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2123 		return (EINVAL);
2124 
2125 	case NVME_CQE_SC_SPC_FW_RESET:
2126 		/* Conventional Reset Required */
2127 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2128 		return (0);
2129 
2130 	case NVME_CQE_SC_SPC_FW_NSSR:
2131 		/* NVMe Subsystem Reset Required */
2132 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2133 		return (0);
2134 
2135 	case NVME_CQE_SC_SPC_FW_NEXT_RESET:
2136 		/* Activation Requires Reset */
2137 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2138 		return (0);
2139 
2140 	case NVME_CQE_SC_SPC_FW_MTFA:
2141 		/* Activation Requires Maximum Time Violation */
2142 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2143 		return (EAGAIN);
2144 
2145 	case NVME_CQE_SC_SPC_FW_PROHIBITED:
2146 		/* Activation Prohibited */
2147 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2148 		return (EINVAL);
2149 
2150 	case NVME_CQE_SC_SPC_FW_OVERLAP:
2151 		/* Overlapping Firmware Ranges */
2152 		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD);
2153 		return (EINVAL);
2154 
2155 	default:
2156 		return (nvme_check_unknown_cmd_status(cmd));
2157 	}
2158 }
2159 
2160 static inline int
2161 nvme_check_cmd_status(nvme_cmd_t *cmd)
2162 {
2163 	nvme_cqe_t *cqe = &cmd->nc_cqe;
2164 
2165 	/*
2166 	 * Take a shortcut if the controller is dead, or if
2167 	 * command status indicates no error.
2168 	 */
2169 	if (cmd->nc_nvme->n_dead)
2170 		return (EIO);
2171 
2172 	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2173 	    cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2174 		return (0);
2175 
2176 	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
2177 		return (nvme_check_generic_cmd_status(cmd));
2178 	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
2179 		return (nvme_check_specific_cmd_status(cmd));
2180 	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
2181 		return (nvme_check_integrity_cmd_status(cmd));
2182 	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
2183 		return (nvme_check_vendor_cmd_status(cmd));
2184 
2185 	return (nvme_check_unknown_cmd_status(cmd));
2186 }
2187 
2188 /*
2189  * Check the command status as used by an ioctl path and do not convert it to an
2190  * errno. We still allow all the command status checking to occur, but otherwise
2191  * will pass back the controller error as is.
2192  */
2193 static boolean_t
2194 nvme_check_cmd_status_ioctl(nvme_cmd_t *cmd, nvme_ioctl_common_t *ioc)
2195 {
2196 	nvme_cqe_t *cqe = &cmd->nc_cqe;
2197 	nvme_t *nvme = cmd->nc_nvme;
2198 
2199 	if (nvme->n_dead) {
2200 		return (nvme_ioctl_error(ioc, nvme->n_dead_status, 0, 0));
2201 	}
2202 
2203 	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2204 	    cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2205 		return (B_TRUE);
2206 
2207 	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) {
2208 		(void) nvme_check_generic_cmd_status(cmd);
2209 	} else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) {
2210 		(void) nvme_check_specific_cmd_status(cmd);
2211 	} else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) {
2212 		(void) nvme_check_integrity_cmd_status(cmd);
2213 	} else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) {
2214 		(void) nvme_check_vendor_cmd_status(cmd);
2215 	} else {
2216 		(void) nvme_check_unknown_cmd_status(cmd);
2217 	}
2218 
2219 	return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_ERROR,
2220 	    cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_sc));
2221 }
2222 
2223 static int
2224 nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec)
2225 {
2226 	nvme_t *nvme = abort_cmd->nc_nvme;
2227 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2228 	nvme_abort_cmd_t ac = { 0 };
2229 	int ret = 0;
2230 
2231 	sema_p(&nvme->n_abort_sema);
2232 
2233 	ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
2234 	ac.b.ac_sqid = abort_cmd->nc_sqid;
2235 
2236 	cmd->nc_sqid = 0;
2237 	cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
2238 	cmd->nc_callback = nvme_wakeup_cmd;
2239 	cmd->nc_sqe.sqe_cdw10 = ac.r;
2240 
2241 	/*
2242 	 * Send the ABORT to the hardware. The ABORT command will return _after_
2243 	 * the aborted command has completed (aborted or otherwise), but since
2244 	 * we still hold the aborted command's mutex its callback hasn't been
2245 	 * processed yet.
2246 	 */
2247 	nvme_admin_cmd(cmd, sec);
2248 	sema_v(&nvme->n_abort_sema);
2249 
2250 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2251 		dev_err(nvme->n_dip, CE_WARN,
2252 		    "!ABORT failed with sct = %x, sc = %x",
2253 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2254 		atomic_inc_32(&nvme->n_abort_failed);
2255 	} else {
2256 		dev_err(nvme->n_dip, CE_WARN,
2257 		    "!ABORT of command %d/%d %ssuccessful",
2258 		    abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid,
2259 		    cmd->nc_cqe.cqe_dw0 & 1 ? "un" : "");
2260 		if ((cmd->nc_cqe.cqe_dw0 & 1) == 0)
2261 			atomic_inc_32(&nvme->n_cmd_aborted);
2262 	}
2263 
2264 	nvme_free_cmd(cmd);
2265 	return (ret);
2266 }
2267 
2268 /*
2269  * nvme_wait_cmd -- wait for command completion or timeout
2270  *
2271  * In case of a serious error or a timeout of the abort command the hardware
2272  * will be declared dead and FMA will be notified.
2273  */
2274 static void
2275 nvme_wait_cmd(nvme_cmd_t *cmd, uint32_t sec)
2276 {
2277 	clock_t timeout = ddi_get_lbolt() + drv_usectohz((long)sec * MICROSEC);
2278 	nvme_t *nvme = cmd->nc_nvme;
2279 	nvme_reg_csts_t csts;
2280 	nvme_qpair_t *qp;
2281 
2282 	ASSERT(mutex_owned(&cmd->nc_mutex));
2283 
2284 	while (!cmd->nc_completed) {
2285 		if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1)
2286 			break;
2287 	}
2288 
2289 	if (cmd->nc_completed)
2290 		return;
2291 
2292 	/*
2293 	 * The command timed out.
2294 	 *
2295 	 * Check controller for fatal status, any errors associated with the
2296 	 * register or DMA handle, or for a double timeout (abort command timed
2297 	 * out). If necessary log a warning and call FMA.
2298 	 */
2299 	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2300 	dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
2301 	    "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2302 	    cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
2303 	atomic_inc_32(&nvme->n_cmd_timeout);
2304 
2305 	if (csts.b.csts_cfs ||
2306 	    nvme_check_regs_hdl(nvme) ||
2307 	    nvme_check_dma_hdl(cmd->nc_dma) ||
2308 	    cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
2309 		nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2310 	} else if (nvme_abort_cmd(cmd, sec) == 0) {
2311 		/*
2312 		 * If the abort succeeded the command should complete
2313 		 * immediately with an appropriate status.
2314 		 */
2315 		while (!cmd->nc_completed)
2316 			cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
2317 
2318 		return;
2319 	}
2320 
2321 	qp = nvme->n_ioq[cmd->nc_sqid];
2322 
2323 	mutex_enter(&qp->nq_mutex);
2324 	(void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
2325 	mutex_exit(&qp->nq_mutex);
2326 
2327 	/*
2328 	 * As we don't know what the presumed dead hardware might still do with
2329 	 * the DMA memory, we'll put the command on the lost commands list if it
2330 	 * has any DMA memory.
2331 	 */
2332 	if (cmd->nc_dma != NULL) {
2333 		mutex_enter(&nvme_lc_mutex);
2334 		list_insert_head(&nvme_lost_cmds, cmd);
2335 		mutex_exit(&nvme_lc_mutex);
2336 	}
2337 }
2338 
2339 static void
2340 nvme_wakeup_cmd(void *arg)
2341 {
2342 	nvme_cmd_t *cmd = arg;
2343 
2344 	mutex_enter(&cmd->nc_mutex);
2345 	cmd->nc_completed = B_TRUE;
2346 	cv_signal(&cmd->nc_cv);
2347 	mutex_exit(&cmd->nc_mutex);
2348 }
2349 
2350 static void
2351 nvme_async_event_task(void *arg)
2352 {
2353 	nvme_cmd_t *cmd = arg;
2354 	nvme_t *nvme = cmd->nc_nvme;
2355 	nvme_error_log_entry_t *error_log = NULL;
2356 	nvme_health_log_t *health_log = NULL;
2357 	nvme_nschange_list_t *nslist = NULL;
2358 	size_t logsize = 0;
2359 	nvme_async_event_t event;
2360 
2361 	/*
2362 	 * Check for errors associated with the async request itself. The only
2363 	 * command-specific error is "async event limit exceeded", which
2364 	 * indicates a programming error in the driver and causes a panic in
2365 	 * nvme_check_cmd_status().
2366 	 *
2367 	 * Other possible errors are various scenarios where the async request
2368 	 * was aborted, or internal errors in the device. Internal errors are
2369 	 * reported to FMA, the command aborts need no special handling here.
2370 	 *
2371 	 * Finally, at least the qemu nvme emulation does not support async
2372 	 * events and will return NVME_CQE_SC_GEN_INV_OPC with DNR set. If we
2373 	 * see that, we stop posting further async event requests.
2374 	 */
2375 
2376 	if (nvme_check_cmd_status(cmd) != 0) {
2377 		dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2378 		    "!async event request returned failure, sct = 0x%x, "
2379 		    "sc = 0x%x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
2380 		    cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
2381 		    cmd->nc_cqe.cqe_sf.sf_m);
2382 
2383 		if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2384 		    cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
2385 			nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2386 		}
2387 
2388 		if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2389 		    cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
2390 		    cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
2391 			nvme->n_async_event_supported = B_FALSE;
2392 		}
2393 
2394 		nvme_free_cmd(cmd);
2395 		return;
2396 	}
2397 
2398 	event.r = cmd->nc_cqe.cqe_dw0;
2399 
2400 	/* Clear CQE and re-submit the async request. */
2401 	bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
2402 	nvme_submit_admin_cmd(nvme->n_adminq, cmd);
2403 	cmd = NULL;	/* cmd can no longer be used after resubmission */
2404 
2405 	switch (event.b.ae_type) {
2406 	case NVME_ASYNC_TYPE_ERROR:
2407 		if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
2408 			if (!nvme_get_logpage_int(nvme, B_FALSE,
2409 			    (void **)&error_log, &logsize,
2410 			    NVME_LOGPAGE_ERROR)) {
2411 				return;
2412 			}
2413 		} else {
2414 			dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
2415 			    "async event reply: type=0x%x logpage=0x%x",
2416 			    event.b.ae_type, event.b.ae_logpage);
2417 			atomic_inc_32(&nvme->n_wrong_logpage);
2418 			return;
2419 		}
2420 
2421 		switch (event.b.ae_info) {
2422 		case NVME_ASYNC_ERROR_INV_SQ:
2423 			dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2424 			    "invalid submission queue");
2425 			return;
2426 
2427 		case NVME_ASYNC_ERROR_INV_DBL:
2428 			dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2429 			    "invalid doorbell write value");
2430 			return;
2431 
2432 		case NVME_ASYNC_ERROR_DIAGFAIL:
2433 			dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
2434 			nvme_ctrl_mark_dead(nvme, B_FALSE);
2435 			atomic_inc_32(&nvme->n_diagfail_event);
2436 			break;
2437 
2438 		case NVME_ASYNC_ERROR_PERSISTENT:
2439 			dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
2440 			    "device error");
2441 			nvme_ctrl_mark_dead(nvme, B_FALSE);
2442 			atomic_inc_32(&nvme->n_persistent_event);
2443 			break;
2444 
2445 		case NVME_ASYNC_ERROR_TRANSIENT:
2446 			dev_err(nvme->n_dip, CE_WARN, "!transient internal "
2447 			    "device error");
2448 			/* TODO: send ereport */
2449 			atomic_inc_32(&nvme->n_transient_event);
2450 			break;
2451 
2452 		case NVME_ASYNC_ERROR_FW_LOAD:
2453 			dev_err(nvme->n_dip, CE_WARN,
2454 			    "!firmware image load error");
2455 			atomic_inc_32(&nvme->n_fw_load_event);
2456 			break;
2457 		}
2458 		break;
2459 
2460 	case NVME_ASYNC_TYPE_HEALTH:
2461 		if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
2462 			if (!nvme_get_logpage_int(nvme, B_FALSE,
2463 			    (void **)&health_log, &logsize,
2464 			    NVME_LOGPAGE_HEALTH)) {
2465 				return;
2466 			}
2467 		} else {
2468 			dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in async "
2469 			    "event reply: type=0x%x logpage=0x%x", event.b.ae_type,
2470 			    event.b.ae_logpage);
2471 			atomic_inc_32(&nvme->n_wrong_logpage);
2472 			return;
2473 		}
2474 
2475 		switch (event.b.ae_info) {
2476 		case NVME_ASYNC_HEALTH_RELIABILITY:
2477 			dev_err(nvme->n_dip, CE_WARN,
2478 			    "!device reliability compromised");
2479 			/* TODO: send ereport */
2480 			atomic_inc_32(&nvme->n_reliability_event);
2481 			break;
2482 
2483 		case NVME_ASYNC_HEALTH_TEMPERATURE:
2484 			dev_err(nvme->n_dip, CE_WARN,
2485 			    "!temperature above threshold");
2486 			/* TODO: send ereport */
2487 			atomic_inc_32(&nvme->n_temperature_event);
2488 			break;
2489 
2490 		case NVME_ASYNC_HEALTH_SPARE:
2491 			dev_err(nvme->n_dip, CE_WARN,
2492 			    "!spare space below threshold");
2493 			/* TODO: send ereport */
2494 			atomic_inc_32(&nvme->n_spare_event);
2495 			break;
2496 		}
2497 		break;
2498 
2499 	case NVME_ASYNC_TYPE_NOTICE:
2500 		switch (event.b.ae_info) {
2501 		case NVME_ASYNC_NOTICE_NS_CHANGE:
2502 			if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) {
2503 				dev_err(nvme->n_dip, CE_WARN,
2504 				    "!wrong logpage in async event reply: "
2505 				    "type=0x%x logpage=0x%x",
2506 				    event.b.ae_type, event.b.ae_logpage);
2507 				atomic_inc_32(&nvme->n_wrong_logpage);
2508 				break;
2509 			}
2510 
2511 			dev_err(nvme->n_dip, CE_NOTE,
2512 			    "namespace attribute change event, "
2513 			    "logpage = 0x%x", event.b.ae_logpage);
2514 			atomic_inc_32(&nvme->n_notice_event);
2515 
2516 			if (!nvme_get_logpage_int(nvme, B_FALSE,
2517 			    (void **)&nslist, &logsize,
2518 			    NVME_LOGPAGE_NSCHANGE)) {
2519 				break;
2520 			}
2521 
2522 			if (nslist->nscl_ns[0] == UINT32_MAX) {
2523 				dev_err(nvme->n_dip, CE_CONT,
2524 				    "more than %u namespaces have changed.\n",
2525 				    NVME_NSCHANGE_LIST_SIZE);
2526 				break;
2527 			}
2528 
2529 			nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
2530 			for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) {
2531 				uint32_t nsid = nslist->nscl_ns[i];
2532 
2533 				if (nsid == 0)	/* end of list */
2534 					break;
2535 
2536 				dev_err(nvme->n_dip, CE_NOTE,
2537 				    "!namespace nvme%d/%u has changed.",
2538 				    ddi_get_instance(nvme->n_dip), nsid);
2539 
2540 
2541 				if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
2542 					continue;
2543 
2544 				nvme_mgmt_bd_start(nvme);
2545 				bd_state_change(nvme_nsid2ns(nvme,
2546 				    nsid)->ns_bd_hdl);
2547 				nvme_mgmt_bd_end(nvme);
2548 			}
2549 			nvme_mgmt_unlock(nvme);
2550 
2551 			break;
2552 
2553 		case NVME_ASYNC_NOTICE_FW_ACTIVATE:
2554 			dev_err(nvme->n_dip, CE_NOTE,
2555 			    "firmware activation starting, "
2556 			    "logpage = 0x%x", event.b.ae_logpage);
2557 			atomic_inc_32(&nvme->n_notice_event);
2558 			break;
2559 
2560 		case NVME_ASYNC_NOTICE_TELEMETRY:
2561 			dev_err(nvme->n_dip, CE_NOTE,
2562 			    "telemetry log changed, "
2563 			    "logpage = 0x%x", event.b.ae_logpage);
2564 			atomic_inc_32(&nvme->n_notice_event);
2565 			break;
2566 
2567 		case NVME_ASYNC_NOTICE_NS_ASYMM:
2568 			dev_err(nvme->n_dip, CE_NOTE,
2569 			    "asymmetric namespace access change, "
2570 			    "logpage = 0x%x", event.b.ae_logpage);
2571 			atomic_inc_32(&nvme->n_notice_event);
2572 			break;
2573 
2574 		case NVME_ASYNC_NOTICE_LATENCYLOG:
2575 			dev_err(nvme->n_dip, CE_NOTE,
2576 			    "predictable latency event aggregate log change, "
2577 			    "logpage = 0x%x", event.b.ae_logpage);
2578 			atomic_inc_32(&nvme->n_notice_event);
2579 			break;
2580 
2581 		case NVME_ASYNC_NOTICE_LBASTATUS:
2582 			dev_err(nvme->n_dip, CE_NOTE,
2583 			    "LBA status information alert, "
2584 			    "logpage = 0x%x", event.b.ae_logpage);
2585 			atomic_inc_32(&nvme->n_notice_event);
2586 			break;
2587 
2588 		case NVME_ASYNC_NOTICE_ENDURANCELOG:
2589 			dev_err(nvme->n_dip, CE_NOTE,
2590 			    "endurance group event aggregate log page change, "
2591 			    "logpage = 0x%x", event.b.ae_logpage);
2592 			atomic_inc_32(&nvme->n_notice_event);
2593 			break;
2594 
2595 		default:
2596 			dev_err(nvme->n_dip, CE_WARN,
2597 			    "!unknown notice async event received, "
2598 			    "info = 0x%x, logpage = 0x%x", event.b.ae_info,
2599 			    event.b.ae_logpage);
2600 			atomic_inc_32(&nvme->n_unknown_event);
2601 			break;
2602 		}
2603 		break;
2604 
2605 	case NVME_ASYNC_TYPE_VENDOR:
2606 		dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
2607 		    "received, info = 0x%x, logpage = 0x%x", event.b.ae_info,
2608 		    event.b.ae_logpage);
2609 		atomic_inc_32(&nvme->n_vendor_event);
2610 		break;
2611 
2612 	default:
2613 		dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
2614 		    "type = 0x%x, info = 0x%x, logpage = 0x%x", event.b.ae_type,
2615 		    event.b.ae_info, event.b.ae_logpage);
2616 		atomic_inc_32(&nvme->n_unknown_event);
2617 		break;
2618 	}
2619 
2620 	if (error_log != NULL)
2621 		kmem_free(error_log, logsize);
2622 
2623 	if (health_log != NULL)
2624 		kmem_free(health_log, logsize);
2625 
2626 	if (nslist != NULL)
2627 		kmem_free(nslist, logsize);
2628 }
2629 
2630 static void
2631 nvme_admin_cmd(nvme_cmd_t *cmd, uint32_t sec)
2632 {
2633 	mutex_enter(&cmd->nc_mutex);
2634 	nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd);
2635 	nvme_wait_cmd(cmd, sec);
2636 	mutex_exit(&cmd->nc_mutex);
2637 }
2638 
2639 static void
2640 nvme_async_event(nvme_t *nvme)
2641 {
2642 	nvme_cmd_t *cmd;
2643 
2644 	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2645 	cmd->nc_sqid = 0;
2646 	cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
2647 	cmd->nc_callback = nvme_async_event_task;
2648 	cmd->nc_dontpanic = B_TRUE;
2649 
2650 	nvme_submit_admin_cmd(nvme->n_adminq, cmd);
2651 }
2652 
2653 /*
2654  * Some commands, such as format or vendor-unique commands, will manipulate or
2655  * destroy the data in a namespace. Before issuing such a command we make sure
2656  * that none of the namespaces that would be impacted are actually attached.
2657  */
2658 static boolean_t
2659 nvme_no_blkdev_attached(nvme_t *nvme, uint32_t nsid)
2660 {
2661 	ASSERT(nvme_mgmt_lock_held(nvme));
2662 	ASSERT3U(nsid, !=, 0);
2663 
2664 	if (nsid != NVME_NSID_BCAST) {
2665 		nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
2666 		return (!ns->ns_attached);
2667 	}
2668 
2669 	for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
2670 		nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
2671 
2672 		if (ns->ns_attached) {
2673 			return (B_FALSE);
2674 		}
2675 	}
2676 
2677 	return (B_TRUE);
2678 }
2679 
2680 static boolean_t
2681 nvme_format_nvm(nvme_t *nvme, nvme_ioctl_format_t *ioc)
2682 {
2683 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2684 	nvme_format_nvm_t format_nvm = { 0 };
2685 	boolean_t ret;
2686 
2687 	format_nvm.b.fm_lbaf = bitx32(ioc->nif_lbaf, 3, 0);
2688 	format_nvm.b.fm_ses = bitx32(ioc->nif_ses, 2, 0);
2689 
2690 	cmd->nc_sqid = 0;
2691 	cmd->nc_callback = nvme_wakeup_cmd;
2692 	cmd->nc_sqe.sqe_nsid = ioc->nif_common.nioc_nsid;
2693 	cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
2694 	cmd->nc_sqe.sqe_cdw10 = format_nvm.r;
2695 
2696 	/*
2697 	 * We don't want to panic on any format commands. There are two reasons
2698 	 * for this:
2699 	 *
2700 	 * 1) All format commands are initiated by users. We don't want to panic
2701 	 * on user commands.
2702 	 *
2703 	 * 2) Several devices like the Samsung SM951 don't allow formatting of
2704 	 * all namespaces in one command and we'd prefer to handle that
2705 	 * gracefully.
2706 	 */
2707 	cmd->nc_dontpanic = B_TRUE;
2708 
2709 	nvme_admin_cmd(cmd, nvme_format_cmd_timeout);
2710 
2711 	if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nif_common)) {
2712 		dev_err(nvme->n_dip, CE_WARN,
2713 		    "!FORMAT failed with sct = %x, sc = %x",
2714 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2715 		ret = B_FALSE;
2716 		goto fail;
2717 	}
2718 
2719 	ret = B_TRUE;
2720 fail:
2721 	nvme_free_cmd(cmd);
2722 	return (ret);
2723 }
2724 
2725 /*
2726  * Retrieve a specific log page. The contents of the log page request should
2727  * have already been validated by the system.
2728  */
2729 static boolean_t
2730 nvme_get_logpage(nvme_t *nvme, boolean_t user, nvme_ioctl_get_logpage_t *log,
2731     void **buf)
2732 {
2733 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2734 	nvme_getlogpage_dw10_t dw10;
2735 	uint32_t offlo, offhi;
2736 	nvme_getlogpage_dw11_t dw11;
2737 	nvme_getlogpage_dw14_t dw14;
2738 	uint32_t ndw;
2739 	boolean_t ret = B_FALSE;
2740 
2741 	bzero(&dw10, sizeof (dw10));
2742 	bzero(&dw11, sizeof (dw11));
2743 	bzero(&dw14, sizeof (dw14));
2744 
2745 	cmd->nc_sqid = 0;
2746 	cmd->nc_callback = nvme_wakeup_cmd;
2747 	cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
2748 	cmd->nc_sqe.sqe_nsid = log->nigl_common.nioc_nsid;
2749 
2750 	if (user)
2751 		cmd->nc_dontpanic = B_TRUE;
2752 
2753 	/*
2754 	 * The size field is the number of double words, but is a 0's based
2755 	 * value. We need to store our actual value minus one.
2756 	 */
2757 	ndw = (uint32_t)(log->nigl_len / 4);
2758 	ASSERT3U(ndw, >, 0);
2759 	ndw--;
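	/*
	 * For example, a 4096-byte log page is 1024 dwords and is encoded as
	 * 1023; the low 16 bits go into NUMDL (dword 10) and the high 16 bits
	 * into NUMDU (dword 11) below.
	 */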
2760 
2761 	dw10.b.lp_lid = bitx32(log->nigl_lid, 7, 0);
2762 	dw10.b.lp_lsp = bitx32(log->nigl_lsp, 6, 0);
2763 	dw10.b.lp_rae = bitx32(log->nigl_lsp, 0, 0);
2764 	dw10.b.lp_lnumdl = bitx32(ndw, 15, 0);
2765 
2766 	dw11.b.lp_numdu = bitx32(ndw, 31, 16);
2767 	dw11.b.lp_lsi = bitx32(log->nigl_lsi, 15, 0);
2768 
2769 	offlo = bitx64(log->nigl_offset, 31, 0);
2770 	offhi = bitx64(log->nigl_offset, 63, 32);
2771 
2772 	dw14.b.lp_csi = bitx32(log->nigl_csi, 7, 0);
2773 
2774 	cmd->nc_sqe.sqe_cdw10 = dw10.r;
2775 	cmd->nc_sqe.sqe_cdw11 = dw11.r;
2776 	cmd->nc_sqe.sqe_cdw12 = offlo;
2777 	cmd->nc_sqe.sqe_cdw13 = offhi;
2778 	cmd->nc_sqe.sqe_cdw14 = dw14.r;
2779 
2780 	if (nvme_zalloc_dma(nvme, log->nigl_len, DDI_DMA_READ,
2781 	    &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2782 		dev_err(nvme->n_dip, CE_WARN,
2783 		    "!nvme_zalloc_dma failed for GET LOG PAGE");
2784 		ret = nvme_ioctl_error(&log->nigl_common,
2785 		    NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
2786 		goto fail;
2787 	}
2788 
2789 	if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
2790 		ret = nvme_ioctl_error(&log->nigl_common,
2791 		    NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
2792 		goto fail;
2793 	}
2794 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2795 
2796 	if (!nvme_check_cmd_status_ioctl(cmd, &log->nigl_common)) {
2797 		if (!user) {
2798 			dev_err(nvme->n_dip, CE_WARN,
2799 			    "!GET LOG PAGE failed with sct = %x, sc = %x",
2800 			    cmd->nc_cqe.cqe_sf.sf_sct,
2801 			    cmd->nc_cqe.cqe_sf.sf_sc);
2802 		}
2803 		ret = B_FALSE;
2804 		goto fail;
2805 	}
2806 
2807 	*buf = kmem_alloc(log->nigl_len, KM_SLEEP);
2808 	bcopy(cmd->nc_dma->nd_memp, *buf, log->nigl_len);
2809 
2810 	ret = B_TRUE;
2811 fail:
2812 	nvme_free_cmd(cmd);
2813 
2814 	return (ret);
2815 }
2816 
2817 /*
2818  * This is an internal wrapper for when the kernel wants to get a log page.
2819  * Currently this assumes that the only thing that is required is the log page
2820  * ID. If more information is required, we'll be better served to just use the
2821  * general ioctl interface.
2822  */
2823 static boolean_t
2824 nvme_get_logpage_int(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize,
2825     uint8_t lid)
2826 {
2827 	const nvme_log_page_info_t *info = NULL;
2828 	nvme_ioctl_get_logpage_t log;
2829 	nvme_valid_ctrl_data_t data;
2830 	boolean_t bret;
2831 	bool var;
2832 
2833 	for (size_t i = 0; i < nvme_std_log_npages; i++) {
2834 		if (nvme_std_log_pages[i].nlpi_lid == lid &&
2835 		    nvme_std_log_pages[i].nlpi_csi == NVME_CSI_NVM) {
2836 			info = &nvme_std_log_pages[i];
2837 			break;
2838 		}
2839 	}
2840 
2841 	if (info == NULL) {
2842 		return (B_FALSE);
2843 	}
2844 
2845 	data.vcd_vers = &nvme->n_version;
2846 	data.vcd_id = nvme->n_idctl;
2847 	bzero(&log, sizeof (log));
2848 	log.nigl_common.nioc_nsid = NVME_NSID_BCAST;
2849 	log.nigl_csi = info->nlpi_csi;
2850 	log.nigl_lid = info->nlpi_lid;
2851 	log.nigl_len = nvme_log_page_info_size(info, &data, &var);
2852 
2853 	/*
2854 	 * We only support getting standard fixed-length log pages through the
2855 	 * kernel interface at this time. If a log page either has an unknown
2856 	 * size or has a variable length, then we cannot get it.
2857 	 */
2858 	if (log.nigl_len == 0 || var) {
2859 		return (B_FALSE);
2860 	}
2861 
2862 	bret = nvme_get_logpage(nvme, user, &log, buf);
2863 	if (!bret) {
2864 		return (B_FALSE);
2865 	}
2866 
2867 	*bufsize = log.nigl_len;
2868 	return (B_TRUE);
2869 }
2870 
2871 static boolean_t
2872 nvme_identify(nvme_t *nvme, boolean_t user, nvme_ioctl_identify_t *ioc,
2873     void **buf)
2874 {
2875 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2876 	boolean_t ret = B_FALSE;
2877 	nvme_identify_dw10_t dw10;
2878 
2879 	ASSERT3P(buf, !=, NULL);
2880 
2881 	bzero(&dw10, sizeof (dw10));
2882 
2883 	cmd->nc_sqid = 0;
2884 	cmd->nc_callback = nvme_wakeup_cmd;
2885 	cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
2886 	cmd->nc_sqe.sqe_nsid = ioc->nid_common.nioc_nsid;
2887 
2888 	dw10.b.id_cns = bitx32(ioc->nid_cns, 7, 0);
2889 	dw10.b.id_cntid = bitx32(ioc->nid_ctrlid, 15, 0);
2890 
2891 	cmd->nc_sqe.sqe_cdw10 = dw10.r;
2892 
2893 	if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
2894 	    &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2895 		dev_err(nvme->n_dip, CE_WARN,
2896 		    "!nvme_zalloc_dma failed for IDENTIFY");
2897 		ret = nvme_ioctl_error(&ioc->nid_common,
2898 		    NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
2899 		goto fail;
2900 	}
2901 
2902 	if (cmd->nc_dma->nd_ncookie > 2) {
2903 		dev_err(nvme->n_dip, CE_WARN,
2904 		    "!too many DMA cookies for IDENTIFY");
2905 		atomic_inc_32(&nvme->n_too_many_cookies);
2906 		ret = nvme_ioctl_error(&ioc->nid_common,
2907 		    NVME_IOCTL_E_BAD_PRP, 0, 0);
2908 		goto fail;
2909 	}
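	/*
	 * The identify data structure is a single NVME_IDENTIFY_BUFSIZE (4k)
	 * buffer, so at most two PRP entries are ever needed: PRP1 always
	 * points at the first cookie, and PRP2 is only filled in below when
	 * the buffer happens to span a page boundary.
	 */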
2910 
2911 	cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
2912 	if (cmd->nc_dma->nd_ncookie > 1) {
2913 		ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
2914 		    &cmd->nc_dma->nd_cookie);
2915 		cmd->nc_sqe.sqe_dptr.d_prp[1] =
2916 		    cmd->nc_dma->nd_cookie.dmac_laddress;
2917 	}
2918 
2919 	if (user)
2920 		cmd->nc_dontpanic = B_TRUE;
2921 
2922 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2923 
2924 	if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nid_common)) {
2925 		dev_err(nvme->n_dip, CE_WARN,
2926 		    "!IDENTIFY failed with sct = %x, sc = %x",
2927 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2928 		ret = B_FALSE;
2929 		goto fail;
2930 	}
2931 
2932 	*buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
2933 	bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
2934 	ret = B_TRUE;
2935 
2936 fail:
2937 	nvme_free_cmd(cmd);
2938 
2939 	return (ret);
2940 }
2941 
2942 static boolean_t
2943 nvme_identify_int(nvme_t *nvme, uint32_t nsid, uint8_t cns, void **buf)
2944 {
2945 	nvme_ioctl_identify_t id;
2946 
2947 	bzero(&id, sizeof (nvme_ioctl_identify_t));
2948 	id.nid_common.nioc_nsid = nsid;
2949 	id.nid_cns = cns;
2950 
2951 	return (nvme_identify(nvme, B_FALSE, &id, buf));
2952 }
2953 
2954 static int
2955 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
2956     uint32_t val, uint32_t *res)
2957 {
2958 	_NOTE(ARGUNUSED(nsid));
2959 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2960 	int ret = EINVAL;
2961 
2962 	ASSERT(res != NULL);
2963 
2964 	cmd->nc_sqid = 0;
2965 	cmd->nc_callback = nvme_wakeup_cmd;
2966 	cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
2967 	cmd->nc_sqe.sqe_cdw10 = feature;
2968 	cmd->nc_sqe.sqe_cdw11 = val;
2969 
2970 	if (user)
2971 		cmd->nc_dontpanic = B_TRUE;
2972 
2973 	switch (feature) {
2974 	case NVME_FEAT_WRITE_CACHE:
2975 		if (!nvme->n_write_cache_present)
2976 			goto fail;
2977 		break;
2978 
2979 	case NVME_FEAT_NQUEUES:
2980 		break;
2981 
2982 	default:
2983 		goto fail;
2984 	}
2985 
2986 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2987 
2988 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2989 		dev_err(nvme->n_dip, CE_WARN,
2990 		    "!SET FEATURES %d failed with sct = %x, sc = %x",
2991 		    feature, cmd->nc_cqe.cqe_sf.sf_sct,
2992 		    cmd->nc_cqe.cqe_sf.sf_sc);
2993 		goto fail;
2994 	}
2995 
2996 	*res = cmd->nc_cqe.cqe_dw0;
2997 
2998 fail:
2999 	nvme_free_cmd(cmd);
3000 	return (ret);
3001 }
3002 
3003 static int
3004 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
3005 {
3006 	nvme_write_cache_t nwc = { 0 };
3007 
3008 	if (enable)
3009 		nwc.b.wc_wce = 1;
3010 
3011 	/*
3012 	 * We've seen some cases where this fails due to us being told we've
3013 	 * specified an invalid namespace when operating against the Xen xcp-ng
3014 	 * qemu NVMe virtual device. As such, we generally ensure that trying to
3015 	 * enable this doesn't lead us to panic. It's not completely clear why
3016 	 * specifying namespace zero here fails, but not when we're setting the
3017 	 * number of queues below.
3018 	 */
3019 	return (nvme_set_features(nvme, B_TRUE, 0, NVME_FEAT_WRITE_CACHE,
3020 	    nwc.r, &nwc.r));
3021 }
3022 
3023 static int
3024 nvme_set_nqueues(nvme_t *nvme)
3025 {
3026 	nvme_nqueues_t nq = { 0 };
3027 	int ret;
3028 
3029 	/*
3030 	 * The default is to allocate one completion queue per vector.
3031 	 */
3032 	if (nvme->n_completion_queues == -1)
3033 		nvme->n_completion_queues = nvme->n_intr_cnt;
3034 
3035 	/*
3036 	 * There is no point in having more completion queues than
3037 	 * interrupt vectors.
3038 	 */
3039 	nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3040 	    nvme->n_intr_cnt);
3041 
3042 	/*
3043 	 * The default is to use one submission queue per completion queue.
3044 	 */
3045 	if (nvme->n_submission_queues == -1)
3046 		nvme->n_submission_queues = nvme->n_completion_queues;
3047 
3048 	/*
3049 	 * There is no point in having more completion queues than
3050 	 * submission queues.
3051 	 */
3052 	nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3053 	    nvme->n_submission_queues);
3054 
3055 	ASSERT(nvme->n_submission_queues > 0);
3056 	ASSERT(nvme->n_completion_queues > 0);
3057 
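	/*
	 * The NSQR and NCQR fields of the Number of Queues feature are
	 * zero-based values, hence the subtraction here and the addition
	 * of one when interpreting the controller's reply below.
	 */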
3058 	nq.b.nq_nsq = nvme->n_submission_queues - 1;
3059 	nq.b.nq_ncq = nvme->n_completion_queues - 1;
3060 
3061 	ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
3062 	    &nq.r);
3063 
3064 	if (ret == 0) {
3065 		/*
3066 		 * Never use more than the requested number of queues.
3067 		 */
3068 		nvme->n_submission_queues = MIN(nvme->n_submission_queues,
3069 		    nq.b.nq_nsq + 1);
3070 		nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3071 		    nq.b.nq_ncq + 1);
3072 	}
3073 
3074 	return (ret);
3075 }
3076 
3077 static int
3078 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq)
3079 {
3080 	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
3081 	nvme_create_queue_dw10_t dw10 = { 0 };
3082 	nvme_create_cq_dw11_t c_dw11 = { 0 };
3083 	int ret;
3084 
3085 	dw10.b.q_qid = cq->ncq_id;
3086 	dw10.b.q_qsize = cq->ncq_nentry - 1;
3087 
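	/*
	 * Request a physically contiguous completion queue with interrupts
	 * enabled, spreading the queues across the available interrupt
	 * vectors.
	 */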
3088 	c_dw11.b.cq_pc = 1;
3089 	c_dw11.b.cq_ien = 1;
3090 	c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt;
3091 
3092 	cmd->nc_sqid = 0;
3093 	cmd->nc_callback = nvme_wakeup_cmd;
3094 	cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
3095 	cmd->nc_sqe.sqe_cdw10 = dw10.r;
3096 	cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
3097 	cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress;
3098 
3099 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3100 
3101 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3102 		dev_err(nvme->n_dip, CE_WARN,
3103 		    "!CREATE CQUEUE failed with sct = %x, sc = %x",
3104 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3105 	}
3106 
3107 	nvme_free_cmd(cmd);
3108 
3109 	return (ret);
3110 }
3111 
3112 static int
3113 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
3114 {
3115 	nvme_cq_t *cq = qp->nq_cq;
3116 	nvme_cmd_t *cmd;
3117 	nvme_create_queue_dw10_t dw10 = { 0 };
3118 	nvme_create_sq_dw11_t s_dw11 = { 0 };
3119 	int ret;
3120 
3121 	/*
3122 	 * It is possible to have more qpairs than completion queues,
3123 	 * and when the idx > ncq_id, that completion queue is shared
3124 	 * and has already been created.
3125 	 */
3126 	if (idx <= cq->ncq_id &&
3127 	    nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS)
3128 		return (DDI_FAILURE);
3129 
3130 	dw10.b.q_qid = idx;
3131 	dw10.b.q_qsize = qp->nq_nentry - 1;
3132 
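	/*
	 * The submission queue is physically contiguous and is bound to the
	 * completion queue created (or shared) above.
	 */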
3133 	s_dw11.b.sq_pc = 1;
3134 	s_dw11.b.sq_cqid = cq->ncq_id;
3135 
3136 	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
3137 	cmd->nc_sqid = 0;
3138 	cmd->nc_callback = nvme_wakeup_cmd;
3139 	cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
3140 	cmd->nc_sqe.sqe_cdw10 = dw10.r;
3141 	cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
3142 	cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
3143 
3144 	nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3145 
3146 	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3147 		dev_err(nvme->n_dip, CE_WARN,
3148 		    "!CREATE SQUEUE failed with sct = %x, sc = %x",
3149 		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3150 	}
3151 
3152 	nvme_free_cmd(cmd);
3153 
3154 	return (ret);
3155 }
3156 
3157 static boolean_t
3158 nvme_reset(nvme_t *nvme, boolean_t quiesce)
3159 {
3160 	nvme_reg_csts_t csts;
3161 	int i;
3162 
3163 	nvme_put32(nvme, NVME_REG_CC, 0);
3164 
3165 	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3166 	if (csts.b.csts_rdy == 1) {
3167 		nvme_put32(nvme, NVME_REG_CC, 0);
3168 
3169 		/*
3170 		 * The timeout value is from the Controller Capabilities
3171 		 * register (CAP.TO, section 3.1.1). This is the worst case
3172 		 * time to wait for CSTS.RDY to transition from 1 to 0 after
3173 		 * CC.EN transitions from 1 to 0.
3174 		 *
3175 		 * The timeout units are in 500 ms units, and we are delaying
3176 		 * in 50ms chunks, hence counting to n_timeout * 10.
3177 		 */
3178 		for (i = 0; i < nvme->n_timeout * 10; i++) {
3179 			csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3180 			if (csts.b.csts_rdy == 0)
3181 				break;
3182 
3183 			/*
3184 			 * Quiescing drivers should not use locks or timeouts,
3185 			 * so if this is the quiesce path, use a quiesce-safe
3186 			 * delay.
3187 			 */
3188 			if (quiesce) {
3189 				drv_usecwait(50000);
3190 			} else {
3191 				delay(drv_usectohz(50000));
3192 			}
3193 		}
3194 	}
3195 
3196 	nvme_put32(nvme, NVME_REG_AQA, 0);
3197 	nvme_put32(nvme, NVME_REG_ASQ, 0);
3198 	nvme_put32(nvme, NVME_REG_ACQ, 0);
3199 
3200 	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3201 	return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
3202 }
3203 
3204 static void
3205 nvme_shutdown(nvme_t *nvme, boolean_t quiesce)
3206 {
3207 	nvme_reg_cc_t cc;
3208 	nvme_reg_csts_t csts;
3209 	int i;
3210 
3211 	cc.r = nvme_get32(nvme, NVME_REG_CC);
3212 	cc.b.cc_shn = NVME_CC_SHN_NORMAL;
3213 	nvme_put32(nvme, NVME_REG_CC, cc.r);
3214 
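	/*
	 * Wait up to one second (10 iterations of 100ms each) for the
	 * shutdown processing to complete, as reported in CSTS.SHST.
	 */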
3215 	for (i = 0; i < 10; i++) {
3216 		csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3217 		if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
3218 			break;
3219 
3220 		if (quiesce) {
3221 			drv_usecwait(100000);
3222 		} else {
3223 			delay(drv_usectohz(100000));
3224 		}
3225 	}
3226 }
3227 
3228 /*
3229  * Return length of string without trailing spaces.
3230  */
3231 static int
3232 nvme_strlen(const char *str, int len)
3233 {
3234 	if (len <= 0)
3235 		return (0);
3236 
3237 	while (str[--len] == ' ')
3238 		;
3239 
3240 	return (++len);
3241 }
3242 
3243 static void
3244 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val)
3245 {
3246 	ulong_t bsize = 0;
3247 	char *msg = "";
3248 
3249 	if (ddi_strtoul(val, NULL, 0, &bsize) != 0)
3250 		goto err;
3251 
3252 	if (!ISP2(bsize)) {
3253 		msg = ": not a power of 2";
3254 		goto err;
3255 	}
3256 
3257 	if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) {
3258 		msg = ": too low";
3259 		goto err;
3260 	}
3261 
3262 	nvme->n_min_block_size = bsize;
3263 	return;
3264 
3265 err:
3266 	dev_err(nvme->n_dip, CE_WARN,
3267 	    "!nvme-config-list: ignoring invalid min-phys-block-size '%s' "
3268 	    "for model '%s'%s", val, model, msg);
3269 
3270 	nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
3271 }
3272 
3273 static void
3274 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val,
3275     boolean_t *b)
3276 {
3277 	if (strcmp(val, "on") == 0 ||
3278 	    strcmp(val, "true") == 0)
3279 		*b = B_TRUE;
3280 	else if (strcmp(val, "off") == 0 ||
3281 	    strcmp(val, "false") == 0)
3282 		*b = B_FALSE;
3283 	else
3284 		dev_err(nvme->n_dip, CE_WARN,
3285 		    "!nvme-config-list: invalid value for %s '%s'"
3286 		    " for model '%s', ignoring", name, val, model);
3287 }
3288 
3289 static void
3290 nvme_config_list(nvme_t *nvme)
3291 {
3292 	char	**config_list;
3293 	uint_t	nelem;
3294 	int	rv, i;
3295 
3296 	/*
3297 	 * We're following the pattern of 'sd-config-list' here, but extend it.
3298 	 * Instead of two we have three separate strings for "model", "fwrev",
3299 	 * and "name-value-list".
3300 	 */
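	/*
	 * A minimal hypothetical example of such a property in nvme.conf;
	 * the model names, firmware revisions, and values below are
	 * illustrative only:
	 *
	 *	nvme-config-list =
	 *	    "ACME NVMe 1000", "FW1.1,FW1.2", "min-phys-block-size:4096",
	 *	    "ACME NVMe 2000", "",            "volatile-write-cache:off";
	 */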
3301 	rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip,
3302 	    DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem);
3303 
3304 	if (rv != DDI_PROP_SUCCESS) {
3305 		if (rv == DDI_PROP_CANNOT_DECODE) {
3306 			dev_err(nvme->n_dip, CE_WARN,
3307 			    "!nvme-config-list: cannot be decoded");
3308 		}
3309 
3310 		return;
3311 	}
3312 
3313 	if ((nelem % 3) != 0) {
3314 		dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be "
3315 		    "triplets of <model>/<fwrev>/<name-value-list> strings ");
3316 		goto out;
3317 	}
3318 
3319 	for (i = 0; i < nelem; i += 3) {
3320 		char	*model = config_list[i];
3321 		char	*fwrev = config_list[i + 1];
3322 		char	*nvp, *save_nv;
3323 		int	id_model_len, id_fwrev_len;
3324 
3325 		id_model_len = nvme_strlen(nvme->n_idctl->id_model,
3326 		    sizeof (nvme->n_idctl->id_model));
3327 
3328 		if (strlen(model) != id_model_len)
3329 			continue;
3330 
3331 		if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0)
3332 			continue;
3333 
3334 		id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev,
3335 		    sizeof (nvme->n_idctl->id_fwrev));
3336 
3337 		if (strlen(fwrev) != 0) {
3338 			boolean_t match = B_FALSE;
3339 			char *fwr, *last_fw;
3340 
3341 			for (fwr = strtok_r(fwrev, ",", &last_fw);
3342 			    fwr != NULL;
3343 			    fwr = strtok_r(NULL, ",", &last_fw)) {
3344 				if (strlen(fwr) != id_fwrev_len)
3345 					continue;
3346 
3347 				if (strncmp(fwr, nvme->n_idctl->id_fwrev,
3348 				    id_fwrev_len) == 0)
3349 					match = B_TRUE;
3350 			}
3351 
3352 			if (!match)
3353 				continue;
3354 		}
3355 
3356 		/*
3357 		 * We should now have a comma-separated list of name:value
3358 		 * pairs.
3359 		 */
3360 		for (nvp = strtok_r(config_list[i + 2], ",", &save_nv);
3361 		    nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) {
3362 			char	*name = nvp;
3363 			char	*val = strchr(nvp, ':');
3364 
3365 			if (val == NULL || name == val) {
3366 				dev_err(nvme->n_dip, CE_WARN,
3367 				    "!nvme-config-list: <name-value-list> "
3368 				    "for model '%s' is malformed", model);
3369 				goto out;
3370 			}
3371 
3372 			/*
3373 			 * Null-terminate 'name', move 'val' past ':' sep.
3374 			 */
3375 			*val++ = '\0';
3376 
3377 			/*
3378 			 * Process the name:val pairs that we know about.
3379 			 */
3380 			if (strcmp(name, "ignore-unknown-vendor-status") == 0) {
3381 				nvme_config_boolean(nvme, model, name, val,
3382 				    &nvme->n_ignore_unknown_vendor_status);
3383 			} else if (strcmp(name, "min-phys-block-size") == 0) {
3384 				nvme_config_min_block_size(nvme, model, val);
3385 			} else if (strcmp(name, "volatile-write-cache") == 0) {
3386 				nvme_config_boolean(nvme, model, name, val,
3387 				    &nvme->n_write_cache_enabled);
3388 			} else {
3389 				/*
3390 				 * Unknown 'name'.
3391 				 */
3392 				dev_err(nvme->n_dip, CE_WARN,
3393 				    "!nvme-config-list: unknown config '%s' "
3394 				    "for model '%s', ignoring", name, model);
3395 			}
3396 		}
3397 	}
3398 
3399 out:
3400 	ddi_prop_free(config_list);
3401 }
3402 
3403 static void
3404 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
3405 {
3406 	/*
3407 	 * Section 7.7 of the spec describes how to get a unique ID for
3408 	 * the controller: the vendor ID, the model name and the serial
3409 	 * number shall be unique when combined.
3410 	 *
3411 	 * If a namespace has no EUI64 we use the above and add the hex
3412 	 * namespace ID to get a unique ID for the namespace.
3413 	 */
3414 	char model[sizeof (nvme->n_idctl->id_model) + 1];
3415 	char serial[sizeof (nvme->n_idctl->id_serial) + 1];
3416 
3417 	bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
3418 	bcopy(nvme->n_idctl->id_serial, serial,
3419 	    sizeof (nvme->n_idctl->id_serial));
3420 
3421 	model[sizeof (nvme->n_idctl->id_model)] = '\0';
3422 	serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
3423 
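	/*
	 * The resulting devid has the form <vid>-<model>-<serial>-<nsid>,
	 * with the vendor ID and namespace ID rendered in hexadecimal.
	 */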
3424 	nvme_nsid2ns(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X",
3425 	    nvme->n_idctl->id_vid, model, serial, nsid);
3426 }
3427 
3428 static nvme_identify_nsid_list_t *
3429 nvme_update_nsid_list(nvme_t *nvme, int cns)
3430 {
3431 	nvme_identify_nsid_list_t *nslist;
3432 
3433 	/*
3434 	 * We currently don't handle cases where there are more than
3435 	 * 1024 active namespaces, requiring several IDENTIFY commands.
3436 	 */
3437 	if (nvme_identify_int(nvme, 0, cns, (void **)&nslist))
3438 		return (nslist);
3439 
3440 	return (NULL);
3441 }
3442 
3443 nvme_namespace_t *
3444 nvme_nsid2ns(nvme_t *nvme, uint32_t nsid)
3445 {
3446 	ASSERT3U(nsid, !=, 0);
3447 	ASSERT3U(nsid, <=, nvme->n_namespace_count);
3448 	return (&nvme->n_ns[nsid - 1]);
3449 }
3450 
3451 static boolean_t
3452 nvme_allocated_ns(nvme_namespace_t *ns)
3453 {
3454 	nvme_t *nvme = ns->ns_nvme;
3455 	uint32_t i;
3456 
3457 	ASSERT(nvme_mgmt_lock_held(nvme));
3458 
3459 	/*
3460 	 * If supported, update the list of allocated namespace IDs.
3461 	 */
3462 	if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) &&
3463 	    nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
3464 		nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
3465 		    NVME_IDENTIFY_NSID_ALLOC_LIST);
3466 		boolean_t found = B_FALSE;
3467 
3468 		/*
3469 		 * When namespace management is supported, this really shouldn't
3470 		 * be NULL. Treat all namespaces as allocated if it is.
3471 		 */
3472 		if (nslist == NULL)
3473 			return (B_TRUE);
3474 
3475 		for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
3476 			if (ns->ns_id == 0)
3477 				break;
3478 
3479 			if (ns->ns_id == nslist->nl_nsid[i])
3480 				found = B_TRUE;
3481 		}
3482 
3483 		kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
3484 		return (found);
3485 	} else {
3486 		/*
3487 		 * If namespace management isn't supported, report all
3488 		 * namespaces as allocated.
3489 		 */
3490 		return (B_TRUE);
3491 	}
3492 }
3493 
3494 static boolean_t
3495 nvme_active_ns(nvme_namespace_t *ns)
3496 {
3497 	nvme_t *nvme = ns->ns_nvme;
3498 	uint64_t *ptr;
3499 	uint32_t i;
3500 
3501 	ASSERT(nvme_mgmt_lock_held(nvme));
3502 
3503 	/*
3504 	 * If supported, update the list of active namespace IDs.
3505 	 */
3506 	if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) {
3507 		nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
3508 		    NVME_IDENTIFY_NSID_LIST);
3509 		boolean_t found = B_FALSE;
3510 
3511 		/*
3512 		 * When namespace management is supported, this really shouldn't
3513 		 * be NULL. Treat all namespaces as active if it is.
3514 		 */
3515 		if (nslist == NULL)
3516 			return (B_TRUE);
3517 
3518 		for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
3519 			if (ns->ns_id == 0)
3520 				break;
3521 
3522 			if (ns->ns_id == nslist->nl_nsid[i])
3523 				found = B_TRUE;
3524 		}
3525 
3526 		kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
3527 		return (found);
3528 	}
3529 
3530 	/*
3531 	 * Workaround for revision 1.0:
3532 	 * Check whether the IDENTIFY NAMESPACE data is zero-filled.
3533 	 */
3534 	for (ptr = (uint64_t *)ns->ns_idns;
3535 	    ptr != (uint64_t *)(ns->ns_idns + 1);
3536 	    ptr++) {
3537 		if (*ptr != 0) {
3538 			return (B_TRUE);
3539 		}
3540 	}
3541 
3542 	return (B_FALSE);
3543 }
3544 
3545 static int
3546 nvme_init_ns(nvme_t *nvme, uint32_t nsid)
3547 {
3548 	nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
3549 	nvme_identify_nsid_t *idns;
3550 	boolean_t was_ignored;
3551 	int last_rp;
3552 
3553 	ns->ns_nvme = nvme;
3554 
3555 	ASSERT(nvme_mgmt_lock_held(nvme));
3556 
3557 	/*
3558 	 * We might rescan a namespace after boot, and a failure to identify
3559 	 * it at that point would leave us in a bad spot. We need to do
3560 	 * something about this longer term, but it's not clear how exactly
3561 	 * we would recover right now.
3562 	 */
3563 	if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
3564 	    (void **)&idns)) {
3565 		dev_err(nvme->n_dip, CE_WARN,
3566 		    "!failed to identify namespace %d", nsid);
3567 		return (DDI_FAILURE);
3568 	}
3569 
3570 	if (ns->ns_idns != NULL)
3571 		kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t));
3572 
3573 	ns->ns_idns = idns;
3574 	ns->ns_id = nsid;
3575 
3576 	was_ignored = ns->ns_ignore;
3577 
3578 	ns->ns_allocated = nvme_allocated_ns(ns);
3579 	ns->ns_active = nvme_active_ns(ns);
3580 
3581 	ns->ns_block_count = idns->id_nsize;
3582 	ns->ns_block_size =
3583 	    1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
3584 	ns->ns_best_block_size = ns->ns_block_size;
3585 
3586 	/*
3587 	 * Get the EUI64 if present.
3588 	 */
3589 	if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
3590 		bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64));
3591 
3592 	/*
3593 	 * Get the NGUID if present.
3594 	 */
3595 	if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2))
3596 		bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid));
3597 
3598 	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
3599 	if (*(uint64_t *)ns->ns_eui64 == 0)
3600 		nvme_prepare_devid(nvme, ns->ns_id);
3601 
3602 	(void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%u", ns->ns_id);
3603 
3604 	/*
3605 	 * Find the LBA format with no metadata and the best relative
3606 	 * performance. A value of 3 means "degraded", 0 is best.
3607 	 */
3608 	last_rp = 3;
3609 	for (int j = 0; j <= idns->id_nlbaf; j++) {
3610 		if (idns->id_lbaf[j].lbaf_lbads == 0)
3611 			break;
3612 		if (idns->id_lbaf[j].lbaf_ms != 0)
3613 			continue;
3614 		if (idns->id_lbaf[j].lbaf_rp >= last_rp)
3615 			continue;
3616 		last_rp = idns->id_lbaf[j].lbaf_rp;
3617 		ns->ns_best_block_size =
3618 		    1 << idns->id_lbaf[j].lbaf_lbads;
3619 	}
3620 
3621 	if (ns->ns_best_block_size < nvme->n_min_block_size)
3622 		ns->ns_best_block_size = nvme->n_min_block_size;
3623 
3624 	was_ignored = ns->ns_ignore;
3625 
3626 	/*
3627 	 * We currently don't support namespaces that are inactive, or use
3628 	 * either:
3629 	 * - protection information
3630 	 * - illegal block size (< 512)
3631 	 */
3632 	if (!ns->ns_active) {
3633 		ns->ns_ignore = B_TRUE;
3634 	} else if (idns->id_dps.dp_pinfo) {
3635 		dev_err(nvme->n_dip, CE_WARN,
3636 		    "!ignoring namespace %d, unsupported feature: "
3637 		    "pinfo = %d", nsid, idns->id_dps.dp_pinfo);
3638 		ns->ns_ignore = B_TRUE;
3639 	} else if (ns->ns_block_size < 512) {
3640 		dev_err(nvme->n_dip, CE_WARN,
3641 		    "!ignoring namespace %d, unsupported block size %"PRIu64,
3642 		    nsid, (uint64_t)ns->ns_block_size);
3643 		ns->ns_ignore = B_TRUE;
3644 	} else {
3645 		ns->ns_ignore = B_FALSE;
3646 	}
3647 
3648 	/*
3649 	 * Keep a count of namespaces which are attachable.
3650 	 * See comments in nvme_bd_driveinfo() to understand its effect.
3651 	 */
3652 	if (was_ignored) {
3653 		/*
3654 		 * Previously ignored, but now not. Count it.
3655 		 */
3656 		if (!ns->ns_ignore)
3657 			nvme->n_namespaces_attachable++;
3658 	} else {
3659 		/*
3660 		 * Wasn't ignored previously, but now needs to be.
3661 		 * Discount it.
3662 		 */
3663 		if (ns->ns_ignore)
3664 			nvme->n_namespaces_attachable--;
3665 	}
3666 
3667 	return (DDI_SUCCESS);
3668 }
3669 
3670 static boolean_t
3671 nvme_attach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
3672 {
3673 	nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
3674 	int ret;
3675 
3676 	ASSERT(nvme_mgmt_lock_held(nvme));
3677 
3678 	if (ns->ns_ignore) {
3679 		return (nvme_ioctl_error(com, NVME_IOCTL_E_UNSUP_ATTACH_NS,
3680 		    0, 0));
3681 	}
3682 
3683 	if (ns->ns_bd_hdl == NULL) {
3684 		bd_ops_t ops = nvme_bd_ops;
3685 
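		/*
		 * blkdev's free-space (TRIM/UNMAP) entry point is only wired
		 * up when the controller implements Dataset Management.
		 */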
3686 		if (!nvme->n_idctl->id_oncs.on_dset_mgmt)
3687 			ops.o_free_space = NULL;
3688 
3689 		ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr,
3690 		    KM_SLEEP);
3691 
3692 		if (ns->ns_bd_hdl == NULL) {
3693 			dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev "
3694 			    "handle for namespace id %u", com->nioc_nsid);
3695 			return (nvme_ioctl_error(com,
3696 			    NVME_IOCTL_E_BLKDEV_ATTACH, 0, 0));
3697 		}
3698 	}
3699 
3700 	nvme_mgmt_bd_start(nvme);
3701 	ret = bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl);
3702 	nvme_mgmt_bd_end(nvme);
3703 	if (ret != DDI_SUCCESS) {
3704 		return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_ATTACH,
3705 		    0, 0));
3706 	}
3707 
3708 	ns->ns_attached = B_TRUE;
3709 
3710 	return (B_TRUE);
3711 }
3712 
3713 static boolean_t
3714 nvme_detach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
3715 {
3716 	nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
3717 	int ret;
3718 
3719 	ASSERT(nvme_mgmt_lock_held(nvme));
3720 
3721 	if (ns->ns_ignore || !ns->ns_attached)
3722 		return (B_TRUE);
3723 
3724 	nvme_mgmt_bd_start(nvme);
3725 	ASSERT3P(ns->ns_bd_hdl, !=, NULL);
3726 	ret = bd_detach_handle(ns->ns_bd_hdl);
3727 	nvme_mgmt_bd_end(nvme);
3728 
3729 	if (ret != DDI_SUCCESS) {
3730 		return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_DETACH, 0,
3731 		    0));
3732 	}
3733 
3734 	ns->ns_attached = B_FALSE;
3735 	return (B_TRUE);
3736 
3737 }
3738 
3739 /*
3740  * Rescan the namespace information associated with the namespace(s)
3741  * indicated by nsid. They should not be attached to blkdev right now.
3742  */
3743 static void
3744 nvme_rescan_ns(nvme_t *nvme, uint32_t nsid)
3745 {
3746 	ASSERT(nvme_mgmt_lock_held(nvme));
3747 	ASSERT3U(nsid, !=, 0);
3748 
3749 	if (nsid != NVME_NSID_BCAST) {
3750 		nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
3751 
3752 		ASSERT3U(ns->ns_attached, ==, B_FALSE);
3753 		(void) nvme_init_ns(nvme, nsid);
3754 		return;
3755 	}
3756 
3757 	for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
3758 		nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
3759 
3760 		ASSERT3U(ns->ns_attached, ==, B_FALSE);
3761 		(void) nvme_init_ns(nvme, i);
3762 	}
3763 }
3764 
3765 typedef struct nvme_quirk_table {
3766 	uint16_t nq_vendor_id;
3767 	uint16_t nq_device_id;
3768 	nvme_quirk_t nq_quirks;
3769 } nvme_quirk_table_t;
3770 
3771 static const nvme_quirk_table_t nvme_quirks[] = {
3772 	{ 0x1987, 0x5018, NVME_QUIRK_START_CID },	/* Phison E18 */
3773 };
3774 
3775 static void
3776 nvme_detect_quirks(nvme_t *nvme)
3777 {
3778 	for (uint_t i = 0; i < ARRAY_SIZE(nvme_quirks); i++) {
3779 		const nvme_quirk_table_t *nqt = &nvme_quirks[i];
3780 
3781 		if (nqt->nq_vendor_id == nvme->n_vendor_id &&
3782 		    nqt->nq_device_id == nvme->n_device_id) {
3783 			nvme->n_quirks = nqt->nq_quirks;
3784 			return;
3785 		}
3786 	}
3787 }
3788 
3789 static int
3790 nvme_init(nvme_t *nvme)
3791 {
3792 	nvme_reg_cc_t cc = { 0 };
3793 	nvme_reg_aqa_t aqa = { 0 };
3794 	nvme_reg_asq_t asq = { 0 };
3795 	nvme_reg_acq_t acq = { 0 };
3796 	nvme_reg_cap_t cap;
3797 	nvme_reg_vs_t vs;
3798 	nvme_reg_csts_t csts;
3799 	int i = 0;
3800 	uint16_t nqueues;
3801 	uint_t tq_threads;
3802 	char model[sizeof (nvme->n_idctl->id_model) + 1];
3803 	char *vendor, *product;
3804 	uint32_t nsid;
3805 
3806 	/* Check controller version */
3807 	vs.r = nvme_get32(nvme, NVME_REG_VS);
3808 	nvme->n_version.v_major = vs.b.vs_mjr;
3809 	nvme->n_version.v_minor = vs.b.vs_mnr;
3810 	dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d",
3811 	    nvme->n_version.v_major, nvme->n_version.v_minor);
3812 
3813 	if (nvme->n_version.v_major > nvme_version_major) {
3814 		dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x",
3815 		    nvme_version_major);
3816 		if (nvme->n_strict_version)
3817 			goto fail;
3818 	}
3819 
3820 	/* retrieve controller configuration */
3821 	cap.r = nvme_get64(nvme, NVME_REG_CAP);
3822 
3823 	if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
3824 		dev_err(nvme->n_dip, CE_WARN,
3825 		    "!NVM command set not supported by hardware");
3826 		goto fail;
3827 	}
3828 
3829 	nvme->n_nssr_supported = cap.b.cap_nssrs;
3830 	nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
3831 	nvme->n_timeout = cap.b.cap_to;
3832 	nvme->n_arbitration_mechanisms = cap.b.cap_ams;
3833 	nvme->n_cont_queues_reqd = cap.b.cap_cqr;
3834 	nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
3835 
3836 	/*
3837 	 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
3838 	 * the base page size of 4k (1<<12), so add 12 here to get the real
3839 	 * page size value.
3840 	 */
3841 	nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
3842 	    cap.b.cap_mpsmax + 12);
3843 	nvme->n_pagesize = 1UL << (nvme->n_pageshift);
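	/*
	 * For example, a controller reporting MPSMIN = 0 on a platform with
	 * a 4k native page size ends up with n_pageshift = 12 and a 4k
	 * n_pagesize.
	 */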
3844 
3845 	/*
3846 	 * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
3847 	 */
3848 	nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
3849 	nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
3850 
3851 	/*
3852 	 * Set up PRP DMA to transfer 1 page-aligned page at a time.
3853 	 * Maxxfer may be increased after we have identified the controller limits.
3854 	 */
3855 	nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
3856 	nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
3857 	nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
3858 	nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
3859 
3860 	/*
3861 	 * Reset controller if it's still in ready state.
3862 	 */
3863 	if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
3864 		dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
3865 		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
3866 		nvme->n_dead = B_TRUE;
3867 		goto fail;
3868 	}
3869 
3870 	/*
3871 	 * Create the cq array with one completion queue to be assigned
3872 	 * to the admin queue pair and a limited number of taskqs (4).
3873 	 */
3874 	if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) !=
3875 	    DDI_SUCCESS) {
3876 		dev_err(nvme->n_dip, CE_WARN,
3877 		    "!failed to pre-allocate admin completion queue");
3878 		goto fail;
3879 	}
3880 	/*
3881 	 * Create the admin queue pair.
3882 	 */
3883 	if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
3884 	    != DDI_SUCCESS) {
3885 		dev_err(nvme->n_dip, CE_WARN,
3886 		    "!unable to allocate admin qpair");
3887 		goto fail;
3888 	}
3889 	nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
3890 	nvme->n_ioq[0] = nvme->n_adminq;
3891 
3892 	if (nvme->n_quirks & NVME_QUIRK_START_CID)
3893 		nvme->n_adminq->nq_next_cmd++;
3894 
3895 	nvme->n_progress |= NVME_ADMIN_QUEUE;
3896 
3897 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
3898 	    "admin-queue-len", nvme->n_admin_queue_len);
3899 
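	/*
	 * Program the admin queue attributes and the base addresses of the
	 * admin submission and completion queues; both addresses must be
	 * page-aligned, which is asserted below.
	 */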
3900 	aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
3901 	asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
3902 	acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress;
3903 
3904 	ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
3905 	ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
3906 
3907 	nvme_put32(nvme, NVME_REG_AQA, aqa.r);
3908 	nvme_put64(nvme, NVME_REG_ASQ, asq);
3909 	nvme_put64(nvme, NVME_REG_ACQ, acq);
3910 
3911 	cc.b.cc_ams = 0;	/* use Round-Robin arbitration */
3912 	cc.b.cc_css = 0;	/* use NVM command set */
3913 	cc.b.cc_mps = nvme->n_pageshift - 12;
3914 	cc.b.cc_shn = 0;	/* no shutdown in progress */
3915 	cc.b.cc_en = 1;		/* enable controller */
3916 	cc.b.cc_iosqes = 6;	/* submission queue entry is 2^6 bytes long */
3917 	cc.b.cc_iocqes = 4;	/* completion queue entry is 2^4 bytes long */
3918 
3919 	nvme_put32(nvme, NVME_REG_CC, cc.r);
3920 
3921 	/*
3922 	 * Wait for the controller to become ready.
3923 	 */
3924 	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3925 	if (csts.b.csts_rdy == 0) {
3926 		for (i = 0; i != nvme->n_timeout * 10; i++) {
3927 			delay(drv_usectohz(50000));
3928 			csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3929 
3930 			if (csts.b.csts_cfs == 1) {
3931 				dev_err(nvme->n_dip, CE_WARN,
3932 				    "!controller fatal status at init");
3933 				ddi_fm_service_impact(nvme->n_dip,
3934 				    DDI_SERVICE_LOST);
3935 				nvme->n_dead = B_TRUE;
3936 				goto fail;
3937 			}
3938 
3939 			if (csts.b.csts_rdy == 1)
3940 				break;
3941 		}
3942 	}
3943 
3944 	if (csts.b.csts_rdy == 0) {
3945 		dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
3946 		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
3947 		nvme->n_dead = B_TRUE;
3948 		goto fail;
3949 	}
3950 
3951 	/*
3952 	 * Assume an abort command limit of 1. We'll destroy and re-init
3953 	 * that later when we know the true abort command limit.
3954 	 */
3955 	sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
3956 
3957 	/*
3958 	 * Set up initial interrupt for admin queue.
3959 	 */
3960 	if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
3961 	    != DDI_SUCCESS) &&
3962 	    (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
3963 	    != DDI_SUCCESS) &&
3964 	    (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
3965 	    != DDI_SUCCESS)) {
3966 		dev_err(nvme->n_dip, CE_WARN,
3967 		    "!failed to set up initial interrupt");
3968 		goto fail;
3969 	}
3970 
3971 	/*
3972 	 * Post an asynchronous event command to catch errors.
3973 	 * We assume the asynchronous events are supported as required by
3974 	 * specification (Figure 40 in section 5 of NVMe 1.2).
3975 	 * However, since at least qemu does not follow the specification,
3976 	 * we need a mechanism to protect ourselves.
3977 	 */
3978 	nvme->n_async_event_supported = B_TRUE;
3979 	nvme_async_event(nvme);
3980 
3981 	/*
3982 	 * Identify Controller
3983 	 */
3984 	if (!nvme_identify_int(nvme, 0, NVME_IDENTIFY_CTRL,
3985 	    (void **)&nvme->n_idctl)) {
3986 		dev_err(nvme->n_dip, CE_WARN, "!failed to identify controller");
3987 		goto fail;
3988 	}
3989 
3990 	/*
3991 	 * Get the common namespace information if available. If not, we use the
3992 	 * information for nsid 1.
3993 	 */
3994 	if (nvme_ctrl_atleast(nvme, &nvme_vers_1v2) &&
3995 	    nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
3996 		nsid = NVME_NSID_BCAST;
3997 	} else {
3998 		nsid = 1;
3999 	}
4000 
4001 	if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
4002 	    (void **)&nvme->n_idcomns)) {
4003 		dev_err(nvme->n_dip, CE_WARN, "!failed to identify common "
4004 		    "namespace information");
4005 		goto fail;
4006 	}
4007 	/*
4008 	 * Process nvme-config-list (if present) in nvme.conf.
4009 	 */
4010 	nvme_config_list(nvme);
4011 
4012 	/*
4013 	 * Get Vendor & Product ID
4014 	 */
4015 	bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
4016 	model[sizeof (nvme->n_idctl->id_model)] = '\0';
4017 	sata_split_model(model, &vendor, &product);
4018 
4019 	if (vendor == NULL)
4020 		nvme->n_vendor = strdup("NVMe");
4021 	else
4022 		nvme->n_vendor = strdup(vendor);
4023 
4024 	nvme->n_product = strdup(product);
4025 
4026 	/*
4027 	 * Get controller limits.
4028 	 */
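	/*
	 * Clamp the async event limit to what the controller advertises
	 * (AERL is a zero-based value) and to a tenth of the admin queue
	 * length, so that outstanding async event commands don't consume
	 * too much of the admin queue.
	 */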
4029 	nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
4030 	    MIN(nvme->n_admin_queue_len / 10,
4031 	    MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
4032 
4033 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4034 	    "async-event-limit", nvme->n_async_event_limit);
4035 
4036 	nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
4037 
4038 	/*
4039 	 * Reinitialize the semaphore with the true abort command limit
4040 	 * supported by the hardware. It's not necessary to disable interrupts
4041 	 * as only command aborts use the semaphore, and no commands are
4042 	 * executed or aborted while we're here.
4043 	 */
4044 	sema_destroy(&nvme->n_abort_sema);
4045 	sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
4046 	    SEMA_DRIVER, NULL);
4047 
4048 	nvme->n_progress |= NVME_CTRL_LIMITS;
4049 
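	/*
	 * MDTS is expressed as a power-of-two multiple of the minimum memory
	 * page size; a value of zero means no limit is reported, which we
	 * cap at 64k pages here.
	 */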
4050 	if (nvme->n_idctl->id_mdts == 0)
4051 		nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
4052 	else
4053 		nvme->n_max_data_transfer_size =
4054 		    1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
4055 
4056 	nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
4057 
4058 	/*
4059 	 * Limit n_max_data_transfer_size to what we can handle in one PRP.
4060 	 * Chained PRPs are currently unsupported.
4061 	 *
4062 	 * This is a no-op on hardware which doesn't support a transfer size
4063 	 * big enough to require chained PRPs.
4064 	 */
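	/*
	 * For example, with 4k pages a single PRP list holds 512 entries,
	 * which limits a single transfer to 2MB.
	 */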
4065 	nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
4066 	    (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
4067 
4068 	nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
4069 
4070 	/*
4071 	 * Make sure the minimum/maximum queue entry sizes are not
4072 	 * larger/smaller than the default.
4073 	 */
4074 
4075 	if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
4076 	    ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
4077 	    ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
4078 	    ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
4079 		goto fail;
4080 
4081 	/*
4082 	 * Check for the presence of a Volatile Write Cache. If present,
4083 	 * enable or disable based on the value of the property
4084 	 * volatile-write-cache-enable (default is enabled).
4085 	 */
4086 	nvme->n_write_cache_present =
4087 	    nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
4088 
4089 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4090 	    "volatile-write-cache-present",
4091 	    nvme->n_write_cache_present ? 1 : 0);
4092 
4093 	if (!nvme->n_write_cache_present) {
4094 		nvme->n_write_cache_enabled = B_FALSE;
4095 	} else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)
4096 	    != 0) {
4097 		dev_err(nvme->n_dip, CE_WARN,
4098 		    "!failed to %sable volatile write cache",
4099 		    nvme->n_write_cache_enabled ? "en" : "dis");
4100 		/*
4101 		 * Assume the cache is (still) enabled.
4102 		 */
4103 		nvme->n_write_cache_enabled = B_TRUE;
4104 	}
4105 
4106 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4107 	    "volatile-write-cache-enable",
4108 	    nvme->n_write_cache_enabled ? 1 : 0);
4109 
4110 	/*
4111 	 * Get number of supported namespaces and allocate namespace array.
4112 	 */
4113 	nvme->n_namespace_count = nvme->n_idctl->id_nn;
4114 
4115 	if (nvme->n_namespace_count == 0) {
4116 		dev_err(nvme->n_dip, CE_WARN,
4117 		    "!controllers without namespaces are not supported");
4118 		goto fail;
4119 	}
4120 
4121 	if (nvme->n_namespace_count > NVME_MINOR_MAX) {
4122 		dev_err(nvme->n_dip, CE_WARN,
4123 		    "!too many namespaces: %d, limiting to %d\n",
4124 		    nvme->n_namespace_count, NVME_MINOR_MAX);
4125 		nvme->n_namespace_count = NVME_MINOR_MAX;
4126 	}
4127 
4128 	nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
4129 	    nvme->n_namespace_count, KM_SLEEP);
4130 
4131 	/*
4132 	 * Try to set up MSI/MSI-X interrupts.
4133 	 */
4134 	if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
4135 	    != 0) {
4136 		nvme_release_interrupts(nvme);
4137 
4138 		nqueues = MIN(UINT16_MAX, ncpus);
4139 
4140 		if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
4141 		    nqueues) != DDI_SUCCESS) &&
4142 		    (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
4143 		    nqueues) != DDI_SUCCESS)) {
4144 			dev_err(nvme->n_dip, CE_WARN,
4145 			    "!failed to set up MSI/MSI-X interrupts");
4146 			goto fail;
4147 		}
4148 	}
4149 
4150 	/*
4151 	 * Create I/O queue pairs.
4152 	 */
4153 
4154 	if (nvme_set_nqueues(nvme) != 0) {
4155 		dev_err(nvme->n_dip, CE_WARN,
4156 		    "!failed to set number of I/O queues to %d",
4157 		    nvme->n_intr_cnt);
4158 		goto fail;
4159 	}
4160 
4161 	/*
4162 	 * Reallocate I/O queue array
4163 	 */
4164 	kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
4165 	nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
4166 	    (nvme->n_submission_queues + 1), KM_SLEEP);
4167 	nvme->n_ioq[0] = nvme->n_adminq;
4168 
4169 	/*
4170 	 * There should always be at least as many submission queues
4171 	 * as completion queues.
4172 	 */
4173 	ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues);
4174 
4175 	nvme->n_ioq_count = nvme->n_submission_queues;
4176 
4177 	nvme->n_io_squeue_len =
4178 	    MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries);
4179 
4180 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len",
4181 	    nvme->n_io_squeue_len);
4182 
4183 	/*
4184 	 * Pre-allocate completion queues.
4185 	 * When there are the same number of submission and completion
4186 	 * queues there is no value in having a larger completion
4187 	 * queue length.
4188 	 */
4189 	if (nvme->n_submission_queues == nvme->n_completion_queues)
4190 		nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4191 		    nvme->n_io_squeue_len);
4192 
4193 	nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4194 	    nvme->n_max_queue_entries);
4195 
4196 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
4197 	    nvme->n_io_cqueue_len);
4198 
4199 	/*
4200 	 * Assign an equal number of taskq threads to each completion
4201 	 * queue, capping the total number of threads at the number
4202 	 * of CPUs.
4203 	 */
4204 	tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues;
4205 
4206 	/*
4207 	 * In case the calculation above is zero, we need at least one
4208 	 * thread per completion queue.
4209 	 */
4210 	tq_threads = MAX(1, tq_threads);
4211 
4212 	if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
4213 	    nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
4214 		dev_err(nvme->n_dip, CE_WARN,
4215 		    "!failed to pre-allocate completion queues");
4216 		goto fail;
4217 	}
4218 
4219 	/*
4220 	 * If we use fewer completion queues than interrupt vectors, return
4221 	 * some of the interrupt vectors back to the system.
4222 	 */
4223 	if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
4224 		nvme_release_interrupts(nvme);
4225 
4226 		if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
4227 		    nvme->n_completion_queues + 1) != DDI_SUCCESS) {
4228 			dev_err(nvme->n_dip, CE_WARN,
4229 			    "!failed to reduce number of interrupts");
4230 			goto fail;
4231 		}
4232 	}
4233 
4234 	/*
4235 	 * Alloc & register I/O queue pairs
4236 	 */
4237 
4238 	for (i = 1; i != nvme->n_ioq_count + 1; i++) {
4239 		if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
4240 		    &nvme->n_ioq[i], i) != DDI_SUCCESS) {
4241 			dev_err(nvme->n_dip, CE_WARN,
4242 			    "!unable to allocate I/O qpair %d", i);
4243 			goto fail;
4244 		}
4245 
4246 		if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
4247 			dev_err(nvme->n_dip, CE_WARN,
4248 			    "!unable to create I/O qpair %d", i);
4249 			goto fail;
4250 		}
4251 	}
4252 
4253 	/*
4254 	 * Post more asynchronous events commands to reduce event reporting
4255 	 * latency as suggested by the spec.
4256 	 */
4257 	if (nvme->n_async_event_supported) {
4258 		for (i = 1; i != nvme->n_async_event_limit; i++)
4259 			nvme_async_event(nvme);
4260 	}
4261 
4262 	return (DDI_SUCCESS);
4263 
4264 fail:
4265 	(void) nvme_reset(nvme, B_FALSE);
4266 	return (DDI_FAILURE);
4267 }
4268 
4269 static uint_t
4270 nvme_intr(caddr_t arg1, caddr_t arg2)
4271 {
4272 	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
4273 	nvme_t *nvme = (nvme_t *)arg1;
4274 	int inum = (int)(uintptr_t)arg2;
4275 	int ccnt = 0;
4276 	int qnum;
4277 
4278 	if (inum >= nvme->n_intr_cnt)
4279 		return (DDI_INTR_UNCLAIMED);
4280 
4281 	if (nvme->n_dead)
4282 		return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
4283 		    DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
4284 
4285 	/*
4286 	 * The interrupt vector a queue uses is calculated as queue_idx %
4287 	 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
4288 	 * in steps of n_intr_cnt to process all queues using this vector.
4289 	 */
4290 	for (qnum = inum;
4291 	    qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
4292 	    qnum += nvme->n_intr_cnt) {
4293 		ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
4294 	}
4295 
4296 	return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
4297 }
4298 
4299 static void
4300 nvme_release_interrupts(nvme_t *nvme)
4301 {
4302 	int i;
4303 
4304 	for (i = 0; i < nvme->n_intr_cnt; i++) {
4305 		if (nvme->n_inth[i] == NULL)
4306 			break;
4307 
4308 		if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4309 			(void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
4310 		else
4311 			(void) ddi_intr_disable(nvme->n_inth[i]);
4312 
4313 		(void) ddi_intr_remove_handler(nvme->n_inth[i]);
4314 		(void) ddi_intr_free(nvme->n_inth[i]);
4315 	}
4316 
4317 	kmem_free(nvme->n_inth, nvme->n_inth_sz);
4318 	nvme->n_inth = NULL;
4319 	nvme->n_inth_sz = 0;
4320 
4321 	nvme->n_progress &= ~NVME_INTERRUPTS;
4322 }
4323 
4324 static int
4325 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
4326 {
4327 	int nintrs, navail, count;
4328 	int ret;
4329 	int i;
4330 
4331 	if (nvme->n_intr_types == 0) {
4332 		ret = ddi_intr_get_supported_types(nvme->n_dip,
4333 		    &nvme->n_intr_types);
4334 		if (ret != DDI_SUCCESS) {
4335 			dev_err(nvme->n_dip, CE_WARN,
4336 			    "!%s: ddi_intr_get_supported types failed",
4337 			    __func__);
4338 			return (ret);
4339 		}
4340 #ifdef __x86
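		/*
		 * MSI-X has been observed to be unreliable with at least some
		 * versions of VMware's emulated NVMe device, so don't use it
		 * there.
		 */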
4341 		if (get_hwenv() == HW_VMWARE)
4342 			nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
4343 #endif
4344 	}
4345 
4346 	if ((nvme->n_intr_types & intr_type) == 0)
4347 		return (DDI_FAILURE);
4348 
4349 	ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
4350 	if (ret != DDI_SUCCESS) {
4351 		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
4352 		    __func__);
4353 		return (ret);
4354 	}
4355 
4356 	ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
4357 	if (ret != DDI_SUCCESS) {
4358 		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
4359 		    __func__);
4360 		return (ret);
4361 	}
4362 
4363 	/* We want at most one interrupt per queue pair. */
4364 	if (navail > nqpairs)
4365 		navail = nqpairs;
4366 
4367 	nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
4368 	nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
4369 
4370 	ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
4371 	    &count, 0);
4372 	if (ret != DDI_SUCCESS) {
4373 		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
4374 		    __func__);
4375 		goto fail;
4376 	}
4377 
4378 	nvme->n_intr_cnt = count;
4379 
4380 	ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
4381 	if (ret != DDI_SUCCESS) {
4382 		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
4383 		    __func__);
4384 		goto fail;
4385 	}
4386 
4387 	for (i = 0; i < count; i++) {
4388 		ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
4389 		    (void *)nvme, (void *)(uintptr_t)i);
4390 		if (ret != DDI_SUCCESS) {
4391 			dev_err(nvme->n_dip, CE_WARN,
4392 			    "!%s: ddi_intr_add_handler failed", __func__);
4393 			goto fail;
4394 		}
4395 	}
4396 
4397 	(void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
4398 
4399 	for (i = 0; i < count; i++) {
4400 		if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4401 			ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
4402 		else
4403 			ret = ddi_intr_enable(nvme->n_inth[i]);
4404 
4405 		if (ret != DDI_SUCCESS) {
4406 			dev_err(nvme->n_dip, CE_WARN,
4407 			    "!%s: enabling interrupt %d failed", __func__, i);
4408 			goto fail;
4409 		}
4410 	}
4411 
4412 	nvme->n_intr_type = intr_type;
4413 
4414 	nvme->n_progress |= NVME_INTERRUPTS;
4415 
4416 	return (DDI_SUCCESS);
4417 
4418 fail:
4419 	nvme_release_interrupts(nvme);
4420 
4421 	return (ret);
4422 }
4423 
4424 static int
4425 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
4426 {
4427 	_NOTE(ARGUNUSED(arg));
4428 
4429 	pci_ereport_post(dip, fm_error, NULL);
4430 	return (fm_error->fme_status);
4431 }
4432 
4433 static void
4434 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a,
4435     void *b)
4436 {
4437 	nvme_t *nvme = a;
4438 
4439 	nvme_ctrl_mark_dead(nvme, B_TRUE);
4440 
4441 	/*
4442 	 * Fail all outstanding commands, including those in the admin queue
4443 	 * (queue 0).
4444 	 */
4445 	for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) {
4446 		nvme_qpair_t *qp = nvme->n_ioq[i];
4447 
4448 		mutex_enter(&qp->nq_mutex);
4449 		for (size_t j = 0; j < qp->nq_nentry; j++) {
4450 			nvme_cmd_t *cmd = qp->nq_cmd[j];
4451 			nvme_cmd_t *u_cmd;
4452 
4453 			if (cmd == NULL) {
4454 				continue;
4455 			}
4456 
4457 			/*
4458 			 * Since we have the queue lock held the entire time we
4459 			 * iterate over it, it's not possible for the queue to
4460 			 * change underneath us. Thus, we don't need to check
4461 			 * that the return value of nvme_unqueue_cmd matches the
4462 			 * requested cmd to unqueue.
4463 			 */
4464 			u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
4465 			taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq,
4466 			    cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
4467 
4468 			ASSERT3P(u_cmd, ==, cmd);
4469 		}
4470 		mutex_exit(&qp->nq_mutex);
4471 	}
4472 }
4473 
4474 /*
4475  * Open minor management
4476  */
4477 static int
4478 nvme_minor_comparator(const void *l, const void *r)
4479 {
4480 	const nvme_minor_t *lm = l;
4481 	const nvme_minor_t *rm = r;
4482 
4483 	if (lm->nm_minor > rm->nm_minor) {
4484 		return (1);
4485 	} else if (lm->nm_minor < rm->nm_minor) {
4486 		return (-1);
4487 	} else {
4488 		return (0);
4489 	}
4490 }
4491 
4492 static void
4493 nvme_minor_free(nvme_minor_t *minor)
4494 {
4495 	if (minor->nm_minor > 0) {
4496 		ASSERT3S(minor->nm_minor, >=, NVME_OPEN_MINOR_MIN);
4497 		id_free(nvme_open_minors, minor->nm_minor);
4498 		minor->nm_minor = 0;
4499 	}
4500 	VERIFY0(list_link_active(&minor->nm_ctrl_lock.nli_node));
4501 	VERIFY0(list_link_active(&minor->nm_ns_lock.nli_node));
4502 	cv_destroy(&minor->nm_cv);
4503 	kmem_free(minor, sizeof (nvme_minor_t));
4504 }
4505 
4506 static nvme_minor_t *
4507 nvme_minor_find_by_dev(dev_t dev)
4508 {
4509 	id_t id = (id_t)getminor(dev);
4510 	nvme_minor_t search = { .nm_minor = id };
4511 	nvme_minor_t *ret;
4512 
4513 	mutex_enter(&nvme_open_minors_mutex);
4514 	ret = avl_find(&nvme_open_minors_avl, &search, NULL);
4515 	mutex_exit(&nvme_open_minors_mutex);
4516 
4517 	return (ret);
4518 }
4519 
4520 static int
4521 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
4522 {
4523 	nvme_t *nvme;
4524 	int instance;
4525 	int nregs;
4526 	off_t regsize;
4527 	char name[32];
4528 	boolean_t attached_ns;
4529 
4530 	if (cmd != DDI_ATTACH)
4531 		return (DDI_FAILURE);
4532 
4533 	instance = ddi_get_instance(dip);
4534 
4535 	if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
4536 		return (DDI_FAILURE);
4537 
4538 	nvme = ddi_get_soft_state(nvme_state, instance);
4539 	ddi_set_driver_private(dip, nvme);
4540 	nvme->n_dip = dip;
4541 
4542 	/*
4543 	 * Map PCI config space
4544 	 */
4545 	if (pci_config_setup(dip, &nvme->n_pcicfg_handle) != DDI_SUCCESS) {
4546 		dev_err(dip, CE_WARN, "!failed to map PCI config space");
4547 		goto fail;
4548 	}
4549 	nvme->n_progress |= NVME_PCI_CONFIG;
4550 
4551 	/*
4552 	 * Get the various PCI IDs from config space
4553 	 */
4554 	nvme->n_vendor_id =
4555 	    pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_VENID);
4556 	nvme->n_device_id =
4557 	    pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_DEVID);
4558 	nvme->n_revision_id =
4559 	    pci_config_get8(nvme->n_pcicfg_handle, PCI_CONF_REVID);
4560 	nvme->n_subsystem_device_id =
4561 	    pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBSYSID);
4562 	nvme->n_subsystem_vendor_id =
4563 	    pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBVENID);
4564 
4565 	nvme_detect_quirks(nvme);
4566 
4567 	/*
4568 	 * Set up event handlers for hot removal. While npe(4D) supports the hot
4569 	 * removal event being injected for devices, the same is not true of all
4570 	 * of our possible parents (i.e. pci(4D) as of this writing). The most
4571 	 * common case this shows up is in some virtualization environments. We
4572 	 * should treat this as non-fatal so that way devices work but leave
4573 	 * this set up in such a way that if a nexus does grow support for this
4574 	 * we're good to go.
4575 	 */
4576 	if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT,
4577 	    &nvme->n_rm_cookie) == DDI_SUCCESS) {
4578 		if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie,
4579 		    nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) !=
4580 		    DDI_SUCCESS) {
4581 			goto fail;
4582 		}
4583 	} else {
4584 		nvme->n_ev_rm_cb_id = NULL;
4585 	}
4586 
4587 	mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL);
4588 	nvme->n_progress |= NVME_MUTEX_INIT;
4589 
4590 	nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4591 	    DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
4592 	nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
4593 	    dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
4594 	    B_TRUE : B_FALSE;
4595 	nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4596 	    DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
4597 	nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4598 	    DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN);
4599 	/*
4600 	 * Double up the default for completion queues in case of
4601 	 * queue sharing.
4602 	 */
4603 	nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4604 	    DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN);
4605 	nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4606 	    DDI_PROP_DONTPASS, "async-event-limit",
4607 	    NVME_DEFAULT_ASYNC_EVENT_LIMIT);
4608 	nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4609 	    DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
4610 	    B_TRUE : B_FALSE;
4611 	nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4612 	    DDI_PROP_DONTPASS, "min-phys-block-size",
4613 	    NVME_DEFAULT_MIN_BLOCK_SIZE);
4614 	nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4615 	    DDI_PROP_DONTPASS, "max-submission-queues", -1);
4616 	nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4617 	    DDI_PROP_DONTPASS, "max-completion-queues", -1);
4618 
4619 	if (!ISP2(nvme->n_min_block_size) ||
4620 	    (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
4621 		dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
4622 		    "using default %d", ISP2(nvme->n_min_block_size) ?
4623 		    "too low" : "not a power of 2",
4624 		    NVME_DEFAULT_MIN_BLOCK_SIZE);
4625 		nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
4626 	}
4627 
4628 	if (nvme->n_submission_queues != -1 &&
4629 	    (nvme->n_submission_queues < 1 ||
4630 	    nvme->n_submission_queues > UINT16_MAX)) {
4631 		dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not "
4632 		    "valid. Must be [1..%d]", nvme->n_submission_queues,
4633 		    UINT16_MAX);
4634 		nvme->n_submission_queues = -1;
4635 	}
4636 
4637 	if (nvme->n_completion_queues != -1 &&
4638 	    (nvme->n_completion_queues < 1 ||
4639 	    nvme->n_completion_queues > UINT16_MAX)) {
4640 		dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not "
4641 		    "valid. Must be [1..%d]", nvme->n_completion_queues,
4642 		    UINT16_MAX);
4643 		nvme->n_completion_queues = -1;
4644 	}
4645 
4646 	if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
4647 		nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
4648 	else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
4649 		nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
4650 
4651 	if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN)
4652 		nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN;
4653 	if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN)
4654 		nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN;
4655 
4656 	if (nvme->n_async_event_limit < 1)
4657 		nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
4658 
4659 	nvme->n_reg_acc_attr = nvme_reg_acc_attr;
4660 	nvme->n_queue_dma_attr = nvme_queue_dma_attr;
4661 	nvme->n_prp_dma_attr = nvme_prp_dma_attr;
4662 	nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
4663 
4664 	/*
4665 	 * Set up FMA support.
4666 	 */
4667 	nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
4668 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
4669 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
4670 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
4671 
4672 	ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
4673 
4674 	if (nvme->n_fm_cap) {
4675 		if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
4676 			nvme->n_reg_acc_attr.devacc_attr_access =
4677 			    DDI_FLAGERR_ACC;
4678 
4679 		if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
4680 			nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
4681 			nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
4682 		}
4683 
4684 		if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
4685 		    DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4686 			pci_ereport_setup(dip);
4687 
4688 		if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4689 			ddi_fm_handler_register(dip, nvme_fm_errcb,
4690 			    (void *)nvme);
4691 	}
4692 
4693 	nvme->n_progress |= NVME_FMA_INIT;
4694 
4695 	/*
4696 	 * The spec defines several register sets. Only the controller
4697 	 * registers (set 1) are currently used.
4698 	 */
4699 	if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
4700 	    nregs < 2 ||
4701 	    ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
4702 		goto fail;
4703 
4704 	if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
4705 	    &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
4706 		dev_err(dip, CE_WARN, "!failed to map regset 1");
4707 		goto fail;
4708 	}
4709 
4710 	nvme->n_progress |= NVME_REGS_MAPPED;
4711 
4712 	/*
4713 	 * Create PRP DMA cache
4714 	 */
4715 	(void) snprintf(name, sizeof (name), "%s%d_prp_cache",
4716 	    ddi_driver_name(dip), ddi_get_instance(dip));
4717 	nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
4718 	    0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
4719 	    NULL, (void *)nvme, NULL, 0);
4720 
4721 	if (nvme_init(nvme) != DDI_SUCCESS)
4722 		goto fail;
4723 
4724 	/*
4725 	 * Initialize the driver with the UFM subsystem
4726 	 */
4727 	if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
4728 	    &nvme->n_ufmh, nvme) != 0) {
4729 		dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem");
4730 		goto fail;
4731 	}
4732 	mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL);
4733 	ddi_ufm_update(nvme->n_ufmh);
4734 	nvme->n_progress |= NVME_UFM_INIT;
4735 
4736 	nvme_mgmt_lock_init(&nvme->n_mgmt);
4737 	nvme_lock_init(&nvme->n_lock);
4738 	nvme->n_progress |= NVME_MGMT_INIT;
4739 	nvme->n_dead_status = NVME_IOCTL_E_CTRL_DEAD;
4740 
4741 
4742 	/*
4743 	 * Identify namespaces.
4744 	 */
4745 	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
4746 
4747 	for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
4748 		nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
4749 
4750 		nvme_lock_init(&ns->ns_lock);
4751 		ns->ns_progress |= NVME_NS_LOCK;
4752 
4753 		/*
4754 		 * Namespaces start out ignored. When nvme_init_ns() checks
4755 		 * their properties and finds they can be used, it will set
4756 		 * ns_ignore to B_FALSE. It will also use this state change
4757 		 * to keep an accurate count of attachable namespaces.
4758 		 */
4759 		ns->ns_ignore = B_TRUE;
4760 		if (nvme_init_ns(nvme, i) != 0) {
4761 			nvme_mgmt_unlock(nvme);
4762 			goto fail;
4763 		}
4764 
4765 		if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR,
4766 		    NVME_MINOR(ddi_get_instance(nvme->n_dip), i),
4767 		    DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
4768 			nvme_mgmt_unlock(nvme);
4769 			dev_err(dip, CE_WARN,
4770 			    "!failed to create minor node for namespace %d", i);
4771 			goto fail;
4772 		}
4773 	}
4774 
4775 	if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
4776 	    NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) !=
4777 	    DDI_SUCCESS) {
4778 		nvme_mgmt_unlock(nvme);
4779 		dev_err(dip, CE_WARN, "nvme_attach: "
4780 		    "cannot create devctl minor node");
4781 		goto fail;
4782 	}
4783 
4784 	attached_ns = B_FALSE;
4785 	for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
4786 		nvme_ioctl_common_t com = { .nioc_nsid = i };
4787 
4788 		if (nvme_attach_ns(nvme, &com)) {
4789 			attached_ns = B_TRUE;
4790 		} else if (com.nioc_drv_err != NVME_IOCTL_E_UNSUP_ATTACH_NS) {
4791 			dev_err(nvme->n_dip, CE_WARN, "!failed to attach "
4792 			    "namespace %d due to blkdev error", i);
4793 			/*
4794 			 * Once we have successfully attached a namespace we
4795 			 * can no longer fail the driver attach as there is now
4796 			 * a blkdev child node linked to this device, and
4797 			 * our node is not yet in the attached state.
4798 			 */
4799 			if (!attached_ns) {
4800 				nvme_mgmt_unlock(nvme);
4801 				goto fail;
4802 			}
4803 		}
4804 	}
4805 
4806 	nvme_mgmt_unlock(nvme);
4807 
4808 	return (DDI_SUCCESS);
4809 
4810 fail:
4811 	/* attach successful anyway so that FMA can retire the device */
4812 	if (nvme->n_dead)
4813 		return (DDI_SUCCESS);
4814 
4815 	(void) nvme_detach(dip, DDI_DETACH);
4816 
4817 	return (DDI_FAILURE);
4818 }
4819 
4820 static int
4821 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
4822 {
4823 	int instance;
4824 	nvme_t *nvme;
4825 
4826 	if (cmd != DDI_DETACH)
4827 		return (DDI_FAILURE);
4828 
4829 	instance = ddi_get_instance(dip);
4830 
4831 	nvme = ddi_get_soft_state(nvme_state, instance);
4832 
4833 	if (nvme == NULL)
4834 		return (DDI_FAILURE);
4835 
4836 	/*
4837 	 * Remove all minor nodes from the device regardless of the source in
4838 	 * one swoop.
4839 	 */
4840 	ddi_remove_minor_node(dip, NULL);
4841 
4842 	/*
4843 	 * We need to remove the event handler as one of the first things that
4844 	 * we do. If we proceed with other teardown without removing the event
4845 	 * handler, we could end up in a very unfortunate race with ourselves.
4846 	 * The DDI does not serialize these with detach (just like timeout(9F)
4847 	 * and others).
4848 	 */
4849 	if (nvme->n_ev_rm_cb_id != NULL) {
4850 		(void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id);
4851 	}
4852 	nvme->n_ev_rm_cb_id = NULL;
4853 
4854 	/*
4855 	 * If the controller was marked dead, there is a slight chance that we
4856 	 * are asynchronously processing the removal taskq. Because we have
4857 	 * removed the callback handler above and all minor nodes and commands
4858 	 * are closed, there is no other way to get in here. As such, we wait on
4859 	 * the nvme_dead_taskq to complete so we can avoid tracking if it's
4860 	 * running or not.
4861 	 */
4862 	taskq_wait(nvme_dead_taskq);
4863 
4864 	if (nvme->n_ns) {
4865 		for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
4866 			nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
4867 
4868 			if (ns->ns_bd_hdl) {
4869 				(void) bd_detach_handle(ns->ns_bd_hdl);
4870 				bd_free_handle(ns->ns_bd_hdl);
4871 			}
4872 
4873 			if (ns->ns_idns)
4874 				kmem_free(ns->ns_idns,
4875 				    sizeof (nvme_identify_nsid_t));
4876 			if (ns->ns_devid)
4877 				strfree(ns->ns_devid);
4878 
4879 			if ((ns->ns_progress & NVME_NS_LOCK) != 0)
4880 				nvme_lock_fini(&ns->ns_lock);
4881 		}
4882 
4883 		kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
4884 		    nvme->n_namespace_count);
4885 	}
4886 
4887 	if (nvme->n_progress & NVME_MGMT_INIT) {
4888 		nvme_lock_fini(&nvme->n_lock);
4889 		nvme_mgmt_lock_fini(&nvme->n_mgmt);
4890 	}
4891 
4892 	if (nvme->n_progress & NVME_UFM_INIT) {
4893 		ddi_ufm_fini(nvme->n_ufmh);
4894 		mutex_destroy(&nvme->n_fwslot_mutex);
4895 	}
4896 
4897 	if (nvme->n_progress & NVME_INTERRUPTS)
4898 		nvme_release_interrupts(nvme);
4899 
4900 	for (uint_t i = 0; i < nvme->n_cq_count; i++) {
4901 		if (nvme->n_cq[i]->ncq_cmd_taskq != NULL)
4902 			taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq);
4903 	}
4904 
4905 	if (nvme->n_progress & NVME_MUTEX_INIT) {
4906 		mutex_destroy(&nvme->n_minor_mutex);
4907 	}
4908 
4909 	if (nvme->n_ioq_count > 0) {
4910 		for (uint_t i = 1; i != nvme->n_ioq_count + 1; i++) {
4911 			if (nvme->n_ioq[i] != NULL) {
4912 				/* TODO: send destroy queue commands */
4913 				nvme_free_qpair(nvme->n_ioq[i]);
4914 			}
4915 		}
4916 
4917 		kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
4918 		    (nvme->n_ioq_count + 1));
4919 	}
4920 
4921 	if (nvme->n_prp_cache != NULL) {
4922 		kmem_cache_destroy(nvme->n_prp_cache);
4923 	}
4924 
4925 	if (nvme->n_progress & NVME_REGS_MAPPED) {
4926 		nvme_shutdown(nvme, B_FALSE);
4927 		(void) nvme_reset(nvme, B_FALSE);
4928 	}
4929 
4930 	if (nvme->n_progress & NVME_CTRL_LIMITS)
4931 		sema_destroy(&nvme->n_abort_sema);
4932 
4933 	if (nvme->n_progress & NVME_ADMIN_QUEUE)
4934 		nvme_free_qpair(nvme->n_adminq);
4935 
4936 	if (nvme->n_cq_count > 0) {
4937 		nvme_destroy_cq_array(nvme, 0);
4938 		nvme->n_cq = NULL;
4939 		nvme->n_cq_count = 0;
4940 	}
4941 
4942 	if (nvme->n_idcomns)
4943 		kmem_free(nvme->n_idcomns, NVME_IDENTIFY_BUFSIZE);
4944 
4945 	if (nvme->n_idctl)
4946 		kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE);
4947 
4948 	if (nvme->n_progress & NVME_REGS_MAPPED)
4949 		ddi_regs_map_free(&nvme->n_regh);
4950 
4951 	if (nvme->n_progress & NVME_FMA_INIT) {
4952 		if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4953 			ddi_fm_handler_unregister(nvme->n_dip);
4954 
4955 		if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
4956 		    DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4957 			pci_ereport_teardown(nvme->n_dip);
4958 
4959 		ddi_fm_fini(nvme->n_dip);
4960 	}
4961 
4962 	if (nvme->n_progress & NVME_PCI_CONFIG)
4963 		pci_config_teardown(&nvme->n_pcicfg_handle);
4964 
4965 	if (nvme->n_vendor != NULL)
4966 		strfree(nvme->n_vendor);
4967 
4968 	if (nvme->n_product != NULL)
4969 		strfree(nvme->n_product);
4970 
4971 	ddi_soft_state_free(nvme_state, instance);
4972 
4973 	return (DDI_SUCCESS);
4974 }
4975 
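/*
 * quiesce(9E) entry point. This runs in a context (fast reboot, panic) where
 * we must not block, so we simply shut the controller down and reset it; the
 * B_TRUE arguments here appear to select the quiesce (non-blocking) variants
 * of those paths, in contrast to the B_FALSE used during normal detach.
 */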
4976 static int
4977 nvme_quiesce(dev_info_t *dip)
4978 {
4979 	int instance;
4980 	nvme_t *nvme;
4981 
4982 	instance = ddi_get_instance(dip);
4983 
4984 	nvme = ddi_get_soft_state(nvme_state, instance);
4985 
4986 	if (nvme == NULL)
4987 		return (DDI_FAILURE);
4988 
4989 	nvme_shutdown(nvme, B_TRUE);
4990 
4991 	(void) nvme_reset(nvme, B_TRUE);
4992 
4993 	return (DDI_SUCCESS);
4994 }
4995 
4996 static int
4997 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma)
4998 {
4999 	nvme_t *nvme = cmd->nc_nvme;
5000 	uint_t nprp_per_page, nprp;
5001 	uint64_t *prp;
5002 	const ddi_dma_cookie_t *cookie;
5003 	uint_t idx;
5004 	uint_t ncookies = ddi_dma_ncookies(dma);
5005 
5006 	if (ncookies == 0)
5007 		return (DDI_FAILURE);
5008 
5009 	if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL)
5010 		return (DDI_FAILURE);
5011 	cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress;
5012 
5013 	if (ncookies == 1) {
5014 		cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5015 		return (DDI_SUCCESS);
5016 	} else if (ncookies == 2) {
5017 		if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL)
5018 			return (DDI_FAILURE);
5019 		cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress;
5020 		return (DDI_SUCCESS);
5021 	}
5022 
5023 	/*
5024 	 * At this point, we're always operating on cookies at
5025 	 * index >= 1 and writing the addresses of those cookies
5026 	 * into a new page. The address of that page is stored
5027 	 * as the second PRP entry.
5028 	 */
5029 	nprp_per_page = nvme->n_pagesize / sizeof (uint64_t);
5030 	ASSERT(nprp_per_page > 0);
5031 
5032 	/*
5033 	 * We currently don't support chained PRPs and set up our DMA
5034 	 * attributes to reflect that. If we still get an I/O request
5035 	 * that needs a chained PRP something is very wrong. Account
5036 	 * for the first cookie here, which we've placed in d_prp[0].
5037 	 */
5038 	nprp = howmany(ncookies - 1, nprp_per_page);
5039 	VERIFY(nprp == 1);
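	/*
	 * Illustrative arithmetic: with the common 4k page size,
	 * nprp_per_page is 4096 / 8 = 512, so a transfer may span up to
	 * 513 cookies (one in d_prp[0] plus 512 in the PRP list page)
	 * before chaining would be required.
	 */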
5040 
5041 	/*
5042 	 * Allocate a page of pointers, in which we'll write the
5043 	 * addresses of cookies 1 to `ncookies`.
5044 	 */
5045 	cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
5046 	bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5047 	cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress;
5048 
5049 	prp = (uint64_t *)cmd->nc_prp->nd_memp;
5050 	for (idx = 1; idx < ncookies; idx++) {
5051 		if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL)
5052 			return (DDI_FAILURE);
5053 		*prp++ = cookie->dmac_laddress;
5054 	}
5055 
5056 	(void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5057 	    DDI_DMA_SYNC_FORDEV);
5058 	return (DDI_SUCCESS);
5059 }
5060 
5061 /*
5062  * The maximum number of requests supported for a deallocate request is
5063  * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and
5064  * unchanged through at least 1.4a). The definition of nvme_range_t is also
5065  * from the NVMe 1.1 spec. Together, the result is that all of the ranges for
5066  * a deallocate request will fit into the smallest supported namespace page
5067  * (4k).
5068  */
5069 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096);
5070 
5071 static int
5072 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize,
5073     int allocflag)
5074 {
5075 	const dkioc_free_list_t *dfl = xfer->x_dfl;
5076 	const dkioc_free_list_ext_t *exts = dfl->dfl_exts;
5077 	nvme_t *nvme = cmd->nc_nvme;
5078 	nvme_range_t *ranges = NULL;
5079 	uint_t i;
5080 
5081 	/*
5082 	 * The number of ranges in the request is zero-based (that is,
5083 	 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ...,
5084 	 * word10 == 255 -> 256 ranges). Therefore the allowed values are
5085 	 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request,
5086 	 * we either provided bad info in nvme_bd_driveinfo() or there is a bug
5087 	 * in blkdev.
5088 	 */
5089 	VERIFY3U(dfl->dfl_num_exts, >, 0);
5090 	VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES);
5091 	cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff;
5092 
5093 	cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE;
5094 
5095 	cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag);
5096 	if (cmd->nc_prp == NULL)
5097 		return (DDI_FAILURE);
5098 
5099 	bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5100 	ranges = (nvme_range_t *)cmd->nc_prp->nd_memp;
5101 
5102 	cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress;
5103 	cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5104 
5105 	for (i = 0; i < dfl->dfl_num_exts; i++) {
5106 		uint64_t lba, len;
5107 
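		/*
		 * The dkioc free list describes extents in bytes; convert
		 * the starting offset and length to LBAs for the deallocate
		 * range entry.
		 */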
5108 		lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize;
5109 		len = exts[i].dfle_length / blocksize;
5110 
5111 		VERIFY3U(len, <=, UINT32_MAX);
5112 
5113 		/* No context attributes for a deallocate request */
5114 		ranges[i].nr_ctxattr = 0;
5115 		ranges[i].nr_len = len;
5116 		ranges[i].nr_lba = lba;
5117 	}
5118 
5119 	(void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5120 	    DDI_DMA_SYNC_FORDEV);
5121 
5122 	return (DDI_SUCCESS);
5123 }
5124 
5125 static nvme_cmd_t *
5126 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
5127 {
5128 	nvme_t *nvme = ns->ns_nvme;
5129 	nvme_cmd_t *cmd;
5130 	int allocflag;
5131 
5132 	/*
5133 	 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
5134 	 */
5135 	allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP;
5136 	cmd = nvme_alloc_cmd(nvme, allocflag);
5137 
5138 	if (cmd == NULL)
5139 		return (NULL);
5140 
5141 	cmd->nc_sqe.sqe_opc = opc;
5142 	cmd->nc_callback = nvme_bd_xfer_done;
5143 	cmd->nc_xfer = xfer;
5144 
5145 	switch (opc) {
5146 	case NVME_OPC_NVM_WRITE:
5147 	case NVME_OPC_NVM_READ:
5148 		VERIFY(xfer->x_nblks <= 0x10000);
5149 
5150 		cmd->nc_sqe.sqe_nsid = ns->ns_id;
5151 
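		/*
		 * The starting LBA is split across CDW10 (low 32 bits) and
		 * CDW11 (high 32 bits). The block count in CDW12 is a
		 * zero-based 16-bit field, hence the x_nblks check above.
		 */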
5152 		cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
5153 		cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
5154 		cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
5155 
5156 		if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS)
5157 			goto fail;
5158 		break;
5159 
5160 	case NVME_OPC_NVM_FLUSH:
5161 		cmd->nc_sqe.sqe_nsid = ns->ns_id;
5162 		break;
5163 
5164 	case NVME_OPC_NVM_DSET_MGMT:
5165 		cmd->nc_sqe.sqe_nsid = ns->ns_id;
5166 
5167 		if (nvme_fill_ranges(cmd, xfer,
5168 		    (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS)
5169 			goto fail;
5170 		break;
5171 
5172 	default:
5173 		goto fail;
5174 	}
5175 
5176 	return (cmd);
5177 
5178 fail:
5179 	nvme_free_cmd(cmd);
5180 	return (NULL);
5181 }
5182 
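/*
 * Completion callback for blkdev I/O: translate the command's completion
 * status into an errno, free the command, and hand the transfer back to
 * blkdev.
 */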
5183 static void
5184 nvme_bd_xfer_done(void *arg)
5185 {
5186 	nvme_cmd_t *cmd = arg;
5187 	bd_xfer_t *xfer = cmd->nc_xfer;
5188 	int error = 0;
5189 
5190 	error = nvme_check_cmd_status(cmd);
5191 	nvme_free_cmd(cmd);
5192 
5193 	bd_xfer_done(xfer, error);
5194 }
5195 
5196 static void
5197 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
5198 {
5199 	nvme_namespace_t *ns = arg;
5200 	nvme_t *nvme = ns->ns_nvme;
5201 	uint_t ns_count = MAX(1, nvme->n_namespaces_attachable);
5202 
5203 	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5204 
5205 	/*
5206 	 * Set the blkdev qcount to the number of submission queues.
5207 	 * It will then create one waitq/runq pair for each submission
5208 	 * queue and spread I/O requests across the queues.
5209 	 */
5210 	drive->d_qcount = nvme->n_ioq_count;
5211 
5212 	/*
5213 	 * I/O activity to individual namespaces is distributed across
5214 	 * each of the d_qcount blkdev queues (which has been set to
5215 	 * the number of nvme submission queues). d_qsize is the number
5216 	 * of submitted and not completed I/Os within each queue that blkdev
5217 	 * will allow before it starts holding them in the waitq.
5218 	 *
5219 	 * Each namespace will create a child blkdev instance; for each one
5220 	 * we try to set the d_qsize so that each namespace gets an
5221 	 * equal portion of the submission queue.
5222 	 *
5223 	 * If, after instantiation of the nvme drive, n_namespaces_attachable
5224 	 * changes and another namespace is attached, it could calculate a
5225 	 * different d_qsize. It may even be that the sum of the d_qsizes is
5226 	 * now beyond the submission queue size. Should that be the case
5227 	 * and the I/O rate is such that blkdev attempts to submit more
5228 	 * I/Os than the size of the submission queue, the excess I/Os
5229 	 * will be held behind the semaphore nq_sema.
5230 	 */
5231 	drive->d_qsize = nvme->n_io_squeue_len / ns_count;
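	/*
	 * Illustrative example: with an I/O submission queue length of 1024
	 * and 4 attachable namespaces, each namespace's blkdev queues get a
	 * d_qsize of 256.
	 */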
5232 
5233 	/*
5234 	 * Don't let the queue size drop below the minimum, though.
5235 	 */
5236 	drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN);
5237 
5238 	/*
5239 	 * d_maxxfer is not set, which means the value is taken from the DMA
5240 	 * attributes specified to bd_alloc_handle.
5241 	 */
5242 
5243 	drive->d_removable = B_FALSE;
5244 	drive->d_hotpluggable = B_FALSE;
5245 
5246 	bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
5247 	drive->d_target = ns->ns_id;
5248 	drive->d_lun = 0;
5249 
5250 	drive->d_model = nvme->n_idctl->id_model;
5251 	drive->d_model_len = sizeof (nvme->n_idctl->id_model);
5252 	drive->d_vendor = nvme->n_vendor;
5253 	drive->d_vendor_len = strlen(nvme->n_vendor);
5254 	drive->d_product = nvme->n_product;
5255 	drive->d_product_len = strlen(nvme->n_product);
5256 	drive->d_serial = nvme->n_idctl->id_serial;
5257 	drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
5258 	drive->d_revision = nvme->n_idctl->id_fwrev;
5259 	drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
5260 
5261 	/*
5262 	 * If we support the dataset management command, the only restrictions
5263 	 * on a discard request are the maximum number of ranges (segments)
5264 	 * per single request.
5265 	 */
5266 	if (nvme->n_idctl->id_oncs.on_dset_mgmt)
5267 		drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES;
5268 
5269 	nvme_mgmt_unlock(nvme);
5270 }
5271 
5272 static int
5273 nvme_bd_mediainfo(void *arg, bd_media_t *media)
5274 {
5275 	nvme_namespace_t *ns = arg;
5276 	nvme_t *nvme = ns->ns_nvme;
5277 
5278 	if (nvme->n_dead) {
5279 		return (EIO);
5280 	}
5281 
5282 	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5283 
5284 	media->m_nblks = ns->ns_block_count;
5285 	media->m_blksize = ns->ns_block_size;
5286 	media->m_readonly = B_FALSE;
5287 	media->m_solidstate = B_TRUE;
5288 
5289 	media->m_pblksize = ns->ns_best_block_size;
5290 
5291 	nvme_mgmt_unlock(nvme);
5292 
5293 	return (0);
5294 }
5295 
5296 static int
5297 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
5298 {
5299 	nvme_t *nvme = ns->ns_nvme;
5300 	nvme_cmd_t *cmd;
5301 	nvme_qpair_t *ioq;
5302 	boolean_t poll;
5303 	int ret;
5304 
5305 	if (nvme->n_dead) {
5306 		return (EIO);
5307 	}
5308 
5309 	cmd = nvme_create_nvm_cmd(ns, opc, xfer);
5310 	if (cmd == NULL)
5311 		return (ENOMEM);
5312 
5313 	cmd->nc_sqid = xfer->x_qnum + 1;
5314 	ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
5315 	ioq = nvme->n_ioq[cmd->nc_sqid];
5316 
5317 	/*
5318 	 * Get the polling flag before submitting the command. The command may
5319 	 * complete immediately after it was submitted, which means we must
5320 	 * treat both cmd and xfer as if they have been freed already.
5321 	 */
5322 	poll = (xfer->x_flags & BD_XFER_POLL) != 0;
5323 
5324 	ret = nvme_submit_io_cmd(ioq, cmd);
5325 
5326 	if (ret != 0)
5327 		return (ret);
5328 
5329 	if (!poll)
5330 		return (0);
5331 
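	/*
	 * In polled mode (e.g. while dumping) spin on this queue, running
	 * the completion callback for each retrieved command, until no
	 * commands remain active.
	 */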
5332 	do {
5333 		cmd = nvme_retrieve_cmd(nvme, ioq);
5334 		if (cmd != NULL)
5335 			cmd->nc_callback(cmd);
5336 		else
5337 			drv_usecwait(10);
5338 	} while (ioq->nq_active_cmds != 0);
5339 
5340 	return (0);
5341 }
5342 
5343 static int
5344 nvme_bd_read(void *arg, bd_xfer_t *xfer)
5345 {
5346 	nvme_namespace_t *ns = arg;
5347 
5348 	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
5349 }
5350 
5351 static int
5352 nvme_bd_write(void *arg, bd_xfer_t *xfer)
5353 {
5354 	nvme_namespace_t *ns = arg;
5355 
5356 	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
5357 }
5358 
5359 static int
5360 nvme_bd_sync(void *arg, bd_xfer_t *xfer)
5361 {
5362 	nvme_namespace_t *ns = arg;
5363 
5364 	if (ns->ns_nvme->n_dead)
5365 		return (EIO);
5366 
5367 	/*
5368 	 * If the volatile write cache is not present or not enabled the FLUSH
5369 	 * command is a no-op, so we can take a shortcut here.
5370 	 */
5371 	if (!ns->ns_nvme->n_write_cache_present) {
5372 		bd_xfer_done(xfer, ENOTSUP);
5373 		return (0);
5374 	}
5375 
5376 	if (!ns->ns_nvme->n_write_cache_enabled) {
5377 		bd_xfer_done(xfer, 0);
5378 		return (0);
5379 	}
5380 
5381 	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
5382 }
5383 
5384 static int
5385 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
5386 {
5387 	nvme_namespace_t *ns = arg;
5388 	nvme_t *nvme = ns->ns_nvme;
5389 
5390 	if (nvme->n_dead) {
5391 		return (EIO);
5392 	}
5393 
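	/*
	 * Prefer the 128-bit NGUID when the namespace reports a non-zero one,
	 * then the 64-bit EUI64, and finally fall back to the
	 * driver-constructed ns_devid string.
	 */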
5394 	if (*(uint64_t *)ns->ns_nguid != 0 ||
5395 	    *(uint64_t *)(ns->ns_nguid + 8) != 0) {
5396 		return (ddi_devid_init(devinfo, DEVID_NVME_NGUID,
5397 		    sizeof (ns->ns_nguid), ns->ns_nguid, devid));
5398 	} else if (*(uint64_t *)ns->ns_eui64 != 0) {
5399 		return (ddi_devid_init(devinfo, DEVID_NVME_EUI64,
5400 		    sizeof (ns->ns_eui64), ns->ns_eui64, devid));
5401 	} else {
5402 		return (ddi_devid_init(devinfo, DEVID_NVME_NSID,
5403 		    strlen(ns->ns_devid), ns->ns_devid, devid));
5404 	}
5405 }
5406 
5407 static int
5408 nvme_bd_free_space(void *arg, bd_xfer_t *xfer)
5409 {
5410 	nvme_namespace_t *ns = arg;
5411 
5412 	if (xfer->x_dfl == NULL)
5413 		return (EINVAL);
5414 
5415 	if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt)
5416 		return (ENOTSUP);
5417 
5418 	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT));
5419 }
5420 
5421 static int
5422 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
5423 {
5424 #ifndef __lock_lint
5425 	_NOTE(ARGUNUSED(cred_p));
5426 #endif
5427 	nvme_t *nvme;
5428 	nvme_minor_t *minor = NULL;
5429 	uint32_t nsid;
5430 	minor_t m = getminor(*devp);
5431 	int rv = 0;
5432 
5433 	if (otyp != OTYP_CHR)
5434 		return (EINVAL);
5435 
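	/*
	 * Minors at or above NVME_OPEN_MINOR_MIN are the per-open minors that
	 * we allocate dynamically below; only the statically created
	 * controller and namespace minors may be opened directly.
	 */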
5436 	if (m >= NVME_OPEN_MINOR_MIN)
5437 		return (ENXIO);
5438 
5439 	nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(m));
5440 	nsid = NVME_MINOR_NSID(m);
5441 
5442 	if (nvme == NULL)
5443 		return (ENXIO);
5444 
5445 	if (nsid > nvme->n_namespace_count)
5446 		return (ENXIO);
5447 
5448 	if (nvme->n_dead)
5449 		return (EIO);
5450 
5451 	/*
5452 	 * At this point, we're going to allow an open to proceed on this
5453 	 * device. We need to allocate a new instance for this (presuming one is
5454 	 * available).
5455 	 */
5456 	minor = kmem_zalloc(sizeof (nvme_minor_t), KM_NOSLEEP_LAZY);
5457 	if (minor == NULL) {
5458 		return (ENOMEM);
5459 	}
5460 
5461 	cv_init(&minor->nm_cv, NULL, CV_DRIVER, NULL);
5462 	list_link_init(&minor->nm_ctrl_lock.nli_node);
5463 	minor->nm_ctrl_lock.nli_nvme = nvme;
5464 	minor->nm_ctrl_lock.nli_minor = minor;
5465 	list_link_init(&minor->nm_ns_lock.nli_node);
5466 	minor->nm_ns_lock.nli_nvme = nvme;
5467 	minor->nm_ns_lock.nli_minor = minor;
5468 	minor->nm_minor = id_alloc_nosleep(nvme_open_minors);
5469 	if (minor->nm_minor == -1) {
5470 		nvme_minor_free(minor);
5471 		return (ENOSPC);
5472 	}
5473 
5474 	minor->nm_ctrl = nvme;
5475 	if (nsid != 0) {
5476 		minor->nm_ns = nvme_nsid2ns(nvme, nsid);
5477 	}
5478 
5479 	/*
5480 	 * Before we check for exclusive access and attempt a lock if requested,
5481 	 * ensure that this minor is persisted.
5482 	 */
5483 	mutex_enter(&nvme_open_minors_mutex);
5484 	avl_add(&nvme_open_minors_avl, minor);
5485 	mutex_exit(&nvme_open_minors_mutex);
5486 
5487 	/*
5488 	 * A request to open this minor with FEXCL is translated into a non-blocking
5489 	 * write lock of the appropriate entity. This honors the original
5490 	 * semantics here. In the future, we should see if we can remove this
5491 	 * and turn a request for FEXCL at open into ENOTSUP.
5492 	 */
5493 	mutex_enter(&nvme->n_minor_mutex);
5494 	if ((flag & FEXCL) != 0) {
5495 		nvme_ioctl_lock_t lock = {
5496 			.nil_level = NVME_LOCK_L_WRITE,
5497 			.nil_flags = NVME_LOCK_F_DONT_BLOCK
5498 		};
5499 
5500 		if (minor->nm_ns != NULL) {
5501 			lock.nil_ent = NVME_LOCK_E_NS;
5502 			lock.nil_common.nioc_nsid = nsid;
5503 		} else {
5504 			lock.nil_ent = NVME_LOCK_E_CTRL;
5505 		}
5506 		nvme_rwlock(minor, &lock);
5507 		if (lock.nil_common.nioc_drv_err != NVME_IOCTL_E_OK) {
5508 			mutex_exit(&nvme->n_minor_mutex);
5509 
5510 			mutex_enter(&nvme_open_minors_mutex);
5511 			avl_remove(&nvme_open_minors_avl, minor);
5512 			mutex_exit(&nvme_open_minors_mutex);
5513 
5514 			nvme_minor_free(minor);
5515 			return (EBUSY);
5516 		}
5517 	}
5518 	mutex_exit(&nvme->n_minor_mutex);
5519 
5520 	*devp = makedevice(getmajor(*devp), (minor_t)minor->nm_minor);
5521 	return (rv);
5522 
5523 }
5524 
5525 static int
5526 nvme_close(dev_t dev, int flag __unused, int otyp, cred_t *cred_p __unused)
5527 {
5528 	nvme_minor_t *minor;
5529 	nvme_t *nvme;
5530 
5531 	if (otyp != OTYP_CHR) {
5532 		return (ENXIO);
5533 	}
5534 
5535 	minor = nvme_minor_find_by_dev(dev);
5536 	if (minor == NULL) {
5537 		return (ENXIO);
5538 	}
5539 
5540 	mutex_enter(&nvme_open_minors_mutex);
5541 	avl_remove(&nvme_open_minors_avl, minor);
5542 	mutex_exit(&nvme_open_minors_mutex);
5543 
5544 	/*
5545 	 * When this device is being closed, we must ensure that any locks held
5546 	 * by this minor are dealt with.
5547 	 */
5548 	nvme = minor->nm_ctrl;
5549 	mutex_enter(&nvme->n_minor_mutex);
5550 	ASSERT3U(minor->nm_ctrl_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
5551 	ASSERT3U(minor->nm_ns_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
5552 
5553 	if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
5554 		VERIFY3P(minor->nm_ctrl_lock.nli_lock, !=, NULL);
5555 		nvme_rwunlock(&minor->nm_ctrl_lock,
5556 		    minor->nm_ctrl_lock.nli_lock);
5557 	}
5558 
5559 	if (minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
5560 		VERIFY3P(minor->nm_ns_lock.nli_lock, !=, NULL);
5561 		nvme_rwunlock(&minor->nm_ns_lock, minor->nm_ns_lock.nli_lock);
5562 	}
5563 	mutex_exit(&nvme->n_minor_mutex);
5564 
5565 	nvme_minor_free(minor);
5566 
5567 	return (0);
5568 }
5569 
5570 void
5571 nvme_ioctl_success(nvme_ioctl_common_t *ioc)
5572 {
5573 	ioc->nioc_drv_err = NVME_IOCTL_E_OK;
5574 	ioc->nioc_ctrl_sc = NVME_CQE_SC_GEN_SUCCESS;
5575 	ioc->nioc_ctrl_sct = NVME_CQE_SCT_GENERIC;
5576 }
5577 
5578 boolean_t
5579 nvme_ioctl_error(nvme_ioctl_common_t *ioc, nvme_ioctl_errno_t err, uint32_t sct,
5580     uint32_t sc)
5581 {
5582 	ioc->nioc_drv_err = err;
5583 	ioc->nioc_ctrl_sct = sct;
5584 	ioc->nioc_ctrl_sc = sc;
5585 
5586 	return (B_FALSE);
5587 }
5588 
5589 static int
5590 nvme_ioctl_copyout_error(nvme_ioctl_errno_t err, intptr_t uaddr, int mode)
5591 {
5592 	nvme_ioctl_common_t ioc;
5593 
5594 	ASSERT3U(err, !=, NVME_IOCTL_E_CTRL_ERROR);
5595 	bzero(&ioc, sizeof (ioc));
	ioc.nioc_drv_err = err;
5596 	if (ddi_copyout(&ioc, (void *)uaddr, sizeof (nvme_ioctl_common_t),
5597 	    mode & FKIOCTL) != 0) {
5598 		return (EFAULT);
5599 	}
5600 	return (0);
5601 }
5602 
5603 
5604 /*
5605  * The companion to the namespace checking. This occurs after any rewriting
5606  * The companion to the namespace checking. This occurs after any rewriting
5607  * has been done. This is the primary point where we attempt to enforce an operation's
5608  * ongoing and to have someone with an exclusive lock ask to unlock it for some
5609  * reason. This does not maintain the number of such events that are going on.
5610  * While perhaps this is leaving too much up to the user, by the same token we
5611  * don't try to stop them from issuing two different format NVM commands
5612  * targeting the whole device at the same time either, even though the
5613  * controller would really rather that didn't happen.
5614  */
5615 static boolean_t
5616 nvme_ioctl_excl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
5617     const nvme_ioctl_check_t *check)
5618 {
5619 	nvme_t *const nvme = minor->nm_ctrl;
5620 	nvme_namespace_t *ns;
5621 	boolean_t have_ctrl, have_ns, ctrl_is_excl, ns_is_excl;
5622 
5623 	/*
5624 	 * If the command doesn't require anything, then we're done.
5625 	 */
5626 	if (check->nck_excl == NVME_IOCTL_EXCL_SKIP) {
5627 		return (B_TRUE);
5628 	}
5629 
5630 	if (ioc->nioc_nsid == 0 || ioc->nioc_nsid == NVME_NSID_BCAST) {
5631 		ns = NULL;
5632 	} else {
5633 		ns = nvme_nsid2ns(nvme, ioc->nioc_nsid);
5634 	}
5635 
5636 	mutex_enter(&nvme->n_minor_mutex);
5637 	ctrl_is_excl = nvme->n_lock.nl_writer != NULL;
5638 	have_ctrl = nvme->n_lock.nl_writer == &minor->nm_ctrl_lock;
5639 	if (ns != NULL) {
5640 		/*
5641 		 * We explicitly test the namespace lock's writer versus asking
5642 		 * the minor because the minor's namespace lock may apply to a
5643 		 * different namespace.
5644 		 */
5645 		ns_is_excl = ns->ns_lock.nl_writer != NULL;
5646 		have_ns = ns->ns_lock.nl_writer == &minor->nm_ns_lock;
5647 		ASSERT0(have_ctrl && have_ns);
5648 #ifdef	DEBUG
5649 		if (have_ns) {
5650 			ASSERT3P(minor->nm_ns_lock.nli_ns, ==, ns);
5651 		}
5652 #endif
5653 	} else {
5654 		ns_is_excl = B_FALSE;
5655 		have_ns = B_FALSE;
5656 	}
5657 	ASSERT0(ctrl_is_excl && ns_is_excl);
5658 	mutex_exit(&nvme->n_minor_mutex);
5659 
5660 	if (check->nck_excl == NVME_IOCTL_EXCL_WRITE) {
5661 		if (ns == NULL) {
5662 			if (have_ctrl) {
5663 				return (B_TRUE);
5664 			}
5665 			return (nvme_ioctl_error(ioc,
5666 			    NVME_IOCTL_E_NEED_CTRL_WRLOCK, 0, 0));
5667 		} else {
5668 			if (have_ctrl || have_ns) {
5669 				return (B_TRUE);
5670 			}
5671 			return (nvme_ioctl_error(ioc,
5672 			    NVME_IOCTL_E_NEED_NS_WRLOCK, 0, 0));
5673 		}
5674 	}
5675 
5676 	/*
5677 	 * Now we have an operation that does not require exclusive access. We
5678 	 * can proceed as long as no one else has it or if someone does it is
5679 	 * us. Regardless of what we target, a controller lock will stop us.
5680 	 */
5681 	if (ctrl_is_excl && !have_ctrl) {
5682 		return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_LOCKED, 0, 0));
5683 	}
5684 
5685 	/*
5686 	 * Only check namespace exclusivity if we are targeting one.
5687 	 */
5688 	if (ns != NULL && ns_is_excl && !have_ns) {
5689 		return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_LOCKED, 0, 0));
5690 	}
5691 
5692 	return (B_TRUE);
5693 }
5694 
5695 /*
5696  * Perform common checking as to whether or not an ioctl operation may proceed.
5697  * We check in this function various aspects of the namespace attributes that
5698  * it's calling on. Once the namespace attributes and any possible rewriting
5699  * have been performed, then we proceed to check whether or not the requisite
5700  * exclusive access is present in nvme_ioctl_excl_check().
5701  */
5702 static boolean_t
5703 nvme_ioctl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
5704     const nvme_ioctl_check_t *check)
5705 {
5706 	/*
5707 	 * If the minor has a namespace pointer, then it is constrained to that
5708 	 * namespace. If a namespace is allowed, then there are only two valid
5709 	 * values that we can find. The first is matching the minor. The second
5710 	 * is our value zero, which will be transformed to the current
5711 	 * namespace.
5712 	 */
5713 	if (minor->nm_ns != NULL) {
5714 		if (!check->nck_ns_ok || !check->nck_ns_minor_ok) {
5715 			return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NOT_CTRL, 0,
5716 			    0));
5717 		}
5718 
5719 		if (ioc->nioc_nsid == 0) {
5720 			ioc->nioc_nsid = minor->nm_ns->ns_id;
5721 		} else if (ioc->nioc_nsid != minor->nm_ns->ns_id) {
5722 			return (nvme_ioctl_error(ioc,
5723 			    NVME_IOCTL_E_MINOR_WRONG_NS, 0, 0));
5724 		}
5725 
5726 		return (nvme_ioctl_excl_check(minor, ioc, check));
5727 	}
5728 
5729 	/*
5730 	 * If we've been told to skip checking the controller, here's where we
5731 	 * do that. This should really only be for commands which use the
5732 	 * namespace ID for listing purposes and therefore can have
5733 	 * traditionally illegal values here.
5734 	 */
5735 	if (check->nck_skip_ctrl) {
5736 		return (nvme_ioctl_excl_check(minor, ioc, check));
5737 	}
5738 
5739 	/*
5740 	 * At this point, we know that we're on the controller's node. We first
5741 	 * deal with the simple case, is a namespace allowed at all or not. If
5742 	 * it is not allowed, then the only acceptable value is zero.
5743 	 */
5744 	if (!check->nck_ns_ok) {
5745 		if (ioc->nioc_nsid != 0) {
5746 			return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_UNUSE, 0,
5747 			    0));
5748 		}
5749 
5750 		return (nvme_ioctl_excl_check(minor, ioc, check));
5751 	}
5752 
5753 	/*
5754 	 * At this point, we know that a controller is allowed to use a
5755 	 * namespace. If we haven't been given zero or the broadcast namespace,
5756 	 * check to see if it's actually a valid namespace ID. If it is outside of
5757 	 * range, then it is an error. Next, if we have been requested to
5758 	 * rewrite 0 (the 'this controller' indicator) as the broadcast namespace,
5759 	 * do so.
5760 	 *
5761 	 * While we validate that this namespace is within the valid range, we
5762 	 * do not check if it is active or inactive. That is left to our callers
5763 	 * to determine.
5764 	 */
5765 	if (ioc->nioc_nsid > minor->nm_ctrl->n_namespace_count &&
5766 	    ioc->nioc_nsid != NVME_NSID_BCAST) {
5767 		return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_RANGE, 0, 0));
5768 	}
5769 
5770 	if (ioc->nioc_nsid == 0 && check->nck_ctrl_rewrite) {
5771 		ioc->nioc_nsid = NVME_NSID_BCAST;
5772 	}
5773 
5774 	/*
5775 	 * Finally, see if we have ended up with a broadcast namespace ID
5776 	 * whether through specification or rewriting. If that is not allowed,
5777 	 * then that is an error.
5778 	 */
5779 	if (!check->nck_bcast_ok && ioc->nioc_nsid == NVME_NSID_BCAST) {
5780 		return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_BCAST_NS, 0, 0));
5781 	}
5782 
5783 	return (nvme_ioctl_excl_check(minor, ioc, check));
5784 }
5785 
5786 static int
5787 nvme_ioctl_ctrl_info(nvme_minor_t *minor, intptr_t arg, int mode,
5788     cred_t *cred_p)
5789 {
5790 	nvme_t *const nvme = minor->nm_ctrl;
5791 	nvme_ioctl_ctrl_info_t *info;
5792 	nvme_reg_cap_t cap = { 0 };
5793 	nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_CTRL };
5794 	void *idbuf;
5795 
5796 	if ((mode & FREAD) == 0)
5797 		return (EBADF);
5798 
5799 	info = kmem_alloc(sizeof (nvme_ioctl_ctrl_info_t), KM_NOSLEEP_LAZY);
5800 	if (info == NULL) {
5801 		return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
5802 		    mode));
5803 	}
5804 
5805 	if (ddi_copyin((void *)arg, info, sizeof (nvme_ioctl_ctrl_info_t),
5806 	    mode & FKIOCTL) != 0) {
5807 		kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
5808 		return (EFAULT);
5809 	}
5810 
5811 	if (!nvme_ioctl_check(minor, &info->nci_common,
5812 	    &nvme_check_ctrl_info)) {
5813 		goto copyout;
5814 	}
5815 
5816 	/*
5817 	 * We explicitly do not use the identify controller copy in the kernel
5818 	 * right now so that we can get a snapshot of the controller's
5819 	 * current capacity and values. While it's tempting to try to use this
5820 	 * to refresh the kernel's version, we don't, just to simplify the rest of
5821 	 * the driver right now.
5822 	 */
5823 	if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
5824 		info->nci_common = id.nid_common;
5825 		goto copyout;
5826 	}
5827 	bcopy(idbuf, &info->nci_ctrl_id, sizeof (nvme_identify_ctrl_t));
5828 	kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
5829 
5830 	/*
5831 	 * Use the kernel's cached common namespace information for this.
5832 	 */
5833 	bcopy(nvme->n_idcomns, &info->nci_common_ns,
5834 	    sizeof (nvme_identify_nsid_t));
5835 
5836 	info->nci_vers = nvme->n_version;
5837 
5838 	/*
5839 	 * The MPSMIN and MPSMAX fields in the CAP register use 0 to
5840 	 * specify the base page size of 4k (1<<12), so add 12 here to
5841 	 * get the real page size value.
5842 	 */
5843 	cap.r = nvme_get64(nvme, NVME_REG_CAP);
5844 	info->nci_caps.cap_mpsmax = 1 << (12 + cap.b.cap_mpsmax);
5845 	info->nci_caps.cap_mpsmin = 1 << (12 + cap.b.cap_mpsmin);
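	/* For example, a CAP.MPSMAX field value of 4 yields 1 << 16 = 64k. */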
5846 
5847 	info->nci_nintrs = (uint32_t)nvme->n_intr_cnt;
5848 
5849 copyout:
5850 	if (ddi_copyout(info, (void *)arg, sizeof (nvme_ioctl_ctrl_info_t),
5851 	    mode & FKIOCTL) != 0) {
5852 		kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
5853 		return (EFAULT);
5854 	}
5855 
5856 	kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
5857 	return (0);
5858 }
5859 
5860 static int
5861 nvme_ioctl_ns_info(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
5862 {
5863 	nvme_t *const nvme = minor->nm_ctrl;
5864 	nvme_ioctl_ns_info_t *ns_info;
5865 	nvme_namespace_t *ns;
5866 	nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_NSID };
5867 	void *idbuf;
5868 
5869 	if ((mode & FREAD) == 0)
5870 		return (EBADF);
5871 
5872 	ns_info = kmem_zalloc(sizeof (nvme_ioctl_ns_info_t), KM_NOSLEEP_LAZY);
5873 	if (ns_info == NULL) {
5874 		return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
5875 		    mode));
5876 	}
5877 
5878 	if (ddi_copyin((void *)arg, ns_info, sizeof (nvme_ioctl_ns_info_t),
5879 	    mode & FKIOCTL) != 0) {
5880 		kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
5881 		return (EFAULT);
5882 	}
5883 
5884 	if (!nvme_ioctl_check(minor, &ns_info->nni_common,
5885 	    &nvme_check_ns_info)) {
5886 		goto copyout;
5887 	}
5888 
5889 	ASSERT3U(ns_info->nni_common.nioc_nsid, >, 0);
5890 	ns = nvme_nsid2ns(nvme, ns_info->nni_common.nioc_nsid);
5891 
5892 	/*
5893 	 * First fetch a fresh copy of the namespace information. Most callers
5894 	 * are using this because they will want a mostly accurate snapshot of
5895 	 * capacity and utilization.
5896 	 */
5897 	id.nid_common.nioc_nsid = ns_info->nni_common.nioc_nsid;
5898 	if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
5899 		ns_info->nni_common = id.nid_common;
5900 		goto copyout;
5901 	}
5902 	bcopy(idbuf, &ns_info->nni_id, sizeof (nvme_identify_nsid_t));
5903 	kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
5904 
5905 	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
5906 	if (ns->ns_allocated)
5907 		ns_info->nni_state |= NVME_NS_STATE_ALLOCATED;
5908 
5909 	if (ns->ns_active)
5910 		ns_info->nni_state |= NVME_NS_STATE_ACTIVE;
5911 
5912 	if (ns->ns_ignore)
5913 		ns_info->nni_state |= NVME_NS_STATE_IGNORED;
5914 
5915 	if (ns->ns_attached) {
5916 		const char *addr;
5917 
5918 		ns_info->nni_state |= NVME_NS_STATE_ATTACHED;
5919 		addr = bd_address(ns->ns_bd_hdl);
5920 		if (strlcpy(ns_info->nni_addr, addr,
5921 		    sizeof (ns_info->nni_addr)) >= sizeof (ns_info->nni_addr)) {
5922 			nvme_mgmt_unlock(nvme);
5923 			(void) nvme_ioctl_error(&ns_info->nni_common,
5924 			    NVME_IOCTL_E_BD_ADDR_OVER, 0, 0);
5925 			goto copyout;
5926 		}
5927 	}
5928 	nvme_mgmt_unlock(nvme);
5929 
5930 copyout:
5931 	if (ddi_copyout(ns_info, (void *)arg, sizeof (nvme_ioctl_ns_info_t),
5932 	    mode & FKIOCTL) != 0) {
5933 		kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
5934 		return (EFAULT);
5935 	}
5936 
5937 	kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
5938 	return (0);
5939 }
5940 
5941 static int
5942 nvme_ioctl_identify(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
5943 {
5944 	_NOTE(ARGUNUSED(cred_p));
5945 	nvme_t *const nvme = minor->nm_ctrl;
5946 	void *idctl;
5947 	uint_t model;
5948 	nvme_ioctl_identify_t id;
5949 #ifdef	_MULTI_DATAMODEL
5950 	nvme_ioctl_identify32_t id32;
5951 #endif
5952 	boolean_t ns_minor;
5953 
5954 	if ((mode & FREAD) == 0)
5955 		return (EBADF);
5956 
5957 	model = ddi_model_convert_from(mode);
5958 	switch (model) {
5959 #ifdef	_MULTI_DATAMODEL
5960 	case DDI_MODEL_ILP32:
5961 		bzero(&id, sizeof (id));
5962 		if (ddi_copyin((void *)arg, &id32, sizeof (id32),
5963 		    mode & FKIOCTL) != 0) {
5964 			return (EFAULT);
5965 		}
5966 		id.nid_common.nioc_nsid = id32.nid_common.nioc_nsid;
5967 		id.nid_cns = id32.nid_cns;
5968 		id.nid_ctrlid = id32.nid_ctrlid;
5969 		id.nid_data = id32.nid_data;
5970 		break;
5971 #endif	/* _MULTI_DATAMODEL */
5972 	case DDI_MODEL_NONE:
5973 		if (ddi_copyin((void *)arg, &id, sizeof (id),
5974 		    mode & FKIOCTL) != 0) {
5975 			return (EFAULT);
5976 		}
5977 		break;
5978 	default:
5979 		return (ENOTSUP);
5980 	}
5981 
5982 	if (!nvme_ioctl_check(minor, &id.nid_common, &nvme_check_identify)) {
5983 		goto copyout;
5984 	}
5985 
5986 	ns_minor = minor->nm_ns != NULL;
5987 	if (!nvme_validate_identify(nvme, &id, ns_minor)) {
5988 		goto copyout;
5989 	}
5990 
5991 	if (nvme_identify(nvme, B_TRUE, &id, &idctl)) {
5992 		int ret = ddi_copyout(idctl, (void *)id.nid_data,
5993 		    NVME_IDENTIFY_BUFSIZE, mode & FKIOCTL);
5994 		kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);
5995 		if (ret != 0) {
5996 			(void) nvme_ioctl_error(&id.nid_common,
5997 			    NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
5998 			goto copyout;
5999 		}
6000 
6001 		nvme_ioctl_success(&id.nid_common);
6002 	}
6003 
6004 copyout:
6005 	switch (model) {
6006 #ifdef	_MULTI_DATAMODEL
6007 	case DDI_MODEL_ILP32:
6008 		id32.nid_common = id.nid_common;
6009 
6010 		if (ddi_copyout(&id32, (void *)arg, sizeof (id32),
6011 		    mode & FKIOCTL) != 0) {
6012 			return (EFAULT);
6013 		}
6014 		break;
6015 #endif	/* _MULTI_DATAMODEL */
6016 	case DDI_MODEL_NONE:
6017 		if (ddi_copyout(&id, (void *)arg, sizeof (id),
6018 		    mode & FKIOCTL) != 0) {
6019 			return (EFAULT);
6020 		}
6021 		break;
6022 	default:
6023 		return (ENOTSUP);
6024 	}
6025 
6026 	return (0);
6027 }
6028 
6029 /*
6030  * Execute commands on behalf of the various ioctls.
6031  *
6032  * If this returns true then the command completed successfully. Otherwise error
6033  * information is returned in the nvme_ioctl_common_t arguments.
6034  */
6035 typedef struct {
6036 	nvme_sqe_t *ica_sqe;
6037 	void *ica_data;
6038 	uint32_t ica_data_len;
6039 	uint_t ica_dma_flags;
6040 	int ica_copy_flags;
6041 	uint32_t ica_timeout;
6042 	uint32_t ica_cdw0;
6043 } nvme_ioc_cmd_args_t;
6044 
6045 static boolean_t
6046 nvme_ioc_cmd(nvme_t *nvme, nvme_ioctl_common_t *ioc, nvme_ioc_cmd_args_t *args)
6047 {
6048 	nvme_cmd_t *cmd;
6049 	boolean_t ret = B_FALSE;
6050 
6051 	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
6052 	cmd->nc_sqid = 0;
6053 
6054 	/*
6055 	 * This function is used to facilitate requests from
6056 	 * userspace, so don't panic if the command fails. This
6057 	 * is especially true for admin passthru commands, where
6058 	 * the actual command data structure is entirely defined
6059 	 * by userspace.
6060 	 */
6061 	cmd->nc_dontpanic = B_TRUE;
6062 
6063 	cmd->nc_callback = nvme_wakeup_cmd;
6064 	cmd->nc_sqe = *args->ica_sqe;
6065 
6066 	if ((args->ica_dma_flags & DDI_DMA_RDWR) != 0) {
6067 		if (args->ica_data == NULL) {
6068 			ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_DMA_MEM,
6069 			    0, 0);
6070 			goto free_cmd;
6071 		}
6072 
6073 		if (nvme_zalloc_dma(nvme, args->ica_data_len,
6074 		    args->ica_dma_flags, &nvme->n_prp_dma_attr, &cmd->nc_dma) !=
6075 		    DDI_SUCCESS) {
6076 			dev_err(nvme->n_dip, CE_WARN,
6077 			    "!nvme_zalloc_dma failed for nvme_ioc_cmd()");
6078 			ret = nvme_ioctl_error(ioc,
6079 			    NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6080 			goto free_cmd;
6081 		}
6082 
6083 		if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
6084 			ret = nvme_ioctl_error(ioc,
6085 			    NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6086 			goto free_cmd;
6087 		}
6088 
6089 		if ((args->ica_dma_flags & DDI_DMA_WRITE) != 0 &&
6090 		    ddi_copyin(args->ica_data, cmd->nc_dma->nd_memp,
6091 		    args->ica_data_len, args->ica_copy_flags) != 0) {
6092 			ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA,
6093 			    0, 0);
6094 			goto free_cmd;
6095 		}
6096 	}
6097 
6098 	nvme_admin_cmd(cmd, args->ica_timeout);
6099 
6100 	if (!nvme_check_cmd_status_ioctl(cmd, ioc)) {
6101 		ret = B_FALSE;
6102 		goto free_cmd;
6103 	}
6104 
6105 	args->ica_cdw0 = cmd->nc_cqe.cqe_dw0;
6106 
6107 	if ((args->ica_dma_flags & DDI_DMA_READ) != 0 &&
6108 	    ddi_copyout(cmd->nc_dma->nd_memp, args->ica_data,
6109 	    args->ica_data_len, args->ica_copy_flags) != 0) {
6110 		ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6111 		goto free_cmd;
6112 	}
6113 
6114 	ret = B_TRUE;
6115 	nvme_ioctl_success(ioc);
6116 
6117 free_cmd:
6118 	nvme_free_cmd(cmd);
6119 
6120 	return (ret);
6121 }
6122 
6123 static int
6124 nvme_ioctl_get_logpage(nvme_minor_t *minor, intptr_t arg, int mode,
6125     cred_t *cred_p)
6126 {
6127 	nvme_t *const nvme = minor->nm_ctrl;
6128 	void *buf;
6129 	nvme_ioctl_get_logpage_t log;
6130 	uint_t model;
6131 #ifdef	_MULTI_DATAMODEL
6132 	nvme_ioctl_get_logpage32_t log32;
6133 #endif
6134 
6135 	if ((mode & FREAD) == 0) {
6136 		return (EBADF);
6137 	}
6138 
6139 	model = ddi_model_convert_from(mode);
6140 	switch (model) {
6141 #ifdef	_MULTI_DATAMODEL
6142 	case DDI_MODEL_ILP32:
6143 		bzero(&log, sizeof (log));
6144 		if (ddi_copyin((void *)arg, &log32, sizeof (log32),
6145 		    mode & FKIOCTL) != 0) {
6146 			return (EFAULT);
6147 		}
6148 
6149 		log.nigl_common.nioc_nsid = log32.nigl_common.nioc_nsid;
6150 		log.nigl_csi = log32.nigl_csi;
6151 		log.nigl_lid = log32.nigl_lid;
6152 		log.nigl_lsp = log32.nigl_lsp;
6153 		log.nigl_len = log32.nigl_len;
6154 		log.nigl_offset = log32.nigl_offset;
6155 		log.nigl_data = log32.nigl_data;
6156 		break;
6157 #endif	/* _MULTI_DATAMODEL */
6158 	case DDI_MODEL_NONE:
6159 		if (ddi_copyin((void *)arg, &log, sizeof (log),
6160 		    mode & FKIOCTL) != 0) {
6161 			return (EFAULT);
6162 		}
6163 		break;
6164 	default:
6165 		return (ENOTSUP);
6166 	}
6167 
6168 	/*
6169 	 * Eventually we'd like to take a soft lock to keep the namespaces from
6170 	 * changing out from under us during this operation. But we
6171 	 * haven't implemented that yet.
6172 	 */
6173 	if (!nvme_ioctl_check(minor, &log.nigl_common,
6174 	    &nvme_check_get_logpage)) {
6175 		goto copyout;
6176 	}
6177 
6178 	if (!nvme_validate_logpage(nvme, &log)) {
6179 		goto copyout;
6180 	}
6181 
6182 	if (nvme_get_logpage(nvme, B_TRUE, &log, &buf)) {
6183 		int copy;
6184 
6185 		copy = ddi_copyout(buf, (void *)log.nigl_data, log.nigl_len,
6186 		    mode & FKIOCTL);
6187 		kmem_free(buf, log.nigl_len);
6188 		if (copy != 0) {
6189 			(void) nvme_ioctl_error(&log.nigl_common,
6190 			    NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6191 			goto copyout;
6192 		}
6193 
6194 		nvme_ioctl_success(&log.nigl_common);
6195 	}
6196 
6197 copyout:
6198 	switch (model) {
6199 #ifdef	_MULTI_DATAMODEL
6200 	case DDI_MODEL_ILP32:
6201 		bzero(&log32, sizeof (log32));
6202 
6203 		log32.nigl_common = log.nigl_common;
6204 		log32.nigl_csi = log.nigl_csi;
6205 		log32.nigl_lid = log.nigl_lid;
6206 		log32.nigl_lsp = log.nigl_lsp;
6207 		log32.nigl_len = log.nigl_len;
6208 		log32.nigl_offset = log.nigl_offset;
6209 		log32.nigl_data = log.nigl_data;
6210 		if (ddi_copyout(&log32, (void *)arg, sizeof (log32),
6211 		    mode & FKIOCTL) != 0) {
6212 			return (EFAULT);
6213 		}
6214 		break;
6215 #endif	/* _MULTI_DATAMODEL */
6216 	case DDI_MODEL_NONE:
6217 		if (ddi_copyout(&log, (void *)arg, sizeof (log),
6218 		    mode & FKIOCTL) != 0) {
6219 			return (EFAULT);
6220 		}
6221 		break;
6222 	default:
6223 		return (ENOTSUP);
6224 	}
6225 
6226 	return (0);
6227 }
6228 
6229 static int
6230 nvme_ioctl_get_feature(nvme_minor_t *minor, intptr_t arg, int mode,
6231     cred_t *cred_p)
6232 {
6233 	nvme_t *const nvme = minor->nm_ctrl;
6234 	nvme_ioctl_get_feature_t feat;
6235 	uint_t model;
6236 #ifdef	_MULTI_DATAMODEL
6237 	nvme_ioctl_get_feature32_t feat32;
6238 #endif
6239 	nvme_get_features_dw10_t gf_dw10 = { 0 };
6240 	nvme_ioc_cmd_args_t args = { NULL };
6241 	nvme_sqe_t sqe = {
6242 	    .sqe_opc	= NVME_OPC_GET_FEATURES
6243 	};
6244 
6245 	if ((mode & FREAD) == 0) {
6246 		return (EBADF);
6247 	}
6248 
6249 	model = ddi_model_convert_from(mode);
6250 	switch (model) {
6251 #ifdef	_MULTI_DATAMODEL
6252 	case DDI_MODEL_ILP32:
6253 		bzero(&feat, sizeof (feat));
6254 		if (ddi_copyin((void *)arg, &feat32, sizeof (feat32),
6255 		    mode & FKIOCTL) != 0) {
6256 			return (EFAULT);
6257 		}
6258 
6259 		feat.nigf_common.nioc_nsid = feat32.nigf_common.nioc_nsid;
6260 		feat.nigf_fid = feat32.nigf_fid;
6261 		feat.nigf_sel = feat32.nigf_sel;
6262 		feat.nigf_cdw11 = feat32.nigf_cdw11;
6263 		feat.nigf_data = feat32.nigf_data;
6264 		feat.nigf_len = feat32.nigf_len;
6265 		break;
6266 #endif	/* _MULTI_DATAMODEL */
6267 	case DDI_MODEL_NONE:
6268 		if (ddi_copyin((void *)arg, &feat, sizeof (feat),
6269 		    mode & FKIOCTL) != 0) {
6270 			return (EFAULT);
6271 		}
6272 		break;
6273 	default:
6274 		return (ENOTSUP);
6275 	}
6276 
6277 	if (!nvme_ioctl_check(minor, &feat.nigf_common,
6278 	    &nvme_check_get_feature)) {
6279 		goto copyout;
6280 	}
6281 
6282 	if (!nvme_validate_get_feature(nvme, &feat)) {
6283 		goto copyout;
6284 	}
6285 
6286 	gf_dw10.b.gt_fid = bitx32(feat.nigf_fid, 7, 0);
6287 	gf_dw10.b.gt_sel = bitx32(feat.nigf_sel, 2, 0);
6288 	sqe.sqe_cdw10 = gf_dw10.r;
6289 	sqe.sqe_cdw11 = feat.nigf_cdw11;
6290 	sqe.sqe_nsid = feat.nigf_common.nioc_nsid;
6291 
6292 	args.ica_sqe = &sqe;
6293 	if (feat.nigf_len != 0) {
6294 		args.ica_data = (void *)feat.nigf_data;
6295 		args.ica_data_len = feat.nigf_len;
6296 		args.ica_dma_flags = DDI_DMA_READ;
6297 	}
6298 	args.ica_copy_flags = mode;
6299 	args.ica_timeout = nvme_admin_cmd_timeout;
6300 
6301 	if (!nvme_ioc_cmd(nvme, &feat.nigf_common, &args)) {
6302 		goto copyout;
6303 	}
6304 
6305 	feat.nigf_cdw0 = args.ica_cdw0;
6306 
6307 copyout:
6308 	switch (model) {
6309 #ifdef	_MULTI_DATAMODEL
6310 	case DDI_MODEL_ILP32:
6311 		bzero(&feat32, sizeof (feat32));
6312 
6313 		feat32.nigf_common = feat.nigf_common;
6314 		feat32.nigf_fid = feat.nigf_fid;
6315 		feat32.nigf_sel = feat.nigf_sel;
6316 		feat32.nigf_cdw11 = feat.nigf_cdw11;
6317 		feat32.nigf_data = feat.nigf_data;
6318 		feat32.nigf_len = feat.nigf_len;
6319 		feat32.nigf_cdw0 = feat.nigf_cdw0;
6320 		if (ddi_copyout(&feat32, (void *)arg, sizeof (feat32),
6321 		    mode & FKIOCTL) != 0) {
6322 			return (EFAULT);
6323 		}
6324 		break;
6325 #endif	/* _MULTI_DATAMODEL */
6326 	case DDI_MODEL_NONE:
6327 		if (ddi_copyout(&feat, (void *)arg, sizeof (feat),
6328 		    mode & FKIOCTL) != 0) {
6329 			return (EFAULT);
6330 		}
6331 		break;
6332 	default:
6333 		return (ENOTSUP);
6334 	}
6335 
6336 	return (0);
6337 }
6338 
6339 static int
6340 nvme_ioctl_format(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6341 {
6342 	nvme_t *const nvme = minor->nm_ctrl;
6343 	nvme_ioctl_format_t ioc;
6344 
6345 	if ((mode & FWRITE) == 0)
6346 		return (EBADF);
6347 
6348 	if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6349 		return (EPERM);
6350 
6351 	if (ddi_copyin((void *)(uintptr_t)arg, &ioc,
6352 	    sizeof (nvme_ioctl_format_t), mode & FKIOCTL) != 0)
6353 		return (EFAULT);
6354 
6355 	if (!nvme_ioctl_check(minor, &ioc.nif_common, &nvme_check_format)) {
6356 		goto copyout;
6357 	}
6358 
6359 	if (!nvme_validate_format(nvme, &ioc)) {
6360 		goto copyout;
6361 	}
6362 
6363 	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6364 	if (!nvme_no_blkdev_attached(nvme, ioc.nif_common.nioc_nsid)) {
6365 		nvme_mgmt_unlock(nvme);
6366 		(void) nvme_ioctl_error(&ioc.nif_common,
6367 		    NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
6368 		goto copyout;
6369 	}
6370 
6371 	if (nvme_format_nvm(nvme, &ioc)) {
6372 		nvme_ioctl_success(&ioc.nif_common);
6373 		nvme_rescan_ns(nvme, ioc.nif_common.nioc_nsid);
6374 	}
6375 	nvme_mgmt_unlock(nvme);
6376 
6377 copyout:
6378 	if (ddi_copyout(&ioc, (void *)(uintptr_t)arg, sizeof (ioc),
6379 	    mode & FKIOCTL) != 0) {
6380 		return (EFAULT);
6381 	}
6382 
6383 	return (0);
6384 }
6385 
6386 static int
6387 nvme_ioctl_detach(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6388 {
6389 	nvme_t *const nvme = minor->nm_ctrl;
6390 	nvme_ioctl_common_t com;
6391 
6392 	if ((mode & FWRITE) == 0)
6393 		return (EBADF);
6394 
6395 	if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6396 		return (EPERM);
6397 
6398 	if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
6399 	    mode & FKIOCTL) != 0) {
6400 		return (EFAULT);
6401 	}
6402 
6403 	if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
6404 		goto copyout;
6405 	}
6406 
6407 	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6408 	if (nvme_detach_ns(nvme, &com)) {
6409 		nvme_ioctl_success(&com);
6410 	}
6411 	nvme_mgmt_unlock(nvme);
6412 
6413 copyout:
6414 	if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
6415 	    mode & FKIOCTL) != 0) {
6416 		return (EFAULT);
6417 	}
6418 
6419 	return (0);
6420 }
6421 
6422 static int
6423 nvme_ioctl_attach(nvme_minor_t *minor, intptr_t arg, int mode,
6424     cred_t *cred_p)
6425 {
6426 	nvme_t *const nvme = minor->nm_ctrl;
6427 	nvme_ioctl_common_t com;
6428 	nvme_namespace_t *ns;
6429 
6430 	if ((mode & FWRITE) == 0)
6431 		return (EBADF);
6432 
6433 	if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6434 		return (EPERM);
6435 
6436 	if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
6437 	    mode & FKIOCTL) != 0) {
6438 		return (EFAULT);
6439 	}
6440 
6441 	if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
6442 		goto copyout;
6443 	}
6444 
6445 	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6446 	ns = nvme_nsid2ns(nvme, com.nioc_nsid);
6447 
6448 	/*
6449 	 * Strictly speaking we shouldn't need to call nvme_init_ns() here as
6450 	 * we should be properly refreshing the internal state when we are
6451 	 * issuing commands that change things. However, we opt to still do so
6452 	 * as a bit of a safety check in case we gave the kernel something bad
6453 	 * or a vendor unique command somehow changed things behind our backs.
6454 	 */
6455 	if (!ns->ns_attached) {
6456 		(void) nvme_rescan_ns(nvme, com.nioc_nsid);
6457 		if (nvme_attach_ns(nvme, &com)) {
6458 			nvme_ioctl_success(&com);
6459 		}
6460 	} else {
6461 		nvme_ioctl_success(&com);
6462 	}
6463 	nvme_mgmt_unlock(nvme);
6464 
6465 copyout:
6466 	if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
6467 	    mode & FKIOCTL) != 0) {
6468 		return (EFAULT);
6469 	}
6470 
6471 	return (0);
6472 }
6473 
6474 static void
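/*
 * Tell the DDI UFM subsystem that the firmware image information for this
 * device may have changed and drop our cached copy of the firmware slot log
 * so that it is re-read on the next UFM request.
 */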
nvme_ufm_update(nvme_t * nvme)6475 nvme_ufm_update(nvme_t *nvme)
6476 {
6477 	mutex_enter(&nvme->n_fwslot_mutex);
6478 	ddi_ufm_update(nvme->n_ufmh);
6479 	if (nvme->n_fwslot != NULL) {
6480 		kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
6481 		nvme->n_fwslot = NULL;
6482 	}
6483 	mutex_exit(&nvme->n_fwslot_mutex);
6484 }
6485 
6486 /*
6487  * Download new firmware to the device's internal staging area. We do not call
6488  * nvme_ufm_update() here because after a firmware download, there has been no
6489  * change to any of the actual persistent firmware data. That requires a
6490  * subsequent ioctl (NVME_IOC_FIRMWARE_COMMIT) to commit the firmware to a slot
6491  * or to activate a slot.
6492  */
6493 static int
6494 nvme_ioctl_firmware_download(nvme_minor_t *minor, intptr_t arg, int mode,
6495     cred_t *cred_p)
6496 {
6497 	nvme_t *const nvme = minor->nm_ctrl;
6498 	nvme_ioctl_fw_load_t fw;
6499 	uint64_t len, maxcopy;
6500 	offset_t offset;
6501 	uint32_t gran;
6502 	nvme_valid_ctrl_data_t data;
6503 	uintptr_t buf;
6504 	nvme_sqe_t sqe = {
6505 	    .sqe_opc	= NVME_OPC_FW_IMAGE_LOAD
6506 	};
6507 
6508 	if ((mode & FWRITE) == 0)
6509 		return (EBADF);
6510 
6511 	if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6512 		return (EPERM);
6513 
6514 	if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
6515 	    mode & FKIOCTL) != 0) {
6516 		return (EFAULT);
6517 	}
6518 
6519 	if (!nvme_ioctl_check(minor, &fw.fwl_common, &nvme_check_firmware)) {
6520 		goto copyout;
6521 	}
6522 
6523 	if (!nvme_validate_fw_load(nvme, &fw)) {
6524 		goto copyout;
6525 	}
6526 
6527 	len = fw.fwl_len;
6528 	offset = fw.fwl_off;
6529 	buf = fw.fwl_buf;
6530 
6531 	/*
6532 	 * We need to determine the minimum and maximum amount of data that we
6533 	 * will send to the device in a given go. Starting in NVMe 1.3 this must
6534 	 * be a multiple of the firmware update granularity (FWUG), but must not
6535 	 * exceed the maximum data transfer that we've set. Many devices don't
6536 	 * report something here, which means we'll end up getting our default
6537 	 * value. Our policy is a little simple, but it's basically if the
6538 	 * maximum data transfer is evenly divisible by the granularity, then use
6539 	 * it. Otherwise we use the granularity itself. The granularity is
6540 	 * always in page sized units, so trying to find another optimum point
6541 	 * isn't worth it. If we encounter a contradiction, then we will have to
6542 	 * error out.
6543 	 */
6544 	data.vcd_vers = &nvme->n_version;
6545 	data.vcd_id = nvme->n_idctl;
6546 	gran = nvme_fw_load_granularity(&data);
6547 
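	/*
	 * Illustrative numbers only: with a 1 MiB maximum transfer and a 4 KiB
	 * granularity we can send 1 MiB per command; with a 96 KiB maximum and
	 * a 64 KiB granularity we fall back to 64 KiB chunks; a granularity
	 * larger than the maximum transfer is a contradiction we cannot
	 * satisfy.
	 */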
6548 	if ((nvme->n_max_data_transfer_size % gran) == 0) {
6549 		maxcopy = nvme->n_max_data_transfer_size;
6550 	} else if (gran <= nvme->n_max_data_transfer_size) {
6551 		maxcopy = gran;
6552 	} else {
6553 		(void) nvme_ioctl_error(&fw.fwl_common,
6554 		    NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN, 0, 0);
6555 		goto copyout;
6556 	}
6557 
6558 	while (len > 0) {
6559 		nvme_ioc_cmd_args_t args = { NULL };
6560 		uint64_t copylen = MIN(maxcopy, len);
6561 
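		/*
		 * Firmware Image Download takes a zero-based dword count in
		 * CDW10 (NUMD) and a dword offset in CDW11 (OFST), hence the
		 * shift by NVME_DWORD_SHIFT and the minus one below.
		 */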
6562 		sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
6563 		sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);
6564 
6565 		args.ica_sqe = &sqe;
6566 		args.ica_data = (void *)buf;
6567 		args.ica_data_len = copylen;
6568 		args.ica_dma_flags = DDI_DMA_WRITE;
6569 		args.ica_copy_flags = mode;
6570 		args.ica_timeout = nvme_admin_cmd_timeout;
6571 
6572 		if (!nvme_ioc_cmd(nvme, &fw.fwl_common, &args)) {
6573 			break;
6574 		}
6575 
6576 		buf += copylen;
6577 		offset += copylen;
6578 		len -= copylen;
6579 	}
6580 
6581 copyout:
6582 	if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
6583 	    mode & FKIOCTL) != 0) {
6584 		return (EFAULT);
6585 	}
6586 
6587 	return (0);
6588 }
6589 
6590 static int
6591 nvme_ioctl_firmware_commit(nvme_minor_t *minor, intptr_t arg, int mode,
6592     cred_t *cred_p)
6593 {
6594 	nvme_t *const nvme = minor->nm_ctrl;
6595 	nvme_ioctl_fw_commit_t fw;
6596 	nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
6597 	nvme_ioc_cmd_args_t args = { NULL };
6598 	nvme_sqe_t sqe = {
6599 	    .sqe_opc	= NVME_OPC_FW_ACTIVATE
6600 	};
6601 
6602 	if ((mode & FWRITE) == 0)
6603 		return (EBADF);
6604 
6605 	if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6606 		return (EPERM);
6607 
6608 	if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
6609 	    mode & FKIOCTL) != 0) {
6610 		return (EFAULT);
6611 	}
6612 
6613 	if (!nvme_ioctl_check(minor, &fw.fwc_common, &nvme_check_firmware)) {
6614 		goto copyout;
6615 	}
6616 
6617 	if (!nvme_validate_fw_commit(nvme, &fw)) {
6618 		goto copyout;
6619 	}
6620 
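	/*
	 * Firmware Commit encodes the target slot (FS) and the commit action
	 * (CA) in CDW10; nvme_firmware_commit_dw10_t packs those fields and
	 * its raw value is placed in the submission queue entry.
	 */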
6621 	fc_dw10.b.fc_slot = fw.fwc_slot;
6622 	fc_dw10.b.fc_action = fw.fwc_action;
6623 	sqe.sqe_cdw10 = fc_dw10.r;
6624 
6625 	args.ica_sqe = &sqe;
6626 	args.ica_timeout = nvme_commit_save_cmd_timeout;
6627 
6628 	/*
6629 	 * There are no conditional actions to take based on this succeeding or
6630 	 * failing. A failure is recorded in the ioctl structure returned to the
6631 	 * user.
6632 	 */
6633 	(void) nvme_ioc_cmd(nvme, &fw.fwc_common, &args);
6634 
6635 	/*
6636 	 * Let the DDI UFM subsystem know that the firmware information for
6637 	 * this device has changed. We perform this unconditionally as an
6638 	 * invalidation doesn't particularly hurt us.
6639 	 */
6640 	nvme_ufm_update(nvme);
6641 
6642 copyout:
6643 	if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
6644 	    mode & FKIOCTL) != 0) {
6645 		return (EFAULT);
6646 	}
6647 
6648 	return (0);
6649 }
6650 
6651 /*
6652  * Helper to copy in a passthru command from userspace, handling
6653  * different data models.
6654  */
6655 static int
6656 nvme_passthru_copyin_cmd(const void *buf, nvme_ioctl_passthru_t *cmd, int mode)
6657 {
6658 	switch (ddi_model_convert_from(mode & FMODELS)) {
6659 #ifdef _MULTI_DATAMODEL
6660 	case DDI_MODEL_ILP32: {
6661 		nvme_ioctl_passthru32_t cmd32;
6662 
6663 		if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0)
6664 			return (EFAULT);
6665 
6666 		bzero(cmd, sizeof (nvme_ioctl_passthru_t));
6667 
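		/*
		 * The ILP32 structure uses 32-bit types for the buffer address
		 * and length, so each field is widened individually rather
		 * than copied wholesale.
		 */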
6668 		cmd->npc_common.nioc_nsid = cmd32.npc_common.nioc_nsid;
6669 		cmd->npc_opcode = cmd32.npc_opcode;
6670 		cmd->npc_timeout = cmd32.npc_timeout;
6671 		cmd->npc_flags = cmd32.npc_flags;
6672 		cmd->npc_impact = cmd32.npc_impact;
6673 		cmd->npc_cdw12 = cmd32.npc_cdw12;
6674 		cmd->npc_cdw13 = cmd32.npc_cdw13;
6675 		cmd->npc_cdw14 = cmd32.npc_cdw14;
6676 		cmd->npc_cdw15 = cmd32.npc_cdw15;
6677 		cmd->npc_buflen = cmd32.npc_buflen;
6678 		cmd->npc_buf = cmd32.npc_buf;
6679 		break;
6680 	}
6681 #endif	/* _MULTI_DATAMODEL */
6682 	case DDI_MODEL_NONE:
6683 		if (ddi_copyin(buf, (void *)cmd, sizeof (nvme_ioctl_passthru_t),
6684 		    mode) != 0) {
6685 			return (EFAULT);
6686 		}
6687 		break;
6688 	default:
6689 		return (ENOTSUP);
6690 	}
6691 
6692 	return (0);
6693 }
6694 
6695 /*
6696  * Helper to copy out a passthru command result to userspace, handling
6697  * different data models.
6698  */
6699 static int
6700 nvme_passthru_copyout_cmd(const nvme_ioctl_passthru_t *cmd, void *buf, int mode)
6701 {
6702 	switch (ddi_model_convert_from(mode & FMODELS)) {
6703 #ifdef _MULTI_DATAMODEL
6704 	case DDI_MODEL_ILP32: {
6705 		nvme_ioctl_passthru32_t cmd32;
6706 
6707 		bzero(&cmd32, sizeof (nvme_ioctl_passthru32_t));
6708 
6709 		cmd32.npc_common = cmd->npc_common;
6710 		cmd32.npc_opcode = cmd->npc_opcode;
6711 		cmd32.npc_timeout = cmd->npc_timeout;
6712 		cmd32.npc_flags = cmd->npc_flags;
6713 		cmd32.npc_impact = cmd->npc_impact;
6714 		cmd32.npc_cdw0 = cmd->npc_cdw0;
6715 		cmd32.npc_cdw12 = cmd->npc_cdw12;
6716 		cmd32.npc_cdw13 = cmd->npc_cdw13;
6717 		cmd32.npc_cdw14 = cmd->npc_cdw14;
6718 		cmd32.npc_cdw15 = cmd->npc_cdw15;
6719 		cmd32.npc_buflen = (size32_t)cmd->npc_buflen;
6720 		cmd32.npc_buf = (uintptr32_t)cmd->npc_buf;
6721 		if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0)
6722 			return (EFAULT);
6723 		break;
6724 	}
6725 #endif	/* _MULTI_DATAMODEL */
6726 	case DDI_MODEL_NONE:
6727 		if (ddi_copyout(cmd, buf, sizeof (nvme_ioctl_passthru_t),
6728 		    mode) != 0) {
6729 			return (EFAULT);
6730 		}
6731 		break;
6732 	default:
6733 		return (ENOTSUP);
6734 	}
6735 	return (0);
6736 }
6737 
6738 /*
6739  * Run an arbitrary vendor-specific admin command on the device.
6740  */
6741 static int
6742 nvme_ioctl_passthru(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6743 {
6744 	nvme_t *const nvme = minor->nm_ctrl;
6745 	int rv;
6746 	nvme_ioctl_passthru_t pass;
6747 	nvme_sqe_t sqe;
6748 	nvme_ioc_cmd_args_t args = { NULL };
6749 
6750 	/*
6751 	 * Basic checks: permissions, data model, argument size.
6752 	 */
6753 	if ((mode & FWRITE) == 0)
6754 		return (EBADF);
6755 
6756 	if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6757 		return (EPERM);
6758 
6759 	if ((rv = nvme_passthru_copyin_cmd((void *)(uintptr_t)arg, &pass,
6760 	    mode)) != 0) {
6761 		return (rv);
6762 	}
6763 
6764 	if (!nvme_ioctl_check(minor, &pass.npc_common, &nvme_check_passthru)) {
6765 		goto copyout;
6766 	}
6767 
6768 	if (!nvme_validate_vuc(nvme, &pass)) {
6769 		goto copyout;
6770 	}
6771 
6772 	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6773 	if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
6774 		/*
6775 		 * We've been told this has ns impact. Right now force that to
6776 		 * be every ns until we have more use cases and reason to trust
6777 		 * the nsid field.
6778 		 */
6779 		if (!nvme_no_blkdev_attached(nvme, NVME_NSID_BCAST)) {
6780 			nvme_mgmt_unlock(nvme);
6781 			(void) nvme_ioctl_error(&pass.npc_common,
6782 			    NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
6783 			goto copyout;
6784 		}
6785 	}
6786 
6787 	bzero(&sqe, sizeof (sqe));
6788 
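	/*
	 * CDW10 carries the data transfer length in dwords, matching the
	 * spec's suggested use of CDW10 (NDT) for vendor specific commands;
	 * the remaining command dwords come straight from the caller.
	 */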
6789 	sqe.sqe_opc = pass.npc_opcode;
6790 	sqe.sqe_nsid = pass.npc_common.nioc_nsid;
6791 	sqe.sqe_cdw10 = (uint32_t)(pass.npc_buflen >> NVME_DWORD_SHIFT);
6792 	sqe.sqe_cdw12 = pass.npc_cdw12;
6793 	sqe.sqe_cdw13 = pass.npc_cdw13;
6794 	sqe.sqe_cdw14 = pass.npc_cdw14;
6795 	sqe.sqe_cdw15 = pass.npc_cdw15;
6796 
6797 	args.ica_sqe = &sqe;
6798 	args.ica_data = (void *)pass.npc_buf;
6799 	args.ica_data_len = pass.npc_buflen;
6800 	args.ica_copy_flags = mode;
6801 	args.ica_timeout = pass.npc_timeout;
6802 
6803 	if ((pass.npc_flags & NVME_PASSTHRU_READ) != 0)
6804 		args.ica_dma_flags |= DDI_DMA_READ;
6805 	else if ((pass.npc_flags & NVME_PASSTHRU_WRITE) != 0)
6806 		args.ica_dma_flags |= DDI_DMA_WRITE;
6807 
6808 	if (nvme_ioc_cmd(nvme, &pass.npc_common, &args)) {
6809 		pass.npc_cdw0 = args.ica_cdw0;
6810 		if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
6811 			nvme_rescan_ns(nvme, NVME_NSID_BCAST);
6812 		}
6813 	}
6814 	nvme_mgmt_unlock(nvme);
6815 
6816 copyout:
6817 	rv = nvme_passthru_copyout_cmd(&pass, (void *)(uintptr_t)arg,
6818 	    mode);
6819 
6820 	return (rv);
6821 }
6822 
6823 static int
6824 nvme_ioctl_lock(nvme_minor_t *minor, intptr_t arg, int mode,
6825     cred_t *cred_p)
6826 {
6827 	nvme_ioctl_lock_t lock;
6828 	const nvme_lock_flags_t all_flags = NVME_LOCK_F_DONT_BLOCK;
6829 	nvme_t *nvme = minor->nm_ctrl;
6830 
6831 	if ((mode & FWRITE) == 0)
6832 		return (EBADF);
6833 
6834 	if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6835 		return (EPERM);
6836 
6837 	if (ddi_copyin((void *)(uintptr_t)arg, &lock, sizeof (lock),
6838 	    mode & FKIOCTL) != 0) {
6839 		return (EFAULT);
6840 	}
6841 
6842 	if (lock.nil_ent != NVME_LOCK_E_CTRL &&
6843 	    lock.nil_ent != NVME_LOCK_E_NS) {
6844 		(void) nvme_ioctl_error(&lock.nil_common,
6845 		    NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
6846 		goto copyout;
6847 	}
6848 
6849 	if (lock.nil_level != NVME_LOCK_L_READ &&
6850 	    lock.nil_level != NVME_LOCK_L_WRITE) {
6851 		(void) nvme_ioctl_error(&lock.nil_common,
6852 		    NVME_IOCTL_E_BAD_LOCK_LEVEL, 0, 0);
6853 		goto copyout;
6854 	}
6855 
6856 	if ((lock.nil_flags & ~all_flags) != 0) {
6857 		(void) nvme_ioctl_error(&lock.nil_common,
6858 		    NVME_IOCTL_E_BAD_LOCK_FLAGS, 0, 0);
6859 		goto copyout;
6860 	}
6861 
6862 	if (!nvme_ioctl_check(minor, &lock.nil_common, &nvme_check_locking)) {
6863 		goto copyout;
6864 	}
6865 
6866 	/*
6867 	 * If we're on a namespace, confirm that we're not asking for the
6868 	 * controller.
6869 	 */
6870 	if (lock.nil_common.nioc_nsid != 0 &&
6871 	    lock.nil_ent == NVME_LOCK_E_CTRL) {
6872 		(void) nvme_ioctl_error(&lock.nil_common,
6873 		    NVME_IOCTL_E_NS_CANNOT_LOCK_CTRL, 0, 0);
6874 		goto copyout;
6875 	}
6876 
6877 	/*
6878 	 * We've reached the point where we can no longer actually check things
6879 	 * without serializing state. First, we need to check to make sure that
6880 	 * none of our invariants are being broken for locking:
6881 	 *
6882 	 * 1) The caller isn't already blocking for a lock operation to
6883 	 * complete.
6884 	 *
6885 	 * 2) The caller is attempting to grab a lock that they already have.
6886 	 * While there are other rule violations that this might create, we opt
6887 	 * to check this ahead of it so we can have slightly better error
6888 	 * messages for our callers.
6889 	 *
6890 	 * 3) The caller is trying to grab a controller lock, while holding a
6891 	 * namespace lock.
6892 	 *
6893 	 * 4) The caller has a controller write lock and is trying to get a
6894 	 * namespace lock. For now, we disallow this case. Holding a controller
6895 	 * read lock is allowed, but the write lock allows you to operate on all
6896 	 * namespaces anyways. In addition, this simplifies the locking logic;
6897 	 * namespaces anyway. In addition, this simplifies the locking logic;
6898 	 *
6899 	 * 5) The caller is trying to acquire a second namespace lock when they
6900 	 * already have one.
6901 	 */
6902 	mutex_enter(&nvme->n_minor_mutex);
6903 	if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_BLOCKED ||
6904 	    minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_BLOCKED) {
6905 		(void) nvme_ioctl_error(&lock.nil_common,
6906 		    NVME_IOCTL_E_LOCK_PENDING, 0, 0);
6907 		mutex_exit(&nvme->n_minor_mutex);
6908 		goto copyout;
6909 	}
6910 
6911 	if ((lock.nil_ent == NVME_LOCK_E_CTRL &&
6912 	    minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) ||
6913 	    (lock.nil_ent == NVME_LOCK_E_NS &&
6914 	    minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
6915 	    minor->nm_ns_lock.nli_ns->ns_id == lock.nil_common.nioc_nsid)) {
6916 		(void) nvme_ioctl_error(&lock.nil_common,
6917 		    NVME_IOCTL_E_LOCK_ALREADY_HELD, 0, 0);
6918 		mutex_exit(&nvme->n_minor_mutex);
6919 		goto copyout;
6920 	}
6921 
6922 	if (lock.nil_ent == NVME_LOCK_E_CTRL &&
6923 	    minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
6924 		(void) nvme_ioctl_error(&lock.nil_common,
6925 		    NVME_IOCTL_E_LOCK_NO_CTRL_WITH_NS, 0, 0);
6926 		mutex_exit(&nvme->n_minor_mutex);
6927 		goto copyout;
6928 	}
6929 
6930 	if (lock.nil_ent == NVME_LOCK_E_NS &&
6931 	    (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
6932 	    minor->nm_ctrl_lock.nli_curlevel == NVME_LOCK_L_WRITE)) {
6933 		(void) nvme_ioctl_error(&lock.nil_common,
6934 		    NVME_IOCTL_LOCK_NO_NS_WITH_CTRL_WRLOCK, 0, 0);
6935 		mutex_exit(&nvme->n_minor_mutex);
6936 		goto copyout;
6937 	}
6938 
6939 	if (lock.nil_ent == NVME_LOCK_E_NS &&
6940 	    minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
6941 		(void) nvme_ioctl_error(&lock.nil_common,
6942 		    NVME_IOCTL_E_LOCK_NO_2ND_NS, 0, 0);
6943 		mutex_exit(&nvme->n_minor_mutex);
6944 		goto copyout;
6945 	}
6946 
6947 
6948 #ifdef	DEBUG
6949 	/*
6950 	 * This is a big block of sanity checks to make sure that we haven't
6951 	 * allowed anything bad to happen.
6952 	 */
6953 	if (lock.nil_ent == NVME_LOCK_E_NS) {
6954 		ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
6955 		ASSERT3U(minor->nm_ns_lock.nli_state, ==,
6956 		    NVME_LOCK_STATE_UNLOCKED);
6957 		ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
6958 		ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
6959 
6960 		if (minor->nm_ns != NULL) {
6961 			ASSERT3U(minor->nm_ns->ns_id, ==,
6962 			    lock.nil_common.nioc_nsid);
6963 		}
6964 
6965 		ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
6966 	} else {
6967 		ASSERT3P(minor->nm_ctrl_lock.nli_lock, ==, NULL);
6968 		ASSERT3U(minor->nm_ctrl_lock.nli_state, ==,
6969 		    NVME_LOCK_STATE_UNLOCKED);
6970 		ASSERT3U(minor->nm_ctrl_lock.nli_curlevel, ==, 0);
6971 		ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
6972 		ASSERT0(list_link_active(&minor->nm_ctrl_lock.nli_node));
6973 
6974 		ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
6975 		ASSERT3U(minor->nm_ns_lock.nli_state, ==,
6976 		    NVME_LOCK_STATE_UNLOCKED);
6977 		ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
6978 		ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
6979 		ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
6980 	}
6981 #endif	/* DEBUG */
6982 
6983 	/*
6984 	 * At this point we should actually attempt a locking operation.
6985 	 */
6986 	nvme_rwlock(minor, &lock);
6987 	mutex_exit(&nvme->n_minor_mutex);
6988 
6989 copyout:
6990 	if (ddi_copyout(&lock, (void *)(uintptr_t)arg, sizeof (lock),
6991 	    mode & FKIOCTL) != 0) {
6992 		return (EFAULT);
6993 	}
6994 
6995 	return (0);
6996 }
6997 
6998 static int
6999 nvme_ioctl_unlock(nvme_minor_t *minor, intptr_t arg, int mode,
7000     cred_t *cred_p)
7001 {
7002 	nvme_ioctl_unlock_t unlock;
7003 	nvme_t *const nvme = minor->nm_ctrl;
7004 	boolean_t is_ctrl;
7005 	nvme_lock_t *lock;
7006 	nvme_minor_lock_info_t *info;
7007 
7008 	/*
7009 	 * Note, we explicitly don't check for privileges for unlock: the idea
7010 	 * is that if you have the lock, that's what matters. If you don't
7011 	 * have the lock, it doesn't matter what privileges you have at
7012 	 * all.
7013 	 */
7014 	if ((mode & FWRITE) == 0)
7015 		return (EBADF);
7016 
7017 	if (ddi_copyin((void *)(uintptr_t)arg, &unlock, sizeof (unlock),
7018 	    mode & FKIOCTL) != 0) {
7019 		return (EFAULT);
7020 	}
7021 
7022 	if (unlock.niu_ent != NVME_LOCK_E_CTRL &&
7023 	    unlock.niu_ent != NVME_LOCK_E_NS) {
7024 		(void) nvme_ioctl_error(&unlock.niu_common,
7025 		    NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
7026 		goto copyout;
7027 	}
7028 
7029 	if (!nvme_ioctl_check(minor, &unlock.niu_common, &nvme_check_locking)) {
7030 		goto copyout;
7031 	}
7032 
7033 	/*
7034 	 * If we're on a namespace, confirm that we're not asking for the
7035 	 * controller.
7036 	 */
7037 	if (unlock.niu_common.nioc_nsid != 0 &&
7038 	    unlock.niu_ent == NVME_LOCK_E_CTRL) {
7039 		(void) nvme_ioctl_error(&unlock.niu_common,
7040 		    NVME_IOCTL_E_NS_CANNOT_UNLOCK_CTRL, 0, 0);
7041 		goto copyout;
7042 	}
7043 
7044 	mutex_enter(&nvme->n_minor_mutex);
7045 	if (unlock.niu_ent == NVME_LOCK_E_CTRL) {
7046 		if (minor->nm_ctrl_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7047 			mutex_exit(&nvme->n_minor_mutex);
7048 			(void) nvme_ioctl_error(&unlock.niu_common,
7049 			    NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7050 			goto copyout;
7051 		}
7052 	} else {
7053 		if (minor->nm_ns_lock.nli_ns == NULL) {
7054 			mutex_exit(&nvme->n_minor_mutex);
7055 			(void) nvme_ioctl_error(&unlock.niu_common,
7056 			    NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7057 			goto copyout;
7058 		}
7059 
7060 		/*
7061 		 * Check that our unlock request corresponds to the namespace ID
7062 		 * that is currently locked. This could happen if we're using
7063 		 * the controller node and it specified a valid, but not locked,
7064 		 * namespace ID.
7065 		 */
7066 		if (minor->nm_ns_lock.nli_ns->ns_id !=
7067 		    unlock.niu_common.nioc_nsid) {
7068 			mutex_exit(&nvme->n_minor_mutex);
7069 			ASSERT3P(minor->nm_ns, ==, NULL);
7070 			(void) nvme_ioctl_error(&unlock.niu_common,
7071 			    NVME_IOCTL_E_LOCK_WRONG_NS, 0, 0);
7072 			goto copyout;
7073 		}
7074 
7075 		if (minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7076 			mutex_exit(&nvme->n_minor_mutex);
7077 			(void) nvme_ioctl_error(&unlock.niu_common,
7078 			    NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7079 			goto copyout;
7080 		}
7081 	}
7082 
7083 	/*
7084 	 * Finally, perform the unlock.
7085 	 */
7086 	is_ctrl = unlock.niu_ent == NVME_LOCK_E_CTRL;
7087 	if (is_ctrl) {
7088 		lock = &nvme->n_lock;
7089 		info = &minor->nm_ctrl_lock;
7090 	} else {
7091 		nvme_namespace_t *ns;
7092 		const uint32_t nsid = unlock.niu_common.nioc_nsid;
7093 
7094 		ns = nvme_nsid2ns(nvme, nsid);
7095 		lock = &ns->ns_lock;
7096 		info = &minor->nm_ns_lock;
7097 		VERIFY3P(ns, ==, info->nli_ns);
7098 	}
7099 	nvme_rwunlock(info, lock);
7100 	mutex_exit(&nvme->n_minor_mutex);
7101 	nvme_ioctl_success(&unlock.niu_common);
7102 
7103 copyout:
7104 	if (ddi_copyout(&unlock, (void *)(uintptr_t)arg, sizeof (unlock),
7105 	    mode & FKIOCTL) != 0) {
7106 		return (EFAULT);
7107 	}
7108 
7109 	return (0);
7110 }
7111 
7112 static int
7113 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
7114     int *rval_p)
7115 {
7116 #ifndef __lock_lint
7117 	_NOTE(ARGUNUSED(rval_p));
7118 #endif
7119 	nvme_minor_t *minor;
7120 	nvme_t *nvme;
7121 
7122 	minor = nvme_minor_find_by_dev(dev);
7123 	if (minor == NULL) {
7124 		return (ENXIO);
7125 	}
7126 
7127 	nvme = minor->nm_ctrl;
7128 	if (nvme == NULL)
7129 		return (ENXIO);
7130 
7131 	if (IS_DEVCTL(cmd))
7132 		return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));
7133 
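	/*
	 * A dead controller still honors detach and unlock so that consumers
	 * can tear down blkdev attachments and release any locks they hold.
	 * Everything else fails, with our own ioctls copying out the status
	 * that was recorded when the controller was declared dead.
	 */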
7134 	if (nvme->n_dead && (cmd != NVME_IOC_DETACH && cmd !=
7135 	    NVME_IOC_UNLOCK)) {
7136 		if (IS_NVME_IOC(cmd) == 0) {
7137 			return (EIO);
7138 		}
7139 
7140 		return (nvme_ioctl_copyout_error(nvme->n_dead_status, arg,
7141 		    mode));
7142 	}
7143 
7144 	/*
7145 	 * ioctls that are no longer using the original ioctl structure.
7146 	 */
7147 	switch (cmd) {
7148 	case NVME_IOC_CTRL_INFO:
7149 		return (nvme_ioctl_ctrl_info(minor, arg, mode, cred_p));
7150 	case NVME_IOC_IDENTIFY:
7151 		return (nvme_ioctl_identify(minor, arg, mode, cred_p));
7152 	case NVME_IOC_GET_LOGPAGE:
7153 		return (nvme_ioctl_get_logpage(minor, arg, mode, cred_p));
7154 	case NVME_IOC_GET_FEATURE:
7155 		return (nvme_ioctl_get_feature(minor, arg, mode, cred_p));
7156 	case NVME_IOC_DETACH:
7157 		return (nvme_ioctl_detach(minor, arg, mode, cred_p));
7158 	case NVME_IOC_ATTACH:
7159 		return (nvme_ioctl_attach(minor, arg, mode, cred_p));
7160 	case NVME_IOC_FORMAT:
7161 		return (nvme_ioctl_format(minor, arg, mode, cred_p));
7162 	case NVME_IOC_FIRMWARE_DOWNLOAD:
7163 		return (nvme_ioctl_firmware_download(minor, arg, mode,
7164 		    cred_p));
7165 	case NVME_IOC_FIRMWARE_COMMIT:
7166 		return (nvme_ioctl_firmware_commit(minor, arg, mode,
7167 		    cred_p));
7168 	case NVME_IOC_NS_INFO:
7169 		return (nvme_ioctl_ns_info(minor, arg, mode, cred_p));
7170 	case NVME_IOC_PASSTHRU:
7171 		return (nvme_ioctl_passthru(minor, arg, mode, cred_p));
7172 	case NVME_IOC_LOCK:
7173 		return (nvme_ioctl_lock(minor, arg, mode, cred_p));
7174 	case NVME_IOC_UNLOCK:
7175 		return (nvme_ioctl_unlock(minor, arg, mode, cred_p));
7176 	default:
7177 		return (ENOTTY);
7178 	}
7179 }
7180 
7181 /*
7182  * DDI UFM Callbacks
7183  */
7184 static int
7185 nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
7186     ddi_ufm_image_t *img)
7187 {
7188 	nvme_t *nvme = arg;
7189 
7190 	if (imgno != 0)
7191 		return (EINVAL);
7192 
7193 	ddi_ufm_image_set_desc(img, "Firmware");
7194 	ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);
7195 
7196 	return (0);
7197 }
7198 
7199 /*
7200  * Fill out firmware slot information for the requested slot.  The firmware
7201  * slot information is gathered by requesting the Firmware Slot Information log
7202  * page.  The format of the page is described in section 5.10.1.3.
7203  *
7204  * We lazily cache the log page on the first call and then invalidate the cache
7205  * We lazily cache the log page on the first call and invalidate the cached
7206  * data after a successful firmware commit command (a download by itself does
7207  * not change the persistent slot contents).  The cached data is protected by
7208  * a mutex as the state can change asynchronously to this callback.
7209 static int
7210 nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
7211     uint_t slotno, ddi_ufm_slot_t *slot)
7212 {
7213 	nvme_t *nvme = arg;
7214 	void *log = NULL;
7215 	size_t bufsize;
7216 	ddi_ufm_attr_t attr = 0;
7217 	char fw_ver[NVME_FWVER_SZ + 1];
7218 
7219 	if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
7220 		return (EINVAL);
7221 
7222 	mutex_enter(&nvme->n_fwslot_mutex);
7223 	if (nvme->n_fwslot == NULL) {
7224 		if (!nvme_get_logpage_int(nvme, B_TRUE, &log, &bufsize,
7225 		    NVME_LOGPAGE_FWSLOT) ||
7226 		    bufsize != sizeof (nvme_fwslot_log_t)) {
7227 			if (log != NULL)
7228 				kmem_free(log, bufsize);
7229 			mutex_exit(&nvme->n_fwslot_mutex);
7230 			return (EIO);
7231 		}
7232 		nvme->n_fwslot = (nvme_fwslot_log_t *)log;
7233 	}
7234 
7235 	/*
7236 	 * NVMe numbers firmware slots starting at 1
7237 	 */
7238 	if (slotno == (nvme->n_fwslot->fw_afi - 1))
7239 		attr |= DDI_UFM_ATTR_ACTIVE;
7240 
7241 	if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
7242 		attr |= DDI_UFM_ATTR_WRITEABLE;
7243 
7244 	if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
7245 		attr |= DDI_UFM_ATTR_EMPTY;
7246 	} else {
7247 		(void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
7248 		    NVME_FWVER_SZ);
7249 		fw_ver[NVME_FWVER_SZ] = '\0';
7250 		ddi_ufm_slot_set_version(slot, fw_ver);
7251 	}
7252 	mutex_exit(&nvme->n_fwslot_mutex);
7253 
7254 	ddi_ufm_slot_set_attrs(slot, attr);
7255 
7256 	return (0);
7257 }
7258 
7259 static int
7260 nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
7261 {
7262 	*caps = DDI_UFM_CAP_REPORT;
7263 	return (0);
7264 }
7265 
7266 boolean_t
7267 nvme_ctrl_atleast(nvme_t *nvme, const nvme_version_t *min)
7268 {
7269 	return (nvme_vers_atleast(&nvme->n_version, min) ? B_TRUE : B_FALSE);
7270 }
7271