1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
14 * Copyright 2019 Unix Software Ltd.
15 * Copyright 2020 Joyent, Inc.
16 * Copyright 2020 Racktop Systems.
17 * Copyright 2024 Oxide Computer Company.
18 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
19 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
20 */
21
22 /*
23 * blkdev driver for NVMe compliant storage devices
24 *
25 * This driver targets and is designed to support all NVMe 1.x and NVMe 2.x
26 * devices. Features are added to the driver as we encounter devices that
27 * require them and as our needs dictate, so some commands or log pages may not
28 * take advantage of newer features that devices support at this time. When you
29 * encounter such a case, it is generally fine to add that support to the driver
30 * as long as you take care to ensure that the requisite device version is met
31 * before using it.
32 *
33 * The driver has only been tested on x86 systems and will not work on big-
34 * endian systems without changes to the code accessing registers and data
35 * structures used by the hardware.
36 *
37 *
38 * Interrupt Usage:
39 *
40 * The driver will use a single interrupt while configuring the device as the
41 * specification requires, but contrary to the specification it will try to use
42 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
43 * will switch to multiple-message MSI(-X) if supported. The driver wants to
44 * have one interrupt vector per CPU, but it will work correctly if fewer are
45 * available. Interrupts can be shared by queues; the interrupt handler will
46 * iterate through the I/O queue array by steps of n_intr_cnt. Usually only
47 * the admin queue will share an interrupt with one I/O queue. The interrupt
48 * handler will retrieve completed commands from all queues sharing an interrupt
49 * vector and will post them to a taskq for completion processing.
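 *
 * As a rough, simplified sketch (not the literal handler code), each vector's
 * handler walks the completion queues assigned to it along these lines, with
 * inum being the vector number:
 *
 *	for (qnum = inum; qnum < nvme->n_cq_count; qnum += nvme->n_intr_cnt)
 *		(void) nvme_process_iocq(nvme, nvme->n_cq[qnum]);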
50 *
51 *
52 * Command Processing:
53 *
54 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up
55 * to 65536 I/O commands. The driver will configure one I/O queue pair per
56 * available interrupt vector, with the queue length usually much smaller than
57 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
58 * interrupt vectors will be used.
59 *
60 * Additionally the hardware provides a single special admin queue pair that can
61 * hold up to 4096 admin commands.
62 *
63 * From the hardware perspective both queues of a queue pair are independent,
64 * but they share some driver state: the command array (holding pointers to
65 * commands currently being processed by the hardware) and the active command
66 * counter. Access to a submission queue and the shared state is protected by
67 * nq_mutex; the completion queue is protected by ncq_mutex.
68 *
69 * When a command is submitted to a queue pair the active command counter is
70 * incremented and a pointer to the command is stored in the command array. The
71 * array index is used as command identifier (CID) in the submission queue
72 * entry. Some commands may take a very long time to complete, and if the queue
73 * wraps around in that time a submission may find the next array slot to still
74 * be used by a long-running command. In this case the array is sequentially
75 * searched for the next free slot. The length of the command array is the same
76 * as the configured queue length. Queue overrun is prevented by the semaphore,
77 * so a command submission may block if the queue is full.
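 *
 * As a simplified sketch of the submission path just described (the real work
 * happens in nvme_submit_cmd_common() and handles more than is shown here; the
 * member names are illustrative rather than a literal excerpt, and "slot" is
 * assumed to start at the driver's notion of the next free index):
 *
 *	sema_p(&qp->nq_sema);
 *	mutex_enter(&qp->nq_mutex);
 *	while (qp->nq_cmd[slot] != NULL)
 *		slot = (slot + 1) % qp->nq_nentry;
 *	cmd->nc_sqe.sqe_cid = slot;
 *	qp->nq_cmd[slot] = cmd;
 *	qp->nq_active_cmds++;
 *
 * The sema_p() is what blocks a submission while the queue is full, the while
 * loop is the sequential search for a free slot, and the array index stored as
 * the CID is what ties the completion back to the command later on.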
78 *
79 *
80 * Polled I/O Support:
81 *
82 * For kernel core dump support the driver can do polled I/O. As interrupts are
83 * turned off while dumping the driver will just submit a command in the regular
84 * way, and then repeatedly attempt a command retrieval until it gets the
85 * command back.
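 *
 * In other words, ignoring the handling of any other commands that happen to
 * complete in the meantime, the polled path is roughly:
 *
 *	(void) nvme_submit_io_cmd(ioq, cmd);
 *	while (nvme_retrieve_cmd(nvme, ioq) != cmd)
 *		;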
86 *
87 *
88 * Namespace Support:
89 *
90 * NVMe devices can have multiple namespaces, each being an independent data
91 * store. The driver supports multiple namespaces and creates a blkdev interface
92 * for each namespace found. Namespaces can have various attributes to support
93 * protection information. This driver does not support any of this and ignores
94 * namespaces that have these attributes.
95 *
96 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
97 * (EUI64), and NVMe 1.2 introduced an additional 128bit Namespace Globally
98 * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64
99 * if present to generate the devid, and passes the EUI64 to blkdev to use it
100 * in the device node names.
101 *
102 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
103 * single controller. This is an artificial limit imposed by the driver to be
104 * able to address a reasonable number of controllers and namespaces while
105 * fitting within the constraints of MAXMIN32, aka a 32-bit device number which
106 * only has 18-bits for the minor number. See the minor node section for more
107 * information.
108 *
109 *
110 * Minor nodes:
111 *
112 * For each NVMe device the driver exposes one minor node for the controller and
113 * one minor node for each namespace. The only operations supported by those
114 * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the
115 * primary control interface for the devices. The character device is a private
116 * interface and we attempt stability through libnvme and more so nvmeadm.
117 *
118 * The controller minor node is much more flexible than the namespace minor node
119 * and should be preferred. The controller node allows one to target any
120 * namespace that the device has, while the namespace is limited in what it can
121 * acquire. While the namespace minor exists, it should not be relied upon and
122 * is not by libnvme.
123 *
124 * The minor number space is split in two. We use the lower part to support the
125 * controller and namespaces as described above in the 'Namespace Support'
126 * section. The second set is used for cloning opens. We set aside one million
127 * minors for this purpose. We utilize a cloning open so that we can have
128 * per-file_t state. This is how we end up implementing and tracking locking
129 * state and related.
130 *
131 * When we have this cloned open, then we allocate a new nvme_minor_t which gets
132 * its minor number from the nvme_open_minors id_space_t and is stored in the
133 * nvme_open_minors_avl. While someone calls open on a controller or namespace
134 * minor, everything else occurs in the context of one of these ephemeral
135 * minors.
136 *
137 *
138 * ioctls, Errors, and Exclusive Access:
139 *
140 * All of the logical commands that one can issue are driven through the
141 * ioctl(9E) interface. All of our ioctls have a similar shape where they
142 * all include the 'nvme_ioctl_common_t' as their first member.
143 *
144 * This common ioctl structure is used to communicate the namespace that should
145 * be targeted. When the namespace is left as 0, then that indicates that it
146 * should target whatever the default is of the minor node. For a namespace
147 * minor, that will be transparently rewritten to the namespace's namespace id.
148 *
149 * In addition, the nvme_ioctl_common_t structure also has a standard error
150 * return. Our goal in our ioctl path is to ensure that we have useful semantic
151 * errors as much as possible. EINVAL, EIO, etc. are all overloaded. Instead as
152 * long as we can copy in our structure, then we will set a semantic error. If
153 * we have an error from the controller, then that will be included there.
154 *
155 * Each command has a specific policy that controls whether or not it is allowed
156 * on the namespace or controller minor, whether the broadcast namespace is
157 * allowed, various settings around what kind of exclusive access is allowed,
158 * and more. Each of these is wrapped up in a bit of policy described by the
159 * 'nvme_ioctl_check_t' structure.
160 *
161 * The device provides a form of exclusion in the form of both a
162 * controller-level and namespace-level read and write lock. Most operations do
163 * not require a lock (e.g. get log page, identify, etc.), but a few do (e.g.
164 * format nvm, firmware related activity, etc.). A read lock guarantees that you
165 * can complete your operation without interference, but read locks are not
166 * required. If you don't take a read lock and someone comes in with a write
167 * lock, then subsequent operations will fail with a semantic error indicating
168 * that you were blocked due to this.
169 *
170 * Here are some of the rules that govern our locks:
171 *
172 * 1. Writers starve readers. Any readers are allowed to finish when there is a
173 * pending writer; however, all subsequent readers will be blocked upon that
174 * writer.
175 * 2. A controller write lock takes priority over all other locks. Put
176 * differently a controller writer not only starves subsequent controller
177 * readers, but also all namespace read and write locks.
178 * 3. Each namespace lock is independent.
179 * 4. At most a single namespace lock may be owned.
180 * 5. If you own a namespace lock, you may not take a controller lock (to help
181 * with lock ordering).
182 * 6. In a similar spirit, if you own a controller write lock, you may not take
183 * any namespace lock. Someone with the controller write lock can perform any
184 * operations that they need to. However, if you have a controller read lock
185 * you may take any namespace lock.
186 * 7. There is no ability to upgrade a read lock to a write lock.
187 * 8. There is no recursive locking.
188 *
189 * While there's a lot there to keep track of, the goals of these are to
190 * constrain things so as to avoid deadlock. This is more complex than the
191 * original implementation in the driver which only allowed for an exclusive
192 * open that was tied to the thread. The first issue with tying this to the
193 * thread was that it didn't work well for software that utilized thread
194 * pools, like complex daemons. The second issue is that we want the ability for
195 * daemons, such as a FRU monitor, to be able to retain a file descriptor to the
196 * device without blocking others from taking action except during critical
197 * periods.
198 *
199 * In particular to enable something like libnvme, we didn't want someone to
200 * have to open and close the file descriptor to change what kind of exclusive
201 * access they desired.
202 *
203 * There are two different sets of data structures that we employ for tracking
204 * locking information:
205 *
206 * 1) The nvme_lock_t structure is contained in both the nvme_t and the
207 * nvme_namespace_t and tracks the current writer, readers, and pending writers
208 * and readers. Each of these lists, as well as the writer pointer, refers to our
209 * second data structure.
210 *
211 * When a lock is owned by a single writer, then the nl_writer field is set to a
212 * specific minor's lock data structure. If instead readers are present, then
213 * the nl_readers list_t is not empty. An invariant of the system is that if
214 * nl_writer is non-NULL, nl_readers must be empty and conversely, if nl_readers
215 * is not empty, nl_writer must be NULL.
216 *
217 * 2) The nvme_minor_lock_info_t exists in the nvme_minor_t. There is one
218 * information structure which represents the minor's controller lock and a
219 * second one that represents the minor's namespace lock. The members of this
220 * are broken into tracking what the current lock is and what it targets. It
221 * also has several members that are intended for debugging (nli_last_change,
222 * nli_acq_kthread, etc.).
223 *
224 * While the minor has two different lock information structures, our rules
225 * ensure that only one of the two can be pending and that they shouldn't result
226 * in a deadlock. When a lock is pending, the caller is sleeping on the minor's
227 * nm_cv member.
228 *
229 * These relationships are represented in the following image which shows a
230 * controller write lock being held with pending readers on the controller
231 * lock and pending writers on one of the controller's namespaces.
232 *
233 * +---------+
234 * | nvme_t |
235 * | |
236 * | n_lock -|-------+
237 * | n_ns -+ | | +-----------------------------+
238 * +-------|-+ +-----------------+ | nvme_minor_t |
239 * | | nvme_lock_t | | |
240 * | | | | +------------------------+ |
241 * | | writer --|-------------->| nvme_minor_lock_info_t | |
242 * | | reader list | | | nm_ctrl_lock | |
243 * | | pending writers | | +------------------------+ |
244 * | | pending readers |------+ | +------------------------+ |
245 * | +-----------------+ | | | nvme_minor_lock_info_t | |
246 * | | | | nm_ns_lock | |
247 * | | | +------------------------+ |
248 * | | +-----------------------------+
249 * +------------------+ | +-----------------+
250 * | nvme_namespace_t | | | nvme_minor_t |
251 * | | | | |
252 * | ns_lock ---+ | | | +-------------+ |
253 * +------------|-----+ +-----------------|>|nm_ctrl_lock | |
254 * | | +-------------+ |
255 * v +-----------------+
256 * +------------------+ ...
257 * | nvme_lock_t | +-----------------+
258 * | | | nvme_minor_t |
259 * | writer | | |
260 * | reader list | | +-------------+ |
261 * | pending writers -|-----------------+ | |nm_ctrl_lock | |
262 * | pending readers | | | +-------------+ |
263 * +------------------+ | +-----------------+
264 * +-----------------------------+ | +-----------------------------+
265 * | nvme_minor_t | | | nvme_minor_t |
266 * | | | | |
267 * | +------------------------+ | | | +------------------------+ |
268 * | | nvme_minor_lock_info_t | | | | | nvme_minor_lock_info_t | |
269 * | | nm_ctrl_lock | | | | | nm_ctrl_lock | |
270 * | +------------------------+ | | | +------------------------+ |
271 * | +------------------------+ | v | +------------------------+ |
272 * | | nvme_minor_lock_info_t |-|-----|->| nvme_minor_lock_info_t | |
273 * | | nm_ns_lock | | | | nm_ns_lock | |
274 * | +------------------------+ | | +------------------------+ |
275 * +-----------------------------+ +-----------------------------+
276 *
277 * Blkdev Interface:
278 *
279 * This driver uses blkdev to do all the heavy lifting involved with presenting
280 * a disk device to the system. As a result, the processing of I/O requests is
281 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
282 * setup, and splitting of transfers into manageable chunks.
283 *
284 * I/O requests coming in from blkdev are turned into NVM commands and posted to
285 * an I/O queue. The queue is selected by taking the CPU id modulo the number of
286 * queues. There is currently no timeout handling of I/O commands.
287 *
288 * Blkdev also supports querying device/media information and generating a
289 * devid. The driver reports the best block size as determined by the namespace
290 * format back to blkdev as physical block size to support partition and block
291 * alignment. The devid is either based on the namespace GUID or EUI64, if
292 * present, or composed using the device vendor ID, model number, serial number,
293 * and the namespace ID.
294 *
295 *
296 * Error Handling:
297 *
298 * Error handling is currently limited to detecting fatal hardware errors,
299 * either by asynchronous events, or synchronously through command status or
300 * admin command timeouts. In case of severe errors the device is fenced off;
301 * all further requests will return EIO. FMA is then called to fault the device.
302 *
303 * The hardware has a limit for outstanding asynchronous event requests. Before
304 * this limit is known the driver assumes it is at least 1 and posts a single
305 * asynchronous request. Later when the limit is known more asynchronous event
306 * requests are posted to allow quicker reception of error information. When an
307 * asynchronous event is posted by the hardware the driver will parse the error
308 * status fields and log information or fault the device, depending on the
309 * severity of the asynchronous event. The asynchronous event request is then
310 * reused and posted to the admin queue again.
311 *
312 * On command completion the command status is checked for errors. In case of
313 * errors indicating a driver bug the driver panics. Almost all other error
314 * status values just cause EIO to be returned.
315 *
316 * Command timeouts are currently detected for all admin commands except
317 * asynchronous event requests. If a command times out and the hardware appears
318 * to be healthy the driver attempts to abort the command. The abort command
319 * timeout is a separate tunable but the original command timeout will be used
320 * if it is greater. If the abort also times out, the driver assumes the device
321 * to be dead, fences it off, and calls FMA to retire it. In all other cases
322 * the aborted command should return immediately with a status indicating it
323 * was aborted, and the driver will wait indefinitely for that to happen. No
324 * timeout handling of normal I/O commands is presently done.
325 *
326 * Any command that times out due to the controller dropping dead will be put on
327 * the nvme_lost_cmds list if it references DMA memory. This will prevent the DMA
328 * memory being reused by the system and later being written to by a "dead"
329 * NVMe controller.
330 *
331 *
332 * Locking:
333 *
334 * Each queue pair has a nq_mutex and ncq_mutex. The nq_mutex must be held
335 * when accessing shared state and submission queue registers, ncq_mutex
336 * is held when accessing completion queue state and registers.
337 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
338 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
339 * mutexes themselves.
340 *
341 * Each command also has its own nc_mutex, which is associated with the
342 * condition variable nc_cv. It is only used on admin commands which are run
343 * synchronously. In that case it must be held across calls to
344 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
345 * nvme_admin_cmd(). It must also be held whenever the completion state of the
346 * command is changed or while an admin command timeout is handled.
347 *
348 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
349 * More than one nc_mutex may only be held when aborting commands. In this case,
350 * the nc_mutex of the command to be aborted must be held across the call to
351 * nvme_abort_cmd() to prevent the command from completing while the abort is in
352 * progress.
353 *
354 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
355 * acquired first. More than one nq_mutex is never held by a single thread.
356 * The ncq_mutex is only held by nvme_retrieve_cmd() and
357 * nvme_process_iocq(). nvme_process_iocq() is only called from the
358 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
359 * mutex is non-contentious but is required for implementation completeness
360 * and safety.
361 *
362 * Each nvme_t has an n_admin_stat_mutex that protects the admin command
363 * statistics structure. If this is taken in conjunction with any other locks,
364 * then it must be taken last.
365 *
366 * There is one mutex n_minor_mutex which protects all open flags nm_open and
367 * exclusive-open thread pointers nm_oexcl of each minor node associated with a
368 * controller and its namespaces.
369 *
370 * In addition, there is a logical namespace management mutex which protects the
371 * data about namespaces. When interrogating the metadata of any namespace, this
372 * lock must be held. This gets tricky as we need to call into blkdev, which may
373 * issue callbacks into us that want this lock, and it is illegal to hold locks
374 * across those blkdev calls as otherwise they might lead to deadlock (blkdev
375 * leverages ndi_devi_enter()).
376 *
377 * The lock exposes two levels, one that we call 'NVME' and one 'BDRO' or blkdev
378 * read-only. The idea is that most callers will use the NVME level which says
379 * this is a full traditional mutex operation. The BDRO level is used by blkdev
380 * callback functions and is a promise to only read the data. When a blkdev
381 * operation starts, the lock holder will use nvme_mgmt_bd_start(). This
382 * strictly speaking drops the mutex, but records that the lock is logically
383 * held by the thread that did the start() operation.
384 *
385 * During this time, other threads (or even the same one) may end up calling
386 * into nvme_mgmt_lock(). Only one person may still hold the lock at any time;
387 * however, the BDRO level will be allowed to proceed during this time. This
388 * allows us to make consistent progress and honor the blkdev lock ordering
389 * requirements, albeit it is not as straightforward as a simple mutex.
390 *
391 * Quiesce / Fast Reboot:
392 *
393 * The driver currently does not support fast reboot. A quiesce(9E) entry point
394 * is still provided which is used to send a shutdown notification to the
395 * device.
396 *
397 *
398 * NVMe Hotplug:
399 *
400 * The driver supports hot removal. The driver uses the NDI event framework
401 * to register a callback, nvme_remove_callback, to clean up when a disk is
402 * removed. In particular, the driver will unqueue outstanding I/O commands and
403 * set n_dead on the softstate to true so that other operations, such as ioctls
404 * and command submissions, fail as well.
405 *
406 * While the callback registration relies on the NDI event framework, the
407 * removal event itself is kicked off in the PCIe hotplug framework, when the
408 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a
409 * device was removed from the slot.
410 *
411 * The NVMe driver instance itself will remain until the final close of the
412 * device.
413 *
414 *
415 * DDI UFM Support
416 *
417 * The driver supports the DDI UFM framework for reporting information about
418 * the device's firmware image and slot configuration. This data can be
419 * queried by userland software via ioctls to the ufm driver. For more
420 * information, see ddi_ufm(9E).
421 *
422 *
423 * Driver Configuration:
424 *
425 * The following driver properties can be changed to control some aspects of the
426 * driver's operation:
427 * - strict-version: can be set to 0 to allow devices conforming to newer
428 * major versions to be used
429 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
430 * specific command status as a fatal error leading to device faulting
431 * - admin-queue-len: the maximum length of the admin queue (16-4096)
432 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
433 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
434 * - async-event-limit: the maximum number of asynchronous event requests to be
435 * posted by the driver
436 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
437 * cache
438 * - min-phys-block-size: the minimum physical block size to report to blkdev,
439 * which is among other things the basis for ZFS vdev ashift
440 * - max-submission-queues: the maximum number of I/O submission queues.
441 * - max-completion-queues: the maximum number of I/O completion queues,
442 * can be less than max-submission-queues, in which case the completion
443 * queues are shared.
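 *
 * For example, a driver.conf fragment overriding a few of these could look
 * like the following; the values shown are purely illustrative and not
 * recommendations:
 *
 *	volatile-write-cache-enable = 0;
 *	min-phys-block-size = 4096;
 *	max-submission-queues = 8;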
444 *
445 * In addition to the above properties, some device-specific tunables can be
446 * configured using the nvme-config-list global property. The value of this
447 * property is a list of triplets. The formal syntax is:
448 *
449 * nvme-config-list ::= <triplet> [, <triplet>]* ;
450 * <triplet> ::= "<model>" , "<rev-list>" , "<tuple-list>"
451 * <rev-list> ::= [ <fwrev> [, <fwrev>]*]
452 * <tuple-list> ::= <tunable> [, <tunable>]*
453 * <tunable> ::= <name> : <value>
454 *
455 * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and
456 * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of <tuple-list>
457 * contains one or more tunables to apply to all controllers that match the
458 * specified model number and optionally firmware revision. Each <tunable> is a
459 * <name> : <value> pair. Supported tunables are:
460 *
461 * - ignore-unknown-vendor-status: can be set to "on" to not handle any vendor
462 * specific command status as a fatal error leading to device faulting
463 *
464 * - min-phys-block-size: the minimum physical block size to report to blkdev,
465 * which is among other things the basis for ZFS vdev ashift
466 *
467 * - volatile-write-cache: can be set to "on" or "off" to enable or disable the
468 * volatile write cache, if present
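 *
 * As an example, a hypothetical nvme-config-list entry (the model name and
 * firmware revisions below are made up) could look like:
 *
 *	nvme-config-list =
 *	    "EXAMPLE MODEL X100", "FW1.0, FW1.1", "min-phys-block-size:4096",
 *	    "EXAMPLE MODEL X200", "", "volatile-write-cache:off";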
469 *
470 *
471 * TODO:
472 * - figure out sane default for I/O queue depth reported to blkdev
473 * - FMA handling of media errors
474 * - support for devices supporting very large I/O requests using chained PRPs
475 * - support for configuring hardware parameters like interrupt coalescing
476 * - support for media formatting and hard partitioning into namespaces
477 * - support for big-endian systems
478 * - support for fast reboot
479 * - support for NVMe Subsystem Reset (1.1)
480 * - support for Scatter/Gather lists (1.1)
481 * - support for Reservations (1.1)
482 * - support for power management
483 */
484
485 #include <sys/byteorder.h>
486 #ifdef _BIG_ENDIAN
487 #error nvme driver needs porting for big-endian platforms
488 #endif
489
490 #include <sys/modctl.h>
491 #include <sys/conf.h>
492 #include <sys/devops.h>
493 #include <sys/ddi.h>
494 #include <sys/ddi_ufm.h>
495 #include <sys/sunddi.h>
496 #include <sys/sunndi.h>
497 #include <sys/bitmap.h>
498 #include <sys/sysmacros.h>
499 #include <sys/param.h>
500 #include <sys/varargs.h>
501 #include <sys/cpuvar.h>
502 #include <sys/disp.h>
503 #include <sys/blkdev.h>
504 #include <sys/atomic.h>
505 #include <sys/archsystm.h>
506 #include <sys/sata/sata_hba.h>
507 #include <sys/stat.h>
508 #include <sys/policy.h>
509 #include <sys/list.h>
510 #include <sys/dkio.h>
511 #include <sys/pci.h>
512 #include <sys/mkdev.h>
513
514 #include <sys/nvme.h>
515
516 #ifdef __x86
517 #include <sys/x86_archext.h>
518 #endif
519
520 #include "nvme_reg.h"
521 #include "nvme_var.h"
522
523 /*
524 * Assertions to make sure that we've properly captured various aspects of the
525 * packed structures and haven't broken them during updates.
526 */
527 CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE);
528 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
529 CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
530 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
531 CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
532 CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
533 CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
534 CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);
535
536 CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE);
537 CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
538 CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
539 CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
540 CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
541 CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);
542
543 CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE);
544 CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE);
545
546 CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE);
547 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
548 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);
549
550 CTASSERT(sizeof (nvme_nschange_list_t) == 4096);
551
552 /* NVMe spec version supported */
553 static const int nvme_version_major = 2;
554
555 /* Tunable for FORMAT NVM command timeout in seconds, default is 600s */
556 uint32_t nvme_format_cmd_timeout = 600;
557
558 /* Tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
559 uint32_t nvme_commit_save_cmd_timeout = 15;
560
561 /*
562 * Tunable for the admin command timeout used for commands other than those
563 * with their own timeouts defined above; in seconds. While most commands are
564 * expected to complete very quickly (sub-second), experience has shown that
565 * some controllers can occasionally be a bit slower, and not always consistent
566 * in the time taken - times of up to around 4.2s have been observed. Setting
567 * this to 15s by default provides headroom.
568 */
569 uint32_t nvme_admin_cmd_timeout = 15;
570
571 /*
572 * Tunable for abort command timeout in seconds, default is 60s. This timeout
573 * is used when issuing an abort command, currently only in response to a
574 * different admin command timing out. Aborts always complete after the command
575 * that they are attempting to abort so we need to allow enough time for the
576 * controller to process the long running command that we are attempting to
577 * abort. The abort timeout here is only used if it is greater than the timeout
578 * for the command that is being aborted.
579 */
580 uint32_t nvme_abort_cmd_timeout = 60;
581
582 /*
583 * Tunable for the size of arbitrary vendor specific admin commands,
584 * default is 16MiB.
585 */
586 uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;
587
588 /*
589 * Tunable for the max timeout of arbitrary vendor specific admin commands,
590 * default is 60s.
591 */
592 uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60;
593
594 /*
595 * This ID space, AVL, and lock are used for keeping track of minor state across
596 * opens between different devices.
597 */
598 static id_space_t *nvme_open_minors;
599 static avl_tree_t nvme_open_minors_avl;
600 kmutex_t nvme_open_minors_mutex;
601
602 /*
603 * Removal taskq used for n_dead callback processing.
604 */
605 taskq_t *nvme_dead_taskq;
606
607 /*
608 * This enumeration is used in tandem with nvme_mgmt_lock() to describe which
609 * form of the lock is being taken. See the theory statement for more context.
610 */
611 typedef enum {
612 /*
613 * This is the primary form of taking the management lock and indicates
614 * that the user intends to do a read/write of it. This should always be
615 * used for any ioctl paths or truly anything other than a blkdev
616 * information operation.
617 */
618 NVME_MGMT_LOCK_NVME,
619 /*
620 * This is a subordinate form of the lock whereby the user is in blkdev
621 * callback context and will only intend to read the namespace data.
622 */
623 NVME_MGMT_LOCK_BDRO
624 } nvme_mgmt_lock_level_t;
625
626 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
627 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
628 static int nvme_quiesce(dev_info_t *);
629 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
630 static int nvme_setup_interrupts(nvme_t *, int, int);
631 static void nvme_release_interrupts(nvme_t *);
632 static uint_t nvme_intr(caddr_t, caddr_t);
633
634 static void nvme_shutdown(nvme_t *, boolean_t);
635 static boolean_t nvme_reset(nvme_t *, boolean_t);
636 static int nvme_init(nvme_t *);
637 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
638 static void nvme_free_cmd(nvme_cmd_t *);
639 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
640 bd_xfer_t *);
641 static void nvme_admin_cmd(nvme_cmd_t *, uint32_t);
642 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
643 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
644 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
645 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
646 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
647 static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
648 static void nvme_wakeup_cmd(void *);
649 static void nvme_async_event_task(void *);
650
651 static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
652 static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
653 static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
654 static int nvme_check_specific_cmd_status(nvme_cmd_t *);
655 static int nvme_check_generic_cmd_status(nvme_cmd_t *);
656 static inline int nvme_check_cmd_status(nvme_cmd_t *);
657 static boolean_t nvme_check_cmd_status_ioctl(nvme_cmd_t *,
658 nvme_ioctl_common_t *);
659
660 static int nvme_abort_cmd(nvme_cmd_t *, const uint32_t);
661 static void nvme_async_event(nvme_t *);
662 static boolean_t nvme_format_nvm(nvme_t *, nvme_ioctl_format_t *);
663 static boolean_t nvme_get_logpage_int(nvme_t *, boolean_t, void **, size_t *,
664 uint8_t);
665 static boolean_t nvme_identify(nvme_t *, boolean_t, nvme_ioctl_identify_t *,
666 void **);
667 static boolean_t nvme_identify_int(nvme_t *, uint32_t, uint8_t, void **);
668 static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
669 uint32_t *);
670 static int nvme_write_cache_set(nvme_t *, boolean_t);
671 static int nvme_set_nqueues(nvme_t *);
672
673 static void nvme_free_dma(nvme_dma_t *);
674 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
675 nvme_dma_t **);
676 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
677 nvme_dma_t **);
678 static void nvme_free_qpair(nvme_qpair_t *);
679 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
680 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);
681
682 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
683 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
684 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
685 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
686
687 static boolean_t nvme_check_regs_hdl(nvme_t *);
688 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
689
690 static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t);
691
692 static void nvme_bd_xfer_done(void *);
693 static void nvme_bd_driveinfo(void *, bd_drive_t *);
694 static int nvme_bd_mediainfo(void *, bd_media_t *);
695 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
696 static int nvme_bd_read(void *, bd_xfer_t *);
697 static int nvme_bd_write(void *, bd_xfer_t *);
698 static int nvme_bd_sync(void *, bd_xfer_t *);
699 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
700 static int nvme_bd_free_space(void *, bd_xfer_t *);
701
702 static int nvme_prp_dma_constructor(void *, void *, int);
703 static void nvme_prp_dma_destructor(void *, void *);
704
705 static void nvme_prepare_devid(nvme_t *, uint32_t);
706
707 /* DDI UFM callbacks */
708 static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
709 ddi_ufm_image_t *);
710 static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
711 ddi_ufm_slot_t *);
712 static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
713
714 static int nvme_open(dev_t *, int, int, cred_t *);
715 static int nvme_close(dev_t, int, int, cred_t *);
716 static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
717
718 static int nvme_init_ns(nvme_t *, uint32_t);
719 static boolean_t nvme_attach_ns(nvme_t *, nvme_ioctl_common_t *);
720 static boolean_t nvme_detach_ns(nvme_t *, nvme_ioctl_common_t *);
721
722 static int nvme_minor_comparator(const void *, const void *);
723
724 static ddi_ufm_ops_t nvme_ufm_ops = {
725 NULL,
726 nvme_ufm_fill_image,
727 nvme_ufm_fill_slot,
728 nvme_ufm_getcaps
729 };
730
731 /*
732 * Minor numbers are split amongst those used for controllers and for device
733 * opens. The number of controller minors is limited based on MAXMIN32 per
734 * the theory statement. We allocate 1 million minors as a total guess at a
735 * number that'll probably be enough. The starting point of the open minors can
736 * be shifted to accommodate future expansion of the NVMe device minors.
737 */
738 #define NVME_MINOR_INST_SHIFT 9
739 #define NVME_MINOR(inst, nsid) (((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
740 #define NVME_MINOR_INST(minor) ((minor) >> NVME_MINOR_INST_SHIFT)
741 #define NVME_MINOR_NSID(minor) ((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
742 #define NVME_MINOR_MAX (NVME_MINOR(1, 0) - 2)
743
744 #define NVME_OPEN_NMINORS (1024 * 1024)
745 #define NVME_OPEN_MINOR_MIN (MAXMIN32 + 1)
746 #define NVME_OPEN_MINOR_MAX_EXCL (NVME_OPEN_MINOR_MIN + \
747 NVME_OPEN_NMINORS)
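
/*
 * To illustrate the arithmetic above: with NVME_MINOR_INST_SHIFT at 9,
 * NVME_MINOR(2, 3) is (2 << 9) | 3 = 1027 and NVME_MINOR_MAX is
 * (1 << 9) - 2 = 510. Per the theory statement's note that a 32-bit device
 * number has 18 bits of minor number, MAXMIN32 is 0x3ffff, so the cloning
 * opens use minor numbers from 0x40000 (262144) up to, but not including,
 * 262144 + NVME_OPEN_NMINORS.
 */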
748
749 #define NVME_BUMP_STAT(nvme, stat) \
750 atomic_inc_64(&nvme->n_device_stat.nds_ ## stat.value.ui64)
751
752 static void *nvme_state;
753 static kmem_cache_t *nvme_cmd_cache;
754
755 /*
756 * DMA attributes for queue DMA memory
757 *
758 * Queue DMA memory must be page aligned. The maximum length of a queue is
759 * 65536 entries, and an entry can be 64 bytes long.
760 */
761 static const ddi_dma_attr_t nvme_queue_dma_attr = {
762 .dma_attr_version = DMA_ATTR_V0,
763 .dma_attr_addr_lo = 0,
764 .dma_attr_addr_hi = 0xffffffffffffffffULL,
765 .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
766 .dma_attr_align = 0x1000,
767 .dma_attr_burstsizes = 0x7ff,
768 .dma_attr_minxfer = 0x1000,
769 .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
770 .dma_attr_seg = 0xffffffffffffffffULL,
771 .dma_attr_sgllen = 1,
772 .dma_attr_granular = 1,
773 .dma_attr_flags = 0,
774 };
775
776 /*
777 * DMA attributes for transfers using Physical Region Page (PRP) entries
778 *
779 * A PRP entry describes one page of DMA memory using the page size specified
780 * in the controller configuration's memory page size register (CC.MPS). It uses
781 * a 64bit base address aligned to this page size. There is no limitation on
782 * chaining PRPs together for arbitrarily large DMA transfers. These DMA
783 * attributes will be copied into the nvme_t during nvme_attach() and the
784 * dma_attr_maxxfer will be updated.
785 */
786 static const ddi_dma_attr_t nvme_prp_dma_attr = {
787 .dma_attr_version = DMA_ATTR_V0,
788 .dma_attr_addr_lo = 0,
789 .dma_attr_addr_hi = 0xffffffffffffffffULL,
790 .dma_attr_count_max = 0xfff,
791 .dma_attr_align = 0x1000,
792 .dma_attr_burstsizes = 0x7ff,
793 .dma_attr_minxfer = 0x1000,
794 .dma_attr_maxxfer = 0x1000,
795 .dma_attr_seg = 0xfff,
796 .dma_attr_sgllen = -1,
797 .dma_attr_granular = 1,
798 .dma_attr_flags = 0,
799 };
800
801 /*
802 * DMA attributes for transfers using scatter/gather lists
803 *
804 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a
805 * 32bit length field. SGL Segment and SGL Last Segment entries require the
806 * length to be a multiple of 16 bytes. While the SGL DMA attributes are copied
807 * into the nvme_t, they are not currently used for any I/O.
808 */
809 static const ddi_dma_attr_t nvme_sgl_dma_attr = {
810 .dma_attr_version = DMA_ATTR_V0,
811 .dma_attr_addr_lo = 0,
812 .dma_attr_addr_hi = 0xffffffffffffffffULL,
813 .dma_attr_count_max = 0xffffffffUL,
814 .dma_attr_align = 1,
815 .dma_attr_burstsizes = 0x7ff,
816 .dma_attr_minxfer = 0x10,
817 .dma_attr_maxxfer = 0xfffffffffULL,
818 .dma_attr_seg = 0xffffffffffffffffULL,
819 .dma_attr_sgllen = -1,
820 .dma_attr_granular = 0x10,
821 .dma_attr_flags = 0
822 };
823
824 static ddi_device_acc_attr_t nvme_reg_acc_attr = {
825 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
826 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
827 .devacc_attr_dataorder = DDI_STRICTORDER_ACC
828 };
829
830 /*
831 * ioctl validation policies. These are policies that determine which namespaces
832 * are allowed or disallowed for various operations. Note, all policy items
833 * should be explicitly listed here to help make it clear what our intent is.
834 * That is also why some of these are identical or repeated when they cover
835 * different ioctls.
836 */
837
838 /*
839 * The controller information ioctl generally contains read-only information
840 * about the controller that is sourced from multiple different pieces of
841 * information. This does not operate on a namespace and none are accepted.
842 */
843 static const nvme_ioctl_check_t nvme_check_ctrl_info = {
844 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
845 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
846 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
847 };
848
849 /*
850 * The kernel namespace information requires a namespace ID to be specified. It
851 * does not allow for the broadcast ID to be specified.
852 */
853 static const nvme_ioctl_check_t nvme_check_ns_info = {
854 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
855 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
856 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
857 };
858
859 /*
860 * Identify commands are allowed to operate on a namespace minor. Unfortunately,
861 * the namespace field in identify commands is a bit weird. In particular, some
862 * commands need a valid namespace, while others are namespace listing
863 * operations, which means illegal namespaces like zero are allowed.
864 */
865 static const nvme_ioctl_check_t nvme_check_identify = {
866 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
867 .nck_skip_ctrl = B_TRUE, .nck_ctrl_rewrite = B_FALSE,
868 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
869 };
870
871 /*
872 * The get log page command requires the ability to specify namespaces. When
873 * targeting the controller, one must use the broadcast NSID.
874 */
875 static const nvme_ioctl_check_t nvme_check_get_logpage = {
876 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
877 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
878 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
879 };
880
881 /*
882 * When getting a feature, we do not want rewriting behavior as most features do
883 * not require a namespace to be specified. Specific instances are checked in
884 * nvme_validate_get_feature().
885 */
886 static const nvme_ioctl_check_t nvme_check_get_feature = {
887 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
888 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
889 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
890 };
891
892 /*
893 * Format commands must target a namespace. The broadcast namespace must be used
894 * when referring to the controller.
895 */
896 static const nvme_ioctl_check_t nvme_check_format = {
897 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
898 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
899 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_WRITE
900 };
901
902 /*
903 * Attach and detach must always target a minor. However, the broadcast
904 * namespace is not allowed. We still perform rewriting so that specifying
905 * the controller node with 0 will be caught.
906 */
907 static const nvme_ioctl_check_t nvme_check_attach_detach = {
908 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
909 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
910 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
911 };
912
913 /*
914 * Firmware operations must not target a namespace and are only allowed from the
915 * controller.
916 */
917 static const nvme_ioctl_check_t nvme_check_firmware = {
918 .nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
919 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
920 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
921 };
922
923 /*
924 * Passthru commands are an odd set. We only allow them from the primary
925 * controller; however, we allow a namespace to be specified in them and allow
926 * the broadcast namespace. We do not perform rewriting because we don't know
927 * what the semantics are. We explicitly exempt passthru commands from needing
928 * an exclusive lock and leave it up to them to tell us the impact of the
929 * command and semantics. As this is a privileged interface and the semantics
930 * are arbitrary, there's not much we can do without some assistance from the
931 * consumer.
932 */
933 static const nvme_ioctl_check_t nvme_check_passthru = {
934 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
935 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
936 .nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
937 };
938
939 /*
940 * Lock operations are allowed to target a namespace, but must not be rewritten.
941 * There is no support for the broadcast namespace. This is the only ioctl that
942 * should skip exclusive checking as it's used to grant it.
943 */
944 static const nvme_ioctl_check_t nvme_check_locking = {
945 .nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
946 .nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
947 .nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_SKIP
948 };
949
950 static struct cb_ops nvme_cb_ops = {
951 .cb_open = nvme_open,
952 .cb_close = nvme_close,
953 .cb_strategy = nodev,
954 .cb_print = nodev,
955 .cb_dump = nodev,
956 .cb_read = nodev,
957 .cb_write = nodev,
958 .cb_ioctl = nvme_ioctl,
959 .cb_devmap = nodev,
960 .cb_mmap = nodev,
961 .cb_segmap = nodev,
962 .cb_chpoll = nochpoll,
963 .cb_prop_op = ddi_prop_op,
964 .cb_str = 0,
965 .cb_flag = D_NEW | D_MP,
966 .cb_rev = CB_REV,
967 .cb_aread = nodev,
968 .cb_awrite = nodev
969 };
970
971 static struct dev_ops nvme_dev_ops = {
972 .devo_rev = DEVO_REV,
973 .devo_refcnt = 0,
974 .devo_getinfo = ddi_no_info,
975 .devo_identify = nulldev,
976 .devo_probe = nulldev,
977 .devo_attach = nvme_attach,
978 .devo_detach = nvme_detach,
979 .devo_reset = nodev,
980 .devo_cb_ops = &nvme_cb_ops,
981 .devo_bus_ops = NULL,
982 .devo_power = NULL,
983 .devo_quiesce = nvme_quiesce,
984 };
985
986 static struct modldrv nvme_modldrv = {
987 .drv_modops = &mod_driverops,
988 .drv_linkinfo = "NVMe driver",
989 .drv_dev_ops = &nvme_dev_ops
990 };
991
992 static struct modlinkage nvme_modlinkage = {
993 .ml_rev = MODREV_1,
994 .ml_linkage = { &nvme_modldrv, NULL }
995 };
996
997 static bd_ops_t nvme_bd_ops = {
998 .o_version = BD_OPS_CURRENT_VERSION,
999 .o_drive_info = nvme_bd_driveinfo,
1000 .o_media_info = nvme_bd_mediainfo,
1001 .o_devid_init = nvme_bd_devid,
1002 .o_sync_cache = nvme_bd_sync,
1003 .o_read = nvme_bd_read,
1004 .o_write = nvme_bd_write,
1005 .o_free_space = nvme_bd_free_space,
1006 };
1007
1008 /*
1009 * This list will hold commands that have timed out and couldn't be aborted.
1010 * As we don't know what the hardware may still do with the DMA memory we can't
1011 * free them, so we'll keep them forever on this list where we can easily look
1012 * at them with mdb.
1013 */
1014 static struct list nvme_lost_cmds;
1015 static kmutex_t nvme_lc_mutex;
1016
1017 int
1018 _init(void)
1019 {
1020 int error;
1021
1022 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
1023 if (error != DDI_SUCCESS)
1024 return (error);
1025
1026 if ((nvme_open_minors = id_space_create("nvme_open_minors",
1027 NVME_OPEN_MINOR_MIN, NVME_OPEN_MINOR_MAX_EXCL)) == NULL) {
1028 ddi_soft_state_fini(&nvme_state);
1029 return (ENOMEM);
1030 }
1031
1032 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
1033 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1034
1035 mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
1036 list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
1037 offsetof(nvme_cmd_t, nc_list));
1038
1039 mutex_init(&nvme_open_minors_mutex, NULL, MUTEX_DRIVER, NULL);
1040 avl_create(&nvme_open_minors_avl, nvme_minor_comparator,
1041 sizeof (nvme_minor_t), offsetof(nvme_minor_t, nm_avl));
1042
1043 nvme_dead_taskq = taskq_create("nvme_dead_taskq", 1, minclsyspri, 1, 1,
1044 TASKQ_PREPOPULATE);
1045
1046 bd_mod_init(&nvme_dev_ops);
1047
1048 error = mod_install(&nvme_modlinkage);
1049 if (error != DDI_SUCCESS) {
1050 ddi_soft_state_fini(&nvme_state);
1051 id_space_destroy(nvme_open_minors);
1052 mutex_destroy(&nvme_lc_mutex);
1053 list_destroy(&nvme_lost_cmds);
1054 bd_mod_fini(&nvme_dev_ops);
1055 mutex_destroy(&nvme_open_minors_mutex);
1056 avl_destroy(&nvme_open_minors_avl);
1057 taskq_destroy(nvme_dead_taskq);
1058 }
1059
1060 return (error);
1061 }
1062
1063 int
1064 _fini(void)
1065 {
1066 int error;
1067
1068 if (!list_is_empty(&nvme_lost_cmds))
1069 return (DDI_FAILURE);
1070
1071 error = mod_remove(&nvme_modlinkage);
1072 if (error == DDI_SUCCESS) {
1073 ddi_soft_state_fini(&nvme_state);
1074 id_space_destroy(nvme_open_minors);
1075 kmem_cache_destroy(nvme_cmd_cache);
1076 mutex_destroy(&nvme_lc_mutex);
1077 list_destroy(&nvme_lost_cmds);
1078 bd_mod_fini(&nvme_dev_ops);
1079 mutex_destroy(&nvme_open_minors_mutex);
1080 avl_destroy(&nvme_open_minors_avl);
1081 taskq_destroy(nvme_dead_taskq);
1082 }
1083
1084 return (error);
1085 }
1086
1087 int
1088 _info(struct modinfo *modinfop)
1089 {
1090 return (mod_info(&nvme_modlinkage, modinfop));
1091 }
1092
1093 static inline void
1094 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
1095 {
1096 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1097
1098 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1099 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
1100 }
1101
1102 static inline void
1103 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
1104 {
1105 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1106
1107 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1108 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
1109 }
1110
1111 static inline uint64_t
1112 nvme_get64(nvme_t *nvme, uintptr_t reg)
1113 {
1114 uint64_t val;
1115
1116 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
1117
1118 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1119 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
1120
1121 return (val);
1122 }
1123
1124 static inline uint32_t
1125 nvme_get32(nvme_t *nvme, uintptr_t reg)
1126 {
1127 uint32_t val;
1128
1129 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
1130
1131 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
1132 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
1133
1134 return (val);
1135 }
1136
1137 static void
1138 nvme_mgmt_lock_fini(nvme_mgmt_lock_t *lock)
1139 {
1140 ASSERT3U(lock->nml_bd_own, ==, 0);
1141 mutex_destroy(&lock->nml_lock);
1142 cv_destroy(&lock->nml_cv);
1143 }
1144
1145 static void
1146 nvme_mgmt_lock_init(nvme_mgmt_lock_t *lock)
1147 {
1148 mutex_init(&lock->nml_lock, NULL, MUTEX_DRIVER, NULL);
1149 cv_init(&lock->nml_cv, NULL, CV_DRIVER, NULL);
1150 lock->nml_bd_own = 0;
1151 }
1152
1153 static void
1154 nvme_mgmt_unlock(nvme_t *nvme)
1155 {
1156 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1157
1158 cv_broadcast(&lock->nml_cv);
1159 mutex_exit(&lock->nml_lock);
1160 }
1161
1162 #ifdef DEBUG
1163 static boolean_t
1164 nvme_mgmt_lock_held(nvme_t *nvme)
1165 {
1166 return (MUTEX_HELD(&nvme->n_mgmt.nml_lock) != 0);
1167 }
1168 #endif /* DEBUG */
1169
1170 static void
1171 nvme_mgmt_lock(nvme_t *nvme, nvme_mgmt_lock_level_t level)
1172 {
1173 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1174 mutex_enter(&lock->nml_lock);
1175 while (lock->nml_bd_own != 0) {
1176 if (level == NVME_MGMT_LOCK_BDRO)
1177 break;
1178 cv_wait(&lock->nml_cv, &lock->nml_lock);
1179 }
1180 }
1181
1182 /*
1183 * This and nvme_mgmt_bd_end() are used to indicate that the driver is going to
1184 * be calling into a re-entrant blkdev related function. We cannot hold the lock
1185 * across such an operation and therefore must indicate that this is logically
1186 * held, while allowing other operations to proceed. This and nvme_mgmt_bd_end()
1187 * may only be called by a thread that already holds the nvme_mgmt_lock().
1188 */
1189 static void
1190 nvme_mgmt_bd_start(nvme_t *nvme)
1191 {
1192 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1193
1194 VERIFY(MUTEX_HELD(&lock->nml_lock));
1195 VERIFY3U(lock->nml_bd_own, ==, 0);
1196 lock->nml_bd_own = (uintptr_t)curthread;
1197 mutex_exit(&lock->nml_lock);
1198 }
1199
1200 static void
1201 nvme_mgmt_bd_end(nvme_t *nvme)
1202 {
1203 nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
1204
1205 mutex_enter(&lock->nml_lock);
1206 VERIFY3U(lock->nml_bd_own, ==, (uintptr_t)curthread);
1207 lock->nml_bd_own = 0;
1208 }
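
/*
 * Illustrative usage of the management lock around a re-entrant blkdev call;
 * this is a sketch rather than a copy of any particular call site:
 *
 *	nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
 *	...
 *	nvme_mgmt_bd_start(nvme);
 *	(void) bd_attach_handle(...);
 *	nvme_mgmt_bd_end(nvme);
 *	...
 *	nvme_mgmt_unlock(nvme);
 */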
1209
1210 /*
1211 * This is a central clearing house for marking an NVMe controller dead and/or
1212 * removed. This takes care of setting the flag, taking care of outstanding
1213 * blocked locks, and sending a DDI FMA impact. This is called from a precarious
1214 * place where locking is suspect. The only guarantee we have is that the nvme_t
1215 * is valid and won't disappear until we return.
1216 *
1217 * This should only be used after attach has been called.
1218 */
1219 static void
1220 nvme_ctrl_mark_dead(nvme_t *nvme, boolean_t removed)
1221 {
1222 boolean_t was_dead;
1223
1224 /*
1225 * See if we win the race to set things up here. If someone beat us to
1226 * it, we do not do anything.
1227 */
1228 was_dead = atomic_cas_32((volatile uint32_t *)&nvme->n_dead, B_FALSE,
1229 B_TRUE);
1230 if (was_dead) {
1231 return;
1232 }
1233
1234 /*
1235 * If this was removed, there is no reason to change the service impact.
1236 * However, then we need to change our default return code that we use
1237 * here to indicate that it was gone versus that it is dead.
1238 */
1239 if (removed) {
1240 nvme->n_dead_status = NVME_IOCTL_E_CTRL_GONE;
1241 } else {
1242 ASSERT3U(nvme->n_dead_status, ==, NVME_IOCTL_E_CTRL_DEAD);
1243 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1244 }
1245
1246 taskq_dispatch_ent(nvme_dead_taskq, nvme_rwlock_ctrl_dead, nvme,
1247 TQ_NOSLEEP, &nvme->n_dead_tqent);
1248 }
1249
1250 static boolean_t
1251 nvme_check_regs_hdl(nvme_t *nvme)
1252 {
1253 ddi_fm_error_t error;
1254
1255 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
1256
1257 if (error.fme_status != DDI_FM_OK)
1258 return (B_TRUE);
1259
1260 return (B_FALSE);
1261 }
1262
1263 static boolean_t
1264 nvme_check_dma_hdl(nvme_dma_t *dma)
1265 {
1266 ddi_fm_error_t error;
1267
1268 if (dma == NULL)
1269 return (B_FALSE);
1270
1271 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
1272
1273 if (error.fme_status != DDI_FM_OK)
1274 return (B_TRUE);
1275
1276 return (B_FALSE);
1277 }
1278
1279 static void
1280 nvme_free_dma_common(nvme_dma_t *dma)
1281 {
1282 if (dma->nd_dmah != NULL)
1283 (void) ddi_dma_unbind_handle(dma->nd_dmah);
1284 if (dma->nd_acch != NULL)
1285 ddi_dma_mem_free(&dma->nd_acch);
1286 if (dma->nd_dmah != NULL)
1287 ddi_dma_free_handle(&dma->nd_dmah);
1288 }
1289
1290 static void
1291 nvme_free_dma(nvme_dma_t *dma)
1292 {
1293 nvme_free_dma_common(dma);
1294 kmem_free(dma, sizeof (*dma));
1295 }
1296
1297 static void
1298 nvme_prp_dma_destructor(void *buf, void *private __unused)
1299 {
1300 nvme_dma_t *dma = (nvme_dma_t *)buf;
1301
1302 nvme_free_dma_common(dma);
1303 }
1304
1305 static int
1306 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
1307 size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
1308 {
1309 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
1310 &dma->nd_dmah) != DDI_SUCCESS) {
1311 /*
1312 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
1313 * the only other possible error is DDI_DMA_BADATTR which
1314 * indicates a driver bug which should cause a panic.
1315 */
1316 dev_err(nvme->n_dip, CE_PANIC,
1317 "!failed to get DMA handle, check DMA attributes");
1318 return (DDI_FAILURE);
1319 }
1320
1321 /*
1322 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
1323 * or the flags are conflicting, which isn't the case here.
1324 */
1325 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
1326 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
1327 &dma->nd_len, &dma->nd_acch);
1328
1329 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
1330 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1331 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
1332 dev_err(nvme->n_dip, CE_WARN,
1333 "!failed to bind DMA memory");
1334 NVME_BUMP_STAT(nvme, dma_bind_err);
1335 nvme_free_dma_common(dma);
1336 return (DDI_FAILURE);
1337 }
1338
1339 return (DDI_SUCCESS);
1340 }
1341
1342 static int
1343 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
1344 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
1345 {
1346 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
1347
1348 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
1349 DDI_SUCCESS) {
1350 *ret = NULL;
1351 kmem_free(dma, sizeof (nvme_dma_t));
1352 return (DDI_FAILURE);
1353 }
1354
1355 bzero(dma->nd_memp, dma->nd_len);
1356
1357 *ret = dma;
1358 return (DDI_SUCCESS);
1359 }
1360
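/*
 * Constructor for the PRP DMA cache. Each cache object is a page-sized DMA
 * buffer that can later back a PRP list for an I/O command, avoiding a DMA
 * allocation on every I/O.
 */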
1361 static int
1362 nvme_prp_dma_constructor(void *buf, void *private, int flags __unused)
1363 {
1364 nvme_dma_t *dma = (nvme_dma_t *)buf;
1365 nvme_t *nvme = (nvme_t *)private;
1366
1367 dma->nd_dmah = NULL;
1368 dma->nd_acch = NULL;
1369
1370 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
1371 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
1372 return (-1);
1373 }
1374
1375 ASSERT(dma->nd_ncookie == 1);
1376
1377 dma->nd_cached = B_TRUE;
1378
1379 return (0);
1380 }
1381
1382 static int
1383 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
1384 uint_t flags, nvme_dma_t **dma)
1385 {
1386 uint32_t len = nentry * qe_len;
1387 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
1388
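/*
 * Queue memory must land in a single DMA cookie, so round the allocation up
 * to a whole number of pages to match the queue DMA attributes.
 */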
1389 len = roundup(len, nvme->n_pagesize);
1390
1391 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
1392 != DDI_SUCCESS) {
1393 dev_err(nvme->n_dip, CE_WARN,
1394 "!failed to get DMA memory for queue");
1395 goto fail;
1396 }
1397
1398 if ((*dma)->nd_ncookie != 1) {
1399 dev_err(nvme->n_dip, CE_WARN,
1400 "!got too many cookies for queue DMA");
1401 goto fail;
1402 }
1403
1404 return (DDI_SUCCESS);
1405
1406 fail:
1407 if (*dma) {
1408 nvme_free_dma(*dma);
1409 *dma = NULL;
1410 }
1411
1412 return (DDI_FAILURE);
1413 }
1414
1415 static void
1416 nvme_free_cq(nvme_cq_t *cq)
1417 {
1418 mutex_destroy(&cq->ncq_mutex);
1419
1420 if (cq->ncq_cmd_taskq != NULL)
1421 taskq_destroy(cq->ncq_cmd_taskq);
1422
1423 if (cq->ncq_dma != NULL)
1424 nvme_free_dma(cq->ncq_dma);
1425
1426 kmem_free(cq, sizeof (*cq));
1427 }
1428
1429 static void
1430 nvme_free_qpair(nvme_qpair_t *qp)
1431 {
1432 int i;
1433
1434 mutex_destroy(&qp->nq_mutex);
1435 sema_destroy(&qp->nq_sema);
1436
1437 if (qp->nq_sqdma != NULL)
1438 nvme_free_dma(qp->nq_sqdma);
1439
1440 if (qp->nq_active_cmds > 0)
1441 for (i = 0; i != qp->nq_nentry; i++)
1442 if (qp->nq_cmd[i] != NULL)
1443 nvme_free_cmd(qp->nq_cmd[i]);
1444
1445 if (qp->nq_cmd != NULL)
1446 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);
1447
1448 kmem_free(qp, sizeof (nvme_qpair_t));
1449 }
1450
1451 /*
1452 * Destroy the pre-allocated cq array, but only free individual completion
1453 * queues from the given starting index.
1454 */
1455 static void
1456 nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
1457 {
1458 uint_t i;
1459
1460 for (i = start; i < nvme->n_cq_count; i++)
1461 if (nvme->n_cq[i] != NULL)
1462 nvme_free_cq(nvme->n_cq[i]);
1463
1464 kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
1465 }
1466
1467 static int
1468 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
1469 uint_t nthr)
1470 {
1471 nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
1472 char name[64]; /* large enough for the taskq name */
1473
1474 mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
1475 DDI_INTR_PRI(nvme->n_intr_pri));
1476
1477 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
1478 DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
1479 goto fail;
1480
1481 cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
1482 cq->ncq_nentry = nentry;
1483 cq->ncq_id = idx;
1484 cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);
1485
1486 /*
1487 * Each completion queue has its own command taskq.
1488 */
1489 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
1490 ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);
1491
1492 cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
1493 TASKQ_PREPOPULATE);
1494
1495 if (cq->ncq_cmd_taskq == NULL) {
1496 dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
1497 "taskq for cq %u", idx);
1498 goto fail;
1499 }
1500
1501 *cqp = cq;
1502 return (DDI_SUCCESS);
1503
1504 fail:
1505 nvme_free_cq(cq);
1506 *cqp = NULL;
1507
1508 return (DDI_FAILURE);
1509 }
1510
1511 /*
1512 * Create the n_cq array big enough to hold "ncq" completion queues.
1513 * If the array already exists it will be re-sized (but only larger).
1514 * The admin queue is included in this array, which boosts the
1515 * max number of entries to UINT16_MAX + 1.
1516 */
1517 static int
1518 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
1519 {
1520 nvme_cq_t **cq;
1521 uint_t i, cq_count;
1522
1523 ASSERT3U(ncq, >, nvme->n_cq_count);
1524
1525 cq = nvme->n_cq;
1526 cq_count = nvme->n_cq_count;
1527
1528 nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
1529 nvme->n_cq_count = ncq;
1530
1531 for (i = 0; i < cq_count; i++)
1532 nvme->n_cq[i] = cq[i];
1533
1534 for (; i < nvme->n_cq_count; i++)
1535 if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
1536 DDI_SUCCESS)
1537 goto fail;
1538
1539 if (cq != NULL)
1540 kmem_free(cq, sizeof (*cq) * cq_count);
1541
1542 return (DDI_SUCCESS);
1543
1544 fail:
1545 nvme_destroy_cq_array(nvme, cq_count);
1546 /*
1547 * Restore the original array
1548 */
1549 nvme->n_cq_count = cq_count;
1550 nvme->n_cq = cq;
1551
1552 return (DDI_FAILURE);
1553 }
1554
1555 static int
1556 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
1557 uint_t idx)
1558 {
1559 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
1560 uint_t cq_idx;
1561
1562 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
1563 DDI_INTR_PRI(nvme->n_intr_pri));
1564
1565 /*
1566 * The NVMe spec defines that a full queue has one empty (unused) slot;
1567 * initialize the semaphore accordingly.
1568 */
1569 sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);
1570
1571 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
1572 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
1573 goto fail;
1574
1575 /*
1576 * idx == 0 is adminq, those above 0 are shared io completion queues.
1577 */
1578 cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
1579 qp->nq_cq = nvme->n_cq[cq_idx];
1580 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
1581 qp->nq_nentry = nentry;
1582
1583 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
1584
1585 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
1586 qp->nq_next_cmd = 0;
1587
1588 *nqp = qp;
1589 return (DDI_SUCCESS);
1590
1591 fail:
1592 nvme_free_qpair(qp);
1593 *nqp = NULL;
1594
1595 return (DDI_FAILURE);
1596 }
1597
1598 /*
1599 * One might reasonably consider that the nvme_cmd_cache should have a cache
1600 * constructor and destructor that takes care of the mutex/cv init/destroy, and
1601 * that nvme_free_cmd should reset more fields such that allocation becomes
1602 * simpler. This is not currently implemented as:
1603 * - nvme_cmd_cache is a global cache, shared across nvme instances and
1604 * therefore there is no easy access to the corresponding nvme_t in the
1605 * constructor to determine the required interrupt priority.
1606 * - Most fields in nvme_cmd_t would need to be zeroed in nvme_free_cmd while
1607 * preserving the mutex/cv. It is easier to be able to zero the entire
1608 * structure and then init the mutex/cv only in the unlikely event that we
1609 * want an admin command.
1610 */
1611 static nvme_cmd_t *
1612 nvme_alloc_cmd(nvme_t *nvme, int kmflag)
1613 {
1614 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
1615
1616 if (cmd != NULL) {
1617 bzero(cmd, sizeof (nvme_cmd_t));
1618 cmd->nc_nvme = nvme;
1619 }
1620
1621 return (cmd);
1622 }
1623
1624 static nvme_cmd_t *
1625 nvme_alloc_admin_cmd(nvme_t *nvme, int kmflag)
1626 {
1627 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, kmflag);
1628
1629 if (cmd != NULL) {
1630 cmd->nc_flags |= NVME_CMD_F_USELOCK;
1631 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
1632 DDI_INTR_PRI(nvme->n_intr_pri));
1633 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
1634 }
1635
1636 return (cmd);
1637 }
1638
1639 static void
1640 nvme_free_cmd(nvme_cmd_t *cmd)
1641 {
1642 /* Don't free commands on the lost commands list. */
1643 if (list_link_active(&cmd->nc_list))
1644 return;
1645
1646 if (cmd->nc_dma) {
1647 nvme_free_dma(cmd->nc_dma);
1648 cmd->nc_dma = NULL;
1649 }
1650
1651 if (cmd->nc_prp) {
1652 kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp);
1653 cmd->nc_prp = NULL;
1654 }
1655
1656 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
1657 cv_destroy(&cmd->nc_cv);
1658 mutex_destroy(&cmd->nc_mutex);
1659 }
1660
1661 kmem_cache_free(nvme_cmd_cache, cmd);
1662 }
1663
1664 static void
1665 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
1666 {
1667 sema_p(&qp->nq_sema);
1668 nvme_submit_cmd_common(qp, cmd, qtimeoutp);
1669 }
1670
1671 static int
1672 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1673 {
1674 if (cmd->nc_nvme->n_dead) {
1675 return (EIO);
1676 }
1677
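/*
 * I/O submission must not block. Use sema_tryp() so that the caller gets
 * EAGAIN and can retry later if the submission queue is currently full.
 */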
1678 if (sema_tryp(&qp->nq_sema) == 0)
1679 return (EAGAIN);
1680
1681 nvme_submit_cmd_common(qp, cmd, NULL);
1682 return (0);
1683 }
1684
1685 /*
1686 * Common command submission routine. If `qtimeoutp` is not NULL then it will
1687 * be set to the sum of the timeouts of any active commands ahead of the one
1688 * being submitted.
1689 */
1690 static void
1691 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
1692 {
1693 nvme_reg_sqtdbl_t tail = { 0 };
1694
1695 /*
1696 * We don't need to take a lock on cmd since it is not yet enqueued.
1697 */
1698 cmd->nc_submit_ts = gethrtime();
1699 cmd->nc_state = NVME_CMD_SUBMITTED;
1700
1701 mutex_enter(&qp->nq_mutex);
1702
1703 /*
1704 * Now that we hold the queue pair lock, we must check whether or not
1705 * the controller has been listed as dead (e.g. was removed due to
1706 * hotplug). This is necessary as otherwise we could race with
1707 * nvme_remove_callback(). Because this has not been enqueued, we don't
1708 * call nvme_unqueue_cmd(), which is why we must manually decrement the
1709 * semaphore.
1710 */
1711 if (cmd->nc_nvme->n_dead) {
1712 cmd->nc_queue_ts = gethrtime();
1713 cmd->nc_state = NVME_CMD_QUEUED;
1714 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback,
1715 cmd, TQ_NOSLEEP, &cmd->nc_tqent);
1716 sema_v(&qp->nq_sema);
1717 mutex_exit(&qp->nq_mutex);
1718 return;
1719 }
1720
1721 /*
1722 * Try to insert the cmd into the active cmd array at the nq_next_cmd
1723 * slot. If the slot is already occupied, advance to the next slot and
1724 * try again. This can happen for long running commands like async event
1725 * requests.
1726 */
1727 while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
1728 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1729 qp->nq_cmd[qp->nq_next_cmd] = cmd;
1730
1731 /*
1732 * We keep track of the number of active commands in this queue, and
1733 * the sum of the timeouts for those active commands.
1734 */
1735 qp->nq_active_cmds++;
1736 if (qtimeoutp != NULL)
1737 *qtimeoutp = qp->nq_active_timeout;
1738 qp->nq_active_timeout += cmd->nc_timeout;
1739
1740 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
1741 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
1742 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
1743 sizeof (nvme_sqe_t) * qp->nq_sqtail,
1744 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
1745 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1746
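/*
 * Advance the submission queue tail and write the new value to the tail
 * doorbell register to notify the controller of the pending entry.
 */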
1747 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
1748 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
1749
1750 mutex_exit(&qp->nq_mutex);
1751 }
1752
1753 static nvme_cmd_t *
1754 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
1755 {
1756 nvme_cmd_t *cmd;
1757
1758 ASSERT(mutex_owned(&qp->nq_mutex));
1759 ASSERT3S(cid, <, qp->nq_nentry);
1760
1761 cmd = qp->nq_cmd[cid];
1762 /*
1763 * Some controllers will erroneously add things to the completion queue
1764 * for which there is no matching outstanding command. If this happens,
1765 * it is almost certainly a controller firmware bug since nq_mutex
1766 * is held across command submission and ringing the queue doorbell,
1767 * and is also held in this function.
1768 *
1769 * If we see such an unexpected command, there is not much we can do.
1770 * These will be logged and counted in nvme_get_completed(), but
1771 * otherwise ignored.
1772 */
1773 if (cmd == NULL)
1774 return (NULL);
1775 qp->nq_cmd[cid] = NULL;
1776 ASSERT3U(qp->nq_active_cmds, >, 0);
1777 qp->nq_active_cmds--;
1778 ASSERT3U(qp->nq_active_timeout, >=, cmd->nc_timeout);
1779 qp->nq_active_timeout -= cmd->nc_timeout;
1780 sema_v(&qp->nq_sema);
1781
1782 ASSERT3P(cmd, !=, NULL);
1783 ASSERT3P(cmd->nc_nvme, ==, nvme);
1784 ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);
1785
1786 return (cmd);
1787 }
1788
1789 /*
1790 * This is called when an admin abort has failed to complete, once for the
1791 * original command and once for the abort itself. At this point the controller
1792 * has been marked dead. The commands are considered lost, de-queued if
1793 * possible, and placed on a global lost commands list so that they cannot be
1794 * freed and so that any DMA memory they have is not re-used.
1795 */
1796 static void
1797 nvme_lost_cmd(nvme_t *nvme, nvme_cmd_t *cmd)
1798 {
1799 ASSERT(mutex_owned(&cmd->nc_mutex));
1800
1801 switch (cmd->nc_state) {
1802 case NVME_CMD_SUBMITTED: {
1803 nvme_qpair_t *qp = nvme->n_ioq[cmd->nc_sqid];
1804
1805 /*
1806 * The command is still in the submitted state, meaning that we
1807 * have not processed a completion queue entry for it. De-queue
1808 * should be successful and if the hardware does later report
1809 * completion we'll skip it as a command for which we aren't
1810 * expecting a response (see nvme_unqueue_cmd()).
1811 */
1812 mutex_enter(&qp->nq_mutex);
1813 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
1814 mutex_exit(&qp->nq_mutex);
1815 }
1816 case NVME_CMD_ALLOCATED:
1817 case NVME_CMD_COMPLETED:
1818 /*
1819 * If the command has not been submitted, or has completed,
1820 * there is nothing to do here. In the event of an abort
1821 * command timeout, we can end up here in the process of
1822 * "losing" the original command. It's possible that command
1823 * has actually completed (or been queued on the taskq) in the
1824 * interim.
1825 */
1826 break;
1827 case NVME_CMD_QUEUED:
1828 /*
1829 * The command is on the taskq, awaiting callback. This should
1830 * be fairly rapid so wait for completion.
1831 */
1832 while (cmd->nc_state != NVME_CMD_COMPLETED)
1833 cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
1834 break;
1835 case NVME_CMD_LOST:
1836 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1837 "%s: command %p already lost", __func__, (void *)cmd);
1838 break;
1839 }
1840
1841 cmd->nc_state = NVME_CMD_LOST;
1842
1843 mutex_enter(&nvme_lc_mutex);
1844 list_insert_head(&nvme_lost_cmds, cmd);
1845 mutex_exit(&nvme_lc_mutex);
1846 }
1847
1848 /*
1849 * Get the command tied to the next completed cqe and bump along completion
1850 * queue head counter.
1851 */
1852 static nvme_cmd_t *
1853 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
1854 {
1855 nvme_qpair_t *qp;
1856 nvme_cqe_t *cqe;
1857 nvme_cmd_t *cmd;
1858
1859 ASSERT(mutex_owned(&cq->ncq_mutex));
1860
1861 retry:
1862 cqe = &cq->ncq_cq[cq->ncq_head];
1863
1864 /* Check phase tag of CQE. Hardware inverts it for new entries. */
1865 if (cqe->cqe_sf.sf_p == cq->ncq_phase)
1866 return (NULL);
1867
1868 qp = nvme->n_ioq[cqe->cqe_sqid];
1869
1870 mutex_enter(&qp->nq_mutex);
1871 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
1872 mutex_exit(&qp->nq_mutex);
1873
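/*
 * The completion entry carries the controller's current submission queue
 * head pointer (SQHD); record it for this queue pair.
 */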
1874 qp->nq_sqhead = cqe->cqe_sqhd;
1875 cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;
1876
1877 /* Toggle phase on wrap-around. */
1878 if (cq->ncq_head == 0)
1879 cq->ncq_phase = cq->ncq_phase != 0 ? 0 : 1;
1880
1881 if (cmd == NULL) {
1882 dev_err(nvme->n_dip, CE_WARN,
1883 "!received completion for unknown cid 0x%x", cqe->cqe_cid);
1884 NVME_BUMP_STAT(nvme, unknown_cid);
1885 /*
1886 * We want to ignore this unexpected completion entry as it
1887 * is most likely a result of a bug in the controller firmware.
1888 * However, if we return NULL, then callers will assume there
1889 * are no more pending commands for this wakeup. Retry to keep
1890 * enumerating commands until the phase tag indicates there are
1891 * no more and we are really done.
1892 */
1893 goto retry;
1894 }
1895
1896 ASSERT3U(cmd->nc_sqid, ==, cqe->cqe_sqid);
1897 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));
1898
1899 return (cmd);
1900 }
1901
1902 /*
1903 * Process all completed commands on the io completion queue.
1904 */
1905 static uint_t
1906 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
1907 {
1908 nvme_reg_cqhdbl_t head = { 0 };
1909 nvme_cmd_t *cmd;
1910 uint_t completed = 0;
1911
1912 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
1913 DDI_SUCCESS)
1914 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
1915 __func__);
1916
1917 mutex_enter(&cq->ncq_mutex);
1918
1919 while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
1920 /*
1921 * NVME_CMD_F_USELOCK is applied to all commands which are
1922 * going to be waited for by another thread in nvme_wait_cmd
1923 * and indicates that the lock should be taken before modifying
1924 * protected fields, and that the mutex has been initialised.
1925 * Commands which do not require the mutex to be held have not
1926 * initialised it (to reduce overhead).
1927 */
1928 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
1929 mutex_enter(&cmd->nc_mutex);
1930 /*
1931 * The command could have been de-queued as lost while
1932 * we waited on the lock, in which case we drop it.
1933 */
1934 if (cmd->nc_state == NVME_CMD_LOST) {
1935 mutex_exit(&cmd->nc_mutex);
1936 completed++;
1937 continue;
1938 }
1939 }
1940 cmd->nc_queue_ts = gethrtime();
1941 cmd->nc_state = NVME_CMD_QUEUED;
1942 if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0)
1943 mutex_exit(&cmd->nc_mutex);
1944 taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
1945 TQ_NOSLEEP, &cmd->nc_tqent);
1946
1947 completed++;
1948 }
1949
1950 if (completed > 0) {
1951 /*
1952 * Update the completion queue head doorbell.
1953 */
1954 head.b.cqhdbl_cqh = cq->ncq_head;
1955 nvme_put32(nvme, cq->ncq_hdbl, head.r);
1956 }
1957
1958 mutex_exit(&cq->ncq_mutex);
1959
1960 return (completed);
1961 }
1962
1963 static nvme_cmd_t *
1964 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
1965 {
1966 nvme_cq_t *cq = qp->nq_cq;
1967 nvme_reg_cqhdbl_t head = { 0 };
1968 nvme_cmd_t *cmd;
1969
1970 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
1971 DDI_SUCCESS)
1972 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
1973 __func__);
1974
1975 mutex_enter(&cq->ncq_mutex);
1976
1977 if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
1978 head.b.cqhdbl_cqh = cq->ncq_head;
1979 nvme_put32(nvme, cq->ncq_hdbl, head.r);
1980 }
1981
1982 mutex_exit(&cq->ncq_mutex);
1983
1984 return (cmd);
1985 }
1986
1987 static int
1988 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
1989 {
1990 nvme_cqe_t *cqe = &cmd->nc_cqe;
1991
1992 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1993 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
1994 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
1995 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
1996 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
1997
1998 if (cmd->nc_xfer != NULL)
1999 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2000
2001 if (cmd->nc_nvme->n_strict_version) {
2002 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2003 }
2004
2005 return (EIO);
2006 }
2007
2008 static int
2009 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
2010 {
2011 nvme_cqe_t *cqe = &cmd->nc_cqe;
2012
2013 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2014 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
2015 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
2016 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
2017 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
2018 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
2019 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2020 }
2021
2022 return (EIO);
2023 }
2024
2025 static int
2026 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
2027 {
2028 nvme_cqe_t *cqe = &cmd->nc_cqe;
2029
2030 switch (cqe->cqe_sf.sf_sc) {
2031 case NVME_CQE_SC_INT_NVM_WRITE:
2032 /* write fail */
2033 /* TODO: post ereport */
2034 if (cmd->nc_xfer != NULL)
2035 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2036 return (EIO);
2037
2038 case NVME_CQE_SC_INT_NVM_READ:
2039 /* read fail */
2040 /* TODO: post ereport */
2041 if (cmd->nc_xfer != NULL)
2042 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2043 return (EIO);
2044
2045 default:
2046 return (nvme_check_unknown_cmd_status(cmd));
2047 }
2048 }
2049
2050 static int
2051 nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
2052 {
2053 nvme_cqe_t *cqe = &cmd->nc_cqe;
2054
2055 switch (cqe->cqe_sf.sf_sc) {
2056 case NVME_CQE_SC_GEN_SUCCESS:
2057 return (0);
2058
2059 /*
2060 * Errors indicating a bug in the driver should cause a panic.
2061 */
2062 case NVME_CQE_SC_GEN_INV_OPC:
2063 /* Invalid Command Opcode */
2064 NVME_BUMP_STAT(cmd->nc_nvme, inv_cmd_err);
2065 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2066 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2067 "programming error: invalid opcode in cmd %p",
2068 (void *)cmd);
2069 }
2070 return (EINVAL);
2071
2072 case NVME_CQE_SC_GEN_INV_FLD:
2073 /* Invalid Field in Command */
2074 NVME_BUMP_STAT(cmd->nc_nvme, inv_field_err);
2075 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2076 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2077 "programming error: invalid field in cmd %p",
2078 (void *)cmd);
2079 }
2080 return (EIO);
2081
2082 case NVME_CQE_SC_GEN_ID_CNFL:
2083 /* Command ID Conflict */
2084 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2085 "cmd ID conflict in cmd %p", (void *)cmd);
2086 return (0);
2087
2088 case NVME_CQE_SC_GEN_INV_NS:
2089 /* Invalid Namespace or Format */
2090 NVME_BUMP_STAT(cmd->nc_nvme, inv_nsfmt_err);
2091 if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
2092 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
2093 "programming error: invalid NS/format in cmd %p",
2094 (void *)cmd);
2095 }
2096 return (EINVAL);
2097
2098 case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
2099 /* LBA Out Of Range */
2100 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2101 "LBA out of range in cmd %p", (void *)cmd);
2102 return (0);
2103
2104 /*
2105 * Non-fatal errors, handle gracefully.
2106 */
2107 case NVME_CQE_SC_GEN_DATA_XFR_ERR:
2108 /* Data Transfer Error (DMA) */
2109 /* TODO: post ereport */
2110 NVME_BUMP_STAT(cmd->nc_nvme, data_xfr_err);
2111 if (cmd->nc_xfer != NULL)
2112 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2113 return (EIO);
2114
2115 case NVME_CQE_SC_GEN_INTERNAL_ERR:
2116 /*
2117 * Internal Error. The spec (v1.0, section 4.5.1.2) says
2118 * detailed error information is returned as an async event,
2119 * so we pretty much ignore the error here and handle it
2120 * in the async event handler.
2121 */
2122 NVME_BUMP_STAT(cmd->nc_nvme, internal_err);
2123 if (cmd->nc_xfer != NULL)
2124 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2125 return (EIO);
2126
2127 case NVME_CQE_SC_GEN_ABORT_REQUEST:
2128 /*
2129 * Command Abort Requested. This normally happens only when a
2130 * command times out.
2131 */
2132 /* TODO: post ereport or change blkdev to handle this? */
2133 NVME_BUMP_STAT(cmd->nc_nvme, abort_rq_err);
2134 return (ECANCELED);
2135
2136 case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
2137 /* Command Aborted due to Power Loss Notification */
2138 NVME_BUMP_STAT(cmd->nc_nvme, abort_pwrloss_err);
2139 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2140 return (EIO);
2141
2142 case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
2143 /* Command Aborted due to SQ Deletion */
2144 NVME_BUMP_STAT(cmd->nc_nvme, abort_sq_del);
2145 return (EIO);
2146
2147 case NVME_CQE_SC_GEN_NVM_CAP_EXC:
2148 /* Capacity Exceeded */
2149 NVME_BUMP_STAT(cmd->nc_nvme, nvm_cap_exc);
2150 if (cmd->nc_xfer != NULL)
2151 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
2152 return (EIO);
2153
2154 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
2155 /* Namespace Not Ready */
2156 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_notrdy);
2157 if (cmd->nc_xfer != NULL)
2158 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2159 return (EIO);
2160
2161 case NVME_CQE_SC_GEN_NVM_FORMATTING:
2162 /* Format in progress (1.2) */
2163 if (!NVME_VERSION_ATLEAST(&cmd->nc_nvme->n_version, 1, 2))
2164 return (nvme_check_unknown_cmd_status(cmd));
2165 NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_formatting);
2166 if (cmd->nc_xfer != NULL)
2167 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
2168 return (EIO);
2169
2170 default:
2171 return (nvme_check_unknown_cmd_status(cmd));
2172 }
2173 }
2174
2175 static int
2176 nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
2177 {
2178 nvme_cqe_t *cqe = &cmd->nc_cqe;
2179
2180 switch (cqe->cqe_sf.sf_sc) {
2181 case NVME_CQE_SC_SPC_INV_CQ:
2182 /* Completion Queue Invalid */
2183 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
2184 NVME_BUMP_STAT(cmd->nc_nvme, inv_cq_err);
2185 return (EINVAL);
2186
2187 case NVME_CQE_SC_SPC_INV_QID:
2188 /* Invalid Queue Identifier */
2189 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2190 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
2191 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
2192 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2193 NVME_BUMP_STAT(cmd->nc_nvme, inv_qid_err);
2194 return (EINVAL);
2195
2196 case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
2197 /* Max Queue Size Exceeded */
2198 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
2199 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2200 NVME_BUMP_STAT(cmd->nc_nvme, max_qsz_exc);
2201 return (EINVAL);
2202
2203 case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
2204 /* Abort Command Limit Exceeded */
2205 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
2206 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2207 "abort command limit exceeded in cmd %p", (void *)cmd);
2208 return (0);
2209
2210 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
2211 /* Async Event Request Limit Exceeded */
2212 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
2213 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
2214 "async event request limit exceeded in cmd %p",
2215 (void *)cmd);
2216 return (0);
2217
2218 case NVME_CQE_SC_SPC_INV_INT_VECT:
2219 /* Invalid Interrupt Vector */
2220 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
2221 NVME_BUMP_STAT(cmd->nc_nvme, inv_int_vect);
2222 return (EINVAL);
2223
2224 case NVME_CQE_SC_SPC_INV_LOG_PAGE:
2225 /* Invalid Log Page */
2226 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
2227 NVME_BUMP_STAT(cmd->nc_nvme, inv_log_page);
2228 return (EINVAL);
2229
2230 case NVME_CQE_SC_SPC_INV_FORMAT:
2231 /* Invalid Format */
2232 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
2233 NVME_BUMP_STAT(cmd->nc_nvme, inv_format);
2234 if (cmd->nc_xfer != NULL)
2235 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2236 return (EINVAL);
2237
2238 case NVME_CQE_SC_SPC_INV_Q_DEL:
2239 /* Invalid Queue Deletion */
2240 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
2241 NVME_BUMP_STAT(cmd->nc_nvme, inv_q_del);
2242 return (EINVAL);
2243
2244 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
2245 /* Conflicting Attributes */
2246 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
2247 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2248 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2249 NVME_BUMP_STAT(cmd->nc_nvme, cnfl_attr);
2250 if (cmd->nc_xfer != NULL)
2251 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2252 return (EINVAL);
2253
2254 case NVME_CQE_SC_SPC_NVM_INV_PROT:
2255 /* Invalid Protection Information */
2256 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
2257 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
2258 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2259 NVME_BUMP_STAT(cmd->nc_nvme, inv_prot);
2260 if (cmd->nc_xfer != NULL)
2261 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2262 return (EINVAL);
2263
2264 case NVME_CQE_SC_SPC_NVM_READONLY:
2265 /* Write to Read Only Range */
2266 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
2267 NVME_BUMP_STAT(cmd->nc_nvme, readonly);
2268 if (cmd->nc_xfer != NULL)
2269 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
2270 return (EROFS);
2271
2272 case NVME_CQE_SC_SPC_INV_FW_SLOT:
2273 /* Invalid Firmware Slot */
2274 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwslot);
2275 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2276 return (EINVAL);
2277
2278 case NVME_CQE_SC_SPC_INV_FW_IMG:
2279 /* Invalid Firmware Image */
2280 NVME_BUMP_STAT(cmd->nc_nvme, inv_fwimg);
2281 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2282 return (EINVAL);
2283
2284 case NVME_CQE_SC_SPC_FW_RESET:
2285 /* Conventional Reset Required */
2286 NVME_BUMP_STAT(cmd->nc_nvme, fwact_creset);
2287 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2288 return (0);
2289
2290 case NVME_CQE_SC_SPC_FW_NSSR:
2291 /* NVMe Subsystem Reset Required */
2292 NVME_BUMP_STAT(cmd->nc_nvme, fwact_nssr);
2293 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2294 return (0);
2295
2296 case NVME_CQE_SC_SPC_FW_NEXT_RESET:
2297 /* Activation Requires Reset */
2298 NVME_BUMP_STAT(cmd->nc_nvme, fwact_reset);
2299 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2300 return (0);
2301
2302 case NVME_CQE_SC_SPC_FW_MTFA:
2303 /* Activation Requires Maximum Time Violation */
2304 NVME_BUMP_STAT(cmd->nc_nvme, fwact_mtfa);
2305 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2306 return (EAGAIN);
2307
2308 case NVME_CQE_SC_SPC_FW_PROHIBITED:
2309 /* Activation Prohibited */
2310 NVME_BUMP_STAT(cmd->nc_nvme, fwact_prohibited);
2311 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2312 return (EINVAL);
2313
2314 case NVME_CQE_SC_SPC_FW_OVERLAP:
2315 /* Overlapping Firmware Ranges */
2316 NVME_BUMP_STAT(cmd->nc_nvme, fw_overlap);
2317 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD ||
2318 cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
2319 return (EINVAL);
2320
2321 default:
2322 return (nvme_check_unknown_cmd_status(cmd));
2323 }
2324 }
2325
2326 static inline int
2327 nvme_check_cmd_status(nvme_cmd_t *cmd)
2328 {
2329 nvme_cqe_t *cqe = &cmd->nc_cqe;
2330
2331 /*
2332 * Take a shortcut if the controller is dead, or if
2333 * command status indicates no error.
2334 */
2335 if (cmd->nc_nvme->n_dead)
2336 return (EIO);
2337
2338 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2339 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2340 return (0);
2341
2342 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
2343 return (nvme_check_generic_cmd_status(cmd));
2344 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
2345 return (nvme_check_specific_cmd_status(cmd));
2346 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
2347 return (nvme_check_integrity_cmd_status(cmd));
2348 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
2349 return (nvme_check_vendor_cmd_status(cmd));
2350
2351 return (nvme_check_unknown_cmd_status(cmd));
2352 }
2353
2354 /*
2355 * Check the command status as used by an ioctl path, without converting it to
2356 * an errno. All of the command status checking still occurs, but the controller
2357 * error is passed back as-is.
2358 */
2359 static boolean_t
2360 nvme_check_cmd_status_ioctl(nvme_cmd_t *cmd, nvme_ioctl_common_t *ioc)
2361 {
2362 nvme_cqe_t *cqe = &cmd->nc_cqe;
2363 nvme_t *nvme = cmd->nc_nvme;
2364
2365 if (nvme->n_dead) {
2366 return (nvme_ioctl_error(ioc, nvme->n_dead_status, 0, 0));
2367 }
2368
2369 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2370 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
2371 return (B_TRUE);
2372
2373 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) {
2374 (void) nvme_check_generic_cmd_status(cmd);
2375 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) {
2376 (void) nvme_check_specific_cmd_status(cmd);
2377 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) {
2378 (void) nvme_check_integrity_cmd_status(cmd);
2379 } else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) {
2380 (void) nvme_check_vendor_cmd_status(cmd);
2381 } else {
2382 (void) nvme_check_unknown_cmd_status(cmd);
2383 }
2384
2385 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_ERROR,
2386 cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_sc));
2387 }
2388
2389 static int
2390 nvme_abort_cmd(nvme_cmd_t *cmd, const uint32_t sec)
2391 {
2392 nvme_t *nvme = cmd->nc_nvme;
2393 nvme_cmd_t *abort_cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
2394 nvme_abort_cmd_t ac = { 0 };
2395 int ret = 0;
2396
2397 sema_p(&nvme->n_abort_sema);
2398
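/*
 * CDW10 of the ABORT command identifies the command to be aborted by its
 * command identifier (CID) and the submission queue it was issued on.
 */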
2399 ac.b.ac_cid = cmd->nc_sqe.sqe_cid;
2400 ac.b.ac_sqid = cmd->nc_sqid;
2401
2402 abort_cmd->nc_sqid = 0;
2403 abort_cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
2404 abort_cmd->nc_callback = nvme_wakeup_cmd;
2405 abort_cmd->nc_sqe.sqe_cdw10 = ac.r;
2406
2407 /*
2408 * Send the ABORT to the hardware. The ABORT command will return _after_
2409 * the aborted command has completed (aborted or otherwise) so we must
2410 * drop the aborted command's lock to allow it to complete.
2411 * We want to allow at least `nvme_abort_cmd_timeout` seconds for the
2412 * abort to be processed, but more if we are aborting a long-running
2413 * command to give that time to complete/abort too.
2414 */
2415 mutex_exit(&cmd->nc_mutex);
2416 nvme_admin_cmd(abort_cmd, MAX(nvme_abort_cmd_timeout, sec));
2417 mutex_enter(&cmd->nc_mutex);
2418
2419 sema_v(&nvme->n_abort_sema);
2420
2421 /*
2422 * If the abort command itself has timed out, it will have been
2423 * de-queued so that its callback will not be called after this point,
2424 * and its state will be NVME_CMD_LOST.
2425 *
2426 * nvme_admin_cmd(abort_cmd)
2427 * -> nvme_wait_cmd(abort_cmd)
2428 * -> nvme_cmd(abort_cmd)
2429 * | -> nvme_admin_cmd(cmd)
2430 * | -> nvme_wait_cmd(cmd)
2431 * | -> nvme_ctrl_mark_dead()
2432 * | -> nvme_lost_cmd(cmd)
2433 * | -> cmd->nc_state = NVME_CMD_LOST
2434 * and here we are.
2435 */
2436 if (abort_cmd->nc_state == NVME_CMD_LOST) {
2437 dev_err(nvme->n_dip, CE_WARN,
2438 "!ABORT of command %d/%d timed out",
2439 cmd->nc_sqe.sqe_cid, cmd->nc_sqid);
2440 NVME_BUMP_STAT(nvme, abort_timeout);
2441 ret = EIO;
2442 } else if ((ret = nvme_check_cmd_status(abort_cmd)) != 0) {
2443 dev_err(nvme->n_dip, CE_WARN,
2444 "!ABORT of command %d/%d "
2445 "failed with sct = %x, sc = %x",
2446 cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2447 abort_cmd->nc_cqe.cqe_sf.sf_sct,
2448 abort_cmd->nc_cqe.cqe_sf.sf_sc);
2449 NVME_BUMP_STAT(nvme, abort_failed);
2450 } else {
2451 boolean_t success = ((abort_cmd->nc_cqe.cqe_dw0 & 1) == 0);
2452
2453 dev_err(nvme->n_dip, CE_WARN,
2454 "!ABORT of command %d/%d %ssuccessful",
2455 cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2456 success ? "" : "un");
2457
2458 if (success) {
2459 NVME_BUMP_STAT(nvme, abort_successful);
2460 } else {
2461 NVME_BUMP_STAT(nvme, abort_unsuccessful);
2462 }
2463 }
2464
2465 /*
2466 * This abort command (abort_cmd) has either completed or been de-queued as
2467 * lost in nvme_wait_cmd. Either way it's safe to free it here.
2468 */
2469 nvme_free_cmd(abort_cmd);
2470
2471 return (ret);
2472 }
2473
2474 /*
2475 * nvme_wait_cmd -- wait for command completion or timeout
2476 *
2477 * In case of a serious error or a timeout of the abort command the hardware
2478 * will be declared dead and FMA will be notified.
2479 */
2480 static void
2481 nvme_wait_cmd(nvme_cmd_t *cmd, uint32_t sec)
2482 {
2483 nvme_t *nvme = cmd->nc_nvme;
2484 nvme_reg_csts_t csts;
2485
2486 ASSERT(mutex_owned(&cmd->nc_mutex));
2487
2488 while (cmd->nc_state != NVME_CMD_COMPLETED) {
2489 clock_t timeout = ddi_get_lbolt() +
2490 drv_usectohz((long)sec * MICROSEC);
2491
2492 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) {
2493 /*
2494 * If this command is on the task queue then we don't
2495 * consider it to have timed out. We are waiting for
2496 * the callback to be invoked, the timing of which can
2497 * be affected by system load and should not count
2498 * against the device; continue to wait.
2499 * While this doesn't help deal with the possibility of
2500 * a command timing out between being placed on the CQ
2501 * and arriving on the taskq, we expect interrupts to
2502 * run fairly promptly making this a small window.
2503 */
2504 if (cmd->nc_state != NVME_CMD_QUEUED)
2505 break;
2506 }
2507 }
2508
2509 if (cmd->nc_state == NVME_CMD_COMPLETED) {
2510 DTRACE_PROBE1(nvme_admin_cmd_completed, nvme_cmd_t *, cmd);
2511 nvme_admin_stat_cmd(nvme, cmd);
2512 return;
2513 }
2514
2515 /*
2516 * The command timed out.
2517 */
2518
2519 DTRACE_PROBE1(nvme_admin_cmd_timeout, nvme_cmd_t *, cmd);
2520 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2521 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
2522 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
2523 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
2524 NVME_BUMP_STAT(nvme, cmd_timeout);
2525
2526 /*
2527 * Check controller for fatal status, any errors associated with the
2528 * register or DMA handle, or for a double timeout (abort command timed
2529 * out). If necessary log a warning and call FMA.
2530 */
2531 if (csts.b.csts_cfs ||
2532 nvme_check_regs_hdl(nvme) ||
2533 nvme_check_dma_hdl(cmd->nc_dma) ||
2534 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
2535 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2536 nvme_lost_cmd(nvme, cmd);
2537 return;
2538 }
2539
2540 /* Issue an abort for the command that has timed out */
2541 if (nvme_abort_cmd(cmd, sec) == 0) {
2542 /*
2543 * If the abort completed, whether or not it was
2544 * successful in aborting the command, that command
2545 * will also have completed with an appropriate
2546 * status.
2547 */
2548 while (cmd->nc_state != NVME_CMD_COMPLETED)
2549 cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
2550 return;
2551 }
2552
2553 /*
2554 * Otherwise, the abort has also timed out or failed, which
2555 * will have marked the controller dead. De-queue the original command
2556 * and add it to the lost commands list.
2557 */
2558 VERIFY(cmd->nc_nvme->n_dead);
2559 nvme_lost_cmd(nvme, cmd);
2560 }
2561
2562 static void
2563 nvme_wakeup_cmd(void *arg)
2564 {
2565 nvme_cmd_t *cmd = arg;
2566
2567 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
2568
2569 mutex_enter(&cmd->nc_mutex);
2570 cmd->nc_state = NVME_CMD_COMPLETED;
2571 cv_signal(&cmd->nc_cv);
2572 mutex_exit(&cmd->nc_mutex);
2573 }
2574
2575 static void
2576 nvme_async_event_task(void *arg)
2577 {
2578 nvme_cmd_t *cmd = arg;
2579 nvme_t *nvme = cmd->nc_nvme;
2580 nvme_error_log_entry_t *error_log = NULL;
2581 nvme_health_log_t *health_log = NULL;
2582 nvme_nschange_list_t *nslist = NULL;
2583 size_t logsize = 0;
2584 nvme_async_event_t event;
2585
2586 /*
2587 * Check for errors associated with the async request itself. The only
2588 * command-specific error is "async event limit exceeded", which
2589 * indicates a programming error in the driver and causes a panic in
2590 * nvme_check_cmd_status().
2591 *
2592 * Other possible errors are various scenarios where the async request
2593 * was aborted, or internal errors in the device. Internal errors are
2594 * reported to FMA; the command aborts need no special handling here.
2595 *
2596 * And finally, at least qemu nvme does not support async events,
2597 * and will return NVME_CQE_SC_GEN_INV_OPC | DNR. If so, we
2598 * will avoid posting async events.
2599 */
2600
2601 if (nvme_check_cmd_status(cmd) != 0) {
2602 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
2603 "!async event request returned failure, sct = 0x%x, "
2604 "sc = 0x%x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
2605 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
2606 cmd->nc_cqe.cqe_sf.sf_m);
2607
2608 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2609 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
2610 nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
2611 }
2612
2613 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2614 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
2615 cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
2616 nvme->n_async_event_supported = B_FALSE;
2617 }
2618
2619 nvme_free_cmd(cmd);
2620 return;
2621 }
2622
2623 event.r = cmd->nc_cqe.cqe_dw0;
2624
2625 /* Clear CQE and re-submit the async request. */
2626 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
2627 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
2628 cmd = NULL; /* cmd can no longer be used after resubmission */
2629
2630 switch (event.b.ae_type) {
2631 case NVME_ASYNC_TYPE_ERROR:
2632 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
2633 if (!nvme_get_logpage_int(nvme, B_FALSE,
2634 (void **)&error_log, &logsize,
2635 NVME_LOGPAGE_ERROR)) {
2636 return;
2637 }
2638 } else {
2639 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
2640 "async event reply: type=0x%x logpage=0x%x",
2641 event.b.ae_type, event.b.ae_logpage);
2642 NVME_BUMP_STAT(nvme, wrong_logpage);
2643 return;
2644 }
2645
2646 switch (event.b.ae_info) {
2647 case NVME_ASYNC_ERROR_INV_SQ:
2648 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2649 "invalid submission queue");
2650 return;
2651
2652 case NVME_ASYNC_ERROR_INV_DBL:
2653 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
2654 "invalid doorbell write value");
2655 return;
2656
2657 case NVME_ASYNC_ERROR_DIAGFAIL:
2658 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
2659 nvme_ctrl_mark_dead(nvme, B_FALSE);
2660 NVME_BUMP_STAT(nvme, diagfail_event);
2661 break;
2662
2663 case NVME_ASYNC_ERROR_PERSISTENT:
2664 dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
2665 "device error");
2666 nvme_ctrl_mark_dead(nvme, B_FALSE);
2667 NVME_BUMP_STAT(nvme, persistent_event);
2668 break;
2669
2670 case NVME_ASYNC_ERROR_TRANSIENT:
2671 dev_err(nvme->n_dip, CE_WARN, "!transient internal "
2672 "device error");
2673 /* TODO: send ereport */
2674 NVME_BUMP_STAT(nvme, transient_event);
2675 break;
2676
2677 case NVME_ASYNC_ERROR_FW_LOAD:
2678 dev_err(nvme->n_dip, CE_WARN,
2679 "!firmware image load error");
2680 NVME_BUMP_STAT(nvme, fw_load_event);
2681 break;
2682 }
2683 break;
2684
2685 case NVME_ASYNC_TYPE_HEALTH:
2686 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
2687 if (!nvme_get_logpage_int(nvme, B_FALSE,
2688 (void **)&health_log, &logsize,
2689 NVME_LOGPAGE_HEALTH)) {
2690 return;
2691 }
2692 } else {
2693 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
2694 "type=0x%x logpage=0x%x", event.b.ae_type,
2695 event.b.ae_logpage);
2696 NVME_BUMP_STAT(nvme, wrong_logpage);
2697 return;
2698 }
2699
2700 switch (event.b.ae_info) {
2701 case NVME_ASYNC_HEALTH_RELIABILITY:
2702 dev_err(nvme->n_dip, CE_WARN,
2703 "!device reliability compromised");
2704 /* TODO: send ereport */
2705 NVME_BUMP_STAT(nvme, reliability_event);
2706 break;
2707
2708 case NVME_ASYNC_HEALTH_TEMPERATURE:
2709 dev_err(nvme->n_dip, CE_WARN,
2710 "!temperature above threshold");
2711 /* TODO: send ereport */
2712 NVME_BUMP_STAT(nvme, temperature_event);
2713 break;
2714
2715 case NVME_ASYNC_HEALTH_SPARE:
2716 dev_err(nvme->n_dip, CE_WARN,
2717 "!spare space below threshold");
2718 /* TODO: send ereport */
2719 NVME_BUMP_STAT(nvme, spare_event);
2720 break;
2721 }
2722 break;
2723
2724 case NVME_ASYNC_TYPE_NOTICE:
2725 switch (event.b.ae_info) {
2726 case NVME_ASYNC_NOTICE_NS_CHANGE:
2727 if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) {
2728 dev_err(nvme->n_dip, CE_WARN,
2729 "!wrong logpage in async event reply: "
2730 "type=0x%x logpage=0x%x",
2731 event.b.ae_type, event.b.ae_logpage);
2732 NVME_BUMP_STAT(nvme, wrong_logpage);
2733 break;
2734 }
2735
2736 dev_err(nvme->n_dip, CE_NOTE,
2737 "namespace attribute change event, "
2738 "logpage = 0x%x", event.b.ae_logpage);
2739 NVME_BUMP_STAT(nvme, notice_event);
2740
2741 if (!nvme_get_logpage_int(nvme, B_FALSE,
2742 (void **)&nslist, &logsize,
2743 NVME_LOGPAGE_NSCHANGE)) {
2744 break;
2745 }
2746
2747 if (nslist->nscl_ns[0] == UINT32_MAX) {
2748 dev_err(nvme->n_dip, CE_CONT,
2749 "more than %u namespaces have changed.\n",
2750 NVME_NSCHANGE_LIST_SIZE);
2751 break;
2752 }
2753
2754 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
2755 for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) {
2756 uint32_t nsid = nslist->nscl_ns[i];
2757
2758 if (nsid == 0) /* end of list */
2759 break;
2760
2761 dev_err(nvme->n_dip, CE_NOTE,
2762 "!namespace nvme%d/%u has changed.",
2763 ddi_get_instance(nvme->n_dip), nsid);
2764
2765 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
2766 continue;
2767
2768 nvme_mgmt_bd_start(nvme);
2769 bd_state_change(nvme_nsid2ns(nvme,
2770 nsid)->ns_bd_hdl);
2771 nvme_mgmt_bd_end(nvme);
2772 }
2773 nvme_mgmt_unlock(nvme);
2774
2775 break;
2776
2777 case NVME_ASYNC_NOTICE_FW_ACTIVATE:
2778 dev_err(nvme->n_dip, CE_NOTE,
2779 "firmware activation starting, "
2780 "logpage = 0x%x", event.b.ae_logpage);
2781 NVME_BUMP_STAT(nvme, notice_event);
2782 break;
2783
2784 case NVME_ASYNC_NOTICE_TELEMETRY:
2785 dev_err(nvme->n_dip, CE_NOTE,
2786 "telemetry log changed, "
2787 "logpage = 0x%x", event.b.ae_logpage);
2788 NVME_BUMP_STAT(nvme, notice_event);
2789 break;
2790
2791 case NVME_ASYNC_NOTICE_NS_ASYMM:
2792 dev_err(nvme->n_dip, CE_NOTE,
2793 "asymmetric namespace access change, "
2794 "logpage = 0x%x", event.b.ae_logpage);
2795 NVME_BUMP_STAT(nvme, notice_event);
2796 break;
2797
2798 case NVME_ASYNC_NOTICE_LATENCYLOG:
2799 dev_err(nvme->n_dip, CE_NOTE,
2800 "predictable latency event aggregate log change, "
2801 "logpage = 0x%x", event.b.ae_logpage);
2802 NVME_BUMP_STAT(nvme, notice_event);
2803 break;
2804
2805 case NVME_ASYNC_NOTICE_LBASTATUS:
2806 dev_err(nvme->n_dip, CE_NOTE,
2807 "LBA status information alert, "
2808 "logpage = 0x%x", event.b.ae_logpage);
2809 NVME_BUMP_STAT(nvme, notice_event);
2810 break;
2811
2812 case NVME_ASYNC_NOTICE_ENDURANCELOG:
2813 dev_err(nvme->n_dip, CE_NOTE,
2814 "endurance group event aggregate log page change, "
2815 "logpage = 0x%x", event.b.ae_logpage);
2816 NVME_BUMP_STAT(nvme, notice_event);
2817 break;
2818
2819 default:
2820 dev_err(nvme->n_dip, CE_WARN,
2821 "!unknown notice async event received, "
2822 "info = 0x%x, logpage = 0x%x", event.b.ae_info,
2823 event.b.ae_logpage);
2824 NVME_BUMP_STAT(nvme, unknown_event);
2825 break;
2826 }
2827 break;
2828
2829 case NVME_ASYNC_TYPE_VENDOR:
2830 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
2831 "received, info = 0x%x, logpage = 0x%x", event.b.ae_info,
2832 event.b.ae_logpage);
2833 NVME_BUMP_STAT(nvme, vendor_event);
2834 break;
2835
2836 default:
2837 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
2838 "type = 0x%x, info = 0x%x, logpage = 0x%x", event.b.ae_type,
2839 event.b.ae_info, event.b.ae_logpage);
2840 NVME_BUMP_STAT(nvme, unknown_event);
2841 break;
2842 }
2843
2844 if (error_log != NULL)
2845 kmem_free(error_log, logsize);
2846
2847 if (health_log != NULL)
2848 kmem_free(health_log, logsize);
2849
2850 if (nslist != NULL)
2851 kmem_free(nslist, logsize);
2852 }
2853
2854 static void
2855 nvme_admin_cmd(nvme_cmd_t *cmd, uint32_t sec)
2856 {
2857 uint32_t qtimeout;
2858
2859 ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
2860
2861 mutex_enter(&cmd->nc_mutex);
2862 cmd->nc_timeout = sec;
2863 nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd, &qtimeout);
2864 /*
2865 * We will wait for a total of this command's specified timeout plus
2866 * the sum of the timeouts of any commands queued ahead of this one. If
2867 * we aren't first in the queue, this will inflate the timeout somewhat
2868 * but these times are not critical and it means that if we get stuck
2869 * behind a long running command such as a namespace format then we
2870 * won't time out and trigger an abort.
2871 */
2872 nvme_wait_cmd(cmd, sec + qtimeout);
2873 mutex_exit(&cmd->nc_mutex);
2874 }
2875
2876 static void
2877 nvme_async_event(nvme_t *nvme)
2878 {
2879 nvme_cmd_t *cmd;
2880
2881 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
2882 cmd->nc_sqid = 0;
2883 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
2884 cmd->nc_callback = nvme_async_event_task;
2885 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
2886
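/*
 * Submit the asynchronous event request. Completion is handled by
 * nvme_async_event_task(), which processes the event and re-submits the
 * request.
 */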
2887 nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
2888 }
2889
2890 /*
2891 * Some commands, such as format or vendor unique commands, manipulate or
2892 * destroy the data in a namespace. Before issuing such a command, we make sure
2893 * that none of the namespaces that will be impacted are actually attached.
2894 */
2895 static boolean_t
2896 nvme_no_blkdev_attached(nvme_t *nvme, uint32_t nsid)
2897 {
2898 ASSERT(nvme_mgmt_lock_held(nvme));
2899 ASSERT3U(nsid, !=, 0);
2900
2901 if (nsid != NVME_NSID_BCAST) {
2902 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
2903 return (!ns->ns_attached);
2904 }
2905
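/*
 * The broadcast namespace ID covers every namespace, so check them all.
 */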
2906 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
2907 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
2908
2909 if (ns->ns_attached) {
2910 return (B_FALSE);
2911 }
2912 }
2913
2914 return (B_TRUE);
2915 }
2916
2917 static boolean_t
2918 nvme_format_nvm(nvme_t *nvme, nvme_ioctl_format_t *ioc)
2919 {
2920 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
2921 nvme_format_nvm_t format_nvm = { 0 };
2922 boolean_t ret;
2923
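/*
 * CDW10 of the FORMAT NVM command carries the LBA format index and the
 * secure erase settings taken from the ioctl request.
 */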
2924 format_nvm.b.fm_lbaf = bitx32(ioc->nif_lbaf, 3, 0);
2925 format_nvm.b.fm_ses = bitx32(ioc->nif_ses, 2, 0);
2926
2927 cmd->nc_sqid = 0;
2928 cmd->nc_callback = nvme_wakeup_cmd;
2929 cmd->nc_sqe.sqe_nsid = ioc->nif_common.nioc_nsid;
2930 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
2931 cmd->nc_sqe.sqe_cdw10 = format_nvm.r;
2932
2933 /*
2934 * We don't want to panic on any format commands. There are two reasons
2935 * for this:
2936 *
2937 * 1) All format commands are initiated by users. We don't want to panic
2938 * on user commands.
2939 *
2940 * 2) Several devices like the Samsung SM951 don't allow formatting of
2941 * all namespaces in one command and we'd prefer to handle that
2942 * gracefully.
2943 */
2944 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
2945
2946 nvme_admin_cmd(cmd, nvme_format_cmd_timeout);
2947
2948 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nif_common)) {
2949 dev_err(nvme->n_dip, CE_WARN,
2950 "!FORMAT failed with sct = %x, sc = %x",
2951 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2952 ret = B_FALSE;
2953 goto fail;
2954 }
2955
2956 ret = B_TRUE;
2957 fail:
2958 nvme_free_cmd(cmd);
2959 return (ret);
2960 }
2961
2962 /*
2963 * Retrieve a specific log page. The contents of the log page request should
2964 * have already been validated by the system.
2965 */
2966 static boolean_t
2967 nvme_get_logpage(nvme_t *nvme, boolean_t user, nvme_ioctl_get_logpage_t *log,
2968 void **buf)
2969 {
2970 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
2971 nvme_getlogpage_dw10_t dw10;
2972 uint32_t offlo, offhi;
2973 nvme_getlogpage_dw11_t dw11;
2974 nvme_getlogpage_dw14_t dw14;
2975 uint32_t ndw;
2976 boolean_t ret = B_FALSE;
2977
2978 bzero(&dw10, sizeof (dw10));
2979 bzero(&dw11, sizeof (dw11));
2980 bzero(&dw14, sizeof (dw14));
2981
2982 cmd->nc_sqid = 0;
2983 cmd->nc_callback = nvme_wakeup_cmd;
2984 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
2985 cmd->nc_sqe.sqe_nsid = log->nigl_common.nioc_nsid;
2986
2987 if (user)
2988 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
2989
2990 /*
2991 * The size field is the number of double words, but is a zero-based
2992 * value. We need to store our actual value minus one.
2993 */
2994 ndw = (uint32_t)(log->nigl_len / 4);
2995 ASSERT3U(ndw, >, 0);
2996 ndw--;
2997
2998 dw10.b.lp_lid = bitx32(log->nigl_lid, 7, 0);
2999 dw10.b.lp_lsp = bitx32(log->nigl_lsp, 6, 0);
3000 dw10.b.lp_rae = bitx32(log->nigl_lsp, 0, 0);
3001 dw10.b.lp_lnumdl = bitx32(ndw, 15, 0);
3002
3003 dw11.b.lp_numdu = bitx32(ndw, 31, 16);
3004 dw11.b.lp_lsi = bitx32(log->nigl_lsi, 15, 0);
3005
3006 offlo = bitx64(log->nigl_offset, 31, 0);
3007 offhi = bitx64(log->nigl_offset, 63, 32);
3008
3009 dw14.b.lp_csi = bitx32(log->nigl_csi, 7, 0);
3010
3011 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3012 cmd->nc_sqe.sqe_cdw11 = dw11.r;
3013 cmd->nc_sqe.sqe_cdw12 = offlo;
3014 cmd->nc_sqe.sqe_cdw13 = offhi;
3015 cmd->nc_sqe.sqe_cdw14 = dw14.r;
3016
3017 if (nvme_zalloc_dma(nvme, log->nigl_len, DDI_DMA_READ,
3018 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
3019 dev_err(nvme->n_dip, CE_WARN,
3020 "!nvme_zalloc_dma failed for GET LOG PAGE");
3021 ret = nvme_ioctl_error(&log->nigl_common,
3022 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3023 goto fail;
3024 }
3025
3026 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
3027 ret = nvme_ioctl_error(&log->nigl_common,
3028 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3029 goto fail;
3030 }
3031 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3032
3033 if (!nvme_check_cmd_status_ioctl(cmd, &log->nigl_common)) {
3034 if (!user) {
3035 dev_err(nvme->n_dip, CE_WARN,
3036 "!GET LOG PAGE failed with sct = %x, sc = %x",
3037 cmd->nc_cqe.cqe_sf.sf_sct,
3038 cmd->nc_cqe.cqe_sf.sf_sc);
3039 }
3040 ret = B_FALSE;
3041 goto fail;
3042 }
3043
3044 *buf = kmem_alloc(log->nigl_len, KM_SLEEP);
3045 bcopy(cmd->nc_dma->nd_memp, *buf, log->nigl_len);
3046
3047 ret = B_TRUE;
3048 fail:
3049 nvme_free_cmd(cmd);
3050
3051 return (ret);
3052 }
3053
3054 /*
3055 * This is an internal wrapper for when the kernel wants to get a log page.
3056 * Currently this assumes that the only thing that is required is the log page
3057 * ID. If more information is required, we'll be better served to just use the
3058 * general ioctl interface.
3059 */
3060 static boolean_t
3061 nvme_get_logpage_int(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize,
3062 uint8_t lid)
3063 {
3064 const nvme_log_page_info_t *info = NULL;
3065 nvme_ioctl_get_logpage_t log;
3066 nvme_valid_ctrl_data_t data;
3067 boolean_t bret;
3068 bool var;
3069
3070 for (size_t i = 0; i < nvme_std_log_npages; i++) {
3071 if (nvme_std_log_pages[i].nlpi_lid == lid &&
3072 nvme_std_log_pages[i].nlpi_csi == NVME_CSI_NVM) {
3073 info = &nvme_std_log_pages[i];
3074 break;
3075 }
3076 }
3077
3078 if (info == NULL) {
3079 return (B_FALSE);
3080 }
3081
3082 data.vcd_vers = &nvme->n_version;
3083 data.vcd_id = nvme->n_idctl;
3084 bzero(&log, sizeof (log));
3085 log.nigl_common.nioc_nsid = NVME_NSID_BCAST;
3086 log.nigl_csi = info->nlpi_csi;
3087 log.nigl_lid = info->nlpi_lid;
3088 log.nigl_len = nvme_log_page_info_size(info, &data, &var);
3089
3090 /*
3091 * We only support getting standard fixed-length log pages through the
3092 * kernel interface at this time. If a log page either has an unknown
3093 * size or has a variable length, then we cannot get it.
3094 */
3095 if (log.nigl_len == 0 || var) {
3096 return (B_FALSE);
3097 }
3098
3099 bret = nvme_get_logpage(nvme, user, &log, buf);
3100 if (!bret) {
3101 return (B_FALSE);
3102 }
3103
3104 *bufsize = log.nigl_len;
3105 return (B_TRUE);
3106 }
3107
3108 static boolean_t
3109 nvme_identify(nvme_t *nvme, boolean_t user, nvme_ioctl_identify_t *ioc,
3110 void **buf)
3111 {
3112 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3113 boolean_t ret = B_FALSE;
3114 nvme_identify_dw10_t dw10;
3115
3116 ASSERT3P(buf, !=, NULL);
3117
3118 bzero(&dw10, sizeof (dw10));
3119
3120 cmd->nc_sqid = 0;
3121 cmd->nc_callback = nvme_wakeup_cmd;
3122 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
3123 cmd->nc_sqe.sqe_nsid = ioc->nid_common.nioc_nsid;
3124
3125 dw10.b.id_cns = bitx32(ioc->nid_cns, 7, 0);
3126 dw10.b.id_cntid = bitx32(ioc->nid_ctrlid, 15, 0);
3127
3128 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3129
3130 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
3131 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
3132 dev_err(nvme->n_dip, CE_WARN,
3133 "!nvme_zalloc_dma failed for IDENTIFY");
3134 ret = nvme_ioctl_error(&ioc->nid_common,
3135 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
3136 goto fail;
3137 }
3138
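/*
 * The IDENTIFY payload is a single 4k buffer, so it must be describable by
 * the two PRP entries available in the submission queue entry; needing more
 * than two DMA cookies indicates a problem with the DMA attributes.
 */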
3139 if (cmd->nc_dma->nd_ncookie > 2) {
3140 dev_err(nvme->n_dip, CE_WARN,
3141 "!too many DMA cookies for IDENTIFY");
3142 NVME_BUMP_STAT(nvme, too_many_cookies);
3143 ret = nvme_ioctl_error(&ioc->nid_common,
3144 NVME_IOCTL_E_BAD_PRP, 0, 0);
3145 goto fail;
3146 }
3147
3148 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
3149 if (cmd->nc_dma->nd_ncookie > 1) {
3150 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
3151 &cmd->nc_dma->nd_cookie);
3152 cmd->nc_sqe.sqe_dptr.d_prp[1] =
3153 cmd->nc_dma->nd_cookie.dmac_laddress;
3154 }
3155
3156 if (user)
3157 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3158
3159 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3160
3161 if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nid_common)) {
3162 dev_err(nvme->n_dip, CE_WARN,
3163 "!IDENTIFY failed with sct = %x, sc = %x",
3164 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3165 ret = B_FALSE;
3166 goto fail;
3167 }
3168
3169 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
3170 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
3171 ret = B_TRUE;
3172
3173 fail:
3174 nvme_free_cmd(cmd);
3175
3176 return (ret);
3177 }
3178
3179 static boolean_t
3180 nvme_identify_int(nvme_t *nvme, uint32_t nsid, uint8_t cns, void **buf)
3181 {
3182 nvme_ioctl_identify_t id;
3183
3184 bzero(&id, sizeof (nvme_ioctl_identify_t));
3185 id.nid_common.nioc_nsid = nsid;
3186 id.nid_cns = cns;
3187
3188 return (nvme_identify(nvme, B_FALSE, &id, buf));
3189 }
3190
3191 static int
3192 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
3193 uint32_t val, uint32_t *res)
3194 {
3195 _NOTE(ARGUNUSED(nsid));
3196 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3197 int ret = EINVAL;
3198
3199 ASSERT(res != NULL);
3200
3201 cmd->nc_sqid = 0;
3202 cmd->nc_callback = nvme_wakeup_cmd;
3203 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
3204 cmd->nc_sqe.sqe_cdw10 = feature;
3205 cmd->nc_sqe.sqe_cdw11 = val;
3206
3207 if (user)
3208 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
3209
3210 switch (feature) {
3211 case NVME_FEAT_WRITE_CACHE:
3212 if (!nvme->n_write_cache_present)
3213 goto fail;
3214 break;
3215
3216 case NVME_FEAT_NQUEUES:
3217 break;
3218
3219 default:
3220 goto fail;
3221 }
3222
3223 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3224
3225 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3226 dev_err(nvme->n_dip, CE_WARN,
3227 "!SET FEATURES %d failed with sct = %x, sc = %x",
3228 feature, cmd->nc_cqe.cqe_sf.sf_sct,
3229 cmd->nc_cqe.cqe_sf.sf_sc);
3230 goto fail;
3231 }
3232
3233 *res = cmd->nc_cqe.cqe_dw0;
3234
3235 fail:
3236 nvme_free_cmd(cmd);
3237 return (ret);
3238 }
3239
3240 static int
3241 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
3242 {
3243 nvme_write_cache_t nwc = { 0 };
3244
3245 if (enable)
3246 nwc.b.wc_wce = 1;
3247
3248 /*
3249 * We've seen some cases where this fails due to us being told we've
3250 * specified an invalid namespace when operating against the Xen xcp-ng
3251 * qemu NVMe virtual device. As such, we generally ensure that trying to
3252 * enable this doesn't lead us to panic. It's not completely clear why
3253 * specifying namespace zero here fails, but not when we're setting the
3254 * number of queues below.
3255 */
3256 return (nvme_set_features(nvme, B_TRUE, 0, NVME_FEAT_WRITE_CACHE,
3257 nwc.r, &nwc.r));
3258 }
3259
3260 static int
3261 nvme_set_nqueues(nvme_t *nvme)
3262 {
3263 nvme_nqueues_t nq = { 0 };
3264 int ret;
3265
3266 /*
3267 * The default is to allocate one completion queue per vector.
3268 */
3269 if (nvme->n_completion_queues == -1)
3270 nvme->n_completion_queues = nvme->n_intr_cnt;
3271
3272 /*
3273 * There is no point in having more completion queues than
3274 * interrupt vectors.
3275 */
3276 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3277 nvme->n_intr_cnt);
3278
3279 /*
3280 * The default is to use one submission queue per completion queue.
3281 */
3282 if (nvme->n_submission_queues == -1)
3283 nvme->n_submission_queues = nvme->n_completion_queues;
3284
3285 /*
3286 * There is no point in having more completion queues than
3287 * submission queues.
3288 */
3289 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3290 nvme->n_submission_queues);
3291
3292 ASSERT(nvme->n_submission_queues > 0);
3293 ASSERT(nvme->n_completion_queues > 0);
3294
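/*
 * The Number of Queues feature uses 0's based values: we request the desired
 * counts minus one, and the counts granted in the completion dword are
 * likewise incremented by one when read back below.
 */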
3295 nq.b.nq_nsq = nvme->n_submission_queues - 1;
3296 nq.b.nq_ncq = nvme->n_completion_queues - 1;
3297
3298 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
3299 &nq.r);
3300
3301 if (ret == 0) {
3302 /*
3303 * Never use more than the requested number of queues.
3304 */
3305 nvme->n_submission_queues = MIN(nvme->n_submission_queues,
3306 nq.b.nq_nsq + 1);
3307 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
3308 nq.b.nq_ncq + 1);
3309 }
3310
3311 return (ret);
3312 }
3313
3314 static int
3315 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq)
3316 {
3317 nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3318 nvme_create_queue_dw10_t dw10 = { 0 };
3319 nvme_create_cq_dw11_t c_dw11 = { 0 };
3320 int ret;
3321
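/*
 * The queue size field is a 0's based value. Interrupt vectors are assigned
 * to completion queues round-robin; nvme_intr() relies on this same
 * queue-to-vector mapping when it walks the completion queues.
 */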
3322 dw10.b.q_qid = cq->ncq_id;
3323 dw10.b.q_qsize = cq->ncq_nentry - 1;
3324
3325 c_dw11.b.cq_pc = 1;
3326 c_dw11.b.cq_ien = 1;
3327 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt;
3328
3329 cmd->nc_sqid = 0;
3330 cmd->nc_callback = nvme_wakeup_cmd;
3331 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
3332 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3333 cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
3334 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress;
3335
3336 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3337
3338 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3339 dev_err(nvme->n_dip, CE_WARN,
3340 "!CREATE CQUEUE failed with sct = %x, sc = %x",
3341 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3342 }
3343
3344 nvme_free_cmd(cmd);
3345
3346 return (ret);
3347 }
3348
3349 static int
3350 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
3351 {
3352 nvme_cq_t *cq = qp->nq_cq;
3353 nvme_cmd_t *cmd;
3354 nvme_create_queue_dw10_t dw10 = { 0 };
3355 nvme_create_sq_dw11_t s_dw11 = { 0 };
3356 int ret;
3357
3358 /*
3359 * It is possible to have more qpairs than completion queues,
3360 * and when the idx > ncq_id, that completion queue is shared
3361 * and has already been created.
3362 */
3363 if (idx <= cq->ncq_id &&
3364 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS)
3365 return (DDI_FAILURE);
3366
3367 dw10.b.q_qid = idx;
3368 dw10.b.q_qsize = qp->nq_nentry - 1;
3369
3370 s_dw11.b.sq_pc = 1;
3371 s_dw11.b.sq_cqid = cq->ncq_id;
3372
3373 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
3374 cmd->nc_sqid = 0;
3375 cmd->nc_callback = nvme_wakeup_cmd;
3376 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
3377 cmd->nc_sqe.sqe_cdw10 = dw10.r;
3378 cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
3379 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
3380
3381 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
3382
3383 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
3384 dev_err(nvme->n_dip, CE_WARN,
3385 "!CREATE SQUEUE failed with sct = %x, sc = %x",
3386 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
3387 }
3388
3389 nvme_free_cmd(cmd);
3390
3391 return (ret);
3392 }
3393
3394 static boolean_t
3395 nvme_reset(nvme_t *nvme, boolean_t quiesce)
3396 {
3397 nvme_reg_csts_t csts;
3398 int i;
3399
3400 nvme_put32(nvme, NVME_REG_CC, 0);
3401
3402 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3403 if (csts.b.csts_rdy == 1) {
3404 nvme_put32(nvme, NVME_REG_CC, 0);
3405
3406 /*
3407 * The timeout value is from the Controller Capabilities
3408 * register (CAP.TO, section 3.1.1). This is the worst case
3409 * time to wait for CSTS.RDY to transition from 1 to 0 after
3410 * CC.EN transitions from 1 to 0.
3411 *
3412 * The timeout units are in 500 ms units, and we are delaying
3413 * in 50ms chunks, hence counting to n_timeout * 10.
3414 */
3415 for (i = 0; i < nvme->n_timeout * 10; i++) {
3416 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3417 if (csts.b.csts_rdy == 0)
3418 break;
3419
3420 /*
3421 * Quiescing drivers should not use locks or timeouts,
3422 * so if this is the quiesce path, use a quiesce-safe
3423 * delay.
3424 */
3425 if (quiesce) {
3426 drv_usecwait(50000);
3427 } else {
3428 delay(drv_usectohz(50000));
3429 }
3430 }
3431 }
3432
3433 nvme_put32(nvme, NVME_REG_AQA, 0);
3434 nvme_put32(nvme, NVME_REG_ASQ, 0);
3435 nvme_put32(nvme, NVME_REG_ACQ, 0);
3436
3437 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3438 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
3439 }
3440
3441 static void
3442 nvme_shutdown(nvme_t *nvme, boolean_t quiesce)
3443 {
3444 nvme_reg_cc_t cc;
3445 nvme_reg_csts_t csts;
3446 int i;
3447
3448 cc.r = nvme_get32(nvme, NVME_REG_CC);
3449 cc.b.cc_shn = NVME_CC_SHN_NORMAL;
3450 nvme_put32(nvme, NVME_REG_CC, cc.r);
3451
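/*
 * Wait up to roughly one second (10 iterations of 100ms) for the controller
 * to report that shutdown processing has completed.
 */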
3452 for (i = 0; i < 10; i++) {
3453 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
3454 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
3455 break;
3456
3457 if (quiesce) {
3458 drv_usecwait(100000);
3459 } else {
3460 delay(drv_usectohz(100000));
3461 }
3462 }
3463 }
3464
3465 /*
3466 * Return length of string without trailing spaces.
3467 */
3468 static int
3469 nvme_strlen(const char *str, int len)
3470 {
3471 if (len <= 0)
3472 return (0);
3473
3474 while (str[--len] == ' ')
3475 ;
3476
3477 return (++len);
3478 }
3479
3480 static void
3481 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val)
3482 {
3483 ulong_t bsize = 0;
3484 char *msg = "";
3485
3486 if (ddi_strtoul(val, NULL, 0, &bsize) != 0)
3487 goto err;
3488
3489 if (!ISP2(bsize)) {
3490 msg = ": not a power of 2";
3491 goto err;
3492 }
3493
3494 if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) {
3495 msg = ": too low";
3496 goto err;
3497 }
3498
3499 nvme->n_min_block_size = bsize;
3500 return;
3501
3502 err:
3503 dev_err(nvme->n_dip, CE_WARN,
3504 "!nvme-config-list: ignoring invalid min-phys-block-size '%s' "
3505 "for model '%s'%s", val, model, msg);
3506
3507 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
3508 }
3509
3510 static void
3511 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val,
3512 boolean_t *b)
3513 {
3514 if (strcmp(val, "on") == 0 ||
3515 strcmp(val, "true") == 0)
3516 *b = B_TRUE;
3517 else if (strcmp(val, "off") == 0 ||
3518 strcmp(val, "false") == 0)
3519 *b = B_FALSE;
3520 else
3521 dev_err(nvme->n_dip, CE_WARN,
3522 "!nvme-config-list: invalid value for %s '%s'"
3523 " for model '%s', ignoring", name, val, model);
3524 }
3525
3526 static void
3527 nvme_config_list(nvme_t *nvme)
3528 {
3529 char **config_list;
3530 uint_t nelem;
3531 int rv, i;
3532
3533 /*
3534 * We're following the pattern of 'sd-config-list' here, but extend it.
3535 * Instead of two strings per entry we have three: "model", "fwrev",
3536 * and "name-value-list".
3537 */
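/*
 * A hypothetical nvme.conf entry (the model and firmware revision strings
 * below are made up; they must match the controller's IDENTIFY data, ignoring
 * trailing spaces) could look like this:
 *
 *	nvme-config-list =
 *	    "EXAMPLE MODEL", "FW1.0,FW1.1",
 *	    "min-phys-block-size:4096,volatile-write-cache:off";
 */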
3538 rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip,
3539 DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem);
3540
3541 if (rv != DDI_PROP_SUCCESS) {
3542 if (rv == DDI_PROP_CANNOT_DECODE) {
3543 dev_err(nvme->n_dip, CE_WARN,
3544 "!nvme-config-list: cannot be decoded");
3545 }
3546
3547 return;
3548 }
3549
3550 if ((nelem % 3) != 0) {
3551 dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be "
3552 "triplets of <model>/<fwrev>/<name-value-list> strings ");
3553 goto out;
3554 }
3555
3556 for (i = 0; i < nelem; i += 3) {
3557 char *model = config_list[i];
3558 char *fwrev = config_list[i + 1];
3559 char *nvp, *save_nv;
3560 int id_model_len, id_fwrev_len;
3561
3562 id_model_len = nvme_strlen(nvme->n_idctl->id_model,
3563 sizeof (nvme->n_idctl->id_model));
3564
3565 if (strlen(model) != id_model_len)
3566 continue;
3567
3568 if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0)
3569 continue;
3570
3571 id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev,
3572 sizeof (nvme->n_idctl->id_fwrev));
3573
3574 if (strlen(fwrev) != 0) {
3575 boolean_t match = B_FALSE;
3576 char *fwr, *last_fw;
3577
3578 for (fwr = strtok_r(fwrev, ",", &last_fw);
3579 fwr != NULL;
3580 fwr = strtok_r(NULL, ",", &last_fw)) {
3581 if (strlen(fwr) != id_fwrev_len)
3582 continue;
3583
3584 if (strncmp(fwr, nvme->n_idctl->id_fwrev,
3585 id_fwrev_len) == 0)
3586 match = B_TRUE;
3587 }
3588
3589 if (!match)
3590 continue;
3591 }
3592
3593 /*
3594 * We should now have a comma-separated list of name:value
3595 * pairs.
3596 */
3597 for (nvp = strtok_r(config_list[i + 2], ",", &save_nv);
3598 nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) {
3599 char *name = nvp;
3600 char *val = strchr(nvp, ':');
3601
3602 if (val == NULL || name == val) {
3603 dev_err(nvme->n_dip, CE_WARN,
3604 "!nvme-config-list: <name-value-list> "
3605 "for model '%s' is malformed", model);
3606 goto out;
3607 }
3608
3609 /*
3610 * Null-terminate 'name', move 'val' past ':' sep.
3611 */
3612 *val++ = '\0';
3613
3614 /*
3615 * Process the name:val pairs that we know about.
3616 */
3617 if (strcmp(name, "ignore-unknown-vendor-status") == 0) {
3618 nvme_config_boolean(nvme, model, name, val,
3619 &nvme->n_ignore_unknown_vendor_status);
3620 } else if (strcmp(name, "min-phys-block-size") == 0) {
3621 nvme_config_min_block_size(nvme, model, val);
3622 } else if (strcmp(name, "volatile-write-cache") == 0) {
3623 nvme_config_boolean(nvme, model, name, val,
3624 &nvme->n_write_cache_enabled);
3625 } else {
3626 /*
3627 * Unknown 'name'.
3628 */
3629 dev_err(nvme->n_dip, CE_WARN,
3630 "!nvme-config-list: unknown config '%s' "
3631 "for model '%s', ignoring", name, model);
3632 }
3633 }
3634 }
3635
3636 out:
3637 ddi_prop_free(config_list);
3638 }
3639
3640 static void
3641 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
3642 {
3643 /*
3644 * Section 7.7 of the spec describes how to get a unique ID for
3645 * the controller: the vendor ID, the model name and the serial
3646 * number shall be unique when combined.
3647 *
3648 * If a namespace has no EUI64 we use the above and add the hex
3649 * namespace ID to get a unique ID for the namespace.
3650 */
3651 char model[sizeof (nvme->n_idctl->id_model) + 1];
3652 char serial[sizeof (nvme->n_idctl->id_serial) + 1];
3653
3654 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
3655 bcopy(nvme->n_idctl->id_serial, serial,
3656 sizeof (nvme->n_idctl->id_serial));
3657
3658 model[sizeof (nvme->n_idctl->id_model)] = '\0';
3659 serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
3660
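/*
 * For a hypothetical controller this yields a devid of the form
 * "8086-EXAMPLE MODEL-EXAMPLESERIAL-1", i.e. vid-model-serial-nsid with the
 * vendor ID and namespace ID rendered in hex.
 */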
3661 nvme_nsid2ns(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X",
3662 nvme->n_idctl->id_vid, model, serial, nsid);
3663 }
3664
3665 static nvme_identify_nsid_list_t *
3666 nvme_update_nsid_list(nvme_t *nvme, int cns)
3667 {
3668 nvme_identify_nsid_list_t *nslist;
3669
3670 /*
3671 * We currently don't handle cases where there are more than
3672 * 1024 active namespaces, requiring several IDENTIFY commands.
3673 */
3674 if (nvme_identify_int(nvme, 0, cns, (void **)&nslist))
3675 return (nslist);
3676
3677 return (NULL);
3678 }
3679
3680 nvme_namespace_t *
3681 nvme_nsid2ns(nvme_t *nvme, uint32_t nsid)
3682 {
3683 ASSERT3U(nsid, !=, 0);
3684 ASSERT3U(nsid, <=, nvme->n_namespace_count);
3685 return (&nvme->n_ns[nsid - 1]);
3686 }
3687
3688 static boolean_t
3689 nvme_allocated_ns(nvme_namespace_t *ns)
3690 {
3691 nvme_t *nvme = ns->ns_nvme;
3692 uint32_t i;
3693
3694 ASSERT(nvme_mgmt_lock_held(nvme));
3695
3696 /*
3697 * If supported, update the list of allocated namespace IDs.
3698 */
3699 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) &&
3700 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
3701 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
3702 NVME_IDENTIFY_NSID_ALLOC_LIST);
3703 boolean_t found = B_FALSE;
3704
3705 /*
3706 * When namespace management is supported, this really shouldn't
3707 * be NULL. Treat all namespaces as allocated if it is.
3708 */
3709 if (nslist == NULL)
3710 return (B_TRUE);
3711
3712 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
3713 if (ns->ns_id == 0)
3714 break;
3715
3716 if (ns->ns_id == nslist->nl_nsid[i])
3717 found = B_TRUE;
3718 }
3719
3720 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
3721 return (found);
3722 } else {
3723 /*
3724 * If namespace management isn't supported, report all
3725 * namespaces as allocated.
3726 */
3727 return (B_TRUE);
3728 }
3729 }
3730
3731 static boolean_t
3732 nvme_active_ns(nvme_namespace_t *ns)
3733 {
3734 nvme_t *nvme = ns->ns_nvme;
3735 uint64_t *ptr;
3736 uint32_t i;
3737
3738 ASSERT(nvme_mgmt_lock_held(nvme));
3739
3740 /*
3741 * If supported, update the list of active namespace IDs.
3742 */
3743 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) {
3744 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
3745 NVME_IDENTIFY_NSID_LIST);
3746 boolean_t found = B_FALSE;
3747
3748 /*
3749 * When namespace management is supported, this really shouldn't
3750 * be NULL. Treat all namespaces as active if it is.
3751 */
3752 if (nslist == NULL)
3753 return (B_TRUE);
3754
3755 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
3756 if (ns->ns_id == 0)
3757 break;
3758
3759 if (ns->ns_id == nslist->nl_nsid[i])
3760 found = B_TRUE;
3761 }
3762
3763 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
3764 return (found);
3765 }
3766
3767 /*
3768 * Workaround for revision 1.0:
3769 * Check whether the IDENTIFY NAMESPACE data is zero-filled.
3770 */
3771 for (ptr = (uint64_t *)ns->ns_idns;
3772 ptr != (uint64_t *)(ns->ns_idns + 1);
3773 ptr++) {
3774 if (*ptr != 0) {
3775 return (B_TRUE);
3776 }
3777 }
3778
3779 return (B_FALSE);
3780 }
3781
3782 static int
3783 nvme_init_ns(nvme_t *nvme, uint32_t nsid)
3784 {
3785 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
3786 nvme_identify_nsid_t *idns;
3787 boolean_t was_ignored;
3788 int last_rp;
3789
3790 ns->ns_nvme = nvme;
3791
3792 ASSERT(nvme_mgmt_lock_held(nvme));
3793
3794 /*
3795 * If we rescan a namespace after boot and the IDENTIFY command fails,
3796 * we'd be left in a bad spot. We need to do something about this
3797 * longer term, but it's not clear how exactly we would recover right
3798 * now.
3799 */
3800 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
3801 (void **)&idns)) {
3802 dev_err(nvme->n_dip, CE_WARN,
3803 "!failed to identify namespace %d", nsid);
3804 return (DDI_FAILURE);
3805 }
3806
3807 if (ns->ns_idns != NULL)
3808 kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t));
3809
3810 ns->ns_idns = idns;
3811 ns->ns_id = nsid;
3812
3813 was_ignored = ns->ns_ignore;
3814
3815 ns->ns_allocated = nvme_allocated_ns(ns);
3816 ns->ns_active = nvme_active_ns(ns);
3817
3818 ns->ns_block_count = idns->id_nsize;
3819 ns->ns_block_size =
3820 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
3821 ns->ns_best_block_size = ns->ns_block_size;
3822
3823 /*
3824 * Get the EUI64 if present.
3825 */
3826 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
3827 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64));
3828
3829 /*
3830 * Get the NGUID if present.
3831 */
3832 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2))
3833 bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid));
3834
3835 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
3836 if (*(uint64_t *)ns->ns_eui64 == 0)
3837 nvme_prepare_devid(nvme, ns->ns_id);
3838
3839 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%u", ns->ns_id);
3840
3841 /*
3842 * Find the LBA format with no metadata and the best relative
3843 * performance. A value of 3 means "degraded", 0 is best.
3844 */
3845 last_rp = 3;
3846 for (int j = 0; j <= idns->id_nlbaf; j++) {
3847 if (idns->id_lbaf[j].lbaf_lbads == 0)
3848 break;
3849 if (idns->id_lbaf[j].lbaf_ms != 0)
3850 continue;
3851 if (idns->id_lbaf[j].lbaf_rp >= last_rp)
3852 continue;
3853 last_rp = idns->id_lbaf[j].lbaf_rp;
3854 ns->ns_best_block_size =
3855 1 << idns->id_lbaf[j].lbaf_lbads;
3856 }
3857
3858 if (ns->ns_best_block_size < nvme->n_min_block_size)
3859 ns->ns_best_block_size = nvme->n_min_block_size;
3860
3861 was_ignored = ns->ns_ignore;
3862
3863 /*
3864 * We currently don't support namespaces that are inactive, or use
3865 * either:
3866 * - protection information
3867 * - illegal block size (< 512)
3868 */
3869 if (!ns->ns_active) {
3870 ns->ns_ignore = B_TRUE;
3871 } else if (idns->id_dps.dp_pinfo) {
3872 dev_err(nvme->n_dip, CE_WARN,
3873 "!ignoring namespace %d, unsupported feature: "
3874 "pinfo = %d", nsid, idns->id_dps.dp_pinfo);
3875 ns->ns_ignore = B_TRUE;
3876 } else if (ns->ns_block_size < 512) {
3877 dev_err(nvme->n_dip, CE_WARN,
3878 "!ignoring namespace %d, unsupported block size %"PRIu64,
3879 nsid, (uint64_t)ns->ns_block_size);
3880 ns->ns_ignore = B_TRUE;
3881 } else {
3882 ns->ns_ignore = B_FALSE;
3883 }
3884
3885 /*
3886 * Keep a count of namespaces which are attachable.
3887 * See comments in nvme_bd_driveinfo() to understand its effect.
3888 */
3889 if (was_ignored) {
3890 /*
3891 * Previously ignored, but now not. Count it.
3892 */
3893 if (!ns->ns_ignore)
3894 nvme->n_namespaces_attachable++;
3895 } else {
3896 /*
3897 * Wasn't ignored previously, but now needs to be.
3898 * Discount it.
3899 */
3900 if (ns->ns_ignore)
3901 nvme->n_namespaces_attachable--;
3902 }
3903
3904 return (DDI_SUCCESS);
3905 }
3906
3907 static boolean_t
3908 nvme_attach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
3909 {
3910 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
3911 int ret;
3912
3913 ASSERT(nvme_mgmt_lock_held(nvme));
3914
3915 if (ns->ns_ignore) {
3916 return (nvme_ioctl_error(com, NVME_IOCTL_E_UNSUP_ATTACH_NS,
3917 0, 0));
3918 }
3919
3920 if (ns->ns_bd_hdl == NULL) {
3921 bd_ops_t ops = nvme_bd_ops;
3922
3923 if (!nvme->n_idctl->id_oncs.on_dset_mgmt)
3924 ops.o_free_space = NULL;
3925
3926 ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr,
3927 KM_SLEEP);
3928
3929 if (ns->ns_bd_hdl == NULL) {
3930 dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev "
3931 "handle for namespace id %u", com->nioc_nsid);
3932 return (nvme_ioctl_error(com,
3933 NVME_IOCTL_E_BLKDEV_ATTACH, 0, 0));
3934 }
3935 }
3936
3937 nvme_mgmt_bd_start(nvme);
3938 ret = bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl);
3939 nvme_mgmt_bd_end(nvme);
3940 if (ret != DDI_SUCCESS) {
3941 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_ATTACH,
3942 0, 0));
3943 }
3944
3945 ns->ns_attached = B_TRUE;
3946
3947 return (B_TRUE);
3948 }
3949
3950 static boolean_t
3951 nvme_detach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
3952 {
3953 nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
3954 int ret;
3955
3956 ASSERT(nvme_mgmt_lock_held(nvme));
3957
3958 if (ns->ns_ignore || !ns->ns_attached)
3959 return (B_TRUE);
3960
3961 nvme_mgmt_bd_start(nvme);
3962 ASSERT3P(ns->ns_bd_hdl, !=, NULL);
3963 ret = bd_detach_handle(ns->ns_bd_hdl);
3964 nvme_mgmt_bd_end(nvme);
3965
3966 if (ret != DDI_SUCCESS) {
3967 return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_DETACH, 0,
3968 0));
3969 }
3970
3971 ns->ns_attached = B_FALSE;
3972 return (B_TRUE);
3973
3974 }
3975
3976 /*
3977 * Rescan the namespace information associated with the namespace(s)
3978 * indicated by nsid. They should not be attached to blkdev right now.
3979 */
3980 static void
3981 nvme_rescan_ns(nvme_t *nvme, uint32_t nsid)
3982 {
3983 ASSERT(nvme_mgmt_lock_held(nvme));
3984 ASSERT3U(nsid, !=, 0);
3985
3986 if (nsid != NVME_NSID_BCAST) {
3987 nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
3988
3989 ASSERT3U(ns->ns_attached, ==, B_FALSE);
3990 (void) nvme_init_ns(nvme, nsid);
3991 return;
3992 }
3993
3994 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
3995 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
3996
3997 ASSERT3U(ns->ns_attached, ==, B_FALSE);
3998 (void) nvme_init_ns(nvme, i);
3999 }
4000 }
4001
4002 typedef struct nvme_quirk_table {
4003 uint16_t nq_vendor_id;
4004 uint16_t nq_device_id;
4005 nvme_quirk_t nq_quirks;
4006 } nvme_quirk_table_t;
4007
4008 static const nvme_quirk_table_t nvme_quirks[] = {
4009 { 0x1987, 0x5018, NVME_QUIRK_START_CID }, /* Phison E18 */
4010 };
4011
4012 static void
4013 nvme_detect_quirks(nvme_t *nvme)
4014 {
4015 for (uint_t i = 0; i < ARRAY_SIZE(nvme_quirks); i++) {
4016 const nvme_quirk_table_t *nqt = &nvme_quirks[i];
4017
4018 if (nqt->nq_vendor_id == nvme->n_vendor_id &&
4019 nqt->nq_device_id == nvme->n_device_id) {
4020 nvme->n_quirks = nqt->nq_quirks;
4021 return;
4022 }
4023 }
4024 }
4025
4026 static int
4027 nvme_init(nvme_t *nvme)
4028 {
4029 nvme_reg_cc_t cc = { 0 };
4030 nvme_reg_aqa_t aqa = { 0 };
4031 nvme_reg_asq_t asq = { 0 };
4032 nvme_reg_acq_t acq = { 0 };
4033 nvme_reg_cap_t cap;
4034 nvme_reg_vs_t vs;
4035 nvme_reg_csts_t csts;
4036 int i = 0;
4037 uint16_t nqueues;
4038 uint_t tq_threads;
4039 char model[sizeof (nvme->n_idctl->id_model) + 1];
4040 char *vendor, *product;
4041 uint32_t nsid;
4042
4043 /* Check controller version */
4044 vs.r = nvme_get32(nvme, NVME_REG_VS);
4045 nvme->n_version.v_major = vs.b.vs_mjr;
4046 nvme->n_version.v_minor = vs.b.vs_mnr;
4047 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d\n",
4048 nvme->n_version.v_major, nvme->n_version.v_minor);
4049
4050 if (nvme->n_version.v_major > nvme_version_major) {
4051 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x",
4052 nvme_version_major);
4053 if (nvme->n_strict_version)
4054 goto fail;
4055 }
4056
4057 /* retrieve controller configuration */
4058 cap.r = nvme_get64(nvme, NVME_REG_CAP);
4059
4060 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
4061 dev_err(nvme->n_dip, CE_WARN,
4062 "!NVM command set not supported by hardware");
4063 goto fail;
4064 }
4065
4066 nvme->n_nssr_supported = cap.b.cap_nssrs;
4067 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
4068 nvme->n_timeout = cap.b.cap_to;
4069 nvme->n_arbitration_mechanisms = cap.b.cap_ams;
4070 nvme->n_cont_queues_reqd = cap.b.cap_cqr;
4071 nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
4072
4073 /*
4074 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
4075 * the base page size of 4k (1<<12), so add 12 here to get the real
4076 * page size value.
4077 */
4078 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
4079 cap.b.cap_mpsmax + 12);
4080 nvme->n_pagesize = 1UL << (nvme->n_pageshift);
4081
4082 /*
4083 * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
4084 */
4085 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
4086 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
4087
4088 /*
4089 * Set up PRP DMA to transfer 1 page-aligned page at a time.
4090 * Maxxfer may be increased once we have identified the controller limits.
4091 */
4092 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
4093 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
4094 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
4095 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
4096
4097 /*
4098 * Reset controller if it's still in ready state.
4099 */
4100 if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
4101 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
4102 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
4103 nvme->n_dead = B_TRUE;
4104 goto fail;
4105 }
4106
4107 /*
4108 * Create the cq array with one completion queue to be assigned
4109 * to the admin queue pair and a limited number of taskqs (4).
4110 */
4111 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) !=
4112 DDI_SUCCESS) {
4113 dev_err(nvme->n_dip, CE_WARN,
4114 "!failed to pre-allocate admin completion queue");
4115 goto fail;
4116 }
4117 /*
4118 * Create the admin queue pair.
4119 */
4120 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
4121 != DDI_SUCCESS) {
4122 dev_err(nvme->n_dip, CE_WARN,
4123 "!unable to allocate admin qpair");
4124 goto fail;
4125 }
4126 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
4127 nvme->n_ioq[0] = nvme->n_adminq;
4128
4129 if (nvme->n_quirks & NVME_QUIRK_START_CID)
4130 nvme->n_adminq->nq_next_cmd++;
4131
4132 nvme->n_progress |= NVME_ADMIN_QUEUE;
4133
4134 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4135 "admin-queue-len", nvme->n_admin_queue_len);
4136
4137 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
4138 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
4139 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress;
4140
4141 ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
4142 ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
4143
4144 nvme_put32(nvme, NVME_REG_AQA, aqa.r);
4145 nvme_put64(nvme, NVME_REG_ASQ, asq);
4146 nvme_put64(nvme, NVME_REG_ACQ, acq);
4147
4148 cc.b.cc_ams = 0; /* use Round-Robin arbitration */
4149 cc.b.cc_css = 0; /* use NVM command set */
4150 cc.b.cc_mps = nvme->n_pageshift - 12;
4151 cc.b.cc_shn = 0; /* no shutdown in progress */
4152 cc.b.cc_en = 1; /* enable controller */
4153 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */
4154 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */
4155
4156 nvme_put32(nvme, NVME_REG_CC, cc.r);
4157
4158 /*
4159 * Wait for the controller to become ready.
4160 */
4161 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
4162 if (csts.b.csts_rdy == 0) {
4163 for (i = 0; i != nvme->n_timeout * 10; i++) {
4164 delay(drv_usectohz(50000));
4165 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
4166
4167 if (csts.b.csts_cfs == 1) {
4168 dev_err(nvme->n_dip, CE_WARN,
4169 "!controller fatal status at init");
4170 ddi_fm_service_impact(nvme->n_dip,
4171 DDI_SERVICE_LOST);
4172 nvme->n_dead = B_TRUE;
4173 goto fail;
4174 }
4175
4176 if (csts.b.csts_rdy == 1)
4177 break;
4178 }
4179 }
4180
4181 if (csts.b.csts_rdy == 0) {
4182 dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
4183 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
4184 nvme->n_dead = B_TRUE;
4185 goto fail;
4186 }
4187
4188 /*
4189 * Assume an abort command limit of 1. We'll destroy and re-init
4190 * that later when we know the true abort command limit.
4191 */
4192 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
4193
4194 /*
4195 * Set up initial interrupt for admin queue.
4196 */
4197 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
4198 != DDI_SUCCESS) &&
4199 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
4200 != DDI_SUCCESS) &&
4201 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
4202 != DDI_SUCCESS)) {
4203 dev_err(nvme->n_dip, CE_WARN,
4204 "!failed to set up initial interrupt");
4205 goto fail;
4206 }
4207
4208 /*
4209 * Post an asynchronous event command to catch errors.
4210 * We assume the asynchronous events are supported as required by
4211 * specification (Figure 40 in section 5 of NVMe 1.2).
4212 * However, since at least qemu does not follow the specification,
4213 * we need a mechanism to protect ourselves.
4214 */
4215 nvme->n_async_event_supported = B_TRUE;
4216 nvme_async_event(nvme);
4217
4218 /*
4219 * Identify Controller
4220 */
4221 if (!nvme_identify_int(nvme, 0, NVME_IDENTIFY_CTRL,
4222 (void **)&nvme->n_idctl)) {
4223 dev_err(nvme->n_dip, CE_WARN, "!failed to identify controller");
4224 goto fail;
4225 }
4226
4227 /*
4228 * Get the common namespace information if available. If not, we use the
4229 * information for nsid 1.
4230 */
4231 if (nvme_ctrl_atleast(nvme, &nvme_vers_1v2) &&
4232 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
4233 nsid = NVME_NSID_BCAST;
4234 } else {
4235 nsid = 1;
4236 }
4237
4238 if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
4239 (void **)&nvme->n_idcomns)) {
4240 dev_err(nvme->n_dip, CE_WARN, "!failed to identify common "
4241 "namespace information");
4242 goto fail;
4243 }
4244 /*
4245 * Process nvme-config-list (if present) in nvme.conf.
4246 */
4247 nvme_config_list(nvme);
4248
4249 /*
4250 * Get Vendor & Product ID
4251 */
4252 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
4253 model[sizeof (nvme->n_idctl->id_model)] = '\0';
4254 sata_split_model(model, &vendor, &product);
4255
4256 if (vendor == NULL)
4257 nvme->n_vendor = strdup("NVMe");
4258 else
4259 nvme->n_vendor = strdup(vendor);
4260
4261 nvme->n_product = strdup(product);
4262
4263 /*
4264 * Get controller limits.
4265 */
4266 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
4267 MIN(nvme->n_admin_queue_len / 10,
4268 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
4269
4270 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4271 "async-event-limit", nvme->n_async_event_limit);
4272
4273 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
4274
4275 /*
4276 * Reinitialize the semaphore with the true abort command limit
4277 * supported by the hardware. It's not necessary to disable interrupts
4278 * as only command aborts use the semaphore, and no commands are
4279 * executed or aborted while we're here.
4280 */
4281 sema_destroy(&nvme->n_abort_sema);
4282 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
4283 SEMA_DRIVER, NULL);
4284
4285 nvme->n_progress |= NVME_CTRL_LIMITS;
4286
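/*
 * MDTS is reported as a power-of-two multiple of the page size; a value of
 * zero means the device advertises no limit, in which case we fall back to a
 * generous default of 65536 pages.
 */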
4287 if (nvme->n_idctl->id_mdts == 0)
4288 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
4289 else
4290 nvme->n_max_data_transfer_size =
4291 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
4292
4293 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
4294
4295 /*
4296 * Limit n_max_data_transfer_size to what we can handle in one PRP.
4297 * Chained PRPs are currently unsupported.
4298 *
4299 * This is a no-op on hardware which doesn't support a transfer size
4300 * big enough to require chained PRPs.
4301 */
4302 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
4303 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
4304
4305 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
4306
4307 /*
4308 * Make sure the minimum/maximum queue entry sizes are not
4309 * larger/smaller than the default.
4310 */
4311
4312 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
4313 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
4314 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
4315 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
4316 goto fail;
4317
4318 /*
4319 * Check for the presence of a Volatile Write Cache. If present,
4320 * enable or disable based on the value of the property
4321 * volatile-write-cache-enable (default is enabled).
4322 */
4323 nvme->n_write_cache_present =
4324 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
4325
4326 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4327 "volatile-write-cache-present",
4328 nvme->n_write_cache_present ? 1 : 0);
4329
4330 if (!nvme->n_write_cache_present) {
4331 nvme->n_write_cache_enabled = B_FALSE;
4332 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)
4333 != 0) {
4334 dev_err(nvme->n_dip, CE_WARN,
4335 "!failed to %sable volatile write cache",
4336 nvme->n_write_cache_enabled ? "en" : "dis");
4337 /*
4338 * Assume the cache is (still) enabled.
4339 */
4340 nvme->n_write_cache_enabled = B_TRUE;
4341 }
4342
4343 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
4344 "volatile-write-cache-enable",
4345 nvme->n_write_cache_enabled ? 1 : 0);
4346
4347 /*
4348 * Get number of supported namespaces and allocate namespace array.
4349 */
4350 nvme->n_namespace_count = nvme->n_idctl->id_nn;
4351
4352 if (nvme->n_namespace_count == 0) {
4353 dev_err(nvme->n_dip, CE_WARN,
4354 "!controllers without namespaces are not supported");
4355 goto fail;
4356 }
4357
4358 if (nvme->n_namespace_count > NVME_MINOR_MAX) {
4359 dev_err(nvme->n_dip, CE_WARN,
4360 "!too many namespaces: %d, limiting to %d\n",
4361 nvme->n_namespace_count, NVME_MINOR_MAX);
4362 nvme->n_namespace_count = NVME_MINOR_MAX;
4363 }
4364
4365 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
4366 nvme->n_namespace_count, KM_SLEEP);
4367
4368 /*
4369 * Try to set up MSI/MSI-X interrupts.
4370 */
4371 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
4372 != 0) {
4373 nvme_release_interrupts(nvme);
4374
4375 nqueues = MIN(UINT16_MAX, ncpus);
4376
4377 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
4378 nqueues) != DDI_SUCCESS) &&
4379 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
4380 nqueues) != DDI_SUCCESS)) {
4381 dev_err(nvme->n_dip, CE_WARN,
4382 "!failed to set up MSI/MSI-X interrupts");
4383 goto fail;
4384 }
4385 }
4386
4387 /*
4388 * Create I/O queue pairs.
4389 */
4390
4391 if (nvme_set_nqueues(nvme) != 0) {
4392 dev_err(nvme->n_dip, CE_WARN,
4393 "!failed to set number of I/O queues to %d",
4394 nvme->n_intr_cnt);
4395 goto fail;
4396 }
4397
4398 /*
4399 * Reallocate I/O queue array
4400 */
4401 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
4402 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
4403 (nvme->n_submission_queues + 1), KM_SLEEP);
4404 nvme->n_ioq[0] = nvme->n_adminq;
4405
4406 /*
4407 * There should always be at least as many submission queues
4408 * as completion queues.
4409 */
4410 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues);
4411
4412 nvme->n_ioq_count = nvme->n_submission_queues;
4413
4414 nvme->n_io_squeue_len =
4415 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries);
4416
4417 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len",
4418 nvme->n_io_squeue_len);
4419
4420 /*
4421 * Pre-allocate completion queues.
4422 * When there are the same number of submission and completion
4423 * queues there is no value in having a larger completion
4424 * queue length.
4425 */
4426 if (nvme->n_submission_queues == nvme->n_completion_queues)
4427 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4428 nvme->n_io_squeue_len);
4429
4430 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
4431 nvme->n_max_queue_entries);
4432
4433 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
4434 nvme->n_io_cqueue_len);
4435
4436 /*
4437 * Assign an equal number of taskq threads to each completion
4438 * queue, capping the total number of threads to the number
4439 * of CPUs.
4440 */
4441 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues;
4442
4443 /*
4444 * In case the calculation above is zero, we need at least one
4445 * thread per completion queue.
4446 */
4447 tq_threads = MAX(1, tq_threads);
4448
4449 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
4450 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
4451 dev_err(nvme->n_dip, CE_WARN,
4452 "!failed to pre-allocate completion queues");
4453 goto fail;
4454 }
4455
4456 /*
4457 * If we use fewer completion queues than interrupt vectors, return
4458 * some of the interrupt vectors back to the system.
4459 */
4460 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
4461 nvme_release_interrupts(nvme);
4462
4463 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
4464 nvme->n_completion_queues + 1) != DDI_SUCCESS) {
4465 dev_err(nvme->n_dip, CE_WARN,
4466 "!failed to reduce number of interrupts");
4467 goto fail;
4468 }
4469 }
4470
4471 /*
4472 * Alloc & register I/O queue pairs
4473 */
4474
4475 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
4476 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
4477 &nvme->n_ioq[i], i) != DDI_SUCCESS) {
4478 dev_err(nvme->n_dip, CE_WARN,
4479 "!unable to allocate I/O qpair %d", i);
4480 goto fail;
4481 }
4482
4483 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
4484 dev_err(nvme->n_dip, CE_WARN,
4485 "!unable to create I/O qpair %d", i);
4486 goto fail;
4487 }
4488 }
4489
4490 /*
4491 * Post more asynchronous events commands to reduce event reporting
4492 * latency as suggested by the spec.
4493 */
4494 if (nvme->n_async_event_supported) {
4495 for (i = 1; i != nvme->n_async_event_limit; i++)
4496 nvme_async_event(nvme);
4497 }
4498
4499 return (DDI_SUCCESS);
4500
4501 fail:
4502 (void) nvme_reset(nvme, B_FALSE);
4503 return (DDI_FAILURE);
4504 }
4505
4506 static uint_t
4507 nvme_intr(caddr_t arg1, caddr_t arg2)
4508 {
4509 nvme_t *nvme = (nvme_t *)arg1;
4510 int inum = (int)(uintptr_t)arg2;
4511 int ccnt = 0;
4512 int qnum;
4513
4514 if (inum >= nvme->n_intr_cnt)
4515 return (DDI_INTR_UNCLAIMED);
4516
4517 if (nvme->n_dead) {
4518 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
4519 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
4520 }
4521
4522 /*
4523 * The interrupt vector a queue uses is calculated as queue_idx %
4524 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
4525 * in steps of n_intr_cnt to process all queues using this vector.
4526 */
4527 for (qnum = inum;
4528 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
4529 qnum += nvme->n_intr_cnt) {
4530 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
4531 }
4532
4533 return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
4534 }
4535
4536 static void
4537 nvme_release_interrupts(nvme_t *nvme)
4538 {
4539 int i;
4540
4541 for (i = 0; i < nvme->n_intr_cnt; i++) {
4542 if (nvme->n_inth[i] == NULL)
4543 break;
4544
4545 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4546 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
4547 else
4548 (void) ddi_intr_disable(nvme->n_inth[i]);
4549
4550 (void) ddi_intr_remove_handler(nvme->n_inth[i]);
4551 (void) ddi_intr_free(nvme->n_inth[i]);
4552 }
4553
4554 kmem_free(nvme->n_inth, nvme->n_inth_sz);
4555 nvme->n_inth = NULL;
4556 nvme->n_inth_sz = 0;
4557
4558 nvme->n_progress &= ~NVME_INTERRUPTS;
4559 }
4560
4561 static int
4562 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
4563 {
4564 int nintrs, navail, count;
4565 int ret;
4566 int i;
4567
4568 if (nvme->n_intr_types == 0) {
4569 ret = ddi_intr_get_supported_types(nvme->n_dip,
4570 &nvme->n_intr_types);
4571 if (ret != DDI_SUCCESS) {
4572 dev_err(nvme->n_dip, CE_WARN,
4573 "!%s: ddi_intr_get_supported types failed",
4574 __func__);
4575 return (ret);
4576 }
4577 #ifdef __x86
4578 if (get_hwenv() == HW_VMWARE)
4579 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
4580 #endif
4581 }
4582
4583 if ((nvme->n_intr_types & intr_type) == 0)
4584 return (DDI_FAILURE);
4585
4586 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
4587 if (ret != DDI_SUCCESS) {
4588 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
4589 __func__);
4590 return (ret);
4591 }
4592
4593 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
4594 if (ret != DDI_SUCCESS) {
4595 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
4596 __func__);
4597 return (ret);
4598 }
4599
4600 /* We want at most one interrupt per queue pair. */
4601 if (navail > nqpairs)
4602 navail = nqpairs;
4603
4604 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
4605 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
4606
4607 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
4608 &count, 0);
4609 if (ret != DDI_SUCCESS) {
4610 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
4611 __func__);
4612 goto fail;
4613 }
4614
4615 nvme->n_intr_cnt = count;
4616
4617 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
4618 if (ret != DDI_SUCCESS) {
4619 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
4620 __func__);
4621 goto fail;
4622 }
4623
4624 for (i = 0; i < count; i++) {
4625 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
4626 (void *)nvme, (void *)(uintptr_t)i);
4627 if (ret != DDI_SUCCESS) {
4628 dev_err(nvme->n_dip, CE_WARN,
4629 "!%s: ddi_intr_add_handler failed", __func__);
4630 goto fail;
4631 }
4632 }
4633
4634 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
4635
4636 for (i = 0; i < count; i++) {
4637 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
4638 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
4639 else
4640 ret = ddi_intr_enable(nvme->n_inth[i]);
4641
4642 if (ret != DDI_SUCCESS) {
4643 dev_err(nvme->n_dip, CE_WARN,
4644 "!%s: enabling interrupt %d failed", __func__, i);
4645 goto fail;
4646 }
4647 }
4648
4649 nvme->n_intr_type = intr_type;
4650
4651 nvme->n_progress |= NVME_INTERRUPTS;
4652
4653 return (DDI_SUCCESS);
4654
4655 fail:
4656 nvme_release_interrupts(nvme);
4657
4658 return (ret);
4659 }
4660
4661 static int
4662 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
4663 {
4664 _NOTE(ARGUNUSED(arg));
4665
4666 pci_ereport_post(dip, fm_error, NULL);
4667 return (fm_error->fme_status);
4668 }
4669
4670 static void
4671 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a,
4672 void *b)
4673 {
4674 nvme_t *nvme = a;
4675
4676 nvme_ctrl_mark_dead(nvme, B_TRUE);
4677
4678 /*
4679 * Fail all outstanding commands, including those in the admin queue
4680 * (queue 0).
4681 */
4682 for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) {
4683 nvme_qpair_t *qp = nvme->n_ioq[i];
4684
4685 mutex_enter(&qp->nq_mutex);
4686 for (size_t j = 0; j < qp->nq_nentry; j++) {
4687 nvme_cmd_t *cmd = qp->nq_cmd[j];
4688 nvme_cmd_t *u_cmd;
4689
4690 if (cmd == NULL) {
4691 continue;
4692 }
4693
4694 /*
4695 * Since we have the queue lock held the entire time we
4696 * iterate over it, it's not possible for the queue to
4697 * change underneath us. Thus, we don't need to check
4698 * that the return value of nvme_unqueue_cmd matches the
4699 * requested cmd to unqueue.
4700 */
4701 u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
4702 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq,
4703 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
4704
4705 ASSERT3P(u_cmd, ==, cmd);
4706 }
4707 mutex_exit(&qp->nq_mutex);
4708 }
4709 }
4710
4711 /*
4712 * Open minor management
4713 */
4714 static int
4715 nvme_minor_comparator(const void *l, const void *r)
4716 {
4717 const nvme_minor_t *lm = l;
4718 const nvme_minor_t *rm = r;
4719
4720 if (lm->nm_minor > rm->nm_minor) {
4721 return (1);
4722 } else if (lm->nm_minor < rm->nm_minor) {
4723 return (-1);
4724 } else {
4725 return (0);
4726 }
4727 }
4728
4729 static void
4730 nvme_minor_free(nvme_minor_t *minor)
4731 {
4732 if (minor->nm_minor > 0) {
4733 ASSERT3S(minor->nm_minor, >=, NVME_OPEN_MINOR_MIN);
4734 id_free(nvme_open_minors, minor->nm_minor);
4735 minor->nm_minor = 0;
4736 }
4737 VERIFY0(list_link_active(&minor->nm_ctrl_lock.nli_node));
4738 VERIFY0(list_link_active(&minor->nm_ns_lock.nli_node));
4739 cv_destroy(&minor->nm_cv);
4740 kmem_free(minor, sizeof (nvme_minor_t));
4741 }
4742
4743 static nvme_minor_t *
4744 nvme_minor_find_by_dev(dev_t dev)
4745 {
4746 id_t id = (id_t)getminor(dev);
4747 nvme_minor_t search = { .nm_minor = id };
4748 nvme_minor_t *ret;
4749
4750 mutex_enter(&nvme_open_minors_mutex);
4751 ret = avl_find(&nvme_open_minors_avl, &search, NULL);
4752 mutex_exit(&nvme_open_minors_mutex);
4753
4754 return (ret);
4755 }
4756
4757 static int
4758 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
4759 {
4760 nvme_t *nvme;
4761 int instance;
4762 int nregs;
4763 off_t regsize;
4764 char name[32];
4765 boolean_t attached_ns;
4766
4767 if (cmd != DDI_ATTACH)
4768 return (DDI_FAILURE);
4769
4770 instance = ddi_get_instance(dip);
4771
4772 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
4773 return (DDI_FAILURE);
4774
4775 nvme = ddi_get_soft_state(nvme_state, instance);
4776 ddi_set_driver_private(dip, nvme);
4777 nvme->n_dip = dip;
4778
4779 /*
4780 * Map PCI config space
4781 */
4782 if (pci_config_setup(dip, &nvme->n_pcicfg_handle) != DDI_SUCCESS) {
4783 dev_err(dip, CE_WARN, "!failed to map PCI config space");
4784 goto fail;
4785 }
4786 nvme->n_progress |= NVME_PCI_CONFIG;
4787
4788 /*
4789 * Get the various PCI IDs from config space
4790 */
4791 nvme->n_vendor_id =
4792 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_VENID);
4793 nvme->n_device_id =
4794 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_DEVID);
4795 nvme->n_revision_id =
4796 pci_config_get8(nvme->n_pcicfg_handle, PCI_CONF_REVID);
4797 nvme->n_subsystem_device_id =
4798 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBSYSID);
4799 nvme->n_subsystem_vendor_id =
4800 pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBVENID);
4801
4802 nvme_detect_quirks(nvme);
4803
4804 /*
4805 * Set up event handlers for hot removal. While npe(4D) supports the hot
4806 * removal event being injected for devices, the same is not true of all
4807 * of our possible parents (i.e. pci(4D) as of this writing). The most
4808 * common case this shows up is in some virtualization environments. We
4809 * should treat this as non-fatal so that devices still work, but leave
4810 * this set up in such a way that if a nexus does grow support for this
4811 * we're good to go.
4812 */
4813 if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT,
4814 &nvme->n_rm_cookie) == DDI_SUCCESS) {
4815 if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie,
4816 nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) !=
4817 DDI_SUCCESS) {
4818 goto fail;
4819 }
4820 } else {
4821 nvme->n_ev_rm_cb_id = NULL;
4822 }
4823
4824 mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL);
4825 nvme->n_progress |= NVME_MUTEX_INIT;
4826
4827 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4828 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
4829 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
4830 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
4831 B_TRUE : B_FALSE;
4832 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4833 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
4834 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4835 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN);
4836 /*
4837 * Double up the default for completion queues in case of
4838 * queue sharing.
4839 */
4840 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4841 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN);
4842 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4843 DDI_PROP_DONTPASS, "async-event-limit",
4844 NVME_DEFAULT_ASYNC_EVENT_LIMIT);
4845 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4846 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
4847 B_TRUE : B_FALSE;
4848 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4849 DDI_PROP_DONTPASS, "min-phys-block-size",
4850 NVME_DEFAULT_MIN_BLOCK_SIZE);
4851 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4852 DDI_PROP_DONTPASS, "max-submission-queues", -1);
4853 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4854 DDI_PROP_DONTPASS, "max-completion-queues", -1);
4855
4856 if (!ISP2(nvme->n_min_block_size) ||
4857 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
4858 dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
4859 "using default %d", ISP2(nvme->n_min_block_size) ?
4860 "too low" : "not a power of 2",
4861 NVME_DEFAULT_MIN_BLOCK_SIZE);
4862 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
4863 }
4864
4865 if (nvme->n_submission_queues != -1 &&
4866 (nvme->n_submission_queues < 1 ||
4867 nvme->n_submission_queues > UINT16_MAX)) {
4868 dev_err(dip, CE_WARN, "!\"max-submission-queues\"=%d is not "
4869 "valid. Must be [1..%d]", nvme->n_submission_queues,
4870 UINT16_MAX);
4871 nvme->n_submission_queues = -1;
4872 }
4873
4874 if (nvme->n_completion_queues != -1 &&
4875 (nvme->n_completion_queues < 1 ||
4876 nvme->n_completion_queues > UINT16_MAX)) {
4877 dev_err(dip, CE_WARN, "!\"max-completion-queues\"=%d is not "
4878 "valid. Must be [1..%d]", nvme->n_completion_queues,
4879 UINT16_MAX);
4880 nvme->n_completion_queues = -1;
4881 }
4882
4883 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
4884 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
4885 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
4886 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
4887
4888 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN)
4889 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN;
4890 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN)
4891 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN;
4892
4893 if (nvme->n_async_event_limit < 1)
4894 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
4895
4896 nvme->n_reg_acc_attr = nvme_reg_acc_attr;
4897 nvme->n_queue_dma_attr = nvme_queue_dma_attr;
4898 nvme->n_prp_dma_attr = nvme_prp_dma_attr;
4899 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
4900
4901 /*
4902 * Set up FMA support.
4903 */
4904 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
4905 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
4906 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
4907 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
4908
4909 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
4910
4911 if (nvme->n_fm_cap) {
4912 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
4913 nvme->n_reg_acc_attr.devacc_attr_access =
4914 DDI_FLAGERR_ACC;
4915
4916 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
4917 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
4918 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
4919 }
4920
4921 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
4922 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4923 pci_ereport_setup(dip);
4924
4925 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
4926 ddi_fm_handler_register(dip, nvme_fm_errcb,
4927 (void *)nvme);
4928 }
4929
4930 nvme->n_progress |= NVME_FMA_INIT;
4931
4932 /*
4933 * The spec defines several register sets. Only the controller
4934 * registers (set 1) are currently used.
4935 */
4936 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
4937 nregs < 2 ||
4938 ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
4939 goto fail;
4940
4941 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
4942 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
4943 dev_err(dip, CE_WARN, "!failed to map regset 1");
4944 goto fail;
4945 }
4946
4947 nvme->n_progress |= NVME_REGS_MAPPED;
4948
4949 /*
4950 * Set up kstats
4951 */
4952 if (!nvme_stat_init(nvme)) {
4953 dev_err(dip, CE_WARN, "!failed to create device kstats");
4954 goto fail;
4955 }
4956 nvme->n_progress |= NVME_STAT_INIT;
4957
4958 /*
4959 * Create PRP DMA cache
4960 */
4961 (void) snprintf(name, sizeof (name), "%s%d_prp_cache",
4962 ddi_driver_name(dip), ddi_get_instance(dip));
4963 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
4964 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
4965 NULL, (void *)nvme, NULL, 0);
4966
4967 if (nvme_init(nvme) != DDI_SUCCESS)
4968 goto fail;
4969
4970 /*
4971 * Initialize the driver with the UFM subsystem
4972 */
4973 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
4974 &nvme->n_ufmh, nvme) != 0) {
4975 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem");
4976 goto fail;
4977 }
4978 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL);
4979 ddi_ufm_update(nvme->n_ufmh);
4980 nvme->n_progress |= NVME_UFM_INIT;
4981
4982 nvme_mgmt_lock_init(&nvme->n_mgmt);
4983 nvme_lock_init(&nvme->n_lock);
4984 nvme->n_progress |= NVME_MGMT_INIT;
4985 nvme->n_dead_status = NVME_IOCTL_E_CTRL_DEAD;
4986
4987 /*
4988 * Identify namespaces.
4989 */
4990 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
4991
4992 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
4993 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
4994
4995 nvme_lock_init(&ns->ns_lock);
4996 ns->ns_progress |= NVME_NS_LOCK;
4997
4998 /*
4999 * Namespaces start out ignored. When nvme_init_ns() checks
5000 * their properties and finds they can be used, it will set
5001 * ns_ignore to B_FALSE. It will also use this state change
5002 * to keep an accurate count of attachable namespaces.
5003 */
5004 ns->ns_ignore = B_TRUE;
5005 if (nvme_init_ns(nvme, i) != 0) {
5006 nvme_mgmt_unlock(nvme);
5007 goto fail;
5008 }
5009
5010 if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR,
5011 NVME_MINOR(ddi_get_instance(nvme->n_dip), i),
5012 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
5013 nvme_mgmt_unlock(nvme);
5014 dev_err(dip, CE_WARN,
5015 "!failed to create minor node for namespace %d", i);
5016 goto fail;
5017 }
5018 }
5019
5020 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
5021 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) !=
5022 DDI_SUCCESS) {
5023 nvme_mgmt_unlock(nvme);
5024 dev_err(dip, CE_WARN, "nvme_attach: "
5025 "cannot create devctl minor node");
5026 goto fail;
5027 }
5028
5029 attached_ns = B_FALSE;
5030 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5031 nvme_ioctl_common_t com = { .nioc_nsid = i };
5032
5033 if (nvme_attach_ns(nvme, &com)) {
5034 attached_ns = B_TRUE;
5035 } else if (com.nioc_drv_err != NVME_IOCTL_E_UNSUP_ATTACH_NS) {
5036 dev_err(nvme->n_dip, CE_WARN, "!failed to attach "
5037 "namespace %d due to blkdev error", i);
5038 /*
5039 * Once we have successfully attached a namespace we
5040 * can no longer fail the driver attach as there is now
5041 * a blkdev child node linked to this device, and
5042 * our node is not yet in the attached state.
5043 */
5044 if (!attached_ns) {
5045 nvme_mgmt_unlock(nvme);
5046 goto fail;
5047 }
5048 }
5049 }
5050
5051 nvme_mgmt_unlock(nvme);
5052
5053 return (DDI_SUCCESS);
5054
5055 fail:
5056 /* attach successful anyway so that FMA can retire the device */
5057 if (nvme->n_dead)
5058 return (DDI_SUCCESS);
5059
5060 (void) nvme_detach(dip, DDI_DETACH);
5061
5062 return (DDI_FAILURE);
5063 }
5064
5065 static int
5066 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
5067 {
5068 int instance;
5069 nvme_t *nvme;
5070
5071 if (cmd != DDI_DETACH)
5072 return (DDI_FAILURE);
5073
5074 instance = ddi_get_instance(dip);
5075
5076 nvme = ddi_get_soft_state(nvme_state, instance);
5077
5078 if (nvme == NULL)
5079 return (DDI_FAILURE);
5080
5081 /*
5082 * Remove all minor nodes from the device regardless of the source in
5083 * one swoop.
5084 */
5085 ddi_remove_minor_node(dip, NULL);
5086
5087 /*
5088 * We need to remove the event handler as one of the first things that
5089 * we do. If we proceed with other teardown without removing the event
5090 * handler, we could end up in a very unfortunate race with ourselves.
5091 * The DDI does not serialize these with detach (just like timeout(9F)
5092 * and others).
5093 */
5094 if (nvme->n_ev_rm_cb_id != NULL) {
5095 (void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id);
5096 }
5097 nvme->n_ev_rm_cb_id = NULL;
5098
5099 /*
5100 * If the controller was marked dead, there is a slight chance that we
5101 * are asynchronously processing the removal taskq. Because we have
5102 * removed the callback handler above and all minor nodes and commands
5103 * are closed, there is no other way to get in here. As such, we wait on
5104 * the nvme_dead_taskq to complete so we can avoid tracking if it's
5105 * running or not.
5106 */
5107 taskq_wait(nvme_dead_taskq);
5108
5109 if (nvme->n_ns) {
5110 for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
5111 nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
5112
5113 if (ns->ns_bd_hdl) {
5114 (void) bd_detach_handle(ns->ns_bd_hdl);
5115 bd_free_handle(ns->ns_bd_hdl);
5116 }
5117
5118 if (ns->ns_idns)
5119 kmem_free(ns->ns_idns,
5120 sizeof (nvme_identify_nsid_t));
5121 if (ns->ns_devid)
5122 strfree(ns->ns_devid);
5123
5124 if ((ns->ns_progress & NVME_NS_LOCK) != 0)
5125 nvme_lock_fini(&ns->ns_lock);
5126 }
5127
5128 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
5129 nvme->n_namespace_count);
5130 }
5131
5132 if (nvme->n_progress & NVME_MGMT_INIT) {
5133 nvme_lock_fini(&nvme->n_lock);
5134 nvme_mgmt_lock_fini(&nvme->n_mgmt);
5135 }
5136
5137 if (nvme->n_progress & NVME_UFM_INIT) {
5138 ddi_ufm_fini(nvme->n_ufmh);
5139 mutex_destroy(&nvme->n_fwslot_mutex);
5140 }
5141
5142 if (nvme->n_progress & NVME_INTERRUPTS)
5143 nvme_release_interrupts(nvme);
5144
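/*
 * Wait for any completion queue taskq work that is still in flight to
 * drain before the completion queues themselves are torn down below.
 */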
5145 for (uint_t i = 0; i < nvme->n_cq_count; i++) {
5146 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL)
5147 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq);
5148 }
5149
5150 if (nvme->n_progress & NVME_MUTEX_INIT) {
5151 mutex_destroy(&nvme->n_minor_mutex);
5152 }
5153
5154 if (nvme->n_ioq_count > 0) {
5155 for (uint_t i = 1; i != nvme->n_ioq_count + 1; i++) {
5156 if (nvme->n_ioq[i] != NULL) {
5157 /* TODO: send destroy queue commands */
5158 nvme_free_qpair(nvme->n_ioq[i]);
5159 }
5160 }
5161
5162 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
5163 (nvme->n_ioq_count + 1));
5164 }
5165
5166 if (nvme->n_prp_cache != NULL) {
5167 kmem_cache_destroy(nvme->n_prp_cache);
5168 }
5169
5170 if (nvme->n_progress & NVME_REGS_MAPPED) {
5171 nvme_shutdown(nvme, B_FALSE);
5172 (void) nvme_reset(nvme, B_FALSE);
5173 }
5174
5175 if (nvme->n_progress & NVME_CTRL_LIMITS)
5176 sema_destroy(&nvme->n_abort_sema);
5177
5178 if (nvme->n_progress & NVME_ADMIN_QUEUE)
5179 nvme_free_qpair(nvme->n_adminq);
5180
5181 if (nvme->n_cq_count > 0) {
5182 nvme_destroy_cq_array(nvme, 0);
5183 nvme->n_cq = NULL;
5184 nvme->n_cq_count = 0;
5185 }
5186
5187 if (nvme->n_idcomns)
5188 kmem_free(nvme->n_idcomns, NVME_IDENTIFY_BUFSIZE);
5189
5190 if (nvme->n_idctl)
5191 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE);
5192
5193 if (nvme->n_progress & NVME_REGS_MAPPED)
5194 ddi_regs_map_free(&nvme->n_regh);
5195
5196 if (nvme->n_progress & NVME_STAT_INIT)
5197 nvme_stat_cleanup(nvme);
5198
5199 if (nvme->n_progress & NVME_FMA_INIT) {
5200 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5201 ddi_fm_handler_unregister(nvme->n_dip);
5202
5203 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
5204 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
5205 pci_ereport_teardown(nvme->n_dip);
5206
5207 ddi_fm_fini(nvme->n_dip);
5208 }
5209
5210 if (nvme->n_progress & NVME_PCI_CONFIG)
5211 pci_config_teardown(&nvme->n_pcicfg_handle);
5212
5213 if (nvme->n_vendor != NULL)
5214 strfree(nvme->n_vendor);
5215
5216 if (nvme->n_product != NULL)
5217 strfree(nvme->n_product);
5218
5219 ddi_soft_state_free(nvme_state, instance);
5220
5221 return (DDI_SUCCESS);
5222 }
5223
5224 static int
5225 nvme_quiesce(dev_info_t *dip)
5226 {
5227 int instance;
5228 nvme_t *nvme;
5229
5230 instance = ddi_get_instance(dip);
5231
5232 nvme = ddi_get_soft_state(nvme_state, instance);
5233
5234 if (nvme == NULL)
5235 return (DDI_FAILURE);
5236
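/*
 * quiesce(9E) is called late in the shutdown/fast-reboot path where we
 * must not block or rely on interrupts, so shut down and reset the
 * controller synchronously.
 */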
5237 nvme_shutdown(nvme, B_TRUE);
5238
5239 (void) nvme_reset(nvme, B_TRUE);
5240
5241 return (DDI_SUCCESS);
5242 }
5243
5244 static int
5245 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma)
5246 {
5247 nvme_t *nvme = cmd->nc_nvme;
5248 uint_t nprp_per_page, nprp;
5249 uint64_t *prp;
5250 const ddi_dma_cookie_t *cookie;
5251 uint_t idx;
5252 uint_t ncookies = ddi_dma_ncookies(dma);
5253
5254 if (ncookies == 0)
5255 return (DDI_FAILURE);
5256
5257 if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL)
5258 return (DDI_FAILURE);
5259 cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress;
5260
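/*
 * With a single cookie PRP1 alone describes the transfer. With exactly
 * two cookies the second one fits directly into PRP2; anything larger
 * requires a PRP list, which is built further below.
 */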
5261 if (ncookies == 1) {
5262 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5263 return (DDI_SUCCESS);
5264 } else if (ncookies == 2) {
5265 if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL)
5266 return (DDI_FAILURE);
5267 cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress;
5268 return (DDI_SUCCESS);
5269 }
5270
5271 /*
5272 * At this point, we're always operating on cookies at
5273 * index >= 1 and writing the addresses of those cookies
5274 * into a new page. The address of that page is stored
5275 * as the second PRP entry.
5276 */
5277 nprp_per_page = nvme->n_pagesize / sizeof (uint64_t);
5278 ASSERT(nprp_per_page > 0);
5279
5280 /*
5281 * We currently don't support chained PRPs and set up our DMA
5282 * attributes to reflect that. If we still get an I/O request
5283 * that needs a chained PRP something is very wrong. Account
5284 * for the first cookie here, which we've placed in d_prp[0].
5285 */
5286 nprp = howmany(ncookies - 1, nprp_per_page);
5287 VERIFY(nprp == 1);
5288
5289 /*
5290 * Allocate a page of pointers, in which we'll write the
5291 * addresses of cookies 1 to `ncookies`.
5292 */
5293 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
5294 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5295 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress;
5296
5297 prp = (uint64_t *)cmd->nc_prp->nd_memp;
5298 for (idx = 1; idx < ncookies; idx++) {
5299 if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL)
5300 return (DDI_FAILURE);
5301 *prp++ = cookie->dmac_laddress;
5302 }
5303
5304 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5305 DDI_DMA_SYNC_FORDEV);
5306 return (DDI_SUCCESS);
5307 }
5308
5309 /*
5310 * The maximum number of ranges supported for a deallocate request is
5311 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and
5312 * unchanged through at least 1.4a). The definition of nvme_range_t is also
5313 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for
5314 * a deallocate request will fit into the smallest supported namespace page
5315 * (4k).
5316 */
5317 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096);
5318
5319 static int
5320 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize,
5321 int allocflag)
5322 {
5323 const dkioc_free_list_t *dfl = xfer->x_dfl;
5324 const dkioc_free_list_ext_t *exts = dfl->dfl_exts;
5325 nvme_t *nvme = cmd->nc_nvme;
5326 nvme_range_t *ranges = NULL;
5327 uint_t i;
5328
5329 /*
5330 * The number of ranges in the request is zero-based (that is
5331 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ...,
5332 * word10 == 255 -> 256 ranges). Therefore the allowed values are
5333 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request,
5334 * we either provided bad info in nvme_bd_driveinfo() or there is a bug
5335 * in blkdev.
5336 */
5337 VERIFY3U(dfl->dfl_num_exts, >, 0);
5338 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES);
5339 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff;
5340
5341 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE;
5342
5343 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag);
5344 if (cmd->nc_prp == NULL)
5345 return (DDI_FAILURE);
5346
5347 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
5348 ranges = (nvme_range_t *)cmd->nc_prp->nd_memp;
5349
5350 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress;
5351 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
5352
5353 for (i = 0; i < dfl->dfl_num_exts; i++) {
5354 uint64_t lba, len;
5355
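/*
 * Convert the byte-based extent from blkdev into the LBA and
 * block count expected by the deallocate range entry.
 */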
5356 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize;
5357 len = exts[i].dfle_length / blocksize;
5358
5359 VERIFY3U(len, <=, UINT32_MAX);
5360
5361 /* No context attributes for a deallocate request */
5362 ranges[i].nr_ctxattr = 0;
5363 ranges[i].nr_len = len;
5364 ranges[i].nr_lba = lba;
5365 }
5366
5367 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
5368 DDI_DMA_SYNC_FORDEV);
5369
5370 return (DDI_SUCCESS);
5371 }
5372
5373 static nvme_cmd_t *
5374 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
5375 {
5376 nvme_t *nvme = ns->ns_nvme;
5377 nvme_cmd_t *cmd;
5378 int allocflag;
5379
5380 /*
5381 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
5382 */
5383 allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP;
5384 cmd = nvme_alloc_cmd(nvme, allocflag);
5385
5386 if (cmd == NULL)
5387 return (NULL);
5388
5389 cmd->nc_sqe.sqe_opc = opc;
5390 cmd->nc_callback = nvme_bd_xfer_done;
5391 cmd->nc_xfer = xfer;
5392
5393 switch (opc) {
5394 case NVME_OPC_NVM_WRITE:
5395 case NVME_OPC_NVM_READ:
5396 VERIFY(xfer->x_nblks <= 0x10000);
5397
5398 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5399
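/*
 * cdw10 and cdw11 carry the low and high halves of the 64-bit
 * starting LBA; cdw12 carries the 0-based block count.
 */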
5400 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
5401 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
5402 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
5403
5404 if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS)
5405 goto fail;
5406 break;
5407
5408 case NVME_OPC_NVM_FLUSH:
5409 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5410 break;
5411
5412 case NVME_OPC_NVM_DSET_MGMT:
5413 cmd->nc_sqe.sqe_nsid = ns->ns_id;
5414
5415 if (nvme_fill_ranges(cmd, xfer,
5416 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS)
5417 goto fail;
5418 break;
5419
5420 default:
5421 goto fail;
5422 }
5423
5424 return (cmd);
5425
5426 fail:
5427 nvme_free_cmd(cmd);
5428 return (NULL);
5429 }
5430
5431 static void
5432 nvme_bd_xfer_done(void *arg)
5433 {
5434 nvme_cmd_t *cmd = arg;
5435 bd_xfer_t *xfer = cmd->nc_xfer;
5436 int error = 0;
5437
5438 error = nvme_check_cmd_status(cmd);
5439 nvme_free_cmd(cmd);
5440
5441 bd_xfer_done(xfer, error);
5442 }
5443
5444 static void
5445 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
5446 {
5447 nvme_namespace_t *ns = arg;
5448 nvme_t *nvme = ns->ns_nvme;
5449 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable);
5450
5451 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5452
5453 /*
5454 * Set the blkdev qcount to the number of submission queues.
5455 * It will then create one waitq/runq pair for each submission
5456 * queue and spread I/O requests across the queues.
5457 */
5458 drive->d_qcount = nvme->n_ioq_count;
5459
5460 /*
5461 * I/O activity to individual namespaces is distributed across
5462 * each of the d_qcount blkdev queues (which has been set to
5463 * the number of nvme submission queues). d_qsize is the number
5464 * of submitted and not completed I/Os within each queue that blkdev
5465 * will allow before it starts holding them in the waitq.
5466 *
5467 * Each namespace will create a child blkdev instance; for each one
5468 * we try to set the d_qsize so that each namespace gets an
5469 * equal portion of the submission queue.
5470 *
5471 * If, post instantiation of the nvme drive, n_namespaces_attachable
5472 * changes and a namespace is attached, it could calculate a
5473 * different d_qsize. It may even be that the sum of the d_qsizes is
5474 * now beyond the submission queue size. Should that be the case
5475 * and the I/O rate is such that blkdev attempts to submit more
5476 * I/Os than the size of the submission queue, the excess I/Os
5477 * will be held behind the semaphore nq_sema.
5478 */
5479 drive->d_qsize = nvme->n_io_squeue_len / ns_count;
5480
5481 /*
5482 * Don't let the queue size drop below the minimum, though.
5483 */
5484 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN);
5485
5486 /*
5487 * d_maxxfer is not set, which means the value is taken from the DMA
5488 * attributes specified to bd_alloc_handle.
5489 */
5490
5491 drive->d_removable = B_FALSE;
5492 drive->d_hotpluggable = B_FALSE;
5493
5494 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
5495 drive->d_target = ns->ns_id;
5496 drive->d_lun = 0;
5497
5498 drive->d_model = nvme->n_idctl->id_model;
5499 drive->d_model_len = sizeof (nvme->n_idctl->id_model);
5500 drive->d_vendor = nvme->n_vendor;
5501 drive->d_vendor_len = strlen(nvme->n_vendor);
5502 drive->d_product = nvme->n_product;
5503 drive->d_product_len = strlen(nvme->n_product);
5504 drive->d_serial = nvme->n_idctl->id_serial;
5505 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
5506 drive->d_revision = nvme->n_idctl->id_fwrev;
5507 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
5508
5509 /*
5510 * If we support the dataset management command, the only restriction
5511 * on a discard request is the maximum number of ranges (segments)
5512 * per single request.
5513 */
5514 if (nvme->n_idctl->id_oncs.on_dset_mgmt)
5515 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES;
5516
5517 nvme_mgmt_unlock(nvme);
5518 }
5519
5520 static int
5521 nvme_bd_mediainfo(void *arg, bd_media_t *media)
5522 {
5523 nvme_namespace_t *ns = arg;
5524 nvme_t *nvme = ns->ns_nvme;
5525
5526 if (nvme->n_dead) {
5527 return (EIO);
5528 }
5529
5530 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
5531
5532 media->m_nblks = ns->ns_block_count;
5533 media->m_blksize = ns->ns_block_size;
5534 media->m_readonly = B_FALSE;
5535 media->m_solidstate = B_TRUE;
5536
5537 media->m_pblksize = ns->ns_best_block_size;
5538
5539 nvme_mgmt_unlock(nvme);
5540
5541 return (0);
5542 }
5543
5544 static int
5545 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
5546 {
5547 nvme_t *nvme = ns->ns_nvme;
5548 nvme_cmd_t *cmd;
5549 nvme_qpair_t *ioq;
5550 boolean_t poll;
5551 int ret;
5552
5553 if (nvme->n_dead) {
5554 return (EIO);
5555 }
5556
5557 cmd = nvme_create_nvm_cmd(ns, opc, xfer);
5558 if (cmd == NULL)
5559 return (ENOMEM);
5560
5561 cmd->nc_sqid = xfer->x_qnum + 1;
5562 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
5563 ioq = nvme->n_ioq[cmd->nc_sqid];
5564
5565 /*
5566 * Get the polling flag before submitting the command. The command may
5567 * complete immediately after it was submitted, which means we must
5568 * treat both cmd and xfer as if they have been freed already.
5569 */
5570 poll = (xfer->x_flags & BD_XFER_POLL) != 0;
5571
5572 ret = nvme_submit_io_cmd(ioq, cmd);
5573
5574 if (ret != 0)
5575 return (ret);
5576
5577 if (!poll)
5578 return (0);
5579
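/*
 * In polled mode there are no interrupts, so spin retrieving
 * completions and invoking their callbacks until this queue has no
 * active commands left.
 */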
5580 do {
5581 cmd = nvme_retrieve_cmd(nvme, ioq);
5582 if (cmd != NULL) {
5583 ASSERT0(cmd->nc_flags & NVME_CMD_F_USELOCK);
5584 cmd->nc_callback(cmd);
5585 } else {
5586 drv_usecwait(10);
5587 }
5588 } while (ioq->nq_active_cmds != 0);
5589
5590 return (0);
5591 }
5592
5593 static int
5594 nvme_bd_read(void *arg, bd_xfer_t *xfer)
5595 {
5596 nvme_namespace_t *ns = arg;
5597
5598 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
5599 }
5600
5601 static int
5602 nvme_bd_write(void *arg, bd_xfer_t *xfer)
5603 {
5604 nvme_namespace_t *ns = arg;
5605
5606 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
5607 }
5608
5609 static int
5610 nvme_bd_sync(void *arg, bd_xfer_t *xfer)
5611 {
5612 nvme_namespace_t *ns = arg;
5613
5614 if (ns->ns_nvme->n_dead)
5615 return (EIO);
5616
5617 /*
5618 * If the volatile write cache is not present or not enabled, the FLUSH
5619 * command is a no-op, so we can take a shortcut here.
5620 */
5621 if (!ns->ns_nvme->n_write_cache_present) {
5622 bd_xfer_done(xfer, ENOTSUP);
5623 return (0);
5624 }
5625
5626 if (!ns->ns_nvme->n_write_cache_enabled) {
5627 bd_xfer_done(xfer, 0);
5628 return (0);
5629 }
5630
5631 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
5632 }
5633
5634 static int
5635 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
5636 {
5637 nvme_namespace_t *ns = arg;
5638 nvme_t *nvme = ns->ns_nvme;
5639
5640 if (nvme->n_dead) {
5641 return (EIO);
5642 }
5643
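/*
 * Prefer the namespace NGUID for the devid, then the EUI64, and
 * finally fall back to the driver-generated namespace ID string.
 */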
5644 if (*(uint64_t *)ns->ns_nguid != 0 ||
5645 *(uint64_t *)(ns->ns_nguid + 8) != 0) {
5646 return (ddi_devid_init(devinfo, DEVID_NVME_NGUID,
5647 sizeof (ns->ns_nguid), ns->ns_nguid, devid));
5648 } else if (*(uint64_t *)ns->ns_eui64 != 0) {
5649 return (ddi_devid_init(devinfo, DEVID_NVME_EUI64,
5650 sizeof (ns->ns_eui64), ns->ns_eui64, devid));
5651 } else {
5652 return (ddi_devid_init(devinfo, DEVID_NVME_NSID,
5653 strlen(ns->ns_devid), ns->ns_devid, devid));
5654 }
5655 }
5656
5657 static int
5658 nvme_bd_free_space(void *arg, bd_xfer_t *xfer)
5659 {
5660 nvme_namespace_t *ns = arg;
5661
5662 if (xfer->x_dfl == NULL)
5663 return (EINVAL);
5664
5665 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt)
5666 return (ENOTSUP);
5667
5668 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT));
5669 }
5670
5671 static int
5672 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
5673 {
5674 #ifndef __lock_lint
5675 _NOTE(ARGUNUSED(cred_p));
5676 #endif
5677 nvme_t *nvme;
5678 nvme_minor_t *minor = NULL;
5679 uint32_t nsid;
5680 minor_t m = getminor(*devp);
5681 int rv = 0;
5682
5683 if (otyp != OTYP_CHR)
5684 return (EINVAL);
5685
5686 if (m >= NVME_OPEN_MINOR_MIN)
5687 return (ENXIO);
5688
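/*
 * Only the minor nodes created at attach time may be opened directly;
 * their minor numbers encode the controller instance and namespace ID.
 */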
5689 nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(m));
5690 nsid = NVME_MINOR_NSID(m);
5691
5692 if (nvme == NULL)
5693 return (ENXIO);
5694
5695 if (nsid > nvme->n_namespace_count)
5696 return (ENXIO);
5697
5698 if (nvme->n_dead)
5699 return (EIO);
5700
5701 /*
5702 * At this point, we're going to allow an open to proceed on this
5703 * device. We need to allocate a new instance for this (presuming one is
5704 * available).
5705 */
5706 minor = kmem_zalloc(sizeof (nvme_minor_t), KM_NOSLEEP_LAZY);
5707 if (minor == NULL) {
5708 return (ENOMEM);
5709 }
5710
5711 cv_init(&minor->nm_cv, NULL, CV_DRIVER, NULL);
5712 list_link_init(&minor->nm_ctrl_lock.nli_node);
5713 minor->nm_ctrl_lock.nli_nvme = nvme;
5714 minor->nm_ctrl_lock.nli_minor = minor;
5715 list_link_init(&minor->nm_ns_lock.nli_node);
5716 minor->nm_ns_lock.nli_nvme = nvme;
5717 minor->nm_ns_lock.nli_minor = minor;
5718 minor->nm_minor = id_alloc_nosleep(nvme_open_minors);
5719 if (minor->nm_minor == -1) {
5720 nvme_minor_free(minor);
5721 return (ENOSPC);
5722 }
5723
5724 minor->nm_ctrl = nvme;
5725 if (nsid != 0) {
5726 minor->nm_ns = nvme_nsid2ns(nvme, nsid);
5727 }
5728
5729 /*
5730 * Before we check for exclusive access and attempt a lock if requested,
5731 * ensure that this minor is persisted.
5732 */
5733 mutex_enter(&nvme_open_minors_mutex);
5734 avl_add(&nvme_open_minors_avl, minor);
5735 mutex_exit(&nvme_open_minors_mutex);
5736
5737 /*
5738 * A request to open this with FEXCL is translated into a non-blocking
5739 * write lock of the appropriate entity. This honors the original
5740 * semantics. In the future, we should see if we can remove this
5741 * and turn a request for FEXCL at open into ENOTSUP.
5742 */
5743 mutex_enter(&nvme->n_minor_mutex);
5744 if ((flag & FEXCL) != 0) {
5745 nvme_ioctl_lock_t lock = {
5746 .nil_level = NVME_LOCK_L_WRITE,
5747 .nil_flags = NVME_LOCK_F_DONT_BLOCK
5748 };
5749
5750 if (minor->nm_ns != NULL) {
5751 lock.nil_ent = NVME_LOCK_E_NS;
5752 lock.nil_common.nioc_nsid = nsid;
5753 } else {
5754 lock.nil_ent = NVME_LOCK_E_CTRL;
5755 }
5756 nvme_rwlock(minor, &lock);
5757 if (lock.nil_common.nioc_drv_err != NVME_IOCTL_E_OK) {
5758 mutex_exit(&nvme->n_minor_mutex);
5759
5760 mutex_enter(&nvme_open_minors_mutex);
5761 avl_remove(&nvme_open_minors_avl, minor);
5762 mutex_exit(&nvme_open_minors_mutex);
5763
5764 nvme_minor_free(minor);
5765 return (EBUSY);
5766 }
5767 }
5768 mutex_exit(&nvme->n_minor_mutex);
5769
5770 *devp = makedevice(getmajor(*devp), (minor_t)minor->nm_minor);
5771 return (rv);
5772
5773 }
5774
5775 static int
5776 nvme_close(dev_t dev, int flag __unused, int otyp, cred_t *cred_p __unused)
5777 {
5778 nvme_minor_t *minor;
5779 nvme_t *nvme;
5780
5781 if (otyp != OTYP_CHR) {
5782 return (ENXIO);
5783 }
5784
5785 minor = nvme_minor_find_by_dev(dev);
5786 if (minor == NULL) {
5787 return (ENXIO);
5788 }
5789
5790 mutex_enter(&nvme_open_minors_mutex);
5791 avl_remove(&nvme_open_minors_avl, minor);
5792 mutex_exit(&nvme_open_minors_mutex);
5793
5794 /*
5795 * When this device is being closed, we must ensure that any locks held
5796 * by this minor are dealt with.
5797 */
5798 nvme = minor->nm_ctrl;
5799 mutex_enter(&nvme->n_minor_mutex);
5800 ASSERT3U(minor->nm_ctrl_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
5801 ASSERT3U(minor->nm_ns_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
5802
5803 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
5804 VERIFY3P(minor->nm_ctrl_lock.nli_lock, !=, NULL);
5805 nvme_rwunlock(&minor->nm_ctrl_lock,
5806 minor->nm_ctrl_lock.nli_lock);
5807 }
5808
5809 if (minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
5810 VERIFY3P(minor->nm_ns_lock.nli_lock, !=, NULL);
5811 nvme_rwunlock(&minor->nm_ns_lock, minor->nm_ns_lock.nli_lock);
5812 }
5813 mutex_exit(&nvme->n_minor_mutex);
5814
5815 nvme_minor_free(minor);
5816
5817 return (0);
5818 }
5819
5820 void
5821 nvme_ioctl_success(nvme_ioctl_common_t *ioc)
5822 {
5823 ioc->nioc_drv_err = NVME_IOCTL_E_OK;
5824 ioc->nioc_ctrl_sc = NVME_CQE_SC_GEN_SUCCESS;
5825 ioc->nioc_ctrl_sct = NVME_CQE_SCT_GENERIC;
5826 }
5827
5828 boolean_t
5829 nvme_ioctl_error(nvme_ioctl_common_t *ioc, nvme_ioctl_errno_t err, uint32_t sct,
5830 uint32_t sc)
5831 {
5832 ioc->nioc_drv_err = err;
5833 ioc->nioc_ctrl_sct = sct;
5834 ioc->nioc_ctrl_sc = sc;
5835
5836 return (B_FALSE);
5837 }
5838
5839 static int
5840 nvme_ioctl_copyout_error(nvme_ioctl_errno_t err, intptr_t uaddr, int mode)
5841 {
5842 nvme_ioctl_common_t ioc;
5843
5844 ASSERT3U(err, !=, NVME_IOCTL_E_CTRL_ERROR);
5845 bzero(&ioc, sizeof (ioc));
5846 if (ddi_copyout(&ioc, (void *)uaddr, sizeof (nvme_ioctl_common_t),
5847 mode & FKIOCTL) != 0) {
5848 return (EFAULT);
5849 }
5850 return (0);
5851 }
5852
5853 /*
5854 * The companion to the namespace checking. This occurs after any rewriting
5855 * occurs. This is the primary point that we attempt to enforce any operation's
5856 * exclusivity. Note, it is theoretically possible for an operation to be
5857 * ongoing and to have someone with an exclusive lock ask to unlock it for some
5858 * reason. We do not attempt to track or prevent such situations.
5859 * While perhaps this is leaving too much up to the user, by the same token we
5860 * don't try to stop them from issuing two different format NVM commands
5861 * targeting the whole device at the same time either, even though the
5862 * controller would really rather that didn't happen.
5863 */
5864 static boolean_t
5865 nvme_ioctl_excl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
5866 const nvme_ioctl_check_t *check)
5867 {
5868 nvme_t *const nvme = minor->nm_ctrl;
5869 nvme_namespace_t *ns;
5870 boolean_t have_ctrl, have_ns, ctrl_is_excl, ns_is_excl;
5871
5872 /*
5873 * If the command doesn't require anything, then we're done.
5874 */
5875 if (check->nck_excl == NVME_IOCTL_EXCL_SKIP) {
5876 return (B_TRUE);
5877 }
5878
5879 if (ioc->nioc_nsid == 0 || ioc->nioc_nsid == NVME_NSID_BCAST) {
5880 ns = NULL;
5881 } else {
5882 ns = nvme_nsid2ns(nvme, ioc->nioc_nsid);
5883 }
5884
5885 mutex_enter(&nvme->n_minor_mutex);
5886 ctrl_is_excl = nvme->n_lock.nl_writer != NULL;
5887 have_ctrl = nvme->n_lock.nl_writer == &minor->nm_ctrl_lock;
5888 if (ns != NULL) {
5889 /*
5890 * We explicitly test the namespace lock's writer versus asking
5891 * the minor because the minor's namespace lock may apply to a
5892 * different namespace.
5893 */
5894 ns_is_excl = ns->ns_lock.nl_writer != NULL;
5895 have_ns = ns->ns_lock.nl_writer == &minor->nm_ns_lock;
5896 ASSERT0(have_ctrl && have_ns);
5897 #ifdef DEBUG
5898 if (have_ns) {
5899 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, ns);
5900 }
5901 #endif
5902 } else {
5903 ns_is_excl = B_FALSE;
5904 have_ns = B_FALSE;
5905 }
5906 ASSERT0(ctrl_is_excl && ns_is_excl);
5907 mutex_exit(&nvme->n_minor_mutex);
5908
5909 if (check->nck_excl == NVME_IOCTL_EXCL_WRITE) {
5910 if (ns == NULL) {
5911 if (have_ctrl) {
5912 return (B_TRUE);
5913 }
5914 return (nvme_ioctl_error(ioc,
5915 NVME_IOCTL_E_NEED_CTRL_WRLOCK, 0, 0));
5916 } else {
5917 if (have_ctrl || have_ns) {
5918 return (B_TRUE);
5919 }
5920 return (nvme_ioctl_error(ioc,
5921 NVME_IOCTL_E_NEED_NS_WRLOCK, 0, 0));
5922 }
5923 }
5924
5925 /*
5926 * Now we have an operation that does not require exclusive access. We
5927 * can proceed as long as no one else has it or if someone does it is
5928 * us. Regardless of what we target, a controller lock will stop us.
5929 */
5930 if (ctrl_is_excl && !have_ctrl) {
5931 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_LOCKED, 0, 0));
5932 }
5933
5934 /*
5935 * Only check namespace exclusivity if we are targeting one.
5936 */
5937 if (ns != NULL && ns_is_excl && !have_ns) {
5938 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_LOCKED, 0, 0));
5939 }
5940
5941 return (B_TRUE);
5942 }
5943
5944 /*
5945 * Perform common checking as to whether or not an ioctl operation may proceed.
5946 * We check in this function various aspects of the namespace attributes that
5947 * it's calling on. Once the namespace attributes and any possible rewriting
5948 * have been performed, then we proceed to check whether or not the requisite
5949 * exclusive access is present in nvme_ioctl_excl_check().
5950 */
5951 static boolean_t
5952 nvme_ioctl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
5953 const nvme_ioctl_check_t *check)
5954 {
5955 /*
5956 * If the minor has a namespace pointer, then it is constrained to that
5957 * namespace. If a namespace is allowed, then there are only two valid
5958 * values that we can find. The first is the one matching the minor. The second
5959 * is our value zero, which will be transformed to the current
5960 * namespace.
5961 */
5962 if (minor->nm_ns != NULL) {
5963 if (!check->nck_ns_ok || !check->nck_ns_minor_ok) {
5964 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NOT_CTRL, 0,
5965 0));
5966 }
5967
5968 if (ioc->nioc_nsid == 0) {
5969 ioc->nioc_nsid = minor->nm_ns->ns_id;
5970 } else if (ioc->nioc_nsid != minor->nm_ns->ns_id) {
5971 return (nvme_ioctl_error(ioc,
5972 NVME_IOCTL_E_MINOR_WRONG_NS, 0, 0));
5973 }
5974
5975 return (nvme_ioctl_excl_check(minor, ioc, check));
5976 }
5977
5978 /*
5979 * If we've been told to skip checking the controller, here's where we
5980 * do that. This should really only be for commands which use the
5981 * namespace ID for listing purposes and therefore can have
5982 * traditionally illegal values here.
5983 */
5984 if (check->nck_skip_ctrl) {
5985 return (nvme_ioctl_excl_check(minor, ioc, check));
5986 }
5987
5988 /*
5989 * At this point, we know that we're on the controller's node. We first
5990 * deal with the simple case: is a namespace allowed at all or not. If
5991 * it is not allowed, then the only acceptable value is zero.
5992 */
5993 if (!check->nck_ns_ok) {
5994 if (ioc->nioc_nsid != 0) {
5995 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_UNUSE, 0,
5996 0));
5997 }
5998
5999 return (nvme_ioctl_excl_check(minor, ioc, check));
6000 }
6001
6002 /*
6003 * At this point, we know that a controller is allowed to use a
6004 * namespace. If we haven't been given zero or the broadcast namespace,
6005 * check to see if it's actually a valid namespace ID. If it is outside of
6006 * range, then it is an error. Next, if we have been requested to
6007 * rewrite 0 (the "this controller" indicator) as the broadcast namespace,
6008 * do so.
6009 *
6010 * While we validate that this namespace is within the valid range, we
6011 * do not check if it is active or inactive. That is left to our callers
6012 * to determine.
6013 */
6014 if (ioc->nioc_nsid > minor->nm_ctrl->n_namespace_count &&
6015 ioc->nioc_nsid != NVME_NSID_BCAST) {
6016 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_RANGE, 0, 0));
6017 }
6018
6019 if (ioc->nioc_nsid == 0 && check->nck_ctrl_rewrite) {
6020 ioc->nioc_nsid = NVME_NSID_BCAST;
6021 }
6022
6023 /*
6024 * Finally, see if we have ended up with a broadcast namespace ID
6025 * whether through specification or rewriting. If that is not allowed,
6026 * then that is an error.
6027 */
6028 if (!check->nck_bcast_ok && ioc->nioc_nsid == NVME_NSID_BCAST) {
6029 return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_BCAST_NS, 0, 0));
6030 }
6031
6032 return (nvme_ioctl_excl_check(minor, ioc, check));
6033 }
6034
6035 static int
6036 nvme_ioctl_ctrl_info(nvme_minor_t *minor, intptr_t arg, int mode,
6037 cred_t *cred_p)
6038 {
6039 nvme_t *const nvme = minor->nm_ctrl;
6040 nvme_ioctl_ctrl_info_t *info;
6041 nvme_reg_cap_t cap = { 0 };
6042 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_CTRL };
6043 void *idbuf;
6044
6045 if ((mode & FREAD) == 0)
6046 return (EBADF);
6047
6048 info = kmem_alloc(sizeof (nvme_ioctl_ctrl_info_t), KM_NOSLEEP_LAZY);
6049 if (info == NULL) {
6050 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
6051 mode));
6052 }
6053
6054 if (ddi_copyin((void *)arg, info, sizeof (nvme_ioctl_ctrl_info_t),
6055 mode & FKIOCTL) != 0) {
6056 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6057 return (EFAULT);
6058 }
6059
6060 if (!nvme_ioctl_check(minor, &info->nci_common,
6061 &nvme_check_ctrl_info)) {
6062 goto copyout;
6063 }
6064
6065 /*
6066 * We explicitly do not use the identify controller copy in the kernel
6067 * right now so that we can get a snapshot of the controller's
6068 * current capacity and values. While it's tempting to try to use this
6069 * to refresh the kernel's version, we don't, simply to keep the rest of
6070 * the driver simpler right now.
6071 */
6072 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
6073 info->nci_common = id.nid_common;
6074 goto copyout;
6075 }
6076 bcopy(idbuf, &info->nci_ctrl_id, sizeof (nvme_identify_ctrl_t));
6077 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
6078
6079 /*
6080 * Use the kernel's cached common namespace information for this.
6081 */
6082 bcopy(nvme->n_idcomns, &info->nci_common_ns,
6083 sizeof (nvme_identify_nsid_t));
6084
6085 info->nci_vers = nvme->n_version;
6086
6087 /*
6088 * The MPSMIN and MPSMAX fields in the CAP register use 0 to
6089 * specify the base page size of 4k (1<<12), so add 12 here to
6090 * get the real page size value.
6091 */
6092 cap.r = nvme_get64(nvme, NVME_REG_CAP);
6093 info->nci_caps.cap_mpsmax = 1 << (12 + cap.b.cap_mpsmax);
6094 info->nci_caps.cap_mpsmin = 1 << (12 + cap.b.cap_mpsmin);
6095
6096 info->nci_nintrs = (uint32_t)nvme->n_intr_cnt;
6097
6098 copyout:
6099 if (ddi_copyout(info, (void *)arg, sizeof (nvme_ioctl_ctrl_info_t),
6100 mode & FKIOCTL) != 0) {
6101 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6102 return (EFAULT);
6103 }
6104
6105 kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
6106 return (0);
6107 }
6108
6109 static int
6110 nvme_ioctl_ns_info(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6111 {
6112 nvme_t *const nvme = minor->nm_ctrl;
6113 nvme_ioctl_ns_info_t *ns_info;
6114 nvme_namespace_t *ns;
6115 nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_NSID };
6116 void *idbuf;
6117
6118 if ((mode & FREAD) == 0)
6119 return (EBADF);
6120
6121 ns_info = kmem_zalloc(sizeof (nvme_ioctl_ns_info_t), KM_NOSLEEP_LAZY);
6122 if (ns_info == NULL) {
6123 return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
6124 mode));
6125 }
6126
6127 if (ddi_copyin((void *)arg, ns_info, sizeof (nvme_ioctl_ns_info_t),
6128 mode & FKIOCTL) != 0) {
6129 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6130 return (EFAULT);
6131 }
6132
6133 if (!nvme_ioctl_check(minor, &ns_info->nni_common,
6134 &nvme_check_ns_info)) {
6135 goto copyout;
6136 }
6137
6138 ASSERT3U(ns_info->nni_common.nioc_nsid, >, 0);
6139 ns = nvme_nsid2ns(nvme, ns_info->nni_common.nioc_nsid);
6140
6141 /*
6142 * First fetch a fresh copy of the namespace information. Most callers
6143 * are using this because they will want a mostly accurate snapshot of
6144 * capacity and utilization.
6145 */
6146 id.nid_common.nioc_nsid = ns_info->nni_common.nioc_nsid;
6147 if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
6148 ns_info->nni_common = id.nid_common;
6149 goto copyout;
6150 }
6151 bcopy(idbuf, &ns_info->nni_id, sizeof (nvme_identify_nsid_t));
6152 kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
6153
6154 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6155 if (ns->ns_allocated)
6156 ns_info->nni_state |= NVME_NS_STATE_ALLOCATED;
6157
6158 if (ns->ns_active)
6159 ns_info->nni_state |= NVME_NS_STATE_ACTIVE;
6160
6161 if (ns->ns_ignore)
6162 ns_info->nni_state |= NVME_NS_STATE_IGNORED;
6163
6164 if (ns->ns_attached) {
6165 const char *addr;
6166
6167 ns_info->nni_state |= NVME_NS_STATE_ATTACHED;
6168 addr = bd_address(ns->ns_bd_hdl);
6169 if (strlcpy(ns_info->nni_addr, addr,
6170 sizeof (ns_info->nni_addr)) >= sizeof (ns_info->nni_addr)) {
6171 nvme_mgmt_unlock(nvme);
6172 (void) nvme_ioctl_error(&ns_info->nni_common,
6173 NVME_IOCTL_E_BD_ADDR_OVER, 0, 0);
6174 goto copyout;
6175 }
6176 }
6177 nvme_mgmt_unlock(nvme);
6178
6179 copyout:
6180 if (ddi_copyout(ns_info, (void *)arg, sizeof (nvme_ioctl_ns_info_t),
6181 mode & FKIOCTL) != 0) {
6182 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6183 return (EFAULT);
6184 }
6185
6186 kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
6187 return (0);
6188 }
6189
6190 static int
6191 nvme_ioctl_identify(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6192 {
6193 _NOTE(ARGUNUSED(cred_p));
6194 nvme_t *const nvme = minor->nm_ctrl;
6195 void *idctl;
6196 uint_t model;
6197 nvme_ioctl_identify_t id;
6198 #ifdef _MULTI_DATAMODEL
6199 nvme_ioctl_identify32_t id32;
6200 #endif
6201 boolean_t ns_minor;
6202
6203 if ((mode & FREAD) == 0)
6204 return (EBADF);
6205
6206 model = ddi_model_convert_from(mode);
6207 switch (model) {
6208 #ifdef _MULTI_DATAMODEL
6209 case DDI_MODEL_ILP32:
6210 bzero(&id, sizeof (id));
6211 if (ddi_copyin((void *)arg, &id32, sizeof (id32),
6212 mode & FKIOCTL) != 0) {
6213 return (EFAULT);
6214 }
6215 id.nid_common.nioc_nsid = id32.nid_common.nioc_nsid;
6216 id.nid_cns = id32.nid_cns;
6217 id.nid_ctrlid = id32.nid_ctrlid;
6218 id.nid_data = id32.nid_data;
6219 break;
6220 #endif /* _MULTI_DATAMODEL */
6221 case DDI_MODEL_NONE:
6222 if (ddi_copyin((void *)arg, &id, sizeof (id),
6223 mode & FKIOCTL) != 0) {
6224 return (EFAULT);
6225 }
6226 break;
6227 default:
6228 return (ENOTSUP);
6229 }
6230
6231 if (!nvme_ioctl_check(minor, &id.nid_common, &nvme_check_identify)) {
6232 goto copyout;
6233 }
6234
6235 ns_minor = minor->nm_ns != NULL;
6236 if (!nvme_validate_identify(nvme, &id, ns_minor)) {
6237 goto copyout;
6238 }
6239
6240 if (nvme_identify(nvme, B_TRUE, &id, &idctl)) {
6241 int ret = ddi_copyout(idctl, (void *)id.nid_data,
6242 NVME_IDENTIFY_BUFSIZE, mode & FKIOCTL);
6243 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);
6244 if (ret != 0) {
6245 (void) nvme_ioctl_error(&id.nid_common,
6246 NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6247 goto copyout;
6248 }
6249
6250 nvme_ioctl_success(&id.nid_common);
6251 }
6252
6253 copyout:
6254 switch (model) {
6255 #ifdef _MULTI_DATAMODEL
6256 case DDI_MODEL_ILP32:
6257 id32.nid_common = id.nid_common;
6258
6259 if (ddi_copyout(&id32, (void *)arg, sizeof (id32),
6260 mode & FKIOCTL) != 0) {
6261 return (EFAULT);
6262 }
6263 break;
6264 #endif /* _MULTI_DATAMODEL */
6265 case DDI_MODEL_NONE:
6266 if (ddi_copyout(&id, (void *)arg, sizeof (id),
6267 mode & FKIOCTL) != 0) {
6268 return (EFAULT);
6269 }
6270 break;
6271 default:
6272 return (ENOTSUP);
6273 }
6274
6275 return (0);
6276 }
6277
6278 /*
6279 * Execute commands on behalf of the various ioctls.
6280 *
6281 * If this returns true then the command completed successfully. Otherwise error
6282 * information is returned in the nvme_ioctl_common_t arguments.
6283 */
6284 typedef struct {
6285 nvme_sqe_t *ica_sqe;
6286 void *ica_data;
6287 uint32_t ica_data_len;
6288 uint_t ica_dma_flags;
6289 int ica_copy_flags;
6290 uint32_t ica_timeout;
6291 uint32_t ica_cdw0;
6292 } nvme_ioc_cmd_args_t;
6293
6294 static boolean_t
6295 nvme_ioc_cmd(nvme_t *nvme, nvme_ioctl_common_t *ioc, nvme_ioc_cmd_args_t *args)
6296 {
6297 nvme_cmd_t *cmd;
6298 boolean_t ret = B_FALSE;
6299
6300 cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
6301 cmd->nc_sqid = 0;
6302
6303 /*
6304 * This function is used to facilitate requests from
6305 * userspace, so don't panic if the command fails. This
6306 * is especially true for admin passthru commands, where
6307 * the actual command data structure is entirely defined
6308 * by userspace.
6309 */
6310 cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
6311
6312 cmd->nc_callback = nvme_wakeup_cmd;
6313 cmd->nc_sqe = *args->ica_sqe;
6314
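/*
 * If the caller supplied a data payload, allocate a DMA buffer for it,
 * describe it with PRP entries, and copy user data in for writes.
 * Reads are copied back out after the command completes.
 */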
6315 if ((args->ica_dma_flags & DDI_DMA_RDWR) != 0) {
6316 if (args->ica_data == NULL) {
6317 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_DMA_MEM,
6318 0, 0);
6319 goto free_cmd;
6320 }
6321
6322 if (nvme_zalloc_dma(nvme, args->ica_data_len,
6323 args->ica_dma_flags, &nvme->n_prp_dma_attr, &cmd->nc_dma) !=
6324 DDI_SUCCESS) {
6325 dev_err(nvme->n_dip, CE_WARN,
6326 "!nvme_zalloc_dma failed for nvme_ioc_cmd()");
6327 ret = nvme_ioctl_error(ioc,
6328 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6329 goto free_cmd;
6330 }
6331
6332 if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
6333 ret = nvme_ioctl_error(ioc,
6334 NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
6335 goto free_cmd;
6336 }
6337
6338 if ((args->ica_dma_flags & DDI_DMA_WRITE) != 0 &&
6339 ddi_copyin(args->ica_data, cmd->nc_dma->nd_memp,
6340 args->ica_data_len, args->ica_copy_flags) != 0) {
6341 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA,
6342 0, 0);
6343 goto free_cmd;
6344 }
6345 }
6346
6347 nvme_admin_cmd(cmd, args->ica_timeout);
6348
6349 if (!nvme_check_cmd_status_ioctl(cmd, ioc)) {
6350 ret = B_FALSE;
6351 goto free_cmd;
6352 }
6353
6354 args->ica_cdw0 = cmd->nc_cqe.cqe_dw0;
6355
6356 if ((args->ica_dma_flags & DDI_DMA_READ) != 0 &&
6357 ddi_copyout(cmd->nc_dma->nd_memp, args->ica_data,
6358 args->ica_data_len, args->ica_copy_flags) != 0) {
6359 ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6360 goto free_cmd;
6361 }
6362
6363 ret = B_TRUE;
6364 nvme_ioctl_success(ioc);
6365
6366 free_cmd:
6367 nvme_free_cmd(cmd);
6368
6369 return (ret);
6370 }
6371
6372 static int
6373 nvme_ioctl_get_logpage(nvme_minor_t *minor, intptr_t arg, int mode,
6374 cred_t *cred_p)
6375 {
6376 nvme_t *const nvme = minor->nm_ctrl;
6377 void *buf;
6378 nvme_ioctl_get_logpage_t log;
6379 uint_t model;
6380 #ifdef _MULTI_DATAMODEL
6381 nvme_ioctl_get_logpage32_t log32;
6382 #endif
6383
6384 if ((mode & FREAD) == 0) {
6385 return (EBADF);
6386 }
6387
6388 model = ddi_model_convert_from(mode);
6389 switch (model) {
6390 #ifdef _MULTI_DATAMODEL
6391 case DDI_MODEL_ILP32:
6392 bzero(&log, sizeof (log));
6393 if (ddi_copyin((void *)arg, &log32, sizeof (log32),
6394 mode & FKIOCTL) != 0) {
6395 return (EFAULT);
6396 }
6397
6398 log.nigl_common.nioc_nsid = log32.nigl_common.nioc_nsid;
6399 log.nigl_csi = log32.nigl_csi;
6400 log.nigl_lid = log32.nigl_lid;
6401 log.nigl_lsp = log32.nigl_lsp;
6402 log.nigl_len = log32.nigl_len;
6403 log.nigl_offset = log32.nigl_offset;
6404 log.nigl_data = log32.nigl_data;
6405 break;
6406 #endif /* _MULTI_DATAMODEL */
6407 case DDI_MODEL_NONE:
6408 if (ddi_copyin((void *)arg, &log, sizeof (log),
6409 mode & FKIOCTL) != 0) {
6410 return (EFAULT);
6411 }
6412 break;
6413 default:
6414 return (ENOTSUP);
6415 }
6416
6417 /*
6418 * Eventually we'd like to take a soft lock to keep the namespaces from
6419 * changing out from under us during this operation. But we
6420 * haven't implemented that yet.
6421 */
6422 if (!nvme_ioctl_check(minor, &log.nigl_common,
6423 &nvme_check_get_logpage)) {
6424 goto copyout;
6425 }
6426
6427 if (!nvme_validate_logpage(nvme, &log)) {
6428 goto copyout;
6429 }
6430
6431 if (nvme_get_logpage(nvme, B_TRUE, &log, &buf)) {
6432 int copy;
6433
6434 copy = ddi_copyout(buf, (void *)log.nigl_data, log.nigl_len,
6435 mode & FKIOCTL);
6436 kmem_free(buf, log.nigl_len);
6437 if (copy != 0) {
6438 (void) nvme_ioctl_error(&log.nigl_common,
6439 NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
6440 goto copyout;
6441 }
6442
6443 nvme_ioctl_success(&log.nigl_common);
6444 }
6445
6446 copyout:
6447 switch (model) {
6448 #ifdef _MULTI_DATAMODEL
6449 case DDI_MODEL_ILP32:
6450 bzero(&log32, sizeof (log32));
6451
6452 log32.nigl_common = log.nigl_common;
6453 log32.nigl_csi = log.nigl_csi;
6454 log32.nigl_lid = log.nigl_lid;
6455 log32.nigl_lsp = log.nigl_lsp;
6456 log32.nigl_len = log.nigl_len;
6457 log32.nigl_offset = log.nigl_offset;
6458 log32.nigl_data = log.nigl_data;
6459 if (ddi_copyout(&log32, (void *)arg, sizeof (log32),
6460 mode & FKIOCTL) != 0) {
6461 return (EFAULT);
6462 }
6463 break;
6464 #endif /* _MULTI_DATAMODEL */
6465 case DDI_MODEL_NONE:
6466 if (ddi_copyout(&log, (void *)arg, sizeof (log),
6467 mode & FKIOCTL) != 0) {
6468 return (EFAULT);
6469 }
6470 break;
6471 default:
6472 return (ENOTSUP);
6473 }
6474
6475 return (0);
6476 }
6477
6478 static int
6479 nvme_ioctl_get_feature(nvme_minor_t *minor, intptr_t arg, int mode,
6480 cred_t *cred_p)
6481 {
6482 nvme_t *const nvme = minor->nm_ctrl;
6483 nvme_ioctl_get_feature_t feat;
6484 uint_t model;
6485 #ifdef _MULTI_DATAMODEL
6486 nvme_ioctl_get_feature32_t feat32;
6487 #endif
6488 nvme_get_features_dw10_t gf_dw10 = { 0 };
6489 nvme_ioc_cmd_args_t args = { NULL };
6490 nvme_sqe_t sqe = {
6491 .sqe_opc = NVME_OPC_GET_FEATURES
6492 };
6493
6494 if ((mode & FREAD) == 0) {
6495 return (EBADF);
6496 }
6497
6498 model = ddi_model_convert_from(mode);
6499 switch (model) {
6500 #ifdef _MULTI_DATAMODEL
6501 case DDI_MODEL_ILP32:
6502 bzero(&feat, sizeof (feat));
6503 if (ddi_copyin((void *)arg, &feat32, sizeof (feat32),
6504 mode & FKIOCTL) != 0) {
6505 return (EFAULT);
6506 }
6507
6508 feat.nigf_common.nioc_nsid = feat32.nigf_common.nioc_nsid;
6509 feat.nigf_fid = feat32.nigf_fid;
6510 feat.nigf_sel = feat32.nigf_sel;
6511 feat.nigf_cdw11 = feat32.nigf_cdw11;
6512 feat.nigf_data = feat32.nigf_data;
6513 feat.nigf_len = feat32.nigf_len;
6514 break;
6515 #endif /* _MULTI_DATAMODEL */
6516 case DDI_MODEL_NONE:
6517 if (ddi_copyin((void *)arg, &feat, sizeof (feat),
6518 mode & FKIOCTL) != 0) {
6519 return (EFAULT);
6520 }
6521 break;
6522 default:
6523 return (ENOTSUP);
6524 }
6525
6526 if (!nvme_ioctl_check(minor, &feat.nigf_common,
6527 &nvme_check_get_feature)) {
6528 goto copyout;
6529 }
6530
6531 if (!nvme_validate_get_feature(nvme, &feat)) {
6532 goto copyout;
6533 }
6534
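/*
 * Build Get Features dword 10: per the spec the feature identifier
 * occupies bits 7:0 and the select field bits 10:8.
 */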
6535 gf_dw10.b.gt_fid = bitx32(feat.nigf_fid, 7, 0);
6536 gf_dw10.b.gt_sel = bitx32(feat.nigf_sel, 2, 0);
6537 sqe.sqe_cdw10 = gf_dw10.r;
6538 sqe.sqe_cdw11 = feat.nigf_cdw11;
6539 sqe.sqe_nsid = feat.nigf_common.nioc_nsid;
6540
6541 args.ica_sqe = &sqe;
6542 if (feat.nigf_len != 0) {
6543 args.ica_data = (void *)feat.nigf_data;
6544 args.ica_data_len = feat.nigf_len;
6545 args.ica_dma_flags = DDI_DMA_READ;
6546 }
6547 args.ica_copy_flags = mode;
6548 args.ica_timeout = nvme_admin_cmd_timeout;
6549
6550 if (!nvme_ioc_cmd(nvme, &feat.nigf_common, &args)) {
6551 goto copyout;
6552 }
6553
6554 feat.nigf_cdw0 = args.ica_cdw0;
6555
6556 copyout:
6557 switch (model) {
6558 #ifdef _MULTI_DATAMODEL
6559 case DDI_MODEL_ILP32:
6560 bzero(&feat32, sizeof (feat32));
6561
6562 feat32.nigf_common = feat.nigf_common;
6563 feat32.nigf_fid = feat.nigf_fid;
6564 feat32.nigf_sel = feat.nigf_sel;
6565 feat32.nigf_cdw11 = feat.nigf_cdw11;
6566 feat32.nigf_data = feat.nigf_data;
6567 feat32.nigf_len = feat.nigf_len;
6568 feat32.nigf_cdw0 = feat.nigf_cdw0;
6569 if (ddi_copyout(&feat32, (void *)arg, sizeof (feat32),
6570 mode & FKIOCTL) != 0) {
6571 return (EFAULT);
6572 }
6573 break;
6574 #endif /* _MULTI_DATAMODEL */
6575 case DDI_MODEL_NONE:
6576 if (ddi_copyout(&feat, (void *)arg, sizeof (feat),
6577 mode & FKIOCTL) != 0) {
6578 return (EFAULT);
6579 }
6580 break;
6581 default:
6582 return (ENOTSUP);
6583 }
6584
6585 return (0);
6586 }
6587
6588 static int
6589 nvme_ioctl_format(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6590 {
6591 nvme_t *const nvme = minor->nm_ctrl;
6592 nvme_ioctl_format_t ioc;
6593
6594 if ((mode & FWRITE) == 0)
6595 return (EBADF);
6596
6597 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6598 return (EPERM);
6599
6600 if (ddi_copyin((void *)(uintptr_t)arg, &ioc,
6601 sizeof (nvme_ioctl_format_t), mode & FKIOCTL) != 0)
6602 return (EFAULT);
6603
6604 if (!nvme_ioctl_check(minor, &ioc.nif_common, &nvme_check_format)) {
6605 goto copyout;
6606 }
6607
6608 if (!nvme_validate_format(nvme, &ioc)) {
6609 goto copyout;
6610 }
6611
6612 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6613 if (!nvme_no_blkdev_attached(nvme, ioc.nif_common.nioc_nsid)) {
6614 nvme_mgmt_unlock(nvme);
6615 (void) nvme_ioctl_error(&ioc.nif_common,
6616 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
6617 goto copyout;
6618 }
6619
6620 if (nvme_format_nvm(nvme, &ioc)) {
6621 nvme_ioctl_success(&ioc.nif_common);
6622 nvme_rescan_ns(nvme, ioc.nif_common.nioc_nsid);
6623 }
6624 nvme_mgmt_unlock(nvme);
6625
6626 copyout:
6627 if (ddi_copyout(&ioc, (void *)(uintptr_t)arg, sizeof (ioc),
6628 mode & FKIOCTL) != 0) {
6629 return (EFAULT);
6630 }
6631
6632 return (0);
6633 }
6634
6635 static int
6636 nvme_ioctl_detach(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6637 {
6638 nvme_t *const nvme = minor->nm_ctrl;
6639 nvme_ioctl_common_t com;
6640
6641 if ((mode & FWRITE) == 0)
6642 return (EBADF);
6643
6644 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6645 return (EPERM);
6646
6647 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
6648 mode & FKIOCTL) != 0) {
6649 return (EFAULT);
6650 }
6651
6652 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
6653 goto copyout;
6654 }
6655
6656 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6657 if (nvme_detach_ns(nvme, &com)) {
6658 nvme_ioctl_success(&com);
6659 }
6660 nvme_mgmt_unlock(nvme);
6661
6662 copyout:
6663 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
6664 mode & FKIOCTL) != 0) {
6665 return (EFAULT);
6666 }
6667
6668 return (0);
6669 }
6670
6671 static int
6672 nvme_ioctl_attach(nvme_minor_t *minor, intptr_t arg, int mode,
6673 cred_t *cred_p)
6674 {
6675 nvme_t *const nvme = minor->nm_ctrl;
6676 nvme_ioctl_common_t com;
6677 nvme_namespace_t *ns;
6678
6679 if ((mode & FWRITE) == 0)
6680 return (EBADF);
6681
6682 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6683 return (EPERM);
6684
6685 if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
6686 mode & FKIOCTL) != 0) {
6687 return (EFAULT);
6688 }
6689
6690 if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
6691 goto copyout;
6692 }
6693
6694 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
6695 ns = nvme_nsid2ns(nvme, com.nioc_nsid);
6696
6697 /*
6698 * Strictly speaking we shouldn't need to call nvme_init_ns() here as
6699 * we should be properly refreshing the internal state when we are
6700 * issuing commands that change things. However, we opt to still do so
6701 * as a bit of a safety check lest we give the kernel something bad or a
6702 * vendor unique command somehow did something behind our backs.
6703 */
6704 if (!ns->ns_attached) {
6705 (void) nvme_rescan_ns(nvme, com.nioc_nsid);
6706 if (nvme_attach_ns(nvme, &com)) {
6707 nvme_ioctl_success(&com);
6708 }
6709 } else {
6710 nvme_ioctl_success(&com);
6711 }
6712 nvme_mgmt_unlock(nvme);
6713
6714 copyout:
6715 if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
6716 mode & FKIOCTL) != 0) {
6717 return (EFAULT);
6718 }
6719
6720 return (0);
6721 }
6722
6723 static void
6724 nvme_ufm_update(nvme_t *nvme)
6725 {
6726 mutex_enter(&nvme->n_fwslot_mutex);
6727 ddi_ufm_update(nvme->n_ufmh);
6728 if (nvme->n_fwslot != NULL) {
6729 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
6730 nvme->n_fwslot = NULL;
6731 }
6732 mutex_exit(&nvme->n_fwslot_mutex);
6733 }
6734
6735 /*
6736 * Download new firmware to the device's internal staging area. We do not call
6737 * nvme_ufm_update() here because after a firmware download, there has been no
6738 * change to any of the actual persistent firmware data. That requires a
6739 * subsequent ioctl (NVME_IOC_FIRMWARE_COMMIT) to commit the firmware to a slot
6740 * or to activate a slot.
6741 */
6742 static int
6743 nvme_ioctl_firmware_download(nvme_minor_t *minor, intptr_t arg, int mode,
6744 cred_t *cred_p)
6745 {
6746 nvme_t *const nvme = minor->nm_ctrl;
6747 nvme_ioctl_fw_load_t fw;
6748 uint64_t len, maxcopy;
6749 offset_t offset;
6750 uint32_t gran;
6751 nvme_valid_ctrl_data_t data;
6752 uintptr_t buf;
6753 nvme_sqe_t sqe = {
6754 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD
6755 };
6756
6757 if ((mode & FWRITE) == 0)
6758 return (EBADF);
6759
6760 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6761 return (EPERM);
6762
6763 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
6764 mode & FKIOCTL) != 0) {
6765 return (EFAULT);
6766 }
6767
6768 if (!nvme_ioctl_check(minor, &fw.fwl_common, &nvme_check_firmware)) {
6769 goto copyout;
6770 }
6771
6772 if (!nvme_validate_fw_load(nvme, &fw)) {
6773 goto copyout;
6774 }
6775
6776 len = fw.fwl_len;
6777 offset = fw.fwl_off;
6778 buf = fw.fwl_buf;
6779
6780 /*
6781 * We need to determine the minimum and maximum amount of data that we
6782 * will send to the device in a given go. Starting in NVMe 1.3 this must
6783 * be a multiple of the firmware update granularity (FWUG), but must not
6784 * exceed the maximum data transfer that we've set. Many devices don't
6785 * report something here, which means we'll end up getting our default
6786 * value. Our policy is a little simple, but it's basically if the
6787 * maximum data transfer is evenly divided by the granularity, then use
6788 * it. Otherwise we use the granularity itself. The granularity is
6789 * always in page sized units, so trying to find another optimum point
6790 * isn't worth it. If we encounter a contradiction, then we will have to
6791 * error out.
6792 */
6793 data.vcd_vers = &nvme->n_version;
6794 data.vcd_id = nvme->n_idctl;
6795 gran = nvme_fw_load_granularity(&data);
6796
6797 if ((nvme->n_max_data_transfer_size % gran) == 0) {
6798 maxcopy = nvme->n_max_data_transfer_size;
6799 } else if (gran <= nvme->n_max_data_transfer_size) {
6800 maxcopy = gran;
6801 } else {
6802 (void) nvme_ioctl_error(&fw.fwl_common,
6803 NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN, 0, 0);
6804 goto copyout;
6805 }
6806
6807 while (len > 0) {
6808 nvme_ioc_cmd_args_t args = { NULL };
6809 uint64_t copylen = MIN(maxcopy, len);
6810
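/*
 * For Firmware Image Download, cdw10 is the 0-based number of dwords
 * to transfer and cdw11 is the dword offset into the image.
 */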
6811 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
6812 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);
6813
6814 args.ica_sqe = &sqe;
6815 args.ica_data = (void *)buf;
6816 args.ica_data_len = copylen;
6817 args.ica_dma_flags = DDI_DMA_WRITE;
6818 args.ica_copy_flags = mode;
6819 args.ica_timeout = nvme_admin_cmd_timeout;
6820
6821 if (!nvme_ioc_cmd(nvme, &fw.fwl_common, &args)) {
6822 break;
6823 }
6824
6825 buf += copylen;
6826 offset += copylen;
6827 len -= copylen;
6828 }
6829
6830 copyout:
6831 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
6832 mode & FKIOCTL) != 0) {
6833 return (EFAULT);
6834 }
6835
6836 return (0);
6837 }
6838
6839 static int
6840 nvme_ioctl_firmware_commit(nvme_minor_t *minor, intptr_t arg, int mode,
6841 cred_t *cred_p)
6842 {
6843 nvme_t *const nvme = minor->nm_ctrl;
6844 nvme_ioctl_fw_commit_t fw;
6845 nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
6846 nvme_ioc_cmd_args_t args = { NULL };
6847 nvme_sqe_t sqe = {
6848 .sqe_opc = NVME_OPC_FW_ACTIVATE
6849 };
6850
6851 if ((mode & FWRITE) == 0)
6852 return (EBADF);
6853
6854 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
6855 return (EPERM);
6856
6857 if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
6858 mode & FKIOCTL) != 0) {
6859 return (EFAULT);
6860 }
6861
6862 if (!nvme_ioctl_check(minor, &fw.fwc_common, &nvme_check_firmware)) {
6863 goto copyout;
6864 }
6865
6866 if (!nvme_validate_fw_commit(nvme, &fw)) {
6867 goto copyout;
6868 }
6869
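/*
 * The Firmware Commit command packs the target slot (FS) and the commit
 * action (CA) into cdw10; nvme_firmware_commit_dw10_t mirrors that
 * layout.
 */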
6870 fc_dw10.b.fc_slot = fw.fwc_slot;
6871 fc_dw10.b.fc_action = fw.fwc_action;
6872 sqe.sqe_cdw10 = fc_dw10.r;
6873
6874 args.ica_sqe = &sqe;
6875 args.ica_timeout = nvme_commit_save_cmd_timeout;
6876
6877 /*
6878 * There are no conditional actions to take based on this succeeding or
6879 * failing. A failure is recorded in the ioctl structure returned to the
6880 * user.
6881 */
6882 (void) nvme_ioc_cmd(nvme, &fw.fwc_common, &args);
6883
6884 /*
6885 * Let the DDI UFM subsystem know that the firmware information for
6886 * this device has changed. We perform this unconditionally as an
6887 * invalidation doesn't particularly hurt us.
6888 */
6889 nvme_ufm_update(nvme);
6890
6891 copyout:
6892 if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
6893 mode & FKIOCTL) != 0) {
6894 return (EFAULT);
6895 }
6896
6897 return (0);
6898 }
6899
6900 /*
6901 * Helper to copy in a passthru command from userspace, handling
6902 * different data models.
6903 */
6904 static int
6905 nvme_passthru_copyin_cmd(const void *buf, nvme_ioctl_passthru_t *cmd, int mode)
6906 {
6907 switch (ddi_model_convert_from(mode & FMODELS)) {
6908 #ifdef _MULTI_DATAMODEL
6909 case DDI_MODEL_ILP32: {
6910 nvme_ioctl_passthru32_t cmd32;
6911
6912 if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0)
6913 return (EFAULT);
6914
6915 bzero(cmd, sizeof (nvme_ioctl_passthru_t));
6916
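/*
 * The ILP32 structure differs in the width of npc_buflen and npc_buf,
 * so widen each member into the native structure by hand.
 */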
6917 cmd->npc_common.nioc_nsid = cmd32.npc_common.nioc_nsid;
6918 cmd->npc_opcode = cmd32.npc_opcode;
6919 cmd->npc_timeout = cmd32.npc_timeout;
6920 cmd->npc_flags = cmd32.npc_flags;
6921 cmd->npc_impact = cmd32.npc_impact;
6922 cmd->npc_cdw12 = cmd32.npc_cdw12;
6923 cmd->npc_cdw13 = cmd32.npc_cdw13;
6924 cmd->npc_cdw14 = cmd32.npc_cdw14;
6925 cmd->npc_cdw15 = cmd32.npc_cdw15;
6926 cmd->npc_buflen = cmd32.npc_buflen;
6927 cmd->npc_buf = cmd32.npc_buf;
6928 break;
6929 }
6930 #endif /* _MULTI_DATAMODEL */
6931 case DDI_MODEL_NONE:
6932 if (ddi_copyin(buf, (void *)cmd, sizeof (nvme_ioctl_passthru_t),
6933 mode) != 0) {
6934 return (EFAULT);
6935 }
6936 break;
6937 default:
6938 return (ENOTSUP);
6939 }
6940
6941 return (0);
6942 }
6943
6944 /*
6945 * Helper to copy out a passthru command result to userspace, handling
6946 * different data models.
6947 */
6948 static int
6949 nvme_passthru_copyout_cmd(const nvme_ioctl_passthru_t *cmd, void *buf, int mode)
6950 {
6951 switch (ddi_model_convert_from(mode & FMODELS)) {
6952 #ifdef _MULTI_DATAMODEL
6953 case DDI_MODEL_ILP32: {
6954 nvme_ioctl_passthru32_t cmd32;
6955
6956 bzero(&cmd32, sizeof (nvme_ioctl_passthru32_t));
6957
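/*
 * npc_buflen and npc_buf must be explicitly narrowed to their ILP32
 * widths before being copied back out.
 */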
6958 cmd32.npc_common = cmd->npc_common;
6959 cmd32.npc_opcode = cmd->npc_opcode;
6960 cmd32.npc_timeout = cmd->npc_timeout;
6961 cmd32.npc_flags = cmd->npc_flags;
6962 cmd32.npc_impact = cmd->npc_impact;
6963 cmd32.npc_cdw0 = cmd->npc_cdw0;
6964 cmd32.npc_cdw12 = cmd->npc_cdw12;
6965 cmd32.npc_cdw13 = cmd->npc_cdw13;
6966 cmd32.npc_cdw14 = cmd->npc_cdw14;
6967 cmd32.npc_cdw15 = cmd->npc_cdw15;
6968 cmd32.npc_buflen = (size32_t)cmd->npc_buflen;
6969 cmd32.npc_buf = (uintptr32_t)cmd->npc_buf;
6970 if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0)
6971 return (EFAULT);
6972 break;
6973 }
6974 #endif /* _MULTI_DATAMODEL */
6975 case DDI_MODEL_NONE:
6976 if (ddi_copyout(cmd, buf, sizeof (nvme_ioctl_passthru_t),
6977 mode) != 0) {
6978 return (EFAULT);
6979 }
6980 break;
6981 default:
6982 return (ENOTSUP);
6983 }
6984 return (0);
6985 }
6986
6987 /*
6988 * Run an arbitrary vendor-specific admin command on the device.
6989 */
6990 static int
6991 nvme_ioctl_passthru(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
6992 {
6993 nvme_t *const nvme = minor->nm_ctrl;
6994 int rv;
6995 nvme_ioctl_passthru_t pass;
6996 nvme_sqe_t sqe;
6997 nvme_ioc_cmd_args_t args = { NULL };
6998
6999 /*
7000 * Basic checks: permissions, data model, argument size.
7001 */
7002 if ((mode & FWRITE) == 0)
7003 return (EBADF);
7004
7005 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7006 return (EPERM);
7007
7008 if ((rv = nvme_passthru_copyin_cmd((void *)(uintptr_t)arg, &pass,
7009 mode)) != 0) {
7010 return (rv);
7011 }
7012
7013 if (!nvme_ioctl_check(minor, &pass.npc_common, &nvme_check_passthru)) {
7014 goto copyout;
7015 }
7016
7017 if (!nvme_validate_vuc(nvme, &pass)) {
7018 goto copyout;
7019 }
7020
7021 nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
7022 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
7023 /*
7024 * We've been told this command has namespace impact. For now, treat
7025 * that as impacting every namespace until we have more use cases and
7026 * reason to trust the nsid field.
7027 */
7028 if (!nvme_no_blkdev_attached(nvme, NVME_NSID_BCAST)) {
7029 nvme_mgmt_unlock(nvme);
7030 (void) nvme_ioctl_error(&pass.npc_common,
7031 NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
7032 goto copyout;
7033 }
7034 }
7035
7036 bzero(&sqe, sizeof (sqe));
7037
7038 sqe.sqe_opc = pass.npc_opcode;
7039 sqe.sqe_nsid = pass.npc_common.nioc_nsid;
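/*
 * Vendor-specific commands conventionally carry the data transfer
 * length in dwords in cdw10 (NDT), which is what we encode here.
 */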
7040 sqe.sqe_cdw10 = (uint32_t)(pass.npc_buflen >> NVME_DWORD_SHIFT);
7041 sqe.sqe_cdw12 = pass.npc_cdw12;
7042 sqe.sqe_cdw13 = pass.npc_cdw13;
7043 sqe.sqe_cdw14 = pass.npc_cdw14;
7044 sqe.sqe_cdw15 = pass.npc_cdw15;
7045
7046 args.ica_sqe = &sqe;
7047 args.ica_data = (void *)pass.npc_buf;
7048 args.ica_data_len = pass.npc_buflen;
7049 args.ica_copy_flags = mode;
7050 args.ica_timeout = pass.npc_timeout;
7051
7052 if ((pass.npc_flags & NVME_PASSTHRU_READ) != 0)
7053 args.ica_dma_flags |= DDI_DMA_READ;
7054 else if ((pass.npc_flags & NVME_PASSTHRU_WRITE) != 0)
7055 args.ica_dma_flags |= DDI_DMA_WRITE;
7056
7057 if (nvme_ioc_cmd(nvme, &pass.npc_common, &args)) {
7058 pass.npc_cdw0 = args.ica_cdw0;
7059 if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
7060 nvme_rescan_ns(nvme, NVME_NSID_BCAST);
7061 }
7062 }
7063 nvme_mgmt_unlock(nvme);
7064
7065 copyout:
7066 rv = nvme_passthru_copyout_cmd(&pass, (void *)(uintptr_t)arg,
7067 mode);
7068
7069 return (rv);
7070 }
7071
7072 static int
7073 nvme_ioctl_lock(nvme_minor_t *minor, intptr_t arg, int mode,
7074 cred_t *cred_p)
7075 {
7076 nvme_ioctl_lock_t lock;
7077 const nvme_lock_flags_t all_flags = NVME_LOCK_F_DONT_BLOCK;
7078 nvme_t *nvme = minor->nm_ctrl;
7079
7080 if ((mode & FWRITE) == 0)
7081 return (EBADF);
7082
7083 if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
7084 return (EPERM);
7085
7086 if (ddi_copyin((void *)(uintptr_t)arg, &lock, sizeof (lock),
7087 mode & FKIOCTL) != 0) {
7088 return (EFAULT);
7089 }
7090
7091 if (lock.nil_ent != NVME_LOCK_E_CTRL &&
7092 lock.nil_ent != NVME_LOCK_E_NS) {
7093 (void) nvme_ioctl_error(&lock.nil_common,
7094 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
7095 goto copyout;
7096 }
7097
7098 if (lock.nil_level != NVME_LOCK_L_READ &&
7099 lock.nil_level != NVME_LOCK_L_WRITE) {
7100 (void) nvme_ioctl_error(&lock.nil_common,
7101 NVME_IOCTL_E_BAD_LOCK_LEVEL, 0, 0);
7102 goto copyout;
7103 }
7104
7105 if ((lock.nil_flags & ~all_flags) != 0) {
7106 (void) nvme_ioctl_error(&lock.nil_common,
7107 NVME_IOCTL_E_BAD_LOCK_FLAGS, 0, 0);
7108 goto copyout;
7109 }
7110
7111 if (!nvme_ioctl_check(minor, &lock.nil_common, &nvme_check_locking)) {
7112 goto copyout;
7113 }
7114
7115 /*
7116 * If we're on a namespace, confirm that we're not asking for the
7117 * controller.
7118 */
7119 if (lock.nil_common.nioc_nsid != 0 &&
7120 lock.nil_ent == NVME_LOCK_E_CTRL) {
7121 (void) nvme_ioctl_error(&lock.nil_common,
7122 NVME_IOCTL_E_NS_CANNOT_LOCK_CTRL, 0, 0);
7123 goto copyout;
7124 }
7125
7126 /*
7127 * We've reached the point where we can no longer check things without
7128 * serializing state. First, we need to make sure that none of our
7129 * locking invariants are being violated:
7130 *
7131 * 1) The caller isn't already blocking for a lock operation to
7132 * complete.
7133 *
7134 * 2) The caller is attempting to grab a lock that they already have.
7135 * While there are other rule violations that this might create, we opt
7136 * to check this ahead of it so we can have slightly better error
7137 * messages for our callers.
7138 *
7139 * 3) The caller is trying to grab a controller lock, while holding a
7140 * namespace lock.
7141 *
7142 * 4) The caller has a controller write lock and is trying to get a
7143 * namespace lock. For now, we disallow this case. Holding a controller
7144 * read lock is allowed, but the write lock allows you to operate on all
7145 * namespaces anyway. In addition, this simplifies the locking logic;
7146 * however, this constraint may be loosened in the future.
7147 *
7148 * 5) The caller is trying to acquire a second namespace lock when they
7149 * already have one.
7150 */
7151 mutex_enter(&nvme->n_minor_mutex);
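/* (1) Is the caller already blocked waiting for a lock operation? */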
7152 if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_BLOCKED ||
7153 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_BLOCKED) {
7154 (void) nvme_ioctl_error(&lock.nil_common,
7155 NVME_IOCTL_E_LOCK_PENDING, 0, 0);
7156 mutex_exit(&nvme->n_minor_mutex);
7157 goto copyout;
7158 }
7159
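/* (2) Does the caller already hold the lock that they're asking for? */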
7160 if ((lock.nil_ent == NVME_LOCK_E_CTRL &&
7161 minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) ||
7162 (lock.nil_ent == NVME_LOCK_E_NS &&
7163 minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
7164 minor->nm_ns_lock.nli_ns->ns_id == lock.nil_common.nioc_nsid)) {
7165 (void) nvme_ioctl_error(&lock.nil_common,
7166 NVME_IOCTL_E_LOCK_ALREADY_HELD, 0, 0);
7167 mutex_exit(&nvme->n_minor_mutex);
7168 goto copyout;
7169 }
7170
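/* (3) Is a controller lock being requested while a namespace lock is held? */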
7171 if (lock.nil_ent == NVME_LOCK_E_CTRL &&
7172 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
7173 (void) nvme_ioctl_error(&lock.nil_common,
7174 NVME_IOCTL_E_LOCK_NO_CTRL_WITH_NS, 0, 0);
7175 mutex_exit(&nvme->n_minor_mutex);
7176 goto copyout;
7177 }
7178
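/* (4) Is a namespace lock being requested while the controller write lock is held? */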
7179 if (lock.nil_ent == NVME_LOCK_E_NS &&
7180 (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
7181 minor->nm_ctrl_lock.nli_curlevel == NVME_LOCK_L_WRITE)) {
7182 (void) nvme_ioctl_error(&lock.nil_common,
7183 NVME_IOCTL_LOCK_NO_NS_WITH_CTRL_WRLOCK, 0, 0);
7184 mutex_exit(&nvme->n_minor_mutex);
7185 goto copyout;
7186 }
7187
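/* (5) Is a second namespace lock being requested? */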
7188 if (lock.nil_ent == NVME_LOCK_E_NS &&
7189 minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
7190 (void) nvme_ioctl_error(&lock.nil_common,
7191 NVME_IOCTL_E_LOCK_NO_2ND_NS, 0, 0);
7192 mutex_exit(&nvme->n_minor_mutex);
7193 goto copyout;
7194 }
7195
7196 #ifdef DEBUG
7197 /*
7198 * This is a big block of sanity checks to verify that the minor's
7199 * lock-tracking state is consistent before we attempt to take the lock.
7200 */
7201 if (lock.nil_ent == NVME_LOCK_E_NS) {
7202 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
7203 ASSERT3U(minor->nm_ns_lock.nli_state, ==,
7204 NVME_LOCK_STATE_UNLOCKED);
7205 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
7206 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7207
7208 if (minor->nm_ns != NULL) {
7209 ASSERT3U(minor->nm_ns->ns_id, ==,
7210 lock.nil_common.nioc_nsid);
7211 }
7212
7213 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
7214 } else {
7215 ASSERT3P(minor->nm_ctrl_lock.nli_lock, ==, NULL);
7216 ASSERT3U(minor->nm_ctrl_lock.nli_state, ==,
7217 NVME_LOCK_STATE_UNLOCKED);
7218 ASSERT3U(minor->nm_ctrl_lock.nli_curlevel, ==, 0);
7219 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7220 ASSERT0(list_link_active(&minor->nm_ctrl_lock.nli_node));
7221
7222 ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
7223 ASSERT3U(minor->nm_ns_lock.nli_state, ==,
7224 NVME_LOCK_STATE_UNLOCKED);
7225 ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
7226 ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
7227 ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
7228 }
7229 #endif /* DEBUG */
7230
7231 /*
7232 * At this point we should actually attempt a locking operation.
7233 */
7234 nvme_rwlock(minor, &lock);
7235 mutex_exit(&nvme->n_minor_mutex);
7236
7237 copyout:
7238 if (ddi_copyout(&lock, (void *)(uintptr_t)arg, sizeof (lock),
7239 mode & FKIOCTL) != 0) {
7240 return (EFAULT);
7241 }
7242
7243 return (0);
7244 }
7245
7246 static int
7247 nvme_ioctl_unlock(nvme_minor_t *minor, intptr_t arg, int mode,
7248 cred_t *cred_p)
7249 {
7250 nvme_ioctl_unlock_t unlock;
7251 nvme_t *const nvme = minor->nm_ctrl;
7252 boolean_t is_ctrl;
7253 nvme_lock_t *lock;
7254 nvme_minor_lock_info_t *info;
7255
7256 /*
7257 * Note, we explicitly don't check for privileges for unlock. The idea
7258 * being that if you have the lock, that's what matters. If you don't
7259 * have the lock, it doesn't matter what privileges that you have at
7260 * all.
7261 */
7262 if ((mode & FWRITE) == 0)
7263 return (EBADF);
7264
7265 if (ddi_copyin((void *)(uintptr_t)arg, &unlock, sizeof (unlock),
7266 mode & FKIOCTL) != 0) {
7267 return (EFAULT);
7268 }
7269
7270 if (unlock.niu_ent != NVME_LOCK_E_CTRL &&
7271 unlock.niu_ent != NVME_LOCK_E_NS) {
7272 (void) nvme_ioctl_error(&unlock.niu_common,
7273 NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
7274 goto copyout;
7275 }
7276
7277 if (!nvme_ioctl_check(minor, &unlock.niu_common, &nvme_check_locking)) {
7278 goto copyout;
7279 }
7280
7281 /*
7282 * If we're on a namespace, confirm that we're not asking for the
7283 * controller.
7284 */
7285 if (unlock.niu_common.nioc_nsid != 0 &&
7286 unlock.niu_ent == NVME_LOCK_E_CTRL) {
7287 (void) nvme_ioctl_error(&unlock.niu_common,
7288 NVME_IOCTL_E_NS_CANNOT_UNLOCK_CTRL, 0, 0);
7289 goto copyout;
7290 }
7291
7292 mutex_enter(&nvme->n_minor_mutex);
7293 if (unlock.niu_ent == NVME_LOCK_E_CTRL) {
7294 if (minor->nm_ctrl_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7295 mutex_exit(&nvme->n_minor_mutex);
7296 (void) nvme_ioctl_error(&unlock.niu_common,
7297 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7298 goto copyout;
7299 }
7300 } else {
7301 if (minor->nm_ns_lock.nli_ns == NULL) {
7302 mutex_exit(&nvme->n_minor_mutex);
7303 (void) nvme_ioctl_error(&unlock.niu_common,
7304 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7305 goto copyout;
7306 }
7307
7308 /*
7309 * Check that our unlock request corresponds to the namespace ID
7310 * that is currently locked. A mismatch can happen if we're using
7311 * the controller node and a valid, but not locked, namespace ID
7312 * was specified.
7313 */
7314 if (minor->nm_ns_lock.nli_ns->ns_id !=
7315 unlock.niu_common.nioc_nsid) {
7316 mutex_exit(&nvme->n_minor_mutex);
7317 ASSERT3P(minor->nm_ns, ==, NULL);
7318 (void) nvme_ioctl_error(&unlock.niu_common,
7319 NVME_IOCTL_E_LOCK_WRONG_NS, 0, 0);
7320 goto copyout;
7321 }
7322
7323 if (minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
7324 mutex_exit(&nvme->n_minor_mutex);
7325 (void) nvme_ioctl_error(&unlock.niu_common,
7326 NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
7327 goto copyout;
7328 }
7329 }
7330
7331 /*
7332 * Finally, perform the unlock.
7333 */
7334 is_ctrl = unlock.niu_ent == NVME_LOCK_E_CTRL;
7335 if (is_ctrl) {
7336 lock = &nvme->n_lock;
7337 info = &minor->nm_ctrl_lock;
7338 } else {
7339 nvme_namespace_t *ns;
7340 const uint32_t nsid = unlock.niu_common.nioc_nsid;
7341
7342 ns = nvme_nsid2ns(nvme, nsid);
7343 lock = &ns->ns_lock;
7344 info = &minor->nm_ns_lock;
7345 VERIFY3P(ns, ==, info->nli_ns);
7346 }
7347 nvme_rwunlock(info, lock);
7348 mutex_exit(&nvme->n_minor_mutex);
7349 nvme_ioctl_success(&unlock.niu_common);
7350
7351 copyout:
7352 if (ddi_copyout(&unlock, (void *)(uintptr_t)arg, sizeof (unlock),
7353 mode & FKIOCTL) != 0) {
7354 return (EFAULT);
7355 }
7356
7357 return (0);
7358 }
7359
7360 static int
7361 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
7362 int *rval_p)
7363 {
7364 #ifndef __lock_lint
7365 _NOTE(ARGUNUSED(rval_p));
7366 #endif
7367 nvme_minor_t *minor;
7368 nvme_t *nvme;
7369
7370 minor = nvme_minor_find_by_dev(dev);
7371 if (minor == NULL) {
7372 return (ENXIO);
7373 }
7374
7375 nvme = minor->nm_ctrl;
7376 if (nvme == NULL)
7377 return (ENXIO);
7378
7379 if (IS_DEVCTL(cmd))
7380 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));
7381
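/*
 * A dead controller still honors NVME_IOC_DETACH and NVME_IOC_UNLOCK so
 * that consumers can tear down blkdev instances and release any locks
 * they hold; all other NVMe ioctls return the recorded death status,
 * while non-NVMe ioctls simply get EIO.
 */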
7382 if (nvme->n_dead && (cmd != NVME_IOC_DETACH && cmd !=
7383 NVME_IOC_UNLOCK)) {
7384 if (IS_NVME_IOC(cmd) == 0) {
7385 return (EIO);
7386 }
7387
7388 return (nvme_ioctl_copyout_error(nvme->n_dead_status, arg,
7389 mode));
7390 }
7391
7392 /*
7393 * ioctls that are no longer using the original ioctl structure.
7394 */
7395 switch (cmd) {
7396 case NVME_IOC_CTRL_INFO:
7397 return (nvme_ioctl_ctrl_info(minor, arg, mode, cred_p));
7398 case NVME_IOC_IDENTIFY:
7399 return (nvme_ioctl_identify(minor, arg, mode, cred_p));
7400 case NVME_IOC_GET_LOGPAGE:
7401 return (nvme_ioctl_get_logpage(minor, arg, mode, cred_p));
7402 case NVME_IOC_GET_FEATURE:
7403 return (nvme_ioctl_get_feature(minor, arg, mode, cred_p));
7404 case NVME_IOC_DETACH:
7405 return (nvme_ioctl_detach(minor, arg, mode, cred_p));
7406 case NVME_IOC_ATTACH:
7407 return (nvme_ioctl_attach(minor, arg, mode, cred_p));
7408 case NVME_IOC_FORMAT:
7409 return (nvme_ioctl_format(minor, arg, mode, cred_p));
7410 case NVME_IOC_FIRMWARE_DOWNLOAD:
7411 return (nvme_ioctl_firmware_download(minor, arg, mode,
7412 cred_p));
7413 case NVME_IOC_FIRMWARE_COMMIT:
7414 return (nvme_ioctl_firmware_commit(minor, arg, mode,
7415 cred_p));
7416 case NVME_IOC_NS_INFO:
7417 return (nvme_ioctl_ns_info(minor, arg, mode, cred_p));
7418 case NVME_IOC_PASSTHRU:
7419 return (nvme_ioctl_passthru(minor, arg, mode, cred_p));
7420 case NVME_IOC_LOCK:
7421 return (nvme_ioctl_lock(minor, arg, mode, cred_p));
7422 case NVME_IOC_UNLOCK:
7423 return (nvme_ioctl_unlock(minor, arg, mode, cred_p));
7424 default:
7425 return (ENOTTY);
7426 }
7427 }
7428
7429 /*
7430 * DDI UFM Callbacks
7431 */
7432 static int
7433 nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
7434 ddi_ufm_image_t *img)
7435 {
7436 nvme_t *nvme = arg;
7437
7438 if (imgno != 0)
7439 return (EINVAL);
7440
7441 ddi_ufm_image_set_desc(img, "Firmware");
7442 ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);
7443
7444 return (0);
7445 }
7446
7447 /*
7448 * Fill out firmware slot information for the requested slot. The firmware
7449 * slot information is gathered by requesting the Firmware Slot Information log
7450 * page. The format of the page is described in section 5.10.1.3.
7451 *
7452 * We lazily cache the log page on the first call and then invalidate the cache
7453 * data after a successful firmware download or firmware commit command.
7454 * The cached data is protected by a mutex as the state can change
7455 * asynchronously to this callback.
7456 */
7457 static int
7458 nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
7459 uint_t slotno, ddi_ufm_slot_t *slot)
7460 {
7461 nvme_t *nvme = arg;
7462 void *log = NULL;
7463 size_t bufsize;
7464 ddi_ufm_attr_t attr = 0;
7465 char fw_ver[NVME_FWVER_SZ + 1];
7466
7467 if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
7468 return (EINVAL);
7469
7470 mutex_enter(&nvme->n_fwslot_mutex);
7471 if (nvme->n_fwslot == NULL) {
7472 if (!nvme_get_logpage_int(nvme, B_TRUE, &log, &bufsize,
7473 NVME_LOGPAGE_FWSLOT) ||
7474 bufsize != sizeof (nvme_fwslot_log_t)) {
7475 if (log != NULL)
7476 kmem_free(log, bufsize);
7477 mutex_exit(&nvme->n_fwslot_mutex);
7478 return (EIO);
7479 }
7480 nvme->n_fwslot = (nvme_fwslot_log_t *)log;
7481 }
7482
7483 /*
7484 * NVMe numbers firmware slots starting at 1
7485 */
7486 if (slotno == (nvme->n_fwslot->fw_afi - 1))
7487 attr |= DDI_UFM_ATTR_ACTIVE;
7488
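/*
 * Slot 1 may be read-only, as indicated by the FRMW field of the
 * Identify Controller data; any other slot is always writeable.
 */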
7489 if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
7490 attr |= DDI_UFM_ATTR_WRITEABLE;
7491
7492 if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
7493 attr |= DDI_UFM_ATTR_EMPTY;
7494 } else {
7495 (void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
7496 NVME_FWVER_SZ);
7497 fw_ver[NVME_FWVER_SZ] = '\0';
7498 ddi_ufm_slot_set_version(slot, fw_ver);
7499 }
7500 mutex_exit(&nvme->n_fwslot_mutex);
7501
7502 ddi_ufm_slot_set_attrs(slot, attr);
7503
7504 return (0);
7505 }
7506
7507 static int
7508 nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
7509 {
7510 *caps = DDI_UFM_CAP_REPORT;
7511 return (0);
7512 }
7513
7514 boolean_t
7515 nvme_ctrl_atleast(nvme_t *nvme, const nvme_version_t *min)
7516 {
7517 return (nvme_vers_atleast(&nvme->n_version, min) ? B_TRUE : B_FALSE);
7518 }
7519