xref: /dragonfly/sys/dev/disk/nvme/nvme_chipset.h (revision a4fe36f1)
1 /*
2  * Copyright (c) 2016 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /*
36  * Misc limits
37  */
38 #define NVME_MAX_ADMIN_BUFFER	4096U
39 
40 /*
41  * NVME chipset register and structural definitions
42  *
43  * NOTE! Firmware related commands and responses are in nvme_fw.h
44  */
45 #define NVME_REG_CAP		0x0000	/* Capabilities */
46 #define NVME_REG_VERS		0x0008	/* Version */
47 #define NVME_REG_INTSET		0x000C	/* Set interrupt mask bits */
48 #define NVME_REG_INTCLR		0x0010	/* Clr interrupt mask bits */
49 #define NVME_REG_CONFIG		0x0014	/* Configuration */
50 #define NVME_REG_RESERVED_18	0x0018
51 #define NVME_REG_STATUS		0x001C
52 #define NVME_REG_SUBRESET	0x0020
53 #define NVME_REG_ADM_ATTR	0x0024
54 #define NVME_REG_ADM_SUBADR	0x0028
55 #define NVME_REG_ADM_COMADR	0x0030
56 #define NVME_REG_MEMBUF		0x0038
57 #define NVME_REG_MEMSIZE	0x003C
58 #define NVME_REG_RESERVED_40	0x0040
59 #define NVME_REG_CSS		0x0F00	/* Command-set specific area */
60 
61 /*
62  * Doorbell area for queues.  Queue 0 is the admin queue.  The number of
63  * queues supported is specified in the capabilities.
64  */
65 #define NVME_REG_SUBQ_BELL(n, dstrd4)	(0x1000 + ((n) * 2 + 0) * (dstrd4))
66 #define NVME_REG_COMQ_BELL(n, dstrd4)	(0x1000 + ((n) * 2 + 1) * (dstrd4))
67 
68 /*
69  * NVME_REG_CAP		- Capabilities (64 bits)
70  *
71  * DSTRD - Doorbell stride (stride = 4 << DSTRD bytes, so 0=4 bytes)
72  * AMS  - Arbitration mechanisms supported.  WRRUP means weighted round robin
73  *	  with urgent priority feature.
74  * CQR  - Indicates that submission and completion queues must be physically
75  *	  contiguous.
76  * MQES	- Maximum queue entries (0 means a maximum of 1, 1 is 2, etc)
77  */
78 #define NVME_CAP_CSS_63		(0x8000000000000000LLU)
79 #define NVME_CAP_CSS_62		(0x4000000000000000LLU)
80 #define NVME_CAP_CSS_61		(0x2000000000000000LLU)
81 #define NVME_CAP_CSS_60		(0x1000000000000000LLU)
82 #define NVME_CAP_CSS_59		(0x0800000000000000LLU)
83 #define NVME_CAP_CSS_58		(0x0400000000000000LLU)
84 #define NVME_CAP_CSS_57		(0x0200000000000000LLU)
85 #define NVME_CAP_CSS_56		(0x0100000000000000LLU)
86 #define NVME_CAP_MEMPG_MAX	(0x00F0000000000000LLU)
87 #define NVME_CAP_MEMPG_MIN	(0x000F000000000000LLU)
88 #define NVME_CAP_RESERVED_47	(0x0000800000000000LLU)
89 #define NVME_CAP_RESERVED_46	(0x0000400000000000LLU)
90 #define NVME_CAP_RESERVED_45	(0x0000200000000000LLU)
91 #define NVME_CAP_CSS_44		(0x0000100000000000LLU)
92 #define NVME_CAP_CSS_43		(0x0000080000000000LLU)
93 #define NVME_CAP_CSS_42		(0x0000040000000000LLU)
94 #define NVME_CAP_CSS_41		(0x0000020000000000LLU)
95 #define NVME_CAP_CSS_40		(0x0000010000000000LLU)
96 #define NVME_CAP_CSS_39		(0x0000008000000000LLU)
97 #define NVME_CAP_CSS_38		(0x0000004000000000LLU)
98 #define NVME_CAP_CSS_NVM	(0x0000002000000000LLU)
99 #define NVME_CAP_SUBRESET	(0x0000001000000000LLU)
100 #define NVME_CAP_DSTRD_MASK	(0x0000000F00000000LLU)
101 #define NVME_CAP_TIMEOUT	(0x00000000FF000000LLU)
102 #define NVME_CAP_RESERVED_19	(0x0000000000F80000LLU)
103 #define NVME_CAP_AMS_VENDOR	(0x0000000000040000LLU)
104 #define NVME_CAP_AMS_WRRUP	(0x0000000000020000LLU)
105 #define NVME_CAP_CQR		(0x0000000000010000LLU)
106 #define NVME_CAP_MQES		(0x000000000000FFFFLLU)
107 
108 #define NVME_CAP_MEMPG_MAX_GET(data)	\
109 		(1 << (12 + (((data) & NVME_CAP_MEMPG_MAX) >> 52)))
110 #define NVME_CAP_MEMPG_MIN_GET(data)	\
111 		(1 << (12 + (((data) & NVME_CAP_MEMPG_MIN) >> 48)))
112 #define NVME_CAP_DSTRD_GET(data)	\
113 		(4 << ((((data) & NVME_CAP_DSTRD_MASK) >> 32)))
114 #define NVME_CAP_TIMEOUT_GET(data)	\
115 		(((data) & NVME_CAP_TIMEOUT) >> 24)	/* 500ms incr */
116 #define NVME_CAP_MQES_GET(data)		\
117 		(((data) & NVME_CAP_MQES) + 1)		/* 0-based 0=1, min 2 */
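
/*
 * Illustrative sketch (not part of the original header): decoding CAP with
 * the accessor macros above and computing a submission doorbell offset.
 * The function name is hypothetical.
 */
#if 0
static __inline uint32_t
nvme_subq_doorbell_offset_example(uint64_t cap, uint32_t qid)
{
	uint32_t dstrd4 = NVME_CAP_DSTRD_GET(cap);	/* stride in bytes */

	/*
	 * NVME_CAP_MQES_GET(cap) and NVME_CAP_TIMEOUT_GET(cap) decode the
	 * same way (queue depth and ready timeout in 500ms increments,
	 * respectively).
	 */
	return (NVME_REG_SUBQ_BELL(qid, dstrd4));
}
#endif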
118 
119 /*
120  * NVME_REG_VERS	- Version register (32 bits)
121  *
122  * The fields map directly (no bias), e.g. version '1.2' has MAJOR=1, MINOR=2
123  */
124 #define NVME_VERS_MAJOR		(0xFFFF0000U)
125 #define NVME_VERS_MINOR		(0x0000FF00U)
126 #define NVME_VERS_RESERVED_00	(0x00000000U)
127 
128 #define NVME_VERS_MAJOR_GET(data)	\
129 		(((data) & NVME_VERS_MAJOR) >> 16)
130 #define NVME_VERS_MINOR_GET(data)	\
131 		(((data) & NVME_VERS_MINOR) >> 8)
132 
133 /*
134  * NVME_REG_INTSET	(32 bits)
135  * NVME_REG_INTCLR	(32 bits)
136  *
137  * Set and clear interrupt mask bits (up to 32 interrupt sources).
138  * Writing a 1 to the appropriate bits in the appropriate register sets
139  * or clears that interrupt source.  Writing a 0 has no effect.  Reading
140  * either register returns the current interrupt mask.
141  *
142  * Setting an interrupt mask bit via INTSET masks the interrupt so it
143  * cannot occur.
144  */
145 
146 /*
147  * NVME_REG_CONFIG	(32 bits)
148  *
149  * Controller configuration, set by the host prior to enabling the
150  * controller.
151  *
152  * IOCOM_ES	- I/O completion queue entry size, 1<<n
153  * IOSUB_ES	- I/O submission queue entry size, 1<<n
154  * SHUT*	- Set while controller enabled to indicate shutdown pending.
155  *
156  * ENABLE (EN):
157  *	Works as expected.  On the 1->0 transition the controller state
158  *	is reset except for PCI registers and the Admin Queue registers.
159  *	When clearing to 0, poll the CSTS RDY bit until it becomes 0.
160  *	Similarly, when enabling EN, poll CSTS RDY until it becomes 1.
161  */
162 #define NVME_CONFIG_RESERVED_24	0xFF000000U
163 #define NVME_CONFIG_IOCOM_ES	0x00F00000U
164 #define NVME_CONFIG_IOSUB_ES	0x000F0000U
165 
166 #define NVME_CONFIG_IOCOM_ES_SET(pwr)	((pwr) << 20)
167 #define NVME_CONFIG_IOSUB_ES_SET(pwr)	((pwr) << 16)
168 
169 #define NVME_CONFIG_SHUT_MASK	0x0000C000U
170 #define NVME_CONFIG_SHUT_NONE	0x00000000U
171 #define NVME_CONFIG_SHUT_NORM	0x00004000U	/* normal shutdown */
172 #define NVME_CONFIG_SHUT_EMERG	0x00008000U	/* emergency shutdown */
173 #define NVME_CONFIG_SHUT_RES3	0x0000C000U
174 
175 #define NVME_CONFIG_AMS_DEF	0x00000000U	/* default (round-robin) */
176 #define NVME_CONFIG_AMS_WRRUP	0x00000800U	/* weighted round-robin */
177 #define NVME_CONFIG_AMS_2	0x00001000U
178 #define NVME_CONFIG_AMS_3	0x00001800U
179 #define NVME_CONFIG_AMS_4	0x00002000U
180 #define NVME_CONFIG_AMS_5	0x00002800U
181 #define NVME_CONFIG_AMS_6	0x00003000U
182 #define NVME_CONFIG_AMS_VENDOR	0x00003800U
183 
184 #define NVME_CONFIG_MEMPG	0x00000780U	/* MPS register */
185 #define NVME_CONFIG_CSS_MASK	0x00000070U
186 #define NVME_CONFIG_3		0x00000008U
187 #define NVME_CONFIG_2		0x00000004U
188 #define NVME_CONFIG_1		0x00000002U
189 #define NVME_CONFIG_EN		0x00000001U
190 
191 #define NVME_CONFIG_CSS_NVM	(0U << 4)	/* NVM command set */
192 #define NVME_CONFIG_CSS_1	(1U << 4)	/* these are reserved */
193 #define NVME_CONFIG_CSS_2	(2U << 4)
194 #define NVME_CONFIG_CSS_3	(3U << 4)
195 #define NVME_CONFIG_CSS_4	(4U << 4)
196 #define NVME_CONFIG_CSS_5	(5U << 4)
197 #define NVME_CONFIG_CSS_6	(6U << 4)
198 #define NVME_CONFIG_CSS_7	(7U << 4)
199 
200 #define NVME_CONFIG_MEMPG_SET(pwr)	\
201 		(uint32_t)(((pwr) - 12) << 7)
202 
203 
204 /*
205  * NVME_REG_STATUS	(32 bits)
206  *
207  * PAUSED	- Set to 1 if the controller is paused, 0 if normal operation.
208  * SUBRESET	- Set to 1 if a subsystem reset occurred (if available).
209  * SHUT*	- Shutdown state for poller
210  * FATAL	- Indicates a fatal controller error occurred.
211  * RDY		- Controller ready/disable response to ENABLE.
212  */
213 #define NVME_STATUS_RESERVED	0xFFFFFFC0U
214 #define NVME_STATUS_PAUSED	0x00000020U
215 #define NVME_STATUS_SUBRESET	0x00000010U
216 #define NVME_STATUS_SHUT_MASK	0x0000000CU
217 #define NVME_STATUS_FATAL	0x00000002U
218 #define NVME_STATUS_RDY		0x00000001U
219 
220 #define NVME_STATUS_SHUT_NORM	0x00000000U
221 #define NVME_STATUS_SHUT_INPROG	0x00000004U
222 #define NVME_STATUS_SHUT_DONE	0x00000008U
223 #define NVME_STATUS_SHUT_11	0x0000000CU
224 
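/*
 * Illustrative sketch (not part of the original header) of the EN/RDY
 * handshake described in the NVME_REG_CONFIG notes above.  The regrd/regwr
 * callbacks are hypothetical MMIO accessors supplied by the caller; a real
 * driver would also delay between polls and scale the retry count by the
 * CAP timeout field.
 */
#if 0
static __inline int
nvme_set_enable_example(uint32_t (*regrd)(uint32_t reg),
			void (*regwr)(uint32_t reg, uint32_t data),
			int enable)
{
	uint32_t cfg;
	uint32_t sts;
	int tries = 1000;

	cfg = regrd(NVME_REG_CONFIG);
	if (enable)
		cfg |= NVME_CONFIG_EN;
	else
		cfg &= ~NVME_CONFIG_EN;
	regwr(NVME_REG_CONFIG, cfg);

	/* poll CSTS.RDY until it matches the requested EN state */
	while (tries--) {
		sts = regrd(NVME_REG_STATUS);
		if (!!(sts & NVME_STATUS_RDY) == !!enable)
			return (0);
		if (sts & NVME_STATUS_FATAL)
			break;
	}
	return (-1);
}
#endif
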
225 /*
226  * NVME_REG_SUBRESET
227  *
228  * Allows for the initiation of a subsystem reset, if available (see caps).
229  * Writing the value 0x4E564D65 ('NVMe') initiates the reset.
230  */
231 
232 /*
233  * NVME_REG_ADM_ATTR
234  *
235  * Specifies the completion and submission queue size in #entries.  Values
236  * between 2 and 4096 are valid.  COM and SUB are 0's based values (0=1).
237  */
238 #define NVME_ATTR_RESERVED_31	0x80000000U
239 #define NVME_ATTR_RESERVED_30	0x40000000U
240 #define NVME_ATTR_RESERVED_29	0x20000000U
241 #define NVME_ATTR_RESERVED_28	0x10000000U
242 #define NVME_ATTR_COM		0x0FFF0000U
243 #define NVME_ATTR_RESERVED_15	0x00008000U
244 #define NVME_ATTR_RESERVED_14	0x00004000U
245 #define NVME_ATTR_RESERVED_13	0x00002000U
246 #define NVME_ATTR_RESERVED_12	0x00001000U
247 #define NVME_ATTR_SUB		0x00000FFFU
248 
249 #define NVME_ATTR_COM_SET(nqe)	((((nqe) - 1) << 16) & NVME_ATTR_COM)
250 #define NVME_ATTR_SUB_SET(nqe)	(((nqe) - 1) & NVME_ATTR_SUB)
251 
252 /*
253  * NVME_REG_ADM_SUBADR (64 bits)
254  * NVME_REG_ADM_COMADR (64 bits)
255  *
256  * Specify the admin submission and completion queue physical base
257  * address.  These are 64-bit addresses, 4K aligned.  Bits 11:00 must
258  * be 0.
259  */
260 
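/*
 * Illustrative sketch (not part of the original header): programming the
 * admin queue registers before setting CONFIG.EN.  The regwr4/regwr8
 * callbacks are hypothetical MMIO accessors; both queue base addresses
 * must be physical and 4K-aligned as noted above.
 */
#if 0
static __inline void
nvme_admin_queue_setup_example(void (*regwr4)(uint32_t reg, uint32_t data),
			       void (*regwr8)(uint32_t reg, uint64_t data),
			       uint64_t subq_paddr, uint64_t comq_paddr,
			       uint32_t nqe)
{
	regwr4(NVME_REG_ADM_ATTR,
	       NVME_ATTR_COM_SET(nqe) | NVME_ATTR_SUB_SET(nqe));
	regwr8(NVME_REG_ADM_SUBADR, subq_paddr);	/* bits 11:0 zero */
	regwr8(NVME_REG_ADM_COMADR, comq_paddr);	/* bits 11:0 zero */
}
#endif
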
261 /*
262  * NVME_REG_MEMBUF	(RO, 32 bits)
263  * NVME_REG_MEMSIZE	(RO, 32 bits)
264  *
265  * These are optional registers which specify the location and extent
266  * of the controller memory buffer.  The offset is specified in multiples
267  * of CMBSZ and must be 4K aligned.  The BIR tells us which BAR controls
268  * MEMBUF/MEMSIZE.
269  *
270  * The SIZE field in MEMSIZE is in multiple of the UNITS field.
271  *
272  * WDS - If set to 1, data and meta-data for commands may be placed in
273  *	 the memory buffer.
274  * RDS - (same)
275  * LISTS - PRP and SGL lists can be in controller memory.
276  * CQS - completion queues can be in controller memory.
277  * SQS - submission queues can be in controller memory.
278  *
279  * Implementation note: We can ignore this entire register.  We will always
280  * use host memory for data and meta-data transfers.
281  */
282 #define NVME_MEMBUF_OFFSET	0xFFFFF000U	/* offset, in MEMSIZE units */
283 #define NVME_MEMBUF_BAR		0x00000007U	/* 0, 2, 3, 4, or 5 only */
284 
285 #define NVME_MEMSIZE_SIZE	0xFFFFF000U
286 #define NVME_MEMSIZE_UNITS_MASK	0x00000F00U
287 #define NVME_MEMSIZE_UNITS_4K	0x00000000U
288 #define NVME_MEMSIZE_UNITS_64K	0x00000100U
289 #define NVME_MEMSIZE_UNITS_1M	0x00000200U
290 #define NVME_MEMSIZE_UNITS_16M	0x00000300U
291 #define NVME_MEMSIZE_UNITS_256M	0x00000400U
292 #define NVME_MEMSIZE_UNITS_4G	0x00000500U
293 #define NVME_MEMSIZE_UNITS_64G	0x00000600U
294 				/* 7-F are reserved */
295 
296 #define NVME_MEMSIZE_WDS	0x00000010U
297 #define NVME_MEMSIZE_RDS	0x00000008U
298 #define NVME_MEMSIZE_LISTS	0x00000004U
299 #define NVME_MEMSIZE_CQS	0x00000002U
300 #define NVME_MEMSIZE_SQS	0x00000001U
301 
302 /*
303  * NVME_REG_SUBQ*_BELL		Submission queue doorbell register (WO)
304  * NVME_REG_COMQ*_BELL		Completion queue doorbell register (WO)
305  *
306  * The low 16 bits specify the index of the submission queue tail or
307  * completion queue head.  Only write to this register, do not read.
308  * Writing to the register passes control of the related command block(s)
309  * to/from the controller.  For example, if the submission queue is
310  * at index 4 and empty the host can setup N command blocks and then
311  * doorbell 4+N.  Each command block does not need to be independently
312  * doorbelled.  The doorbell is managing a circular queue.
313  *
314  * NOTE: A full queue is N-1 entries.  The last entry cannot be in-play
315  *	 to distinguish between an empty queue and a full queue.
316  */
317 #define NVME_BELL_MASK		0x0000FFFFU
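
/*
 * Illustrative sketch (not part of the original header): ringing the
 * submission doorbell after queueing one or more commands.  regwr is a
 * hypothetical MMIO accessor, dstrd4 comes from NVME_CAP_DSTRD_GET(), and
 * new_tail is the tail index after the new entries, modulo the queue size.
 */
#if 0
static __inline void
nvme_ring_subq_example(void (*regwr)(uint32_t reg, uint32_t data),
		       uint32_t qid, uint32_t dstrd4, uint32_t new_tail)
{
	regwr(NVME_REG_SUBQ_BELL(qid, dstrd4), new_tail & NVME_BELL_MASK);
}
#endif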
318 
319 /*
320  * Submission Queue Entry Header (40 bytes, full entry is 64 bytes)
321  *
322  * This is just the header for the entry, see the opcode section later
323  * on for the full entry structure (which we will define as a union).
324  *
325  * NOTE: prp1/prp2 format depends on config and use cases also depend on
326  *	 the command.
327  *
328  *	 PRP - Basically a 4-byte-aligned 64-bit address.  The first PRP
329  *	       can offset into a page, subsequent PRPs must be page-aligned.
330  *	       If pointing to a PRP list, must be 8-byte aligned and each
331  *	       PRP in the PRP list must be page-aligned.
332  *
333  * NOTE: For optional admin and nvm vendor specific commands
334  *
335  * NOTE: PRP data target vs PRP to PRP list.  Typically prp1 represents
336  *	 the starting point in the target data space and prp2, if needed,
337  *	 is a PRP list entry.  In our driver implementation we limit the
338  *	 data transfer size such that we do not have to chain PRP lists.
339  *	 That is, 4096 / 8 = 512 x 4K pages (2MB).  So 2MB is the maximum
340  *	 transfer size we will support.
341  */
342 typedef struct {
343 #if _BYTE_ORDER == _LITTLE_ENDIAN
344 	uint8_t	opcode;
345 	uint8_t	flags;
346 	uint16_t cid;
347 #else
348 	uint16_t cid;
349 	uint8_t	flags;
350 	uint8_t	opcode;
351 #endif
352 	uint32_t nsid;		/* namespace id. 0=not used, -1=apply all */
353 	uint32_t dw2;
354 	uint32_t dw3;
355 	uint64_t mptr;
356 	uint64_t prp1;		/* prp or sgl depending on config */
357 	uint64_t prp2;		/* prp or sgl depending on config */
358 } __packed nvme_subq_head_t;
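
/*
 * Illustrative sketch (not part of the original header) of the PRP1/PRP2
 * conventions noted above, assuming a 4K memory page size.  prplist_paddr
 * is the physical address of a page-aligned PRP list the caller has
 * already filled in; it is only consulted when the transfer spans more
 * than two pages.
 */
#if 0
static __inline void
nvme_fill_prps_example(nvme_subq_head_t *head, uint64_t data_paddr,
		       uint32_t bytes, uint64_t prplist_paddr)
{
	uint32_t first = 4096 - (uint32_t)(data_paddr & 4095);

	head->prp1 = data_paddr;
	if (bytes <= first)
		head->prp2 = 0;			/* fits in one page */
	else if (bytes <= first + 4096)
		head->prp2 = data_paddr + first; /* second (aligned) page */
	else
		head->prp2 = prplist_paddr;	/* points to a PRP list */
}
#endif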
359 
360 /*
361  * NOTE: SGL1 - buffer can be byte aligned
362  *	 SGL2 - buffer containing single SGL desc must be 8-byte aligned
363  */
364 #define NVME_SUBQFLG_PRP	0x00
365 #define NVME_SUBQFLG_SGL1	0x40	/* MPTR -> single contig buffer */
366 #define NVME_SUBQFLG_SGL2	0x80	/* MPTR -> &SGL seg w/one SGL desc */
367 #define NVME_SUBQFLG_RESERVEDS	0xC0
368 
369 #define NVME_SUBQFLG_NORM	0x00
370 #define NVME_SUBQFLG_FUSED0	0x01
371 #define NVME_SUBQFLG_FUSED1	0x02
372 #define NVME_SUBQFLG_RESERVEDF	0x03
373 
374 /*
375  * Submission queue full generic entry (64 bytes)
376  *
377  * NOTE: nvme_subq_item_t is used as part of the nvme_allcmd_t union later
378  *	 on.  Do not use the generic item structure to construct nominal
379  *	 commands.
380  */
381 typedef struct {
382 	nvme_subq_head_t head;
383 	/*
384 	 * command specific fields
385 	 */
386 	union {
387 		uint32_t dw10;
388 		uint32_t ndt;	/* number of dwords in data xfer */
389 	};
390 	union {
391 		uint32_t dw11;
392 		uint32_t ndm;	/* number of dwords in meta-data xfer */
393 	};
394 	uint32_t dw12;
395 	uint32_t dw13;
396 	uint32_t dw14;
397 	uint32_t dw15;
398 } __packed nvme_subq_item_t;
399 
400 /*
401  * Completion Queue Entry Trailer (8 bytes, full entry is 16 bytes)
402  */
403 typedef struct {
404 #if _BYTE_ORDER == _LITTLE_ENDIAN
405 	uint16_t	subq_head_ptr;
406 	uint16_t	subq_id;
407 	uint16_t	cmd_id;
408 	uint16_t	status;
409 #else
410 	uint16_t	subq_id;
411 	uint16_t	subq_head_ptr;
412 	uint16_t	status;
413 	uint16_t	cmd_id;
414 #endif
415 } __packed nvme_comq_tail_t;
416 
417 /*
418  * Completion queue full generic entry (16 bytes)
419  *
420  * subq_head_ptr	- Current submission queue head pointer
421  * subq_id		- Submission queue the command came from
422  * cmd_id;		- Command identifier
423  * status;		- Result status
424  *
425  * NOTE: nvme_comq_item_t is used as part of the nvme_allres_t union later
426  *	 on.  Do not use the generic item structure to parse nominal
427  *	 results.
428  */
429 typedef struct {
430 	uint32_t	dw0;		/* command-specific status */
431 	uint32_t	dw1;		/* (typically reserved) */
432 	nvme_comq_tail_t tail;
433 } __packed nvme_comq_item_t;
434 
435 #define NVME_COMQ_STATUS_DNR	0x8000U
436 #define NVME_COMQ_STATUS_MORE	0x4000U
437 #define NVME_COMQ_STATUS_29	0x2000U
438 #define NVME_COMQ_STATUS_28	0x1000U
439 #define NVME_COMQ_STATUS_TYPE	0x0E00U
440 #define NVME_COMQ_STATUS_CODE	0x01FEU
441 #define NVME_COMQ_STATUS_PHASE	0x0001U
442 
443 #define NVME_COMQ_STATUS_TYPE_GET(data)	\
444 		(((data) & NVME_COMQ_STATUS_TYPE) >> 9)
445 #define NVME_COMQ_STATUS_CODE_GET(data)	\
446 		(((data) & NVME_COMQ_STATUS_CODE) >> 1)
447 
448 #define NVME_STATUS_TYPE_GENERIC	0	/* generic status code */
449 #define NVME_STATUS_TYPE_SPECIFIC	1	/* opcode-specific code */
450 #define NVME_STATUS_TYPE_MEDIA		2	/* media & data errors */
451 #define NVME_STATUS_TYPE_3		3
452 #define NVME_STATUS_TYPE_4		4
453 #define NVME_STATUS_TYPE_5		5
454 #define NVME_STATUS_TYPE_6		6
455 #define NVME_STATUS_TYPE_VENDOR		7
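
/*
 * Illustrative sketch (not part of the original header): breaking down the
 * status field of a completion entry with the accessors above.  The phase
 * bit is compared against the phase expected for the current pass over the
 * completion ring to detect newly completed entries.
 */
#if 0
static __inline int
nvme_comq_status_example(uint16_t status, uint16_t expected_phase,
			 uint32_t *type, uint32_t *code)
{
	if ((status & NVME_COMQ_STATUS_PHASE) != expected_phase)
		return (0);		/* entry not yet written */
	*type = NVME_COMQ_STATUS_TYPE_GET(status);
	*code = NVME_COMQ_STATUS_CODE_GET(status);
	return (1);
}
#endif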
456 
457 /*
458  * Generic status (NVME_STATUS_TYPE_GENERIC)
459  *
460  * Status codes 0x00-0x7F are applicable to the admin command set or
461  * are generic across multiple command sets.
462  *
463  * Status codes 0x80-0xBF are applicable to the I/O command set.
464  *
465  * Status codes 0xC0-0xFF are vendor-specific
466  */
467 #define NVME_CODE_SUCCESS		0x00
468 #define NVME_CODE_BADOP			0x01
469 #define NVME_CODE_BADFIELD		0x02
470 #define NVME_CODE_IDCONFLICT		0x03
471 #define NVME_CODE_BADXFER		0x04
472 #define NVME_CODE_ABORTED_PWRLOSS	0x05
473 #define NVME_CODE_INTERNAL		0x06
474 #define NVME_CODE_ABORTED_ONREQ		0x07
475 #define NVME_CODE_ABORTED_SQDEL		0x08
476 #define NVME_CODE_ABORTED_FUSEFAIL	0x09
477 #define NVME_CODE_ABORTED_FUSEMISSING	0x0A
478 #define NVME_CODE_BADNAMESPACE		0x0B
479 #define NVME_CODE_SEQERROR		0x0C
480 #define NVME_CODE_BADSGLSEG		0x0D
481 #define NVME_CODE_BADSGLCNT		0x0E
482 #define NVME_CODE_BADSGLLEN		0x0F
483 #define NVME_CODE_BADSGLMLEN		0x10
484 #define NVME_CODE_BADSGLTYPE		0x11
485 #define NVME_CODE_BADMEMBUFUSE		0x12
486 #define NVME_CODE_BADPRPOFF		0x13
487 #define NVME_CODE_ATOMICWUOVFL		0x14
488 					/* 15-7f reserved */
489 
490 #define NVME_CODE_LBA_RANGE		0x80
491 #define NVME_CODE_CAP_EXCEEDED		0x81
492 #define NVME_CODE_NAM_NOT_READY		0x82
493 #define NVME_CODE_RSV_CONFLICT		0x83
494 #define NVME_CODE_FMT_IN_PROG		0x84
495 					/* 85-bf reserved */
496 
497 /*
498  * Command specific status codes (NVME_STATUS_TYPE_SPECIFIC)
499  */
500 #define NVME_CSSCODE_BADCOMQ			0x00
501 #define NVME_CSSCODE_BADQID			0x01
502 #define NVME_CSSCODE_BADQSIZE			0x02
503 #define NVME_CSSCODE_ABORTLIM			0x03
504 #define NVME_CSSCODE_RESERVED04			0x04
505 #define NVME_CSSCODE_ASYNCEVENTLIM		0x05
506 #define NVME_CSSCODE_BADFWSLOT			0x06
507 #define NVME_CSSCODE_BADFWIMAGE			0x07
508 #define NVME_CSSCODE_BADINTRVECT		0x08
509 #define NVME_CSSCODE_BADLOGPAGE			0x09
510 #define NVME_CSSCODE_BADFORMAT			0x0A
511 #define NVME_CSSCODE_FW_NEEDSCONVRESET		0x0B
512 #define NVME_CSSCODE_BADQDELETE			0x0C
513 #define NVME_CSSCODE_FEAT_NOT_SAVEABLE		0x0D
514 #define NVME_CSSCODE_FEAT_NOT_CHGABLE		0x0E
515 #define NVME_CSSCODE_FEAT_NOT_NSSPEC		0x0F
516 #define NVME_CSSCODE_FW_NEEDSSUBRESET		0x10
517 #define NVME_CSSCODE_FW_NEEDSRESET		0x11
518 #define NVME_CSSCODE_FW_NEEDSMAXTVIOLATE	0x12
519 #define NVME_CSSCODE_FW_PROHIBITED		0x13
520 #define NVME_CSSCODE_RANGE_OVERLAP		0x14
521 #define NVME_CSSCODE_NAM_INSUFF_CAP		0x15
522 #define NVME_CSSCODE_NAM_ID_UNAVAIL		0x16
523 #define NVME_CSSCODE_RESERVED17			0x17
524 #define NVME_CSSCODE_NAM_ALREADY_ATT		0x18
525 #define NVME_CSSCODE_NAM_IS_PRIVATE		0x19
526 #define NVME_CSSCODE_NAM_NOT_ATT		0x1A
527 #define NVME_CSSCODE_NO_THIN_PROVISION		0x1B
528 #define NVME_CSSCODE_CTLR_LIST_INVALID		0x1C
529 						/* 1D-7F reserved */
530 
531 #define NVME_CSSCODE_ATTR_CONFLICT		0x80
532 #define NVME_CSSCODE_BADPROTINFO		0x81
533 #define NVME_CSSCODE_WRITE_TO_RDONLY		0x82
534 					/* 83-BF reserved */
535 
536 /*
537  * Media and Data Integrity (NVME_STATUS_TYPE_MEDIA)
538  */
539 #define NVME_MEDCODE_WRITE_FAULT		0x80
540 #define NVME_MEDCODE_UNRECOV_READ_ERROR		0x81
541 #define NVME_MEDCODE_ETOE_GUARD_CHK		0x82
542 #define NVME_MEDCODE_ETOE_APPTAG_CHK		0x83
543 #define NVME_MEDCODE_ETOE_REFTAG_CHK		0x84
544 #define NVME_MEDCODE_COMPARE_FAILURE		0x85
545 #define NVME_MEDCODE_ACCESS_DENIED		0x86
546 #define NVME_MEDCODE_UNALLOCATED		0x87
547 					/* 88-BF reserved */
548 
549 /*
550  * OPCODES:
551  *	7:	1=IO Command set or vendor specific
552  *	6:2	Function
553  *	1	Data XFer R
554  *	0	Data XFer W	(note: both R and W cannot be specified)
555  *
556  * Admin commands
557  */
558 #define NVME_OP_DELETE_SUBQ	0x00
559 #define NVME_OP_CREATE_SUBQ	0x01
560 #define NVME_OP_GET_LOG_PAGE	0x02	/* (needs namid) */
561 #define NVME_OP_DELETE_COMQ	0x04
562 #define NVME_OP_CREATE_COMQ	0x05
563 #define NVME_OP_IDENTIFY	0x06	/* (needs namid) */
564 #define NVME_OP_ABORT		0x08
565 #define NVME_OP_SET_FEATURES	0x09	/* (needs namid) */
566 #define NVME_OP_GET_FEATURES	0x0A	/* (needs namid) */
567 #define NVME_OP_ASY_EVENT_REQ	0x0C
568 #define NVME_OP_NAM_MANAGE	0x0D	/* (optional, needs namid) */
569 #define NVME_OP_FW_COMMIT	0x10	/* (optional) */
570 #define NVME_OP_FW_DOWNLOAD	0x11	/* (optional) */
571 #define NVME_OP_NAM_ATTACH	0x15	/* (optional, needs namid) */
572 
573 #define NVME_OP_FORMATNVM	0x80	/* (optional, needs namid) */
574 #define NVME_OP_SEC_SEND	0x81	/* (optional, needs namid) */
575 #define NVME_OP_SEC_RECV	0x82	/* (optional, needs namid) */
576 
577 /*
578  * Abort command
579  *
580  * Error status possible: NVME_CSSCODE_ABORTLIM
581  */
582 typedef struct {
583 	nvme_subq_head_t head;
584 #if _BYTE_ORDER == _LITTLE_ENDIAN
585 	uint16_t	subq_id;	/* subq to abort */
586 	uint16_t	cmd_id;		/* cmdid to abort */
587 #else
588 	uint16_t	cmd_id;
589 	uint16_t	subq_id;
590 #endif
591 	uint32_t	reserved11;
592 	uint32_t	reserved12;
593 	uint32_t	reserved13;
594 	uint32_t	reserved14;
595 	uint32_t	reserved15;
596 } __packed nvme_abort_cmd_t;
597 
598 typedef struct {
599 	uint32_t dw0;
600 	uint32_t dw1;
601 	nvme_comq_tail_t tail;
602 } __packed nvme_abort_res_t;
603 
604 /*
605  * Asynchronous Event Request Command
606  *
607  * Error status possible: NVME_CSSCODE_ASYNCEVENTLIM
608  *
609  * NOTE: Should be posted to an independent queue, with no timeout.  Async
610  *	 events are returned when they occur and so might not be returned
611  *	 for a very long time (like hours, or longer).
612  */
613 typedef struct {
614 	nvme_subq_head_t head;
615 	uint32_t	reserved10;
616 	uint32_t	reserved11;
617 	uint32_t	reserved12;
618 	uint32_t	reserved13;
619 	uint32_t	reserved14;
620 	uint32_t	reserved15;
621 } __packed nvme_async_cmd_t;
622 
623 typedef struct {
624 #if _BYTE_ORDER == _LITTLE_ENDIAN
625 	uint8_t		type;
626 	uint8_t		info;
627 	uint8_t		lid;
628 	uint8_t		reserved03;
629 #else
630 	uint8_t		reserved03;
631 	uint8_t		lid;
632 	uint8_t		info;
633 	uint8_t		type;
634 #endif
635 	uint32_t	dw1;
636 	nvme_comq_tail_t tail;
637 } __packed nvme_async_res_t;
638 
639 #define NVME_ASYNC_TYPE_MASK		0x07
640 #define NVME_ASYNC_TYPE_ERROR		0x00	/* error status */
641 #define NVME_ASYNC_TYPE_SMART		0x01	/* smart status */
642 #define NVME_ASYNC_TYPE_NOTICE		0x02
643 #define NVME_ASYNC_TYPE_RESERVED3	0x03
644 #define NVME_ASYNC_TYPE_RESERVED4	0x04
645 #define NVME_ASYNC_TYPE_RESERVED5	0x05
646 #define NVME_ASYNC_TYPE_CSS		0x06	/* cmd-specific status */
647 #define NVME_ASYNC_TYPE_VENDOR		0x07
648 
649 /* TYPE_ERROR */
650 #define NVME_ASYNC_INFO_INVDOORBELL	0x00
651 #define NVME_ASYNC_INFO_INVDOORVALUE	0x01
652 #define NVME_ASYNC_INFO_DIAGFAIL	0x02
653 #define NVME_ASYNC_INFO_INTRNL_FATAL	0x03	/* persistent internal error */
654 #define NVME_ASYNC_INFO_INTRNL_TRANS	0x04	/* transient internal error */
655 #define NVME_ASYNC_INFO_FIRMLOADERR	0x05
656 					/* 06-FF reserved */
657 
658 /* TYPE_SMART */
659 #define NVME_ASYNC_INFO_UNRELIABLE	0x00
660 #define NVME_ASYNC_INFO_TEMP_THR	0x01
661 #define NVME_ASYNC_INFO_SPARE_BELOW_THR	0x02
662 					/* 03-FF reserved */
663 
664 /* TYPE_NOTICE */
665 #define NVME_ASYNC_INFO_NAM_ATTR_CHG	0x00
666 #define NVME_ASYNC_INFO_FW_ACT_START	0x01
667 					/* 02-FF reserved */
668 
669 /* TYPE_CSS */
670 #define NVME_ASYNC_INFO_RES_LOGPG_AVAIL	0x00
671 					/* 01-FF reserved */
672 
673 /*
674  * Create I/O Completion Queue Command
675  *
676  * NOTE: PRP1 depends on the PC (physically contiguous) config bit.
677  *	 If set (which we do), points to a single physically contiguous
678  *	 array.
679  *
680  * NOTE: XXX IV specification associated with msix PCI space ?
681  */
682 typedef struct {
683 	nvme_subq_head_t head;
684 #if _BYTE_ORDER == _LITTLE_ENDIAN
685 	uint16_t	comq_id;	/* create w/unique id */
686 	uint16_t	comq_size;	/* in entries */
687 	uint16_t	flags;
688 	uint16_t	ivect;		/* multiple MSI or MSI-X only */
689 #else
690 	uint16_t	comq_size;
691 	uint16_t	comq_id;
692 	uint16_t	ivect;
693 	uint16_t	flags;
694 #endif
695 	uint32_t	reserved12;
696 	uint32_t	reserved13;
697 	uint32_t	reserved14;
698 	uint32_t	reserved15;
699 } __packed nvme_createcomq_cmd_t;
700 
701 #define NVME_CREATECOM_IEN	0x0002
702 #define NVME_CREATECOM_PC	0x0001
703 
704 typedef struct {
705 	uint32_t	dw0;
706 	uint32_t	dw1;
707 	nvme_comq_tail_t tail;
708 } __packed nvme_createcomq_res_t;
709 
710 /*
711  * Create I/O Submission Queue Command
712  *
713  * NOTE: PRP1 depends on the PC (physically contiguous) config bit.
714  *	 If set (which we do), points to a single physically contiguous
715  *	 array.
716  *
717  * NOTE: XXX IV specification associated with msix PCI space ?
718  */
719 typedef struct {
720 	nvme_subq_head_t head;
721 #if _BYTE_ORDER == _LITTLE_ENDIAN
722 	uint16_t	subq_id;	/* create w/unique id */
723 	uint16_t	subq_size;	/* in entries */
724 	uint16_t	flags;
725 	uint16_t	comq_id;	/* completion queue to use */
726 #else
727 	uint16_t	subq_size;
728 	uint16_t	subq_id;
729 	uint16_t	comq_id;
730 	uint16_t	flags;
731 #endif
732 	uint32_t	reserved12;
733 	uint32_t	reserved13;
734 	uint32_t	reserved14;
735 	uint32_t	reserved15;
736 } __packed nvme_createsubq_cmd_t;
737 
738 #define NVME_CREATESUB_PRI	0x0006
739 #define NVME_CREATESUB_PRI_LOW	0x0006
740 #define NVME_CREATESUB_PRI_MED	0x0004
741 #define NVME_CREATESUB_PRI_HIG	0x0002
742 #define NVME_CREATESUB_PRI_URG	0x0000
743 
744 #define NVME_CREATESUB_PC	0x0001
745 
746 typedef struct {
747 	uint32_t	dw0;
748 	uint32_t	dw1;
749 	nvme_comq_tail_t tail;
750 } __packed nvme_createsubq_res_t;
751 
752 /*
753  * Delete I/O Completion Queue Command
754  * Delete I/O Submission Queue Command
755  *
756  * Both commands use the same structures.
757  */
758 typedef struct {
759 	nvme_subq_head_t head;
760 #if _BYTE_ORDER == _LITTLE_ENDIAN
761 	uint16_t	qid;		/* queue id to delete */
762 	uint16_t	reserved02;
763 #else
764 	uint16_t	reserved02;
765 	uint16_t	qid;		/* queue id to delete */
766 #endif
767 	uint32_t	reserved11;
768 	uint32_t	reserved12;
769 	uint32_t	reserved13;
770 	uint32_t	reserved14;
771 	uint32_t	reserved15;
772 } __packed nvme_deleteq_cmd_t;
773 
774 typedef struct {
775 	uint32_t	dw0;
776 	uint32_t	dw1;
777 	nvme_comq_tail_t tail;
778 } __packed nvme_deleteq_res_t;
779 
780 /*
781  * Get Features Command
782  */
783 typedef struct {
784 	nvme_subq_head_t head;
785 	uint32_t	flags;
786 	uint32_t	reserved11;
787 	uint32_t	reserved12;
788 	uint32_t	reserved13;
789 	uint32_t	reserved14;
790 	uint32_t	reserved15;
791 } __packed nvme_getfeat_cmd_t;
792 
793 #define NVME_GETFEAT_ID_MASK	0x000000FFU
794 #define NVME_GETFEAT_SEL_MASK	0x00000700U	/* NOTE: optional support */
795 
796 #define NVME_GETFEAT_SEL_CUR	0x00000000U	/* current */
797 #define NVME_GETFEAT_SEL_DEF	0x00000100U	/* default */
798 #define NVME_GETFEAT_SEL_SAV	0x00000200U	/* saved */
799 #define NVME_GETFEAT_SEL_SUP	0x00000300U	/* supported */
800 #define NVME_GETFEAT_SEL_4	0x00000400U
801 #define NVME_GETFEAT_SEL_5	0x00000500U
802 #define NVME_GETFEAT_SEL_6	0x00000600U
803 #define NVME_GETFEAT_SEL_7	0x00000700U
804 
805 typedef struct {
806 	uint32_t	cap;	/* SEL_SUP select only */
807 	uint32_t	dw1;
808 	nvme_comq_tail_t tail;
809 } __packed nvme_getfeat_res_t;
810 
811 #define NVME_GETFEAT_CAP_SAVEABLE	0x00000001U
812 #define NVME_GETFEAT_CAP_NAM_SPECIFIC	0x00000002U
813 #define NVME_GETFEAT_CAP_CHANGEABLE	0x00000004U
814 
815 /*
816  * Get Log Page Command
817  *
818  * See nvme_log.h for returned data content
819  */
820 typedef struct {
821 	nvme_subq_head_t head;
822 #if _BYTE_ORDER == _LITTLE_ENDIAN
823 	uint8_t		lid;
824 	uint8_t		reserved01;
825 	uint16_t	numd;
826 #else
827 	uint16_t	numd;
828 	uint8_t		reserved01;
829 	uint8_t		lid;
830 #endif
831 	uint32_t	reserved11;
832 	uint32_t	reserved12;
833 	uint32_t	reserved13;
834 	uint32_t	reserved14;
835 	uint32_t	reserved15;
836 } __packed nvme_getlog_cmd_t;
837 
838 #define NVME_GETLOGPG_NUMD_MASK	0x0FFF
839 
840 #define NVME_LID_00		0x00
841 #define NVME_LID_ERROR		0x01	/* error information */
842 #define NVME_LID_SMART		0x02	/* smart/health information */
843 #define NVME_LID_FWSLOT		0x03	/* firmware slot information */
844 #define NVME_LID_NAM_CHG_LIST	0x04	/* (optional) changed ns list */
845 #define NVME_LID_CMDEFF		0x05	/* (optional) command effects log */
846 				/* 06-7F reserved */
847 #define NVME_LID_RES_NOTIFY	0x80	/* (optional) Reservation notify */
848 				/* 81-BF I/O command set specific */
849 				/* C0-FF Vendor specific */
850 
851 typedef struct {
852 	uint32_t	dw0;
853 	uint32_t	dw1;
854 	nvme_comq_tail_t tail;
855 } __packed nvme_getlog_res_t;
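
/*
 * Illustrative sketch (not part of the original header): requesting the
 * SMART/health log page.  numd is a 0's based dword count, so a 512-byte
 * log page is (512 / 4) - 1.  The head (cid, nsid, prp1) is assumed to be
 * set up separately by the caller.
 */
#if 0
static __inline void
nvme_getlog_smart_example(nvme_getlog_cmd_t *cmd)
{
	cmd->head.opcode = NVME_OP_GET_LOG_PAGE;
	cmd->lid = NVME_LID_SMART;
	cmd->numd = (512 / 4 - 1) & NVME_GETLOGPG_NUMD_MASK;
}
#endif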
856 
857 /*
858  * Identify Command
859  *
860  * See nvme_ident.h for the returned data structure(s)
861  */
862 typedef struct {
863 	nvme_subq_head_t head;
864 #if _BYTE_ORDER == _LITTLE_ENDIAN
865 	uint8_t		cns;
866 	uint8_t		reserved01;
867 	uint16_t	cntid;
868 #else
869 	uint16_t	cntid;
870 	uint8_t		reserved01;
871 	uint8_t		cns;
872 #endif
873 	uint32_t	reserved11;
874 	uint32_t	reserved12;
875 	uint32_t	reserved13;
876 	uint32_t	reserved14;
877 	uint32_t	reserved15;
878 } __packed nvme_identify_cmd_t;
879 
880 #define NVME_CNS_ACT_NS		0x00	/* Identify Namespace Structure */
881 #define NVME_CNS_CTLR		0x01	/* Identify Controller Structure */
882 #define NVME_CNS_ACT_NSLIST	0x02	/* List of 1024 ACTIVE nsids > nsid */
883 				/* 03-0F reserved */
884 
885 #define NVME_CNS_ALO_NSLIST	0x10	/* List of 1024 ALLOCATED nsids > nsid */
886 #define NVME_CNS_ALO_NS		0x11	/* Identify Namespace Structure */
887 #define NVME_CNS_ATT_CTLR_LIST	0x12	/* up to 2047 ctlr ids >= cntid */
888 					/* (that are attached to nsid) */
889 #define NVME_CNS_ANY_CTLR_LIST	0x13	/* same, but may/maynot be attached */
890 				/* 14-1F reserved */
891 				/* 20-FF reserved */
892 
893 typedef struct {
894 	uint32_t dw0;
895 	uint32_t dw1;
896 	nvme_comq_tail_t tail;
897 } __packed nvme_identify_res_t;
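
/*
 * Illustrative sketch (not part of the original header): filling in an
 * Identify Controller command.  The head (cid, and prp1 pointing at a 4K
 * buffer) is assumed to be set up by the caller; nsid is not used for
 * NVME_CNS_CTLR.
 */
#if 0
static __inline void
nvme_identify_ctlr_example(nvme_identify_cmd_t *cmd)
{
	cmd->head.opcode = NVME_OP_IDENTIFY;
	cmd->head.nsid = 0;
	cmd->cns = NVME_CNS_CTLR;
	cmd->cntid = 0;
}
#endif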
898 
899 /*
900  * Namespace Attachment Command
901  */
902 typedef struct {
903 	nvme_subq_head_t head;
904 	uint32_t	sel;
905 	uint32_t	reserved11;
906 	uint32_t	reserved12;
907 	uint32_t	reserved13;
908 	uint32_t	reserved14;
909 	uint32_t	reserved15;
910 } __packed nvme_nsatt_cmd_t;
911 
912 #define NVME_NSATT_SEL_MASK	0x0000000FU
913 
914 #define NVME_NSATT_SEL_GET(data)	\
915 		((data) & NVME_NSATT_SEL_MASK)
916 #define NVME_NSATT_SEL_ATTACH	0
917 #define NVME_NSATT_SEL_DETACH	1
918 				/* 2-F reserved */
919 
920 typedef struct {
921 	uint32_t dw0;
922 	uint32_t dw1;
923 	nvme_comq_tail_t tail;
924 } __packed nvme_nsatt_res_t;
925 
926 /*
927  * Namespace Management Command
928  *
929  * See nvme_ns.h for transferred data structures
930  */
931 typedef struct {
932 	nvme_subq_head_t head;
933 	uint32_t	sel;
934 	uint32_t	reserved11;
935 	uint32_t	reserved12;
936 	uint32_t	reserved13;
937 	uint32_t	reserved14;
938 	uint32_t	reserved15;
939 } __packed nvme_nsmgmt_cmd_t;
940 
941 #define NVME_NSMGMT_SEL_MASK	0x0000000FU
942 
943 #define NVME_NSMGMT_SEL_GET(data)	\
944 		((data) & NVME_NSMGMT_SEL_MASK)
945 #define NVME_NSMGMT_SEL_CREATE	0
946 #define NVME_NSMGMT_SEL_DELETE	1
947 				/* 2-F reserved */
948 
949 typedef struct {
950 	uint32_t	nsid;	/* nsid created in a CREATE op only */
951 	uint32_t	dw1;
952 	nvme_comq_tail_t tail;
953 } __packed nvme_nsmgmt_res_t;
954 
955 /*
956  * NVME Set Features Command
957  *
958  * NOTE: PRP2 cannot point to a PRP list.  It exists in case the data area
959  *	 crosses a page boundary and has a direct PRP.  Our driver
960  *	 implementation page-aligns requests and will only use PRP1.
961  *
962  * NOTE: I decided to embed the sub-commands in the main structure, and
963  *	 place the related #define's nearby.  This is the only place where
964  *	 I try to embed #defines because doing so normally makes things hard
965  *	 to read.
966  */
967 typedef struct {
968 	nvme_subq_head_t head;
969 	uint32_t flags;		/* dw10 */
970 	union {
971 		/*
972 		 * (Generic)
973 		 */
974 		struct {
975 			uint32_t dw11;
976 			uint32_t dw12;
977 			uint32_t dw13;
978 			uint32_t dw14;
979 			uint32_t dw15;
980 		};
981 
982 		/*
983 		 * NVME_FID_ARB
984 		 */
985 		struct {
986 			uint8_t	burst;	/* arb burst 2^n n=0-7 */
987 			uint8_t lpw;	/* N 0-255 (0=1) low pri weight */
988 			uint8_t mpw;	/* N 0-255 (0=1) med pri weight */
989 			uint8_t	hpw;	/* N 0-255 (0=1) high pri weight */
990 		} arb;
991 #define NVME_ARB_BURST_MASK	0x07
992 #define NVME_ARB_BURST_MAX	0x07
993 
994 		/*
995 		 * NVME_FID_PWRMGMT
996 		 */
997 		struct {
998 			uint8_t xflags;
999 			uint8_t	reserved01;
1000 			uint8_t	reserved02;
1001 			uint8_t	reserved03;
1002 		} pwrmgmt;
1003 #define NVME_PWRMGMT_PS_MASK	0x1F
1004 #define NVME_PWRMGMT_WH_MASK	0xE0
1005 #define NVME_PWRMGMT_PS_SET(data)	((data) & NVME_PWRMGMT_PS_MASK)
1006 #define NVME_PWRMGMT_WH_SET(data)	(((data) << 5) & NVME_PWRMGMT_WH_MASK)
1007 
1008 		/*
1009 		 * NVME_FID_LBARNGTYPE (requires Host Memory Buffer)
1010 		 */
1011 		struct {
1012 			uint32_t xflags;
1013 			uint32_t dw12;
1014 			uint32_t dw13;
1015 			uint32_t dw14;
1016 			uint32_t dw15;
1017 		} lbarng;
1018 #define NVME_LBARNG_NUM_MASK	0x0000003FU
1019 
1020 		/*
1021 		 * NVME_FID_TEMPTHRESH
1022 		 */
1023 		struct {
1024 #if _BYTE_ORDER == _LITTLE_ENDIAN
1025 			uint16_t tmpth;
1026 			uint16_t xflags;
1027 #else
1028 			uint16_t xflags;
1029 			uint16_t tmpth;
1030 #endif
1031 			uint32_t dw12;
1032 			uint32_t dw13;
1033 			uint32_t dw14;
1034 			uint32_t dw15;
1035 		} tempth;
1036 #define NVME_TEMPTH_SEL_MASK	0x000FU
1037 #define NVME_TEMPTH_TYPE_MASK	0x0030U
1038 #define NVME_TEMPTH_TYPE_OVER	0x0000U
1039 #define NVME_TEMPTH_TYPE_UNDER	0x0010U
1040 #define NVME_TEMPTH_TYPE_2	0x0020U
1041 #define NVME_TEMPTH_TYPE_3	0x0030U
1042 
1043 		/*
1044 		 * NVME_FID_ERRORRECOVERY
1045 		 */
1046 		struct {
1047 #if _BYTE_ORDER == _LITTLE_ENDIAN
1048 			uint16_t tler;
1049 			uint16_t xflags;
1050 #else
1051 			uint16_t xflags;
1052 			uint16_t tler;
1053 #endif
1054 			uint32_t dw12;
1055 			uint32_t dw13;
1056 			uint32_t dw14;
1057 			uint32_t dw15;
1058 		} errrec;
1059 #define NVME_ERRREC_DEALLOCERR	0x0001U	/* enable deallo/unwritten blk error */
1060 
1061 		/*
1062 		 * NVME_FID_VOLATILEWC
1063 		 */
1064 		struct {
1065 			uint32_t xflags;
1066 			uint32_t dw12;
1067 			uint32_t dw13;
1068 			uint32_t dw14;
1069 			uint32_t dw15;
1070 		} volatilewc;
1071 #define NVME_VOLATILEWC_ENABLE	0x0001U
1072 
1073 		/*
1074 		 * NVME_FID_NUMQUEUES
1075 		 *
1076 		 * (dw0 in completion block contains the number of submission
1077 		 *  and completion queues allocated).
1078 		 */
1079 		struct {
1080 #if _BYTE_ORDER == _LITTLE_ENDIAN
1081 			uint16_t nsqr;		/* # submission queues requested */
1082 			uint16_t ncqr;		/* # completion queues requested */
1083 #else
1084 			uint16_t ncqr;
1085 			uint16_t nsqr;
1086 #endif
1087 			uint32_t dw12;
1088 			uint32_t dw13;
1089 			uint32_t dw14;
1090 			uint32_t dw15;
1091 		} numqs;
1092 
1093 		/*
1094 		 * NVME_FID_INTCOALESCE
1095 		 *
1096 		 * NOTE: default upon reset is 0 (no coalescing)
1097 		 */
1098 		struct {
1099 #if _BYTE_ORDER == _LITTLE_ENDIAN
1100 			uint8_t	thr;		/* 0's based value, 0=1 */
1101 			uint8_t	time;		/* 0's based value, 0=1 */
1102 			uint16_t reserved02;
1103 #else
1104 			uint16_t reserved02;
1105 			uint8_t	time;
1106 			uint8_t	thr;
1107 #endif
1108 			uint32_t dw12;
1109 			uint32_t dw13;
1110 			uint32_t dw14;
1111 			uint32_t dw15;
1112 		} intcoal;
1113 
1114 		/*
1115 		 * NVME_FID_INTVECTOR
1116 		 */
1117 		struct {
1118 #if _BYTE_ORDER == _LITTLE_ENDIAN
1119 			uint16_t iv;
1120 			uint16_t xflags;
1121 #else
1122 			uint16_t xflags;
1123 			uint16_t iv;
1124 #endif
1125 			uint32_t dw12;
1126 			uint32_t dw13;
1127 			uint32_t dw14;
1128 			uint32_t dw15;
1129 		} intvect;
1130 #define NVME_INTVECT_CD		0x0001U		/* disable coalescing */
1131 
1132 		/*
1133 		 * NVME_FID_WRATOMICYNRM
1134 		 */
1135 		struct {
1136 			uint32_t xflags;
1137 			uint32_t dw12;
1138 			uint32_t dw13;
1139 			uint32_t dw14;
1140 			uint32_t dw15;
1141 		} wratom;
1142 #define NVME_WRATOM_DN		0x00000001U	/* disables AWUN/NAWUN */
1143 
1144 		/*
1145 		 * NVME_FID_ASYNCEVCFG
1146 		 */
1147 		struct {
1148 			uint32_t xflags;
1149 			uint32_t dw12;
1150 			uint32_t dw13;
1151 			uint32_t dw14;
1152 			uint32_t dw15;
1153 		} asyncev;
1154 #define NVME_ASYNCEV_SMART_MASK	0x000000FFU	/* bits same as SMART bits */
1155 #define NVME_ASYNCEV_NS_ATTR	0x00000100U	/* ns attr change */
1156 #define NVME_ASYNCEV_FW_ACTVTE	0x00000200U	/* fw activation notice */
1157 
1158 		/*
1159 		 * NVME_FID_AUTOPS	(requires Host Memory Buffer)
1160 		 */
1161 		struct {
1162 			uint32_t xflags;
1163 			uint32_t dw12;
1164 			uint32_t dw13;
1165 			uint32_t dw14;
1166 			uint32_t dw15;
1167 		} autops;
1168 #define NVME_AUTOPS_ENABLE	0x00000001U	/* enable autonomous ps trans */
1169 
1170 		/*
1171 		 * NVME_FID_HOSTMEMBUF
1172 		 */
1173 		struct {
1174 			uint32_t xflags;
1175 			uint32_t sizepgs;	/* buffer size in mps units */
1176 			uint32_t hmdlla;	/* desclist lower address */
1177 			uint32_t hmdlua;	/* desclist upper address */
1178 			uint32_t count;		/* list entry count */
1179 		} hostmem;
1180 #define NVME_HOSTMEM_RETURN	0x00000002U	/* same memory after reset */
1181 #define NVME_HOSTMEM_ENABLE	0x00000001U
1182 
1183 		/*
1184 		 * NVME_FID_SFTPROGRESS
1185 		 */
1186 		struct {
1187 #if _BYTE_ORDER == _LITTLE_ENDIAN
1188 			uint8_t pbslc;		/* pre-boot software load cnt */
1189 			uint8_t reserved01;
1190 			uint8_t reserved02;
1191 			uint8_t reserved03;
1192 #else
1193 			uint8_t reserved03;
1194 			uint8_t reserved02;
1195 			uint8_t reserved01;
1196 			uint8_t pbslc;
1197 #endif
1198 			uint32_t dw12;
1199 			uint32_t dw13;
1200 			uint32_t dw14;
1201 			uint32_t dw15;
1202 		} sftprog;
1203 
1204 		/*
1205 		 * NVME_FID_HOSTID
1206 		 */
1207 		struct {
1208 			uint32_t dw11;
1209 			uint32_t dw12;
1210 			uint32_t dw13;
1211 			uint32_t dw14;
1212 			uint32_t dw15;
1213 		} hostid;
1214 
1215 		/*
1216 		 * NVME_FID_RESERVENOTMASK
1217 		 */
1218 		struct {
1219 			uint32_t xflags;
1220 			uint32_t dw12;
1221 			uint32_t dw13;
1222 			uint32_t dw14;
1223 			uint32_t dw15;
1224 		} resnotify;
1225 #define NVME_RESNOTIFY_RESPRE	0x00000008U
1226 #define NVME_RESNOTIFY_RESREL	0x00000004U
1227 #define NVME_RESNOTIFY_REGPRE	0x00000002U
1228 
1229 		/*
1230 		 * NVME_FID_RESERVEPERSIST
1231 		 */
1232 		struct {
1233 			uint32_t xflags;
1234 			uint32_t dw12;
1235 			uint32_t dw13;
1236 			uint32_t dw14;
1237 			uint32_t dw15;
1238 		} respersist;
1239 #define NVME_RESPERSIST_PTPL	0x00000001U	/* persist thru power loss */
1240 	};
1241 } __packed nvme_setfeat_cmd_t;
1242 
1243 #define NVME_SETFEAT_SAVE	0x80000000U
1244 #define NVME_FID_MASK		0x000000FFU
1245 
1246 #define NVME_FID_GET(data)	\
1247 		((data) & NVME_FID_MASK)
1248 #define NVME_FID_SET(fid)	\
1249 		((fid) & NVME_FID_MASK)
1250 
1251 #define NVME_FID_00		0x00
1252 #define NVME_FID_ARB		0x01	/* Arbitration */
1253 #define NVME_FID_PWRMGMT	0x02	/* Power Management */
1254 #define NVME_FID_LBARNGTYPE	0x03	/* (opt) LBA Range Type */
1255 #define NVME_FID_TEMPTHRESH	0x04	/* Temp Threshold */
1256 #define NVME_FID_ERRORRECOVERY	0x05	/* Error Recovery */
1257 #define NVME_FID_VOLATILEWC	0x06	/* (opt) Volatile Write Cache */
1258 #define NVME_FID_NUMQUEUES	0x07	/* Number of Queues */
1259 #define NVME_FID_INTCOALESCE	0x08	/* Interrupt Coalescing */
1260 #define NVME_FID_INTVECTOR	0x09	/* Interrupt Vector Config */
1261 #define NVME_FID_WRATOMICYNRM	0x0A	/* Write Atomicity Normal */
1262 #define NVME_FID_ASYNCEVCFG	0x0B	/* Async Event Config */
1263 #define NVME_FID_AUTOPS		0x0C	/* (opt) Autonomous pwr state */
1264 #define NVME_FID_HOSTMEMBUF	0x0D	/* (opt) Host memory buffer */
1265 				/* 0E-77 reserved */
1266 				/* 78-7F see NVMe management ifc spec */
1267 				/* 80-BF cmd set specific (reserved) */
1268 #define NVME_FID_SFTPROGRESS	0x80	/* (opt) Software Progress Marker */
1269 #define NVME_FID_HOSTID		0x81	/* (opt) Host Identifier */
1270 #define NVME_FID_RESERVENOTMASK	0x82	/* (opt) Reservation Notify Marker */
1271 #define NVME_FID_RESERVEPERSIST	0x83	/* (opt) Reservation Persistence */
1272 
1273 typedef struct {
1274 	uint32_t dw0;
1275 	uint32_t dw1;
1276 	nvme_comq_tail_t tail;
1277 } __packed nvme_setfeat_res_t;
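
/*
 * Illustrative sketch (not part of the original header): requesting a
 * number of I/O queues via Set Features.  Both counts are 0's based; the
 * controller reports the counts actually granted in dw0 of the completion
 * entry.
 */
#if 0
static __inline void
nvme_setfeat_numqs_example(nvme_setfeat_cmd_t *cmd,
			   uint16_t nsubqs, uint16_t ncomqs)
{
	cmd->head.opcode = NVME_OP_SET_FEATURES;
	cmd->flags = NVME_FID_SET(NVME_FID_NUMQUEUES);
	cmd->numqs.nsqr = nsubqs - 1;
	cmd->numqs.ncqr = ncomqs - 1;
}
#endif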
1278 
1279 /*
1280  * Format NVM Command
1281  */
1282 typedef struct {
1283 	nvme_subq_head_t head;
1284 	uint32_t	flags;
1285 	uint32_t	reserved11;
1286 	uint32_t	reserved12;
1287 	uint32_t	reserved13;
1288 	uint32_t	reserved14;
1289 	uint32_t	reserved15;
1290 } __packed nvme_format_cmd_t;
1291 
1292 #define NVME_FORMAT_SES_MASK		0x00000E00U
1293 #define NVME_FORMAT_SES_NONE		0x00000000U
1294 #define NVME_FORMAT_SES_NORM		0x00000200U
1295 #define NVME_FORMAT_SES_CRYPTO		0x00000400U
1296 					/* remaining ids reserved */
1297 
1298 #define NVME_FORMAT_PROT_FIRST		0x00000100U	/* first-8 of meta */
1299 							/* (else last-8) */
1300 
1301 #define NVME_FORMAT_PROT_MASK		0x000000E0U
1302 #define NVME_FORMAT_PROT_NONE		0x00000000U
1303 #define NVME_FORMAT_PROT_TYPE1		0x00000020U
1304 #define NVME_FORMAT_PROT_TYPE2		0x00000040U
1305 #define NVME_FORMAT_PROT_TYPE3		0x00000060U
1306 					/* remaining ids reserved */
1307 
1308 #define NVME_FORMAT_MS			0x00000010U	/* metadata 1=inline */
1309 #define NVME_FORMAT_LBA_FMT_MASK	0x0000000FU
1310 #define NVME_FORMAT_LBA_FMT_SET(data)	\
1311 	((data) & NVME_FORMAT_LBA_FMT_MASK)
1312 
1313 typedef struct {
1314 	uint32_t dw0;
1315 	uint32_t dw1;
1316 	nvme_comq_tail_t tail;
1317 } __packed nvme_format_res_t;
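
/*
 * Illustrative sketch (not part of the original header): composing the
 * Format NVM flags for a normal (non-cryptographic) erase of LBA format
 * index lbafmt, with no metadata and no end-to-end protection.
 */
#if 0
static __inline uint32_t
nvme_format_flags_example(uint32_t lbafmt)
{
	return (NVME_FORMAT_SES_NORM | NVME_FORMAT_PROT_NONE |
		NVME_FORMAT_LBA_FMT_SET(lbafmt));
}
#endif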
1318 
1319 /*
1320  * Security Receive Command
1321  */
1322 typedef struct {
1323 	nvme_subq_head_t head;
1324 #if _BYTE_ORDER == _LITTLE_ENDIAN
1325 	uint8_t		nssf;
1326 	uint8_t		spsp0;
1327 	uint8_t		spsp1;
1328 	uint8_t		secp;
1329 #else
1330 	uint8_t		secp;
1331 	uint8_t		spsp1;
1332 	uint8_t		spsp0;
1333 	uint8_t		nssf;
1334 #endif
1335 	uint32_t	alloc_len;	/* allocation length */
1336 	uint32_t	reserved12;
1337 	uint32_t	reserved13;
1338 	uint32_t	reserved14;
1339 	uint32_t	reserved15;
1340 } __packed nvme_secrecv_cmd_t;
1341 
1342 typedef struct {
1343 	uint32_t dw0;
1344 	uint32_t dw1;
1345 	nvme_comq_tail_t tail;
1346 } __packed nvme_secrecv_res_t;
1347 
1348 /*
1349  * Security Send Command
1350  */
1351 typedef struct {
1352 	nvme_subq_head_t head;
1353 #if _BYTE_ORDER == _LITTLE_ENDIAN
1354 	uint8_t		nssf;
1355 	uint8_t		spsp0;
1356 	uint8_t		spsp1;
1357 	uint8_t		secp;
1358 #else
1359 	uint8_t		secp;
1360 	uint8_t		spsp1;
1361 	uint8_t		spsp0;
1362 	uint8_t		nssf;
1363 #endif
1364 	uint32_t	xfer_len;	/* xfer length */
1365 	uint32_t	reserved12;
1366 	uint32_t	reserved13;
1367 	uint32_t	reserved14;
1368 	uint32_t	reserved15;
1369 } __packed nvme_secsend_cmd_t;
1370 
1371 typedef struct {
1372 	uint32_t dw0;
1373 	uint32_t dw1;
1374 	nvme_comq_tail_t tail;
1375 } __packed nvme_secsend_res_t;
1376 
1377 
1378 /************************************************************************
1379  * NVM I/O COMMANDS - Core I/O Commands, NVM command set		*
1380  ************************************************************************
1381  *
1382  * The nsid field is required for all of these commands.
1383  */
1384 
1385 #define NVME_IOCMD_FLUSH	0x00
1386 #define NVME_IOCMD_WRITE	0x01
1387 #define NVME_IOCMD_READ		0x02
1388 #define NVME_IOCMD_WRITEUC	0x04
1389 #define NVME_IOCMD_COMPARE	0x05
1390 #define NVME_IOCMD_WRITEZ	0x08
1391 #define NVME_IOCMD_DATAMGMT	0x09
1392 #define NVME_IOCMD_RESREG	0x0D
1393 #define NVME_IOCMD_RESREP	0x0E
1394 #define NVME_IOCMD_RESACQ	0x11
1395 #define NVME_IOCMD_RESREL	0x15
1396 
1397 /*
1398  * ioflags (16 bits) is similar across many NVM commands, so those
1399  * definitions are kept generic here.
1400  */
1401 #define NVME_IOFLG_LR		0x8000U	/* limited retry */
1402 #define NVME_IOFLG_FUA		0x4000U	/* force unit access */
1403 #define NVME_IOFLG_PRINFO_MASK	0x3C00U	/* prot info mask */
1404 #define NVME_IOFLG_RESV_MASK	0x03FFU
1405 
1406 /*
1407  * dsm (32 bits) exists in the read and write commands.
1408  */
1409 #define NVME_DSM_INCOMPRESSIBLE	0x00000080U
1410 #define NVME_DSM_SEQREQ		0x00000040U
1411 
1412 #define NVME_DSM_ACCLAT_MASK	0x00000030U
1413 #define NVME_DSM_ACCLAT_UNSPEC	0x00000000U
1414 #define NVME_DSM_ACCLAT_IDLE	0x00000010U
1415 #define NVME_DSM_ACCLAT_NORM	0x00000020U
1416 #define NVME_DSM_ACCLAT_LOW	0x00000030U
1417 
1418 #define NVME_DSM_ACCFREQ_MASK	0x0000000FU
1419 #define NVME_DSM_ACCFREQ_UNSPEC	0x00000000U	/* unspecified */
1420 #define NVME_DSM_ACCFREQ_WRTYP	0x00000001U	/* typical reads & writes */
1421 #define NVME_DSM_ACCFREQ_WRLOW	0x00000002U	/* few writes, few reads */
1422 #define NVME_DSM_ACCFREQ_WLORHI	0x00000003U	/* few writes, many reads */
1423 #define NVME_DSM_ACCFREQ_WHIRLO	0x00000004U	/* many writes, few reads */
1424 #define NVME_DSM_ACCFREQ_WHIRHI	0x00000005U	/* many writes, many reads */
1425 #define NVME_DSM_ACCFREQ_RONETM	0x00000006U	/* one-time read */
1426 #define NVME_DSM_ACCFREQ_RSPECU	0x00000007U	/* speculative read */
1427 #define NVME_DSM_ACCFREQ_OVERWR	0x00000008U	/* will be overwritten soon */
1428 				/* 9-F reserved */
1429 
1430 
1431 /*
1432  * NVM Flush Command			NVME_IOCMD_FLUSH
1433  *
1434  * Applies to the entire nsid; dw10-15 are reserved and should be zeroed.
1435  */
1436 typedef struct {
1437 	nvme_subq_head_t head;
1438 	uint32_t	reserved10;
1439 	uint32_t	reserved11;
1440 	uint32_t	reserved12;
1441 	uint32_t	reserved13;
1442 	uint32_t	reserved14;
1443 	uint32_t	reserved15;
1444 } __packed nvme_flush_cmd_t;
1445 
1446 typedef struct {
1447 	uint32_t dw0;
1448 	uint32_t dw1;
1449 	nvme_comq_tail_t tail;
1450 } __packed nvme_flush_res_t;
1451 
1452 /*
1453  * NVM Write Command			NVME_IOCMD_WRITE
1454  */
1455 typedef struct {
1456 	nvme_subq_head_t head;
1457 	uint64_t	start_lba;
1458 #if _BYTE_ORDER == _LITTLE_ENDIAN
1459 	uint16_t	count_lba;
1460 	uint16_t	ioflags;
1461 #else
1462 	uint16_t	ioflags;
1463 	uint16_t	count_lba;
1464 #endif
1465 	uint32_t	dsm;
1466 	uint32_t	iilbrt;		/* expected initial logblk ref tag */
1467 #if _BYTE_ORDER == _LITTLE_ENDIAN
1468 	uint16_t	lbat;		/* expected log blk app tag */
1469 	uint16_t	lbatm;		/* expected log blk app tag mask */
1470 #else
1471 	uint16_t	lbatm;
1472 	uint16_t	lbat;
1473 #endif
1474 } __packed nvme_write_cmd_t;
1475 
1476 typedef struct {
1477 	uint32_t dw0;
1478 	uint32_t dw1;
1479 	nvme_comq_tail_t tail;
1480 } __packed nvme_write_res_t;
1481 
1482 /*
1483  * NVM Read Command			NVME_IOCMD_READ
1484  */
1485 typedef struct {
1486 	nvme_subq_head_t head;
1487 	uint64_t	start_lba;
1488 #if _BYTE_ORDER == _LITTLE_ENDIAN
1489 	uint16_t	count_lba;
1490 	uint16_t	ioflags;
1491 #else
1492 	uint16_t	ioflags;
1493 	uint16_t	count_lba;
1494 #endif
1495 	uint32_t	dsm;
1496 	uint32_t	eilbrt;		/* expected initial logblk ref tag */
1497 #if _BYTE_ORDER == _LITTLE_ENDIAN
1498 	uint16_t	elbat;		/* expected log blk app tag */
1499 	uint16_t	elbatm;		/* expected log blk app tag mask */
1500 #else
1501 	uint16_t	elbatm;
1502 	uint16_t	elbat;
1503 #endif
1504 } __packed nvme_read_cmd_t;
1505 
1506 typedef struct {
1507 	uint32_t dw0;
1508 	uint32_t dw1;
1509 	nvme_comq_tail_t tail;
1510 } __packed nvme_read_res_t;
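
/*
 * Illustrative sketch (not part of the original header): filling in an
 * NVM Read.  count_lba is a 0's based count (0 = 1 block); head.nsid,
 * head.cid and the PRPs are assumed to be set up by the caller.
 */
#if 0
static __inline void
nvme_read_cmd_example(nvme_read_cmd_t *cmd, uint64_t lba, uint32_t nblks)
{
	cmd->head.opcode = NVME_IOCMD_READ;
	cmd->start_lba = lba;
	cmd->count_lba = (uint16_t)(nblks - 1);
	cmd->ioflags = 0;
	cmd->dsm = NVME_DSM_ACCFREQ_UNSPEC;
}
#endif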
1511 
1512 /*
1513  * NVM Write Uncorrectable Command	NVME_IOCMD_WRITEUC
1514  */
1515 typedef struct {
1516 	nvme_subq_head_t head;
1517 	uint64_t	start_lba;
1518 #if _BYTE_ORDER == _LITTLE_ENDIAN
1519 	uint16_t	count_lba;
1520 	uint16_t	reserved12l;
1521 #else
1522 	uint16_t	reserved12l;
1523 	uint16_t	count_lba;
1524 #endif
1525 	uint32_t	reserved13;
1526 	uint32_t	reserved14;
1527 	uint32_t	reserved15;
1528 } __packed nvme_writeuc_cmd_t;
1529 
1530 typedef struct {
1531 	uint32_t dw0;
1532 	uint32_t dw1;
1533 	nvme_comq_tail_t tail;
1534 } __packed nvme_writeuc_res_t;
1535 
1536 /*
1537  * NVM Compare Command			NVME_IOCMD_COMPARE
1538  */
1539 typedef struct {
1540 	nvme_subq_head_t head;
1541 	uint64_t	start_lba;
1542 #if _BYTE_ORDER == _LITTLE_ENDIAN
1543 	uint16_t	count_lba;
1544 	uint16_t	ioflags;
1545 #else
1546 	uint16_t	ioflags;
1547 	uint16_t	count_lba;
1548 #endif
1549 	uint32_t	reserved13;
1550 	uint32_t	eilbrt;		/* expected initial logblk ref tag */
1551 #if _BYTE_ORDER == _LITTLE_ENDIAN
1552 	uint16_t	elbat;		/* expected log blk app tag */
1553 	uint16_t	elbatm;		/* expected log blk app tag mask */
1554 #else
1555 	uint16_t	elbatm;
1556 	uint16_t	elbat;
1557 #endif
1558 } __packed nvme_cmp_cmd_t;
1559 
1560 typedef struct {
1561 	uint32_t dw0;
1562 	uint32_t dw1;
1563 	nvme_comq_tail_t tail;
1564 } __packed nvme_cmp_res_t;
1565 
1566 /*
1567  * NVM Write Zeros Command		NVME_IOCMD_WRITEZ
1568  */
1569 typedef struct {
1570 	nvme_subq_head_t head;
1571 	uint64_t	start_lba;
1572 #if _BYTE_ORDER == _LITTLE_ENDIAN
1573 	uint16_t	count_lba;
1574 	uint16_t	ioflags;
1575 #else
1576 	uint16_t	ioflags;
1577 	uint16_t	count_lba;
1578 #endif
1579 	uint32_t	dsm;
1580 	uint32_t	iilbrt;		/* expected initial logblk ref tag */
1581 #if _BYTE_ORDER == _LITTLE_ENDIAN
1582 	uint16_t	lbat;		/* expected log blk app tag */
1583 	uint16_t	lbatm;		/* expected log blk app tag mask */
1584 #else
1585 	uint16_t	lbatm;
1586 	uint16_t	lbat;
1587 #endif
1588 } __packed nvme_writez_cmd_t;
1589 
1590 typedef struct {
1591 	uint32_t dw0;
1592 	uint32_t dw1;
1593 	nvme_comq_tail_t tail;
1594 } __packed nvme_writez_res_t;
1595 
1596 /*
1597  * NVM Dataset Management Command	NVME_IOCMD_DATAMGMT
1598  *
1599  * See nvme_datamgmt.h for range and context attributes
1600  */
1601 typedef struct {
1602 	nvme_subq_head_t head;
1603 #if _BYTE_ORDER == _LITTLE_ENDIAN
1604 	uint8_t		nr;	/* number of 16-byte ranges 0's based (0=1) */
1605 	uint8_t		reserved01;
1606 	uint8_t		reserved02;
1607 	uint8_t		reserved03;
1608 #else
1609 	uint8_t		reserved03;
1610 	uint8_t		reserved02;
1611 	uint8_t		reserved01;
1612 	uint8_t		nr;	/* number of 16-byte ranges 0's based (0=1) */
1613 #endif
1614 	uint32_t	flags;
1615 	uint32_t	reserved12;
1616 	uint32_t	reserved13;
1617 	uint32_t	reserved14;
1618 	uint32_t	reserved15;
1619 } __packed nvme_datamgmt_cmd_t;
1620 
1621 /* flags field */
1622 #define NVME_DATAMGT_AD		0x00000004U	/* 1=deallocate ranges */
1623 #define NVME_DATAMGT_IDW	0x00000002U	/* 1=hint for write acc */
1624 #define NVME_DATAMGT_IDR	0x00000001U	/* 1=hint for read acc */
1625 
1626 typedef struct {
1627 	uint32_t dw0;
1628 	uint32_t dw1;
1629 	nvme_comq_tail_t tail;
1630 } __packed nvme_datamgmt_res_t;
1631 
1632 /*
1633  * NVM Reservation Register Command	NVME_IOCMD_RESREG (TODO)
1634  */
1635 typedef struct {
1636 	nvme_subq_head_t head;
1637 	uint32_t	dw10;
1638 	uint32_t	dw11;
1639 	uint32_t	dw12;
1640 	uint32_t	dw13;
1641 	uint32_t	dw14;
1642 	uint32_t	dw15;
1643 } __packed nvme_resreg_cmd_t;
1644 
1645 typedef struct {
1646 	uint32_t dw0;
1647 	uint32_t dw1;
1648 	nvme_comq_tail_t tail;
1649 } __packed nvme_resreg_res_t;
1650 
1651 /*
1652  * NVM Reservation Report Command	NVME_IOCMD_RESREP (TODO)
1653  */
1654 typedef struct {
1655 	nvme_subq_head_t head;
1656 	uint32_t	dw10;
1657 	uint32_t	dw11;
1658 	uint32_t	dw12;
1659 	uint32_t	dw13;
1660 	uint32_t	dw14;
1661 	uint32_t	dw15;
1662 } __packed nvme_resrep_cmd_t;
1663 
1664 typedef struct {
1665 	uint32_t dw0;
1666 	uint32_t dw1;
1667 	nvme_comq_tail_t tail;
1668 } __packed nvme_resrep_res_t;
1669 
1670 /*
1671  * NVM Reservation Acquire Command	NVME_IOCMD_RESACQ (TODO)
1672  */
1673 typedef struct {
1674 	nvme_subq_head_t head;
1675 	uint32_t	dw10;
1676 	uint32_t	dw11;
1677 	uint32_t	dw12;
1678 	uint32_t	dw13;
1679 	uint32_t	dw14;
1680 	uint32_t	dw15;
1681 } __packed nvme_resacq_cmd_t;
1682 
1683 typedef struct {
1684 	uint32_t dw0;
1685 	uint32_t dw1;
1686 	nvme_comq_tail_t tail;
1687 } __packed nvme_resacq_res_t;
1688 
1689 /*
1690  * NVM Reservation Release Command	NVME_IOCMD_RESREL (TODO)
1691  */
1692 typedef struct {
1693 	nvme_subq_head_t head;
1694 	uint32_t	dw10;
1695 	uint32_t	dw11;
1696 	uint32_t	dw12;
1697 	uint32_t	dw13;
1698 	uint32_t	dw14;
1699 	uint32_t	dw15;
1700 } __packed nvme_resrel_cmd_t;
1701 
1702 typedef struct {
1703 	uint32_t dw0;
1704 	uint32_t dw1;
1705 	nvme_comq_tail_t tail;
1706 } __packed nvme_resrel_res_t;
1707 
1708 
1709 /*
1710  * SUBMISSION AND COMPLETION QUEUE ALL-COMMAND UNIONS (primary API)
1711  *
1712  * Union of all submission queue commands (64 bytes)
1713  */
1714 typedef union {
1715 	struct {		/* convenient accessors */
1716 		nvme_subq_head_t head;
1717 		uint32_t dw10;
1718 		uint32_t dw11;
1719 		uint32_t dw12;
1720 		uint32_t dw13;
1721 		uint32_t dw14;
1722 		uint32_t dw15;
1723 	};
1724 	nvme_subq_item_t	item;
1725 	nvme_abort_cmd_t	abort;
1726 	nvme_async_cmd_t	async;
1727 	nvme_createcomq_cmd_t	crcom;
1728 	nvme_createsubq_cmd_t	crsub;
1729 	nvme_deleteq_cmd_t	delete;
1730 	nvme_getfeat_cmd_t	getfeat;
1731 	nvme_getlog_cmd_t	getlog;
1732 	nvme_identify_cmd_t	identify;
1733 	nvme_nsatt_cmd_t	nsatt;
1734 	nvme_nsmgmt_cmd_t	nsmgmt;
1735 	nvme_setfeat_cmd_t	setfeat;
1736 	nvme_format_cmd_t	format;
1737 	nvme_secrecv_cmd_t	secrecv;
1738 	nvme_secsend_cmd_t	secsend;
1739 	nvme_flush_cmd_t	flush;
1740 	nvme_write_cmd_t	write;
1741 	nvme_read_cmd_t		read;
1742 	nvme_writeuc_cmd_t	writeuc;
1743 	nvme_cmp_cmd_t		cmp;
1744 	nvme_writez_cmd_t	writez;
1745 	nvme_datamgmt_cmd_t	datamgmt;
1746 	nvme_resreg_cmd_t	resreg;
1747 	nvme_resrep_cmd_t	resrep;
1748 	nvme_resacq_cmd_t	resacq;
1749 	nvme_resrel_cmd_t	resrel;
1750 } __packed nvme_allcmd_t;
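
/*
 * Illustrative sketch (not part of the original header): the union is
 * sized so any command can be constructed in place in a 64-byte submission
 * ring slot.  A hypothetical caller picks a free slot, zeroes it, and
 * fills in the member for the command being issued (memset() stands in
 * for the kernel's bzero()).
 */
#if 0
static __inline void
nvme_build_flush_example(nvme_allcmd_t *slot, uint16_t cid, uint32_t nsid)
{
	memset(slot, 0, sizeof(*slot));
	slot->flush.head.opcode = NVME_IOCMD_FLUSH;
	slot->flush.head.cid = cid;
	slot->flush.head.nsid = nsid;
}
#endif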
1751 
1752 /*
1753  * Union of all completion queue responses (16 bytes)
1754  */
1755 typedef union {
1756 	struct {		/* convenient accessors */
1757 		uint32_t dw0;
1758 		uint32_t dw1;
1759 		nvme_comq_tail_t tail;
1760 	};
1761 	nvme_comq_item_t	item;
1762 	nvme_async_res_t	async;
1763 	nvme_createcomq_res_t	crcom;
1764 	nvme_createsubq_res_t	crsub;
1765 	nvme_deleteq_res_t	delete;
1766 	nvme_getfeat_res_t	getfeat;
1767 	nvme_getlog_res_t	getlog;
1768 	nvme_identify_res_t	identify;
1769 	nvme_nsatt_res_t	nsatt;
1770 	nvme_nsmgmt_res_t	nsmgmt;
1771 	nvme_setfeat_res_t	setfeat;
1772 	nvme_format_res_t	format;
1773 	nvme_secrecv_res_t	secrecv;
1774 	nvme_secsend_res_t	secsend;
1775 	nvme_flush_res_t	flush;
1776 	nvme_write_res_t	write;
1777 	nvme_read_res_t		read;
1778 	nvme_writeuc_res_t	writeuc;
1779 	nvme_cmp_res_t		cmp;
1780 	nvme_writez_res_t	writez;
1781 	nvme_datamgmt_res_t	datamgmt;
1782 	nvme_resreg_res_t	resreg;
1783 	nvme_resrep_res_t	resrep;
1784 	nvme_resacq_res_t	resacq;
1785 	nvme_resrel_res_t	resrel;
1786 } __packed nvme_allres_t;
1787 
1788 /*
1789  * Union of all administrative data buffers (does not exceed 4KB)
1790  */
1791 typedef union {
1792 	nvme_pwstate_data_t	pwstate;
1793 	nvme_lba_fmt_data_t	lbafmt;
1794 	nvme_ident_ctlr_data_t	idctlr;
1795 	nvme_ident_ns_data_t	idns;
1796 	nvme_ident_ns_list_t	nslist;
1797 	nvme_ident_ctlr_list_t	ctlrlist;
1798 	nvme_log_error_data_t	logerr;
1799 	nvme_log_smart_data_t	logsmart;
1800 	nvme_fw_slot_data_t	fwslot;
1801 	nvme_nsmgmt_create_data_t nsmgmt;
1802 	nvme_cmdeff_data_t	cmdeff;
1803 	nvme_resnotify_data_t	resnotify;
1804 } __packed nvme_admin_data_t;
1805 
1806 /*
1807  * MISC STRUCTURES SENT OR RECEIVED AS DATA
1808  */
1809