/*	$OpenBSD: nvmereg.h,v 1.15 2024/05/24 12:04:07 krw Exp $ */

/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define NVME_CAP	0x0000	/* Controller Capabilities */
#define  NVME_CAP_MPSMAX(_r)	(12 + (((_r) >> 52) & 0xf)) /* shift */
#define  NVME_CAP_MPSMIN(_r)	(12 + (((_r) >> 48) & 0xf)) /* shift */
#define  NVME_CAP_CSS(_r)	(((_r) >> 37) & 0x7f)
#define  NVME_CAP_CSS_NVM	(1 << 0)
#define  NVME_CAP_NSSRS(_r)	ISSET((_r), (1ULL << 36))
#define  NVME_CAP_DSTRD(_r)	(1 << (2 + (((_r) >> 32) & 0xf))) /* bytes */
#define  NVME_CAP_TO(_r)	(500 * (((_r) >> 24) & 0xff)) /* ms */
#define  NVME_CAP_AMS(_r)	(((_r) >> 17) & 0x3)
#define  NVME_CAP_AMS_WRR	(1 << 0)
#define  NVME_CAP_AMS_VENDOR	(1 << 1)
#define  NVME_CAP_CQR(_r)	ISSET((_r), (1 << 16))
#define  NVME_CAP_MQES(_r)	(((_r) & 0xffff) + 1)
#define NVME_CAP_LO	0x0000
#define NVME_CAP_HI	0x0004
#define NVME_VS		0x0008	/* Version */
#define  NVME_VS_MJR(_r)	(((_r) & 0xffff0000) >> 16)
#define  NVME_VS_MNR(_r)	(((_r) & 0x0000ff00) >> 8)
#define NVME_INTMS	0x000c	/* Interrupt Mask Set */
#define NVME_INTMC	0x0010	/* Interrupt Mask Clear */
#define NVME_CC		0x0014	/* Controller Configuration */
#define  NVME_CC_IOCQES(_v)	(((_v) & 0xf) << 20)
#define  NVME_CC_IOCQES_MASK	NVME_CC_IOCQES(0xf)
#define  NVME_CC_IOCQES_R(_v)	(((_v) >> 20) & 0xf)
#define  NVME_CC_IOSQES(_v)	(((_v) & 0xf) << 16)
#define  NVME_CC_IOSQES_MASK	NVME_CC_IOSQES(0xf)
#define  NVME_CC_IOSQES_R(_v)	(((_v) >> 16) & 0xf)
#define  NVME_CC_SHN(_v)	(((_v) & 0x3) << 14)
#define  NVME_CC_SHN_MASK	NVME_CC_SHN(0x3)
#define  NVME_CC_SHN_R(_v)	(((_v) >> 14) & 0x3)
#define  NVME_CC_SHN_NONE	0
#define  NVME_CC_SHN_NORMAL	1
#define  NVME_CC_SHN_ABRUPT	2
#define  NVME_CC_AMS(_v)	(((_v) & 0x7) << 11)
#define  NVME_CC_AMS_MASK	NVME_CC_AMS(0x7)
#define  NVME_CC_AMS_R(_v)	(((_v) >> 11) & 0x7)
#define  NVME_CC_AMS_RR		0 /* round-robin */
#define  NVME_CC_AMS_WRR_U	1 /* weighted round-robin w/ urgent */
#define  NVME_CC_AMS_VENDOR	7 /* vendor */
#define  NVME_CC_MPS(_v)	((((_v) - 12) & 0xf) << 7)
#define  NVME_CC_MPS_MASK	(0xf << 7)
#define  NVME_CC_MPS_R(_v)	(12 + (((_v) >> 7) & 0xf))
#define  NVME_CC_CSS(_v)	(((_v) & 0x7) << 4)
#define  NVME_CC_CSS_MASK	NVME_CC_CSS(0x7)
#define  NVME_CC_CSS_R(_v)	(((_v) >> 4) & 0x7)
#define  NVME_CC_CSS_NVM	0
#define  NVME_CC_EN		(1 << 0)
#define NVME_CSTS	0x001c	/* Controller Status */
#define  NVME_CSTS_SHST_MASK	(0x3 << 2)
#define  NVME_CSTS_SHST_NONE	(0x0 << 2) /* normal operation */
#define  NVME_CSTS_SHST_WAIT	(0x1 << 2) /* shutdown processing occurring */
#define  NVME_CSTS_SHST_DONE	(0x2 << 2) /* shutdown processing complete */
#define  NVME_CSTS_CFS		(1 << 1)
#define  NVME_CSTS_RDY		(1 << 0)
#define NVME_NSSR	0x0020	/* NVM Subsystem Reset (Optional) */
#define NVME_AQA	0x0024	/* Admin Queue Attributes */
				/* Admin Completion Queue Size */
#define  NVME_AQA_ACQS(_v)	(((_v) - 1) << 16)
				/* Admin Submission Queue Size */
#define  NVME_AQA_ASQS(_v)	(((_v) - 1) << 0)
#define NVME_ASQ	0x0028	/* Admin Submission Queue Base Address */
#define NVME_ACQ	0x0030	/* Admin Completion Queue Base Address */

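/*
 * Illustrative sketch, not part of the driver interface: composing the
 * controller bring-up values from the macros above.  The nvme_example_*
 * names and parameters are hypothetical.  6 and 4 are log2 of the
 * 64-byte submission and 16-byte completion queue entry sizes defined
 * further down.  Once AQA/ASQ/ACQ are programmed and CC is written with
 * EN set, the controller signals readiness via NVME_CSTS_RDY within
 * NVME_CAP_TO(cap) milliseconds.
 */
static inline u_int32_t
nvme_example_cc(u_int32_t mps_shift)
{
	/* mps_shift == 12 selects 4k memory pages (CC.MPS is mps_shift - 12) */
	return (NVME_CC_IOSQES(6) | NVME_CC_IOCQES(4) |
	    NVME_CC_AMS(NVME_CC_AMS_RR) | NVME_CC_CSS(NVME_CC_CSS_NVM) |
	    NVME_CC_MPS(mps_shift) | NVME_CC_EN);
}

static inline u_int32_t
nvme_example_aqa(u_int32_t entries)
{
	/* both AQA fields are zero-based; the macros subtract 1 */
	return (NVME_AQA_ACQS(entries) | NVME_AQA_ASQS(entries));
}
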
#define NVME_ADMIN_Q		0
/* Submission Queue Tail Doorbell */
#define NVME_SQTDBL(_q, _s)	(0x1000 + (2 * (_q) + 0) * (_s))
/* Completion Queue Head Doorbell */
#define NVME_CQHDBL(_q, _s)	(0x1000 + (2 * (_q) + 1) * (_s))

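/*
 * Illustrative sketch, not part of the driver interface: the doorbell
 * stride comes from CAP.DSTRD, so the byte offset of a queue's doorbell
 * register is derived as below (the completion side uses NVME_CQHDBL
 * the same way).  The nvme_example_* name is hypothetical.
 */
static inline u_int32_t
nvme_example_sqtdbl(u_int64_t cap, u_int16_t qid)
{
	/* NVME_CAP_DSTRD() already converts the encoded stride to bytes */
	return (NVME_SQTDBL(qid, NVME_CAP_DSTRD(cap)));
}
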
struct nvme_sge {
	u_int8_t	id;
	u_int8_t	_reserved[15];
} __packed __aligned(8);

struct nvme_sge_data {
	u_int8_t	id;
	u_int8_t	_reserved[3];

	u_int32_t	length;

	u_int64_t	address;
} __packed __aligned(8);

struct nvme_sge_bit_bucket {
	u_int8_t	id;
	u_int8_t	_reserved[3];

	u_int32_t	length;

	u_int64_t	address;
} __packed __aligned(8);

struct nvme_sqe {
	u_int8_t	opcode;
	u_int8_t	flags;
	u_int16_t	cid;

	u_int32_t	nsid;

	u_int8_t	_reserved[8];

	u_int64_t	mptr;

	union {
		u_int64_t	prp[2];
		struct nvme_sge	sge;
	} __packed	entry;

	u_int32_t	cdw10;
	u_int32_t	cdw11;
	u_int32_t	cdw12;
	u_int32_t	cdw13;
	u_int32_t	cdw14;
	u_int32_t	cdw15;
} __packed __aligned(8);

struct nvme_sqe_q {
	u_int8_t	opcode;
	u_int8_t	flags;
	u_int16_t	cid;

	u_int8_t	_reserved1[20];

	u_int64_t	prp1;

	u_int8_t	_reserved2[8];

	u_int16_t	qid;
	u_int16_t	qsize;

	u_int8_t	qflags;
#define NVM_SQE_SQ_QPRIO_URG	(0x0 << 1)
#define NVM_SQE_SQ_QPRIO_HI	(0x1 << 1)
#define NVM_SQE_SQ_QPRIO_MED	(0x2 << 1)
#define NVM_SQE_SQ_QPRIO_LOW	(0x3 << 1)
#define NVM_SQE_CQ_IEN		(1 << 1)
#define NVM_SQE_Q_PC		(1 << 0)
	u_int8_t	_reserved3;
	u_int16_t	cqid; /* XXX interrupt vector for cq */

	u_int8_t	_reserved4[16];
} __packed __aligned(8);

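/*
 * Illustrative sketch, not part of the driver interface: filling a Create
 * I/O Completion Queue command.  The nvme_example_* name is hypothetical,
 * opcode is NVM_ADMIN_ADD_IOCQ from the admin opcode list below, and
 * memset()/htole*() are assumed to be visible to the caller.  Note that
 * qsize is zero-based and that, for completion queue creation, the cqid
 * field carries the interrupt vector (see the XXX comment above).
 */
static inline void
nvme_example_fill_create_iocq(struct nvme_sqe_q *sqe, u_int8_t opcode,
    u_int16_t qid, u_int16_t entries, u_int64_t prp1, u_int16_t iv)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = opcode;
	sqe->prp1 = htole64(prp1);		/* physically contiguous ring */
	sqe->qid = htole16(qid);
	sqe->qsize = htole16(entries - 1);	/* zero-based */
	sqe->qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
	sqe->cqid = htole16(iv);
}
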
struct nvme_sqe_io {
	u_int8_t	opcode;
	u_int8_t	flags;
	u_int16_t	cid;

	u_int32_t	nsid;

	u_int8_t	_reserved[8];

	u_int64_t	mptr;

	union {
		u_int64_t	prp[2];
		struct nvme_sge	sge;
	} __packed	entry;

	u_int64_t	slba;	/* Starting LBA */

	u_int16_t	nlb;	/* Number of Logical Blocks */
	u_int16_t	ioflags;

	u_int8_t	dsm;	/* Dataset Management */
	u_int8_t	_reserved2[3];

	u_int32_t	eilbrt;	/* Expected Initial Logical Block
				   Reference Tag */

	u_int16_t	elbat;	/* Expected Logical Block
				   Application Tag */
	u_int16_t	elbatm;	/* Expected Logical Block
				   Application Tag Mask */
} __packed __aligned(8);

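/*
 * Illustrative sketch, not part of the driver interface: filling a
 * read/write command.  The nvme_example_* name is hypothetical, opcode is
 * NVM_CMD_READ or NVM_CMD_WRITE from the I/O opcode list below, and
 * memset()/htole*() are assumed to be visible to the caller.  Note that
 * nlb is zero-based, and that entry.prp[] (or entry.sge) must still be
 * set up to describe the data buffer.
 */
static inline void
nvme_example_fill_rw(struct nvme_sqe_io *sqe, u_int8_t opcode,
    u_int32_t nsid, u_int64_t lba, u_int16_t blocks)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = opcode;
	sqe->nsid = htole32(nsid);
	sqe->slba = htole64(lba);
	sqe->nlb = htole16(blocks - 1);	/* zero-based */
}
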
struct nvme_cqe {
	u_int32_t	cdw0;

	u_int32_t	_reserved;

	u_int16_t	sqhd; /* SQ Head Pointer */
	u_int16_t	sqid; /* SQ Identifier */

	u_int16_t	cid; /* Command Identifier */
	u_int16_t	flags;
#define NVME_CQE_DNR		(1 << 15)
#define NVME_CQE_M		(1 << 14)
#define NVME_CQE_SCT(_f)	((_f) & (0x07 << 9))
#define  NVME_CQE_SCT_GENERIC		(0x00 << 9)
#define  NVME_CQE_SCT_COMMAND		(0x01 << 9)
#define  NVME_CQE_SCT_MEDIAERR		(0x02 << 9)
#define  NVME_CQE_SCT_VENDOR		(0x07 << 9)
#define NVME_CQE_SC(_f)		((_f) & (0xff << 1))
#define  NVME_CQE_SC_SUCCESS		(0x00 << 1)
#define  NVME_CQE_SC_INVALID_OPCODE	(0x01 << 1)
#define  NVME_CQE_SC_INVALID_FIELD	(0x02 << 1)
#define  NVME_CQE_SC_CID_CONFLICT	(0x03 << 1)
#define  NVME_CQE_SC_DATA_XFER_ERR	(0x04 << 1)
#define  NVME_CQE_SC_ABRT_BY_NO_PWR	(0x05 << 1)
#define  NVME_CQE_SC_INTERNAL_DEV_ERR	(0x06 << 1)
#define  NVME_CQE_SC_CMD_ABRT_REQD	(0x07 << 1)
#define  NVME_CQE_SC_CMD_ABDR_SQ_DEL	(0x08 << 1)
#define  NVME_CQE_SC_CMD_ABDR_FUSE_ERR	(0x09 << 1)
#define  NVME_CQE_SC_CMD_ABDR_FUSE_MISS	(0x0a << 1)
#define  NVME_CQE_SC_INVALID_NS		(0x0b << 1)
#define  NVME_CQE_SC_CMD_SEQ_ERR	(0x0c << 1)
#define  NVME_CQE_SC_INVALID_LAST_SGL	(0x0d << 1)
#define  NVME_CQE_SC_INVALID_NUM_SGL	(0x0e << 1)
#define  NVME_CQE_SC_DATA_SGL_LEN	(0x0f << 1)
#define  NVME_CQE_SC_MDATA_SGL_LEN	(0x10 << 1)
#define  NVME_CQE_SC_SGL_TYPE_INVALID	(0x11 << 1)
#define  NVME_CQE_SC_LBA_RANGE		(0x80 << 1)
#define  NVME_CQE_SC_CAP_EXCEEDED	(0x81 << 1)
#define  NVME_CQE_NS_NOT_RDY		(0x82 << 1)
#define  NVME_CQE_RSV_CONFLICT		(0x83 << 1)
#define NVME_CQE_PHASE		(1 << 0)
} __packed __aligned(8);

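/*
 * Illustrative sketch, not part of the driver interface: consuming a
 * completion queue entry.  The entry at the current head index is valid
 * once its phase bit matches the phase the host expects; the expected
 * phase is inverted each time the head wraps around the queue.  The
 * nvme_example_* name is hypothetical and letoh16() is assumed to be
 * visible to the caller.
 */
static inline int
nvme_example_cqe_done(const struct nvme_cqe *cqe, u_int16_t phase)
{
	u_int16_t flags = letoh16(cqe->flags);

	if ((flags & NVME_CQE_PHASE) != phase)
		return (0);	/* not yet posted by the controller */

	/* 1 on success, -1 on error (inspect NVME_CQE_SCT/SC for details) */
	return (NVME_CQE_SC(flags) == NVME_CQE_SC_SUCCESS ? 1 : -1);
}
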
#define NVM_ADMIN_DEL_IOSQ	0x00 /* Delete I/O Submission Queue */
#define NVM_ADMIN_ADD_IOSQ	0x01 /* Create I/O Submission Queue */
#define NVM_ADMIN_GET_LOG_PG	0x02 /* Get Log Page */
#define NVM_ADMIN_DEL_IOCQ	0x04 /* Delete I/O Completion Queue */
#define NVM_ADMIN_ADD_IOCQ	0x05 /* Create I/O Completion Queue */
#define NVM_ADMIN_IDENTIFY	0x06 /* Identify */
#define NVM_ADMIN_ABORT		0x08 /* Abort */
#define NVM_ADMIN_SET_FEATURES	0x09 /* Set Features */
#define NVM_ADMIN_GET_FEATURES	0x0a /* Get Features */
#define NVM_ADMIN_ASYNC_EV_REQ	0x0c /* Asynchronous Event Request */
#define NVM_ADMIN_FW_ACTIVATE	0x10 /* Firmware Activate */
#define NVM_ADMIN_FW_DOWNLOAD	0x11 /* Firmware Image Download */
#define NVM_ADMIN_SELFTEST	0x14 /* Start self test */

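/*
 * Illustrative sketch, not part of the driver interface: an Identify
 * Controller command is a plain nvme_sqe with CNS 1 in cdw10 and a
 * 4096-byte buffer described by the first PRP entry.  The nvme_example_*
 * name is hypothetical and memset()/htole*() are assumed to be visible
 * to the caller.
 */
static inline void
nvme_example_fill_identify_ctrlr(struct nvme_sqe *sqe, u_int64_t prp1)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = NVM_ADMIN_IDENTIFY;
	sqe->entry.prp[0] = htole64(prp1);
	sqe->cdw10 = htole32(1);	/* CNS 1: identify controller */
}
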
#define NVM_CMD_FLUSH		0x00 /* Flush */
#define NVM_CMD_WRITE		0x01 /* Write */
#define NVM_CMD_READ		0x02 /* Read */
#define NVM_CMD_WR_UNCOR	0x04 /* Write Uncorrectable */
#define NVM_CMD_COMPARE		0x05 /* Compare */
#define NVM_CMD_DSM		0x09 /* Dataset Management */

/* Power State Descriptor Data */
struct nvm_identify_psd {
	u_int16_t	mp;		/* Max Power */
	u_int16_t	flags;

	u_int32_t	enlat;		/* Entry Latency */

	u_int32_t	exlat;		/* Exit Latency */

	u_int8_t	rrt;		/* Relative Read Throughput */
	u_int8_t	rrl;		/* Relative Read Latency */
	u_int8_t	rwt;		/* Relative Write Throughput */
	u_int8_t	rwl;		/* Relative Write Latency */

	u_int8_t	_reserved[16];
} __packed __aligned(8);

struct nvm_identify_controller {
	/* Controller Capabilities and Features */

	u_int16_t	vid;		/* PCI Vendor ID */
	u_int16_t	ssvid;		/* PCI Subsystem Vendor ID */

	u_int8_t	sn[20];		/* Serial Number */
	u_int8_t	mn[40];		/* Model Number */
	u_int8_t	fr[8];		/* Firmware Revision */

	u_int8_t	rab;		/* Recommended Arbitration Burst */
	u_int8_t	ieee[3];	/* IEEE OUI Identifier */

	u_int8_t	cmic;		/* Controller Multi-Path I/O and
					   Namespace Sharing Capabilities */
	u_int8_t	mdts;		/* Maximum Data Transfer Size */
	u_int16_t	cntlid;		/* Controller ID */

	u_int8_t	_reserved1[16];
	u_int32_t	ctratt;
#define NVM_ID_CTRL_CTRATT_FMT			"\020" \
	"\016DELEG" "\017DEVNVM" "\020ELBAS" "\005ENDURGRPS" \
	"\014FIXCAPMGMT" "\001HOSTID" "\013MDS" "\002NOPSPM" \
	"\010NSGRAN" "\003NVMSETS" "\006PREDLATENCY" "\004READRCVRY" \
	"\011SQASSOC" "\007TBKAS" "\012UUIDLIST" "\015VARCAPMGMT"

	u_int8_t	_reserved9[156];

	/* Admin Command Set Attributes & Optional Controller Capabilities */

	u_int16_t	oacs;		/* Optional Admin Command Support */
#define NVM_ID_CTRL_OACS_FMT			"\020" \
	"\013CAFL" "\011DBBC" "\006DIREC" "\005DST" "\012GLBAS" \
	"\002FORMAT" "\003FWCD" "\007MISR" "\004NSMGMT" "\001SECSR" \
	"\010VM"

	u_int8_t	acl;		/* Abort Command Limit */
	u_int8_t	aerl;		/* Asynchronous Event Request Limit */

	u_int8_t	frmw;		/* Firmware Updates */
	u_int8_t	lpa;		/* Log Page Attributes */
#define NVM_ID_CTRL_LPA_PE		(1 << 4)
	u_int8_t	elpe;		/* Error Log Page Entries */
	u_int8_t	npss;		/* Number of Power States Support */

	u_int8_t	avscc;		/* Admin Vendor Specific Command
					   Configuration */
	u_int8_t	apsta;		/* Autonomous Power State Transition
					   Attributes */

	u_int8_t	_reserved2[62];
	u_int32_t	sanicap;
#define NVM_ID_CTRL_SANICAP_FMT			"\020" \
	"\002BlockErase" "\001CryptoErase" "\003Overwrite"
	u_int8_t	_reserved10[180];

	/* NVM Command Set Attributes */

	u_int8_t	sqes;		/* Submission Queue Entry Size */
	u_int8_t	cqes;		/* Completion Queue Entry Size */
	u_int8_t	_reserved3[2];

	u_int32_t	nn;		/* Number of Namespaces */

	u_int16_t	oncs;		/* Optional NVM Command Support */
#define NVM_ID_CTRL_ONCS_FMT			"\020" \
	"\006RSV" "\001SCMP" "\011SCPY" "\003SDMGMT" "\005SF" \
	"\010SV" "\002SWU" "\004SWZ" "\007TS"

	u_int16_t	fuses;		/* Fused Operation Support */

	u_int8_t	fna;		/* Format NVM Attributes */
#define NVM_ID_CTRL_FNA_CRYPTOFORMAT		(1 << 2)
	u_int8_t	vwc;		/* Volatile Write Cache */
#define NVM_ID_CTRL_VWC_PRESENT			(1 << 0)
	u_int16_t	awun;		/* Atomic Write Unit Normal */

	u_int16_t	awupf;		/* Atomic Write Unit Power Fail */
	u_int8_t	nvscc;		/* NVM Vendor Specific Command */
	u_int8_t	_reserved4[1];

	u_int16_t	acwu;		/* Atomic Compare & Write Unit */
	u_int8_t	_reserved5[2];

	u_int32_t	sgls;		/* SGL Support */

	u_int8_t	_reserved6[164];

	/* I/O Command Set Attributes */

	u_int8_t	_reserved7[1344];

	/* Power State Descriptors */

	struct nvm_identify_psd psd[32]; /* Power State Descriptors */

	/* Vendor Specific */

	u_int8_t	_reserved8[1024];
} __packed __aligned(8);

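/*
 * Illustrative sketch, not part of the driver interface: MDTS is
 * expressed as a power of two in units of the minimum memory page size
 * from CAP.MPSMIN, so the limit in bytes is derived as below.  The
 * nvme_example_* name is hypothetical.
 */
static inline u_int64_t
nvme_example_max_xfer(const struct nvm_identify_controller *id, u_int64_t cap)
{
	if (id->mdts == 0)
		return (0);	/* zero means no transfer size limit */

	return ((u_int64_t)1 << (id->mdts + NVME_CAP_MPSMIN(cap)));
}
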
struct nvm_namespace_format {
	u_int16_t	ms;		/* Metadata Size */
	u_int8_t	lbads;		/* LBA Data Size */
	u_int8_t	rp;		/* Relative Performance */
} __packed __aligned(4);

struct nvm_identify_namespace {
	u_int64_t	nsze;		/* Namespace Size */

	u_int64_t	ncap;		/* Namespace Capacity */

	u_int64_t	nuse;		/* Namespace Utilization */

	u_int8_t	nsfeat;		/* Namespace Features */
#define	NVME_ID_NS_NSFEAT_THIN_PROV	(1 << 0)
#define NVME_ID_NS_NSFEAT_FMT		"\020" \
	"\002NSABP" "\005OPTPERF" "\001THIN_PROV" "\004UIDREUSE" "\003DAE"

	u_int8_t	nlbaf;		/* Number of LBA Formats */
	u_int8_t	flbas;		/* Formatted LBA Size */
#define NVME_ID_NS_FLBAS(_f)		((_f) & 0x0f)
#define NVME_ID_NS_FLBAS_MD		0x10
	u_int8_t	mc;		/* Metadata Capabilities */

	u_int8_t	dpc;		/* End-to-end Data Protection
					   Capabilities */
	u_int8_t	dps;		/* End-to-end Data Protection Type Settings */
#define NVME_ID_NS_DPS_PIP		(1 << 3)
#define NVME_ID_NS_DPS_TYPE(_f)		((_f) & 0x7)

	u_int8_t	_reserved1[74];
	uint8_t		nguid[16];
	uint8_t		eui64[8];	/* BIG-endian */

	struct nvm_namespace_format
			lbaf[16];	/* LBA Format Support */

	u_int8_t	_reserved2[192];

	u_int8_t	vs[3712];
} __packed __aligned(8);
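
/*
 * Illustrative sketch, not part of the driver interface: the logical
 * block size of a namespace comes from the LBA format selected by the
 * low bits of flbas; lbads is a power-of-two exponent.  The
 * nvme_example_* name is hypothetical.
 */
static inline u_int32_t
nvme_example_lba_size(const struct nvm_identify_namespace *ns)
{
	const struct nvm_namespace_format *f;

	f = &ns->lbaf[NVME_ID_NS_FLBAS(ns->flbas)];
	return (1 << f->lbads);
}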