xref: /freebsd/sys/dev/nvme/nvme_private.h (revision 780fb4a2)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d /* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d /* 8 channel board */

/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 *  embedded in the command (prp1), and the rest of the PRP entries
 *  will be in a list pointed to by the command (prp2).  This means
 *  that the real maximum number of PRP entries we support is
 *  NVME_MAX_PRP_LIST_ENTRIES + 1, which results in a maximum transfer
 *  size of NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE.
 */
#define NVME_MAX_PRP_LIST_ENTRIES	(NVME_MAX_XFER_SIZE / PAGE_SIZE)
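
/*
 * Worked example (illustrative only; assumes PAGE_SIZE == 4096 and
 *  NVME_MAX_XFER_SIZE == 32 * PAGE_SIZE, i.e. 128KB -- see nvme.h for
 *  the real value):
 *
 *	A 128KB transfer spans 32 pages, so it needs 33 PRP entries:
 *	the first goes in cmd.prp1 and the remaining 32 are written to
 *	the PRP list whose bus address is placed in cmd.prp2.
 */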

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O requests
 *  that we will allow to be outstanding on an I/O qpair at any time.  The only
 *  advantage of having IO_ENTRIES > IO_TRACKERS is for debugging purposes -
 *  when dumping the contents of the submission and completion queues, it will
 *  show a longer history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 *  for each controller.
 */

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)    /* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 *  it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

/*
 * Use presence of the BIO_UNMAPPED flag to determine whether unmapped I/O
 *  support and the bus_dmamap_load_bio API are available on the target
 *  kernel.  This will ease porting back to earlier stable branches at a
 *  later point.
 */
#ifdef BIO_UNMAPPED
#define NVME_UNMAPPED_BIO_SUPPORT
#endif

extern uma_zone_t	nvme_request_zone;
extern int32_t		nvme_retry_count;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};
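
/*
 * Typical synchronous-command pattern built on the struct above (a hedged
 *  sketch, not a verbatim excerpt; see nvme_ctrlr.c and nvme_ctrlr_cmd.c
 *  for the canonical users):
 *
 *	struct nvme_completion_poll_status	status;
 *
 *	status.done = FALSE;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	while (status.done == FALSE)
 *		DELAY(5);
 *	if (nvme_completion_is_error(&status.cpl))
 *		nvme_printf(ctrlr, "identify failed\n");
 */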

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#ifdef NVME_UNMAPPED_BIO_SUPPORT
#define NVME_REQUEST_BIO	4
#endif
#define NVME_REQUEST_CCB	5

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	boolean_t			timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	uint32_t		phase;		/* expected completion phase bit */

	uint16_t		vector;		/* MSI-X vector for this qpair */
	int			rid;
	struct resource		*res;
	void			*tag;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;	/* submission queue tail doorbell */
	uint32_t		cq_hdbl_off;	/* completion queue head doorbell */

	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	boolean_t		is_enabled;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
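
/*
 * How the phase bit is consumed (a hedged sketch of the completion loop in
 *  nvme_qpair.c; the exact spelling of the status phase-bit accessor varies
 *  by revision):
 *
 *	while (1) {
 *		cpl = &qpair->cpl[qpair->cq_head];
 *		if (cpl->status.p != qpair->phase)
 *			break;
 *		...
 *		if (++qpair->cq_head == qpair->num_entries) {
 *			qpair->cq_head = 0;
 *			qpair->phase = !qpair->phase;
 *		}
 *	}
 */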

struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			stripesize;
	struct mtx			lock;
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;

	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY 1		/* Can't touch MMIO on disable */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	uint32_t		msix_enabled;
	uint32_t		force_intx;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		num_cpus_per_ioq;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	bus_dma_tag_t		hw_desc_tag;
	bus_dmamap_t		hw_desc_map;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers		*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev			*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t			async_event_config;

	uint32_t			num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void				*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t			is_resetting;
	uint32_t			is_initialized;
	uint32_t			notification_sent;

	boolean_t			is_failed;
	STAILQ_HEAD(, nvme_request)	fail_req;
};

#define nvme_mmio_offsetof(reg)						       \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					       \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					       \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					       \
	do {								       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg)+4,				       \
		    (val & 0xFFFFFFFF00000000ULL) >> 32);		       \
	} while (0)
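
/*
 * Example use of the register accessors (a sketch; nvme_ctrlr.c contains
 *  the canonical sequences).  The register names are the member names of
 *  struct nvme_registers in nvme.h:
 *
 *	uint32_t vs;
 *
 *	vs = nvme_mmio_read_4(ctrlr, vs);
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 *	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
 */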

#if __FreeBSD_version < 800054
#define wmb()	__asm volatile("sfence" ::: "memory")
#define mb()	__asm volatile("mfence" ::: "memory")
#endif

#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf((ctrlr)->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que, uint16_t vector,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
int	nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
			     uint16_t vector, uint32_t num_entries,
			     uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc,
					   boolean_t print_on_error);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

/*
 * Busdma callback for loads that must resolve to a single segment: record
 *  the segment's bus address in *arg.
 */
static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}
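
/*
 * Example (a sketch): loading a small buffer with nvme_single_map() as the
 *  callback, which leaves the buffer's bus address in bus_addr:
 *
 *	uint64_t bus_addr;
 *
 *	bus_dmamap_load(qpair->dma_tag, map, vaddr, len,
 *	    nvme_single_map, &bus_addr, 0);
 */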

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = TRUE;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
#ifdef NVME_UNMAPPED_BIO_SUPPORT
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
#else
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = bio->bio_data;
		req->payload_size = bio->bio_bcount;
#endif
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}

	return (req);
}
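
/*
 * Typical allocate-and-submit flow (a sketch of the pattern used in
 *  nvme_ctrlr_cmd.c; the opcode shown is illustrative).  Note that the
 *  allocators use M_NOWAIT and can return NULL:
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_vaddr(payload, payload_size,
 *	    cb_fn, cb_arg);
 *	if (req == NULL)
 *		return;
 *	req->cmd.opc = NVME_OPC_IDENTIFY;
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 */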

#define nvme_free_request(req)	uma_zfree(nvme_request_zone, req)

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_intx_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */