/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))
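
/*
 * For example, the attach path retrieves its softc with (a sketch):
 *
 *	struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
 */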

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d /* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d /* 8 channel board */

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O requests
 *  that we will allow to be outstanding on an I/O qpair at any time.  The only
 *  advantage of having IO_ENTRIES > IO_TRACKERS is for debugging purposes -
 *  when dumping the contents of the submission and completion queues, it will
 *  show a longer history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified by CAP.MQES
 *  for each controller.
 */

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 *  it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

#define NVME_GONE		0xfffffffful

extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#define NVME_REQUEST_BIO	4
#define NVME_REQUEST_CCB	5

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	bool				timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	sbintime_t			deadline;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

enum nvme_recovery {
	RECOVERY_NONE = 0,		/* Normal operations */
	RECOVERY_START,			/* Deadline has passed, start recovering */
	RECOVERY_RESET,			/* This pass, initiate reset of controller */
	RECOVERY_WAITING,		/* Waiting for the reset to complete */
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	struct callout		timer;
	sbintime_t		deadline;
	bool			timer_armed;
	enum nvme_recovery	recovery_state;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;
	int64_t			num_ignored;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);

struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			boundary;
	struct mtx			lock;
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;
	int			domain;
	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define	QUIRK_DELAY_B4_CHK_RDY	1		/* Can't touch MMIO on disable */
#define	QUIRK_DISABLE_TIMEOUT	2		/* Disable broken completion timeout feature */
#define	QUIRK_INTEL_ALIGNMENT	4		/* Pre NVMe 1.3 performance alignment */
#define	QUIRK_AHCI		8		/* Attached via AHCI redirect */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	int			msi_count;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** low and high 32 bits of the CAP (controller capabilities) register */
	uint32_t		cap_lo;
	uint32_t		cap_hi;

	/** Page size and log2(page_size) - 12 that we're currently using */
	uint32_t		page_size;
	uint32_t		mps;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers		*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev			*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t			async_event_config;

	uint32_t			num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void				*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t			is_resetting;
	uint32_t			is_initialized;
	uint32_t			notification_sent;

	bool				is_failed;
	bool				is_dying;
	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	int				hmb_nchunks;
	size_t				hmb_chunk;
	bus_dma_tag_t			hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t		hmbc_map;
		void			*hmbc_vaddr;
		uint64_t		hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t			hmb_desc_tag;
	bus_dmamap_t			hmb_desc_map;
	struct nvme_hmb_desc		*hmb_desc_vaddr;
	uint64_t			hmb_desc_paddr;
};

#define nvme_mmio_offsetof(reg)						       \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					       \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					       \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					       \
	do {								       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg)+4,				       \
		    (val & 0xFFFFFFFF00000000ULL) >> 32);		       \
	} while (0)
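
/*
 * Example: 64-bit registers such as the admin queue base addresses are
 * programmed as two 32-bit writes by this macro.  A sketch of the pattern
 * used when enabling the controller:
 *
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 *	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
 */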

#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);

/*
 * Wait for a command to complete using the nvme_completion_poll_cb.  Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs.  The ISR will run the callback which will set status->done to
 * true, usually within microseconds.  If not, then after one second the
 * timeout handler should reset the controller and abort all outstanding
 * requests, including this polled one.  If it still hasn't completed after ten
 * seconds, then something is wrong with the driver, and panic is the only way
 * to recover.
 *
 * Most commands using this interface aren't actual I/O to the drive's media so
 * they complete within a few microseconds.  Adaptively sleep for short,
 * increasing intervals (from one microsecond up to one millisecond) to catch
 * the vast majority of these without waiting a full tick plus scheduling
 * delays.  Since these commands are mostly issued at startup, this drastically
 * reduces startup time.
 */
static __inline
void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int timeout = ticks + 10 * hz;
	sbintime_t delta_t = SBT_1US;

	while (!atomic_load_acq_int(&status->done)) {
		if (timeout - ticks < 0)
			panic("NVME polled command failed to complete within 10s.");
		pause_sbt("nvme", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}
}
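
/*
 * Example of the polled-command pattern (a sketch mirroring the driver's
 * initialization code; assumes a valid `ctrlr`):
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
 */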

static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}
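
/*
 * Example: nvme_single_map serves as the bus_dmamap_load() callback for
 * buffers that must map to exactly one segment, capturing that segment's
 * bus address.  A sketch, where `tag`, `map`, `vaddr` and `size` stand in
 * for the caller's DMA tag, map, buffer and length:
 *
 *	uint64_t bus_addr;
 *
 *	bus_dmamap_load(tag, map, vaddr, size, nvme_single_map,
 *	    &bus_addr, 0);
 */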

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}

	return (req);
}

#define nvme_free_request(req)	free(req, M_NVME)
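
/*
 * Example: build a request carrying a virtually contiguous payload and hand
 * it to the admin queue.  A sketch of the pattern used by the admin command
 * helpers; note the request is freed by the completion path, not the caller:
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_vaddr(payload, payload_size,
 *	    cb_fn, cb_arg);
 *	req->cmd.opc = NVME_OPC_IDENTIFY;
 *	req->cmd.cdw10 = htole32(1);	(CNS = 1: identify the controller)
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 */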

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_shared_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */