xref: /freebsd/sys/dev/nvmf/host/nvmf_var.h (revision b985c9ca)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2023-2024 Chelsio Communications, Inc.
5  * Written by: John Baldwin <jhb@FreeBSD.org>
6  */
7 
8 #ifndef __NVMF_VAR_H__
9 #define	__NVMF_VAR_H__
10 
11 #include <sys/_callout.h>
12 #include <sys/_lock.h>
13 #include <sys/_mutex.h>
14 #include <sys/_sx.h>
15 #include <sys/_task.h>
16 #include <sys/queue.h>
17 #include <dev/nvme/nvme.h>
18 #include <dev/nvmf/nvmf_transport.h>
19 
20 struct nvmf_aer;
21 struct nvmf_capsule;
22 struct nvmf_host_qpair;
23 struct nvmf_namespace;
24 
25 typedef void nvmf_request_complete_t(void *, const struct nvme_completion *);
26 
/*
 * Instance variables handed to a new host device at attach time;
 * presumably populated from the userland handoff (see nvmf_init_ivars()
 * / nvmf_free_ivars() below) — confirm against nvmf.c.
 */
struct nvmf_ivars {
	struct nvmf_handoff_host *hh;			/* handoff parameters from userland */
	struct nvmf_handoff_qpair_params *io_params;	/* per-I/O-queue handoff parameters */
	struct nvme_controller_data *cdata;		/* Identify Controller data */
};
32 
/*
 * Per-controller softc for an NVMe over Fabrics host (initiator)
 * device instance.
 */
struct nvmf_softc {
	device_t dev;

	struct nvmf_host_qpair *admin;	/* admin queue pair */
	struct nvmf_host_qpair **io;	/* array of I/O queue pairs */
	u_int	num_io_queues;		/* number of entries in 'io' */
	enum nvmf_trtype trtype;	/* fabrics transport type */

	/* CAM SIM state used to expose namespaces to the storage stack. */
	struct cam_sim *sim;
	struct cam_path *path;
	struct mtx sim_mtx;
	bool sim_disconnected;		/* SIM frozen due to lost connection? */

	struct nvmf_namespace **ns;	/* active namespaces; NOTE(review): presumably indexed by nsid - 1, confirm */

	struct nvme_controller_data *cdata;	/* Identify Controller data */
	uint64_t cap;			/* NOTE(review): presumably cached CAP property — confirm */
	uint32_t vs;			/* NOTE(review): presumably cached VS property — confirm */
	u_int max_pending_io;
	u_long max_xfer_size;		/* maximum data transfer size in bytes */

	struct cdev *cdev;		/* character device node for this controller */

	/*
	 * Keep Alive support depends on two timers.  The 'tx' timer
	 * is responsible for sending KeepAlive commands and runs at
	 * half the timeout interval.  The 'rx' timer is responsible
	 * for detecting an actual timeout.
	 *
	 * For efficient support of TKAS, the host does not reschedule
	 * these timers every time new commands are scheduled.
	 * Instead, the host sets the *_traffic flags when commands
	 * are sent and received.  The timeout handlers check and
	 * clear these flags.  This does mean it can take up to twice
	 * the timeout time to detect an AWOL controller.
	 */
	bool	ka_traffic;			/* Using TKAS? */

	volatile int ka_active_tx_traffic;	/* commands sent since last tx timeout? */
	struct callout ka_tx_timer;
	sbintime_t ka_tx_sbt;			/* tx timer period */

	volatile int ka_active_rx_traffic;	/* completions received since last rx timeout? */
	struct callout ka_rx_timer;
	sbintime_t ka_rx_sbt;			/* rx timer period */

	struct sx connection_lock;		/* serializes disconnect/reconnect/detach */
	struct task disconnect_task;		/* defers disconnect handling to a taskqueue */
	bool detaching;				/* device detach in progress */

	u_int num_aer;				/* number of outstanding AER slots */
	struct nvmf_aer *aer;			/* array of AER state, 'num_aer' entries */
};
86 
/*
 * A single outstanding command submitted to a queue pair.  The
 * completion callback 'cb' is invoked with 'cb_arg' and the CQE when
 * the reply arrives (see nvmf_request_complete_t above).
 */
struct nvmf_request {
	struct nvmf_host_qpair *qp;	/* queue pair this request belongs to */
	struct nvmf_capsule *nc;	/* transport capsule carrying the command */
	nvmf_request_complete_t *cb;	/* completion callback */
	void	*cb_arg;		/* opaque argument passed to 'cb' */
	bool	aer;			/* is this an Asynchronous Event Request? */

	STAILQ_ENTRY(nvmf_request) link;	/* linkage on a qpair list */
};
96 
/*
 * State for waiting synchronously on a request that may include a data
 * transfer in addition to the command completion.  Initialize with
 * nvmf_status_init(); call nvmf_status_wait_io() if a data transfer is
 * expected; wait with nvmf_wait_for_reply().
 */
struct nvmf_completion_status {
	struct nvme_completion cqe;	/* copy of the received CQE */
	bool	done;			/* command completion received? */
	bool	io_done;		/* data transfer finished (or none expected)? */
	int	io_error;		/* errno from the data transfer */
};
103 
104 static __inline struct nvmf_host_qpair *
105 nvmf_select_io_queue(struct nvmf_softc *sc)
106 {
107 	/* TODO: Support multiple queues? */
108 	return (sc->io[0]);
109 }
110 
111 static __inline bool
112 nvmf_cqe_aborted(const struct nvme_completion *cqe)
113 {
114 	uint16_t status;
115 
116 	status = le16toh(cqe->status);
117 	return (NVME_STATUS_GET_SCT(status) == NVME_SCT_PATH_RELATED &&
118 	    NVME_STATUS_GET_SC(status) == NVME_SC_COMMAND_ABORTED_BY_HOST);
119 }
120 
121 static __inline void
122 nvmf_status_init(struct nvmf_completion_status *status)
123 {
124 	status->done = false;
125 	status->io_done = true;
126 	status->io_error = 0;
127 }
128 
129 static __inline void
130 nvmf_status_wait_io(struct nvmf_completion_status *status)
131 {
132 	status->io_done = false;
133 }
134 
135 #ifdef DRIVER_MODULE
136 extern driver_t nvme_nvmf_driver;
137 #endif
138 
139 #ifdef MALLOC_DECLARE
140 MALLOC_DECLARE(M_NVMF);
141 #endif
142 
/* nvmf.c */
/* Completion callbacks suitable for use with struct nvmf_completion_status. */
void	nvmf_complete(void *arg, const struct nvme_completion *cqe);
void	nvmf_io_complete(void *arg, size_t xfered, int error);
/* Sleep until both the CQE and any expected data transfer have arrived. */
void	nvmf_wait_for_reply(struct nvmf_completion_status *status);
/* Set up / tear down handoff instance variables; see struct nvmf_ivars. */
int	nvmf_init_ivars(struct nvmf_ivars *ivars, struct nvmf_handoff_host *hh);
void	nvmf_free_ivars(struct nvmf_ivars *ivars);
/* NOTE(review): presumably triggers the disconnect_task teardown — confirm in nvmf.c. */
void	nvmf_disconnect(struct nvmf_softc *sc);
/* Rescan the given namespace ID (e.g. after an AER namespace notice). */
void	nvmf_rescan_ns(struct nvmf_softc *sc, uint32_t nsid);
/* Execute a pass-through command on the admin or an I/O queue. */
int	nvmf_passthrough_cmd(struct nvmf_softc *sc, struct nvme_pt_command *pt,
    bool admin);
153 
/* nvmf_aer.c */
/* Lifecycle of Asynchronous Event Request handling (see sc->aer/num_aer). */
void	nvmf_init_aer(struct nvmf_softc *sc);
int	nvmf_start_aer(struct nvmf_softc *sc);
void	nvmf_destroy_aer(struct nvmf_softc *sc);
158 
/* nvmf_cmd.c */
/*
 * Helpers that build and submit specific commands.  Each returns false on
 * allocation failure ('how' is presumably an M_WAITOK/M_NOWAIT malloc flag —
 * confirm in nvmf_cmd.c).  The *_cb callbacks fire on command completion;
 * io_cb fires on data-transfer completion for commands that carry data.
 */
bool	nvmf_cmd_get_property(struct nvmf_softc *sc, uint32_t offset,
    uint8_t size, nvmf_request_complete_t *cb, void *cb_arg, int how);
bool	nvmf_cmd_set_property(struct nvmf_softc *sc, uint32_t offset,
    uint8_t size, uint64_t value, nvmf_request_complete_t *cb, void *cb_arg,
    int how);
bool	nvmf_cmd_keep_alive(struct nvmf_softc *sc, nvmf_request_complete_t *cb,
    void *cb_arg, int how);
bool	nvmf_cmd_identify_active_namespaces(struct nvmf_softc *sc, uint32_t id,
    struct nvme_ns_list *nslist, nvmf_request_complete_t *req_cb,
    void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
bool	nvmf_cmd_identify_namespace(struct nvmf_softc *sc, uint32_t id,
    struct nvme_namespace_data *nsdata, nvmf_request_complete_t *req_cb,
    void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
bool	nvmf_cmd_get_log_page(struct nvmf_softc *sc, uint32_t nsid, uint8_t lid,
    uint64_t offset, void *buf, size_t len, nvmf_request_complete_t *req_cb,
    void *req_cb_arg, nvmf_io_complete_t *io_cb, void *io_cb_arg, int how);
176 
/* nvmf_ctldev.c */
/* Load/unload the control device used to hand off new associations. */
int	nvmf_ctl_load(void);
void	nvmf_ctl_unload(void);
180 
/* nvmf_ns.c */
/* Namespace lifecycle: create from Identify Namespace data, pause/resume
 * across connection loss, destroy, and refresh after a rescan. */
struct nvmf_namespace *nvmf_init_ns(struct nvmf_softc *sc, uint32_t id,
    struct nvme_namespace_data *data);
void	nvmf_disconnect_ns(struct nvmf_namespace *ns);
void	nvmf_reconnect_ns(struct nvmf_namespace *ns);
void	nvmf_destroy_ns(struct nvmf_namespace *ns);
/* NOTE(review): presumably returns false if the namespace changed
 * incompatibly and must be recreated — confirm in nvmf_ns.c. */
bool	nvmf_update_ns(struct nvmf_namespace *ns,
    struct nvme_namespace_data *data);
189 
/* nvmf_qpair.c */
/* Create a host queue pair from handoff parameters ('name' is for debugging). */
struct nvmf_host_qpair *nvmf_init_qp(struct nvmf_softc *sc,
    enum nvmf_trtype trtype, struct nvmf_handoff_qpair_params *handoff,
    const char *name);
/* Shutdown stops a queue pair (e.g. on disconnect); destroy frees it. */
void	nvmf_shutdown_qp(struct nvmf_host_qpair *qp);
void	nvmf_destroy_qp(struct nvmf_host_qpair *qp);
/* Allocate a request wrapping the given SQE; submit/free as a pair. */
struct nvmf_request *nvmf_allocate_request(struct nvmf_host_qpair *qp,
    void *sqe, nvmf_request_complete_t *cb, void *cb_arg, int how);
void	nvmf_submit_request(struct nvmf_request *req);
void	nvmf_free_request(struct nvmf_request *req);
200 
/* nvmf_sim.c */
/* CAM SIM lifecycle mirroring controller connect/disconnect state. */
int	nvmf_init_sim(struct nvmf_softc *sc);
void	nvmf_disconnect_sim(struct nvmf_softc *sc);
void	nvmf_reconnect_sim(struct nvmf_softc *sc);
void	nvmf_destroy_sim(struct nvmf_softc *sc);
/* Ask CAM to rescan the path for the given namespace ID. */
void	nvmf_sim_rescan_ns(struct nvmf_softc *sc, uint32_t id);
207 
208 #endif /* !__NVMF_VAR_H__ */
209