xref: /freebsd/sys/dev/irdma/irdma_verbs.h (revision d0b2dbfa)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2015 - 2022 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #ifndef IRDMA_VERBS_H
36 #define IRDMA_VERBS_H
37 
38 #define IRDMA_MAX_SAVED_PHY_PGADDR	4
39 #define IRDMA_FLUSH_DELAY_MS		20
40 
41 #define IRDMA_PKEY_TBL_SZ		1
42 #define IRDMA_DEFAULT_PKEY		0xFFFF
43 
44 #define iwdev_to_idev(iwdev)	(&(iwdev)->rf->sc_dev)
45 
/* Per-process user context; wraps the ib_ucontext created at device open. */
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
#if __FreeBSD_version >= 1400026
	struct rdma_user_mmap_entry *db_mmap_entry;
#else
	/* Pre-1400026 kernels lack rdma_user_mmap_entry; use a local hash. */
	struct irdma_user_mmap_entry *db_mmap_entry;
	DECLARE_HASHTABLE(mmap_hash_tbl, 6);
	spinlock_t mmap_tbl_lock; /* protect mmap hash table entries */
#endif
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	/* FIXME: Move to kcompat ideally. Used < 4.20.0 for old disassociate flow */
	struct list_head vma_list;
	struct mutex vma_list_mutex; /* protect the vma_list */
	int abi_ver;	/* user/kernel ABI version negotiated at alloc_ucontext */
	bool legacy_mode:1;
	bool use_raw_attrs:1;
};
67 
/* Protection domain; pairs the ib_pd with its hardware counterpart. */
struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
	struct list_head udqp_list;	/* presumably UD QPs under this PD — confirm against users */
	spinlock_t udqp_list_lock;	/* protect udqp_list */
};
74 
/* Address vector: resolved L2/L3 addressing info for an address handle. */
struct irdma_av {
	u8 macaddr[16];			/* destination MAC (buffer larger than ETH_ALEN) */
	struct ib_ah_attr attrs;
	union {
		struct sockaddr saddr;
		struct sockaddr_in saddr_in;	/* IPv4 form */
		struct sockaddr_in6 saddr_in6;	/* IPv6 form */
	} sgid_addr, dgid_addr;		/* source/destination GIDs as sockaddrs */
	u8 net_type;
};
85 
/* Address handle; binds an ib_ah to its hardware object and resolved AV. */
struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;		/* owning protection domain */
	struct irdma_av av;		/* resolved addressing info */
	u8 sgid_index;
	union ib_gid dgid;
};
94 
/* HMC physical buffer list entry: either a PBLE index or a DMA address. */
struct irdma_hmc_pble {
	union {
		u32 idx;	/* index into the PBLE pool */
		dma_addr_t addr;	/* direct DMA address (no PBLE indirection) */
	};
};
101 
/* Memory layout of a user-registered CQ: CQ ring plus shadow area. */
struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;	/* DMA address of the CQ shadow area */
	bool split;		/* shadow area registered separately from the ring — TODO confirm */
};
107 
/* Memory layout of a user-registered QP: SQ/RQ rings plus shadow area. */
struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;	/* DMA address of the QP shadow area */
	struct page *sq_page;
};
114 
/*
 * A CQ ring buffer kept on a list (see irdma_cq.resize_list) — presumably a
 * retired buffer held until its completions drain after a CQ resize.
 */
struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;	/* DMA memory backing this ring */
	struct irdma_cq_uk cq_uk;	/* user-kernel CQ state for polling this ring */
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;	/* deferred free/teardown work */
};
122 
/*
 * Physical buffer list bookkeeping for a user registration; lives on the
 * ucontext's cq/qp_reg_mem_list while awaiting setup (see irdma_ucontext).
 */
struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;	/* valid when registration backs a QP */
		struct irdma_cq_mr cq_mr;	/* valid when registration backs a CQ */
	};

	bool pbl_allocated:1;	/* pble_alloc holds a live PBLE allocation */
	bool on_list:1;		/* linked on a reg_mem_list */
	u64 user_base;		/* user virtual base address of the region */
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;	/* back-pointer to the owning MR */
};
136 
/* Memory region; the union lets the same struct serve MRs and MWs. */
struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;	/* pinned user memory; NULL for non-user MRs */
	int access;		/* IB access flags requested at registration */
	u8 is_hwreg;		/* nonzero once registered with hardware — confirm semantics */
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u64 page_msk;
	u32 npages;
	u32 stag;		/* steering tag (lkey/rkey source) */
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];	/* small regions: inline page addresses */
	struct irdma_pbl iwpbl;
};
155 
/* Completion queue; wraps ib_cq around the hardware CQ and kernel buffers. */
struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;		/* true when the ring lives in user memory */
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;		/* kernel-mode ring memory */
	struct irdma_dma_mem kmem_shadow;	/* kernel-mode shadow area */
	struct completion free_cq;	/* signaled when refcnt allows destroy */
	atomic_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;		/* retired irdma_cq_buf rings (see irdma_cq_buf) */
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;	/* software-generated flush completions */
};
178 
/*
 * One software-generated completion, queued on irdma_cq.cmpl_generated and
 * consumed by irdma_generated_cmpls().
 */
struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;	/* fabricated poll result to report */
};
183 
/* Deferred-work wrapper for handling a QP disconnect out of interrupt context. */
struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;	/* QP being disconnected */
};
188 
/* Deferred-work wrapper for an interface address/state change notification. */
struct if_notify_work {
	struct work_struct work;
	struct irdma_device *iwdev;
	u32 ipaddr[4];	/* address; only ipaddr[0] meaningful for IPv4 — TODO confirm */
	u16 vlan_id;
	bool ipv4:1;	/* address family of ipaddr */
	bool ifup:1;	/* interface came up (vs. went down) */
};
197 
198 struct iw_cm_id;
199 
/* Kernel-mode QP backing memory: WQE rings plus work-request tracking arrays. */
struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;	/* DMA memory for the SQ/RQ rings */
	u32 *sig_trk_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;	/* per-SQ-WQE wr_id tracking */
	u64 *rq_wrid_mem;	/* per-RQ-WQE wr_id values */
};
206 
/* Queue pair; ties the ib_qp to hardware state, CM state, and backing memory. */
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;		/* send CQ */
	struct irdma_cq *iwrcq;		/* receive CQ */
	struct irdma_pd *iwpd;
#if __FreeBSD_version >= 1400026
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
#else
	struct irdma_user_mmap_entry *push_wqe_mmap_entry;
	struct irdma_user_mmap_entry *push_db_mmap_entry;
#endif
	struct irdma_qp_host_ctx_info ctx_info;
	/* transport-specific offload info: exactly one member is active per QP */
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;	/* iWARP transport */
		struct irdma_udp_offload_info udp_info;	/* RoCEv2 transport */
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	struct list_head ud_list_elem;	/* linkage, presumably on irdma_pd.udqp_list — confirm */
	atomic_t refcnt;
	struct iw_cm_id *cm_id;		/* iWARP connection manager id */
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;	/* delayed flush, see irdma_flush_worker() */
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;		/* last asynchronous event code seen */
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 iwarp_state;
	u16 term_sq_flush_code;	/* flush code used when terminating the SQ */
	u16 term_rq_flush_code;	/* flush code used when terminating the RQ */
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;	/* kernel-mode ring memory (unused for user QPs) */
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_sge *sg_list;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;	/* signaled when refcnt allows destroy */
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;		/* RTS asynchronous event received */
	u8 active_conn : 1;	/* this side initiated the connection */
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;		/* generate a completion for every SQ WR */
	u8 pau_mode : 1;
};
275 
/* Deferred-work wrapper for a UD QP priority/QS update. */
struct irdma_udqs_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
	u8 user_prio;		/* new user priority to apply */
	bool qs_change:1;	/* QS handle changed and must be reprogrammed — confirm */
};
282 
/* Caching attribute for a BAR page exported to user space via mmap. */
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,	/* map I/O non-cached */
	IRDMA_MMAP_IO_WC,	/* map I/O write-combined */
};
287 
/*
 * Tracks one BAR page exposed to user space; on newer kernels this defers to
 * the core rdma_user_mmap_entry, otherwise a driver-local hash entry is used.
 */
struct irdma_user_mmap_entry {
#if __FreeBSD_version >= 1400026
	struct rdma_user_mmap_entry rdma_entry;
#else
	struct irdma_ucontext *ucontext;	/* owning context (its mmap_hash_tbl holds us) */
	struct hlist_node hlist;
	u64 pgoff_key; /* Used to compute offset (in bytes) returned to user libc's mmap */
#endif
	u64 bar_offset;		/* offset of the page within the device BAR */
	u8 mmap_flag;		/* enum irdma_mmap_flag caching attribute */
};
299 
300 static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
301 {
302 	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
303 }
304 
305 static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
306 {
307 	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
308 }
309 
310 /**
311  * irdma_mcast_mac_v4 - Get the multicast MAC for an IP address
312  * @ip_addr: IPv4 address
313  * @mac: pointer to result MAC address
314  *
315  */
316 static inline void irdma_mcast_mac_v4(u32 *ip_addr, u8 *mac)
317 {
318 	u8 *ip = (u8 *)ip_addr;
319 	unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, ip[2] & 0x7F, ip[1],
320 					ip[0]};
321 
322 	ether_addr_copy(mac, mac4);
323 }
324 
325 /**
326  * irdma_mcast_mac_v6 - Get the multicast MAC for an IP address
327  * @ip_addr: IPv6 address
328  * @mac: pointer to result MAC address
329  *
330  */
331 static inline void irdma_mcast_mac_v6(u32 *ip_addr, u8 *mac)
332 {
333 	u8 *ip = (u8 *)ip_addr;
334 	unsigned char mac6[ETH_ALEN] = {0x33, 0x33, ip[3], ip[2], ip[1], ip[0]};
335 
336 	ether_addr_copy(mac, mac6);
337 }
338 
339 #if __FreeBSD_version >= 1400026
340 struct rdma_user_mmap_entry*
341 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
342 			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
343 #else
344 struct irdma_user_mmap_entry *
345 irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
346 			       enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
347 void irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry);
348 #endif
349 int irdma_ib_register_device(struct irdma_device *iwdev);
350 void irdma_ib_unregister_device(struct irdma_device *iwdev);
351 void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
352 void irdma_generate_flush_completions(struct irdma_qp *iwqp);
353 void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
354 int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
355 void irdma_sched_qp_flush_work(struct irdma_qp *iwqp);
356 void irdma_flush_worker(struct work_struct *work);
357 #endif /* IRDMA_VERBS_H */
358