xref: /freebsd/sys/dev/irdma/fbsd_kcompat.h (revision 38a52bd3)
/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2021 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#ifndef FBSD_KCOMPAT_H
#define FBSD_KCOMPAT_H
#include "ice_rdma.h"

#define TASKLET_DATA_TYPE	unsigned long
#define TASKLET_FUNC_TYPE	void (*)(TASKLET_DATA_TYPE)

#define tasklet_setup(tasklet, callback)				\
	tasklet_init((tasklet), (TASKLET_FUNC_TYPE)(callback),		\
		      (TASKLET_DATA_TYPE)(tasklet))

#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
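
/*
 * Illustrative sketch (not part of the upstream file; struct and function
 * names below are hypothetical): the shims above let code written against
 * the newer tasklet_setup()/from_tasklet() API run on the older
 * tasklet_init() interface.  tasklet_setup() passes the tasklet itself as
 * the "data" argument, so from_tasklet(), which is just container_of(),
 * can recover the enclosing driver structure in the handler:
 *
 *	struct my_ceq {
 *		struct tasklet_struct dpc_tasklet;
 *		// ...
 *	};
 *
 *	static void my_ceq_dpc(struct tasklet_struct *t)
 *	{
 *		struct my_ceq *ceq = from_tasklet(ceq, t, dpc_tasklet);
 *		// process completions for ceq ...
 *	}
 *
 *	// at init time:
 *	tasklet_setup(&ceq->dpc_tasklet, my_ceq_dpc);
 */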

#define IRDMA_SET_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)    \
	(sizeof(struct drv_struct) +                              \
	 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
	 BUILD_BUG_ON_ZERO(                                       \
		!__same_type(((struct drv_struct *)NULL)->member, \
			     struct ib_struct)))
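
/*
 * Illustrative sketch (hypothetical usage): IRDMA_SET_RDMA_OBJ_SIZE()
 * evaluates to sizeof(struct drv_struct) while build-asserting, via
 * BUILD_BUG_ON_ZERO(), that the named member sits at offset 0 of the
 * driver structure and really is the expected ib_* type.  For a driver PD
 * wrapping an ib_pd that could look like:
 *
 *	struct irdma_pd {
 *		struct ib_pd ibpd;	// must stay at offset 0
 *		// driver-private PD state ...
 *	};
 *
 *	// size reported to the verbs layer for core-allocated PDs:
 *	// IRDMA_SET_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd)
 */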
#define set_ibdev_dma_device(ibdev, dev) \
	ibdev.dma_device = (dev)
#define set_max_sge(props, rf)  \
	((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags)
#define kc_set_props_ip_gid_caps(props) \
	((props)->port_cap_flags  |= IB_PORT_IP_BASED_GIDS)
#define rdma_query_gid(ibdev, port, index, gid) \
	ib_get_cached_gid(ibdev, port, index, gid, NULL)
#define kmap(pg) page_address(pg)
#define kmap_local_page(pg) page_address(pg)
#define kunmap(pg)
#define kunmap_local(pg)
#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \
	((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr, NULL))

#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp, udata)

#ifndef IB_QP_ATTR_STANDARD_BITS
#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
#endif

#define IRDMA_QOS_MODE_VLAN 0x0
#define IRDMA_QOS_MODE_DSCP 0x1

void kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev);
void kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev);

struct irdma_tunable_info {
	struct sysctl_ctx_list irdma_sysctl_ctx;
	struct sysctl_oid *irdma_sysctl_tree;
	u8 roce_ena;
};

static inline int irdma_iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				      u16 *pkey)
{
	*pkey = 0;
	return 0;
}

static inline int cq_validate_flags(u32 flags, u8 hw_rev)
{
	/* GEN1 does not support CQ create flags */
	if (hw_rev == IRDMA_GEN_1)
		return flags ? -EOPNOTSUPP : 0;

	return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
}

static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
	if (!(*pinfo) || *idx != (*pinfo)->cnt)
		return ++pbl;
	*idx = 0;
	(*pinfo)++;

	return (*pinfo)->addr;
}
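
/*
 * Illustrative sketch (hypothetical variable names): irdma_next_pbl_addr()
 * walks a physical buffer list that may be split across several chunks.
 * While the current chunk still has entries it simply returns the next
 * slot; once *idx reaches pinfo->cnt it resets the index, advances to the
 * next irdma_pble_info and returns that chunk's address array:
 *
 *	struct irdma_pble_info *pinfo = first_chunk;
 *	u32 idx = 0;
 *	u64 *pbl = pinfo->addr;
 *
 *	for (i = 0; i < npages; i++) {
 *		*pbl = page_dma_addr[i];
 *		pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
 *	}
 */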
int irdma_create_cq(struct ib_cq *ibcq,
		    const struct ib_cq_init_attr *attr,
		    struct ib_udata *udata);
struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);
int irdma_create_ah(struct ib_ah *ib_ah,
		    struct ib_ah_attr *attr, u32 flags,
		    struct ib_udata *udata);
int irdma_create_ah_stub(struct ib_ah *ib_ah,
			 struct ib_ah_attr *attr, u32 flags,
			 struct ib_udata *udata);
void irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr);

void irdma_destroy_ah(struct ib_ah *ibah, u32 flags);
void irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags);
int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
void irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
				   u8 *active_width);
enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
					  u8 port_num);
int irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
			      struct ib_port_immutable *immutable);
int irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
			    struct ib_port_immutable *immutable);
int irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
		    union ib_gid *gid);
int irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
			 union ib_gid *gid);
int irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		     u16 *pkey);
int irdma_query_port(struct ib_device *ibdev, u8 port,
		     struct ib_port_attr *props);
struct rdma_hw_stats *irdma_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
int irdma_get_hw_stats(struct ib_device *ibdev,
		       struct rdma_hw_stats *stats, u8 port_num,
		       int index);

int irdma_register_qset(struct irdma_sc_vsi *vsi,
			struct irdma_ws_node *tc_node);
void irdma_unregister_qset(struct irdma_sc_vsi *vsi,
			   struct irdma_ws_node *tc_node);
void ib_unregister_device(struct ib_device *ibdev);
void irdma_disassociate_ucontext(struct ib_ucontext *context);
int kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp,
			      struct ib_qp_attr *attr,
			      u16 *vlan_id);
struct irdma_device *kc_irdma_get_device(struct ifnet *netdev);
void kc_irdma_put_device(struct irdma_device *iwdev);

void kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node);

void irdma_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len);

int irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		      struct ib_port_modify *props);
int irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin,
		      u8 *dst_mac);
int irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
				struct irdma_cm_info *cm_info);
int irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node, u32 dst_ip,
			     int arpindex);
int irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node, u32 *dest,
				  int arpindex);
void irdma_dcqcn_tunables_init(struct irdma_pci_f *rf);
u32 irdma_create_stag(struct irdma_device *iwdev);
void irdma_free_stag(struct irdma_device *iwdev, u32 stag);

struct irdma_mr;
struct irdma_cq;
struct irdma_cq_buf;
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			     u32 max_num_sg, struct ib_udata *udata);
int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr);
struct ib_mw *irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			     struct ib_udata *udata);
int irdma_hw_alloc_stag(struct irdma_device *iwdev, struct irdma_mr *iwmr);
void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq);
int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
			    struct irdma_device *iwdev);
void irdma_setup_virt_qp(struct irdma_device *iwdev,
			 struct irdma_qp *iwqp,
			 struct irdma_qp_init_info *init_info);
int irdma_setup_kmode_qp(struct irdma_device *iwdev,
			 struct irdma_qp *iwqp,
			 struct irdma_qp_init_info *info,
			 struct ib_qp_init_attr *init_attr);
void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
					struct irdma_qp_host_ctx_info *ctx_info);
void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
				      struct irdma_qp_host_ctx_info *ctx_info);
int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp);
void irdma_dealloc_push_page(struct irdma_pci_f *rf,
			     struct irdma_sc_qp *qp);
int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev,
			      struct irdma_cq_buf *lcqe_buf);
void irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void irdma_dealloc_ucontext(struct ib_ucontext *context);
int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int irdma_add_gid(struct ib_device *, u8, unsigned int, const union ib_gid *,
		  const struct ib_gid_attr *, void **);
int irdma_del_gid(struct ib_device *, u8, unsigned int, void **);
struct ib_device *ib_device_get_by_netdev(struct ifnet *ndev, int driver_id);
void ib_device_put(struct ib_device *device);
void ib_unregister_device_put(struct ib_device *device);
enum ib_mtu ib_mtu_int_to_enum(int mtu);
struct irdma_pbl *irdma_get_pbl(unsigned long va, struct list_head *pbl_list);
void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq);
void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp);

struct irdma_ucontext;
void irdma_del_memlist(struct irdma_mr *iwmr, struct irdma_ucontext *ucontext);
void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
			     enum irdma_pble_level level);
void irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf);
void irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf);

/* ib_umem_num_dma_blocks() was introduced upstream in this series:
 * https://lore.kernel.org/linux-rdma/0-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com/
 * This irdma helper does the same for older OFED versions that lack it, with
 * the difference that the iova is passed in rather than derived from
 * umem->iova.
 */
static inline size_t irdma_ib_umem_num_dma_blocks(struct ib_umem *umem, unsigned long pgsz, u64 iova)
{
	/* some older OFED distros do not have ALIGN_DOWN */
#ifndef ALIGN_DOWN
#define ALIGN_DOWN(x, a)	ALIGN((x) - ((a) - 1), (a))
#endif

	return (size_t)((ALIGN(iova + umem->length, pgsz) -
			 ALIGN_DOWN(iova, pgsz))) / pgsz;
}
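
/*
 * Illustrative sketch (hypothetical values): the helper counts how many
 * pgsz-sized, pgsz-aligned blocks are needed to cover the registration.
 * For iova = 0x10000200, umem->length = 0x3000 and pgsz = 4096, the span
 * [ALIGN_DOWN(iova, 4096), ALIGN(iova + length, 4096)) is
 * [0x10000000, 0x10004000), i.e.
 *
 *	irdma_ib_umem_num_dma_blocks(umem, 4096, 0x10000200ULL) == 4
 */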

#endif /* FBSD_KCOMPAT_H */