xref: /freebsd/sys/dev/irdma/fbsd_kcompat.c (revision 9768746b)
/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2021 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "ice_rdma.h"
#include "irdma_di_if.h"
#include "irdma_main.h"
#include <sys/gsb_crc32.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>
#include <net/if_llatbl.h>

/* additional QP debugging option. Keep false unless needed */
bool irdma_upload_context = false;

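/*
 * Bounds-checked MMIO register accessors. Each read/write asserts that
 * the offset lies within the mapped bus space before touching it.
 */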
inline u32
irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

inline void
irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_4(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

inline u64
irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

inline void
irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_8(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

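/**
 * irdma_register_qset - register a qset with the LAN driver
 * @vsi: vsi structure
 * @tc_node: traffic class node supplying the qs_handle and traffic class
 *
 * Requests a qset allocation from the ice peer and records the scheduler
 * node TEID it returns.
 */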
int
irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->cnt_req = 1;
	res->res_type = ICE_RDMA_QSET_ALLOC;
	res->qsets.qs_handle = tc_node->qs_handle;
	res->qsets.tc = tc_node->traffic_class;
	res->qsets.vsi_id = vsi->vsi_idx;

	IRDMA_DI_REQ_HANDLER(peer, &req);

	tc_node->l2_sched_node_id = res->qsets.teid;
	vsi->qos[tc_node->user_pri].l2_sched_node_id =
	    res->qsets.teid;

	return 0;
}

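/**
 * irdma_unregister_qset - release a qset back to the LAN driver
 * @vsi: vsi structure
 * @tc_node: traffic class node holding the qset to free
 */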
void
irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->res_allocated = 1;
	res->res_type = ICE_RDMA_QSET_FREE;
	res->qsets.vsi_id = vsi->vsi_idx;
	res->qsets.teid = tc_node->l2_sched_node_id;
	res->qsets.qs_handle = tc_node->qs_handle;

	IRDMA_DI_REQ_HANDLER(peer, &req);
}

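/**
 * hw_to_dev - return the pci device backing a hw structure
 * @hw: pointer to hw structure
 */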
void *
hw_to_dev(struct irdma_hw *hw)
{
	struct irdma_pci_f *rf;

	rf = container_of(hw, struct irdma_pci_f, hw);
	return rf->pcidev;
}

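/**
 * irdma_free_hash_desc - no-op; FreeBSD's CRC32c keeps no descriptor state
 * @desc: hash descriptor (unused)
 */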
void
irdma_free_hash_desc(void *desc)
{
	return;
}

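/**
 * irdma_init_hash_desc - no-op; calculate_crc32c() needs no setup
 * @desc: hash descriptor to initialize (unused)
 */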
int
irdma_init_hash_desc(void **desc)
{
	return 0;
}

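/**
 * irdma_ieq_check_mpacrc - check a received MPA CRC
 * @desc: hash descriptor (unused)
 * @addr: starting address of the data to checksum
 * @len: length of the data
 * @val: CRC carried by the received frame
 *
 * Returns 0 if the computed CRC32c matches @val, -EINVAL otherwise.
 */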
int
irdma_ieq_check_mpacrc(void *desc,
		       void *addr, u32 len, u32 val)
{
	u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
	int ret_code = 0;

	if (crc != val) {
		irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
		ret_code = -EINVAL;
	}
	printf("%s: result crc=%x value=%x\n", __func__, crc, val);
	return ret_code;
}

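/*
 * Note: calculate_crc32c() from <sys/gsb_crc32.h> takes the running CRC
 * as its first argument and leaves out the final bit inversion, so the
 * checker above seeds it with 0xffffffff and XORs the result to get the
 * conventional CRC32c value carried in MPA frames. Illustrative use:
 *
 *	u32 crc = calculate_crc32c(0xffffffff, buf, len) ^ 0xffffffff;
 */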
/**
 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa, *tmp;
	struct sockaddr_in6 *sin6;
	u32 local_ipaddr6[4];
	u8 *mac_addr;
	char ip6buf[INET6_ADDRSTRLEN];

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, tmp) {
		sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
		if (sin6->sin6_family != AF_INET6)
			continue;

		irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
		mac_addr = IF_LLADDR(ifp);

		printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip6_sprintf(ip6buf, &sin6->sin6_addr),
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa;
	struct sockaddr_in *sin;
	u32 ip_addr[4] = {};
	u8 *mac_addr;

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		sin = (struct sockaddr_in *)ifa->ifa_addr;
		if (sin->sin_family != AF_INET)
			continue;

		ip_addr[0] = ntohl(sin->sin_addr.s_addr);
		mac_addr = IF_LLADDR(ifp);

		printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip_addr[0] >> 24,
		       (ip_addr[0] >> 16) & 0xFF,
		       (ip_addr[0] >> 8) & 0xFF,
		       ip_addr[0] & 0xFF,
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ip - add ip addresses
 * @iwdev: irdma device
 *
 * Add ipv4/ipv6 addresses to the arp cache
 */
void
irdma_add_ip(struct irdma_device *iwdev)
{
	struct ifnet *ifp = iwdev->netdev;
	struct ifnet *ifv;
	int i;

	irdma_add_ipv4_addr(iwdev, ifp);
	irdma_add_ipv6_addr(iwdev, ifp);
	for (i = 0; ifp->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
		ifv = VLAN_DEVAT(ifp, i);
		if (!ifv)
			continue;
		irdma_add_ipv4_addr(iwdev, ifv);
		irdma_add_ipv6_addr(iwdev, ifv);
	}
}

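/**
 * irdma_ifaddrevent_handler - handle interface address add/delete events
 * @arg: RDMA PCI function passed at registration
 * @ifp: interface whose address list changed
 * @ifa: address being added or deleted
 * @event: IFADDR_EVENT_ADD or IFADDR_EVENT_DEL
 *
 * Events are ignored unless @ifp is the device netdev or one of its
 * VLANs. Adds refresh the hw arp table; deletes purge matching entries.
 */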
static void
irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
{
	struct irdma_pci_f *rf = arg;
	struct ifnet *ifv = NULL;
	struct sockaddr_in *sin;
	struct epoch_tracker et;
	int arp_index = 0, i = 0;
	u32 ip[4] = {};

	if (!ifa || !ifa->ifa_addr || !ifp)
		return;
	if (rf->iwdev->netdev != ifp) {
		for (i = 0; rf->iwdev->netdev->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
			NET_EPOCH_ENTER(et);
			ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
			NET_EPOCH_EXIT(et);
			if (ifv == ifp)
				break;
		}
		if (ifv != ifp)
			return;
	}
	sin = (struct sockaddr_in *)ifa->ifa_addr;

	switch (event) {
	case IFADDR_EVENT_ADD:
		if (sin->sin_family == AF_INET)
			irdma_add_ipv4_addr(rf->iwdev, ifp);
		else if (sin->sin_family == AF_INET6)
			irdma_add_ipv6_addr(rf->iwdev, ifp);
		break;
	case IFADDR_EVENT_DEL:
		if (sin->sin_family == AF_INET) {
			ip[0] = ntohl(sin->sin_addr.s_addr);
		} else if (sin->sin_family == AF_INET6) {
			irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
		} else {
			break;
		}
		for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
			if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
				irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
						       rf->arp_table[arp_index].ip_addr,
						       IRDMA_ARP_DELETE);
			}
		}
		break;
	default:
		break;
	}
}

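/**
 * irdma_reg_ipaddr_event_cb - register for interface address change events
 * @rf: RDMA PCI function
 */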
void
irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
						       irdma_ifaddrevent_handler,
						       rf,
						       EVENTHANDLER_PRI_ANY);
}

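/**
 * irdma_dereg_ipaddr_event_cb - deregister the address change event handler
 * @rf: RDMA PCI function
 */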
void
irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
}

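/**
 * irdma_get_route_ifp - look up the route to a destination
 * @dst_sin: destination ipv4/ipv6 address
 * @netdev: interface the route is expected to use
 * @ifp: to return the egress interface
 * @nexthop: to return the next hop (the gateway, or @dst_sin itself)
 * @gateway: to return whether the route goes through a gateway
 *
 * Fails unless the route resolves through @netdev or a VLAN on top of it.
 */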
static int
irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
		    struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
{
	struct nhop_object *nh;

	if (dst_sin->sa_family == AF_INET6)
		nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
	else
		nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
	if (!nh || (nh->nh_ifp != netdev &&
		    rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
		goto rt_not_found;
	*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
	*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
	*ifp = nh->nh_ifp;

	return 0;

rt_not_found:
	pr_err("irdma: route not found\n");
	return -ENETUNREACH;
}

/**
 * irdma_get_dst_mac - get destination mac address
 * @cm_node: connection's node
 * @dst_sin: destination address information
 * @dst_mac: mac address array to return
 */
int
irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
{
	struct ifnet *netdev = cm_node->iwdev->netdev;
#ifdef VIMAGE
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
	struct ifnet *ifp;
	struct llentry *lle;
	struct sockaddr *nexthop;
	struct epoch_tracker et;
	int err;
	bool gateway;

	NET_EPOCH_ENTER(et);
	CURVNET_SET_QUIET(vnet);
	err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
	if (err)
		goto get_route_fail;

	if (dst_sin->sa_family == AF_INET) {
		err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else if (dst_sin->sa_family == AF_INET6) {
		err = nd6_resolve(ifp, LLE_SF(AF_INET6, gateway), NULL, nexthop,
				  dst_mac, NULL, &lle);
	} else {
		err = -EPROTONOSUPPORT;
	}

get_route_fail:
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
	if (err) {
		pr_err("failed to resolve neighbor address (err=%d)\n",
		       err);
		return -ENETUNREACH;
	}

	return 0;
}

/**
 * irdma_addr_resolve_neigh - resolve neighbor address
 * @cm_node: connection's node
 * @dst_ip: remote ip address
 * @arpindex: index of the existing arp entry, if any
 */
int
irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
			 u32 dst_ip, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in dst_sin = {};
	int err;
	u32 ip[4] = {};
	u8 dst_mac[MAX_ADDR_LEN];

	dst_sin.sin_len = sizeof(dst_sin);
	dst_sin.sin_family = AF_INET;
	dst_sin.sin_port = 0;
	dst_sin.sin_addr.s_addr = htonl(dst_ip);

	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
	if (err)
		return arpindex;

	ip[0] = dst_ip;

	return irdma_add_arp(iwdev->rf, ip, dst_mac);
}

/**
 * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @cm_node: connection's node
 * @dest: remote ip address
 * @arpindex: index of the existing arp entry, if any
 */
int
irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
			      u32 *dest, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in6 dst_addr = {};
	int err;
	u8 dst_mac[MAX_ADDR_LEN];

	dst_addr.sin6_family = AF_INET6;
	dst_addr.sin6_len = sizeof(dst_addr);
	dst_addr.sin6_scope_id = iwdev->netdev->if_index;

	irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
	if (err)
		return arpindex;

	return irdma_add_arp(iwdev->rf, dest, dst_mac);
}

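/**
 * irdma_resolve_neigh_lpb_chk - resolve a neighbor, with loopback check
 * @iwdev: irdma device
 * @cm_node: connection's node
 * @cm_info: connection info with the remote address
 *
 * Loopback destinations are resolved from the arp table only; other
 * destinations are resolved through the network stack and the arp table
 * is refreshed. Returns the arp table index for the remote address.
 */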
int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
			    struct irdma_cm_info *cm_info)
{
#ifdef VIMAGE
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
	int arpindex;
	int oldarpindex;
	bool is_lpb = false;

	CURVNET_SET_QUIET(vnet);
	is_lpb = cm_node->ipv4 ?
	    irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0]) :
	    irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr);
	CURVNET_RESTORE();
	if (is_lpb) {
		cm_node->do_lpb = true;
		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					   NULL,
					   IRDMA_ARP_RESOLVE);
	} else {
		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					      NULL,
					      IRDMA_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = irdma_addr_resolve_neigh(cm_node,
							    cm_info->rem_addr[0],
							    oldarpindex);
		else
			arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
								 cm_info->rem_addr,
								 oldarpindex);
	}
	return arpindex;
}

/**
 * irdma_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
void
irdma_add_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_add(&hdl->list, &irdma_handlers);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
void
irdma_del_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_set_rf_user_cfg_params - apply user configurable settings
 * @rf: RDMA PCI function
 */
void
irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
{
	int en_rem_endpoint_trk = 0;
	int limits_sel = 4;

	rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
	rf->limits_sel = limits_sel;
	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
	/* Enable DCQCN algorithm by default */
	rf->dcqcn_ena = true;
}

/**
 * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
 * @arg1: pointer to rf
 * @arg2: unused
 * @oidp: sysctl oid structure
 * @req: sysctl request pointer
 */
static int
irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
{
	struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
	int ret;
	u8 dcqcn_ena = rf->dcqcn_ena;

	ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
	if ((ret) || (req->newptr == NULL))
		return ret;
	if (dcqcn_ena == 0)
		rf->dcqcn_ena = false;
	else
		rf->dcqcn_ena = true;

	return 0;
}

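/*
 * dcqcn_enable may be flipped at runtime through the sysctl node created
 * below; the exact node path depends on where the driver attaches
 * rf->tun_info.irdma_sysctl_tree, e.g. (hypothetical path):
 *
 *	# sysctl dev.irdma0.dcqcn_enable=0
 */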
/**
 * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
 * @rf: RDMA PCI function
 *
 * Create DCQCN related sysctls for the driver.
 * dcqcn_ena is a writable setting that applies to the next QP creation
 * or context setting.
 * All other settings are of RDTUN type (read on driver load) and are
 * applicable only to CQP creation.
 */
void
irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
{
	struct sysctl_oid_list *irdma_sysctl_oid_list;

	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);

	SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
			OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
			irdma_sysctl_dcqcn_update, "A",
			"enables DCQCN algorithm for RoCEv2 on all ports, default=true");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.cc_cfg_valid, 0,
		      "set DCQCN parameters to be valid, default=false");

	rf->dcqcn_params.min_dec_factor = 1;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_dec_factor, 0,
		      "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_rate, 0,
		      "set minimum rate limit value, in MBits per second, default=0");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
		      "set number of times to stay in each stage of bandwidth recovery, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
		       "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0");

	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rai_factor, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.hai_factor, 0,
		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=0");

	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rreduce_mperiod, 0,
		       "set minimum time between 2 consecutive rate reductions for a single flow, default=0");
}

/**
 * irdma_dmamap_cb - callback for bus_dmamap_load
 * @arg: bus_addr_t to fill with the segment's bus address
 * @segs: segment array from the load
 * @nseg: number of segments
 * @error: non-zero if the load failed
 */
static void
irdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/**
 * irdma_allocate_dma_mem - allocate dma memory
 * @hw: pointer to hw structure
 * @mem: structure holding memory information
 * @size: requested size
 * @alignment: requested alignment
 */
void *
irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
		       u64 size, u32 alignment)
{
	struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
	device_t dev = dev_ctx->dev;
	void *va;
	int ret;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				 alignment, 0,	/* alignment, bounds */
				 BUS_SPACE_MAXADDR,	/* lowaddr */
				 BUS_SPACE_MAXADDR,	/* highaddr */
				 NULL, NULL,	/* filter, filterarg */
				 size,	/* maxsize */
				 1,	/* nsegments */
				 size,	/* maxsegsize */
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL,	/* lockfunc */
				 NULL,	/* lockfuncarg */
				 &mem->tag);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
			      __func__, ret);
		goto fail_0;
	}
	ret = bus_dmamem_alloc(mem->tag, (void **)&va,
			       BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
			      __func__, ret);
		goto fail_1;
	}
	ret = bus_dmamap_load(mem->tag, mem->map, va, size,
			      irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
			      __func__, ret);
		goto fail_2;
	}
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return va;
fail_2:
	bus_dmamem_free(mem->tag, va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;

	return NULL;
}

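/*
 * irdma_allocate_dma_mem() does not store the returned kernel VA in
 * mem->va; callers keep it there themselves so that irdma_free_dma_mem()
 * can release it later. A minimal sketch, assuming a 4 KB alignment:
 *
 *	struct irdma_dma_mem mem = {};
 *
 *	mem.va = irdma_allocate_dma_mem(hw, &mem, size, 4096);
 *	if (!mem.va)
 *		return -ENOMEM;
 *	...
 *	irdma_free_dma_mem(hw, &mem);
 */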
/**
 * irdma_free_dma_mem - Memory free helper fn
 * @hw: pointer to hw structure
 * @mem: ptr to mem struct to free
 */
int
irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
{
	if (!mem)
		return -EINVAL;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	if (!mem->va)
		return -ENOMEM;
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);

	mem->va = NULL;

	return 0;
}

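/**
 * irdma_prm_rem_bitmapmem - free the bitmap memory of a chunk
 * @hw: pointer to hw structure (unused)
 * @chunk: chunk holding the bitmap to free
 */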
inline void
irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
{
	kfree(chunk->bitmapmem.va);
}
746