/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2021 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "ice_rdma.h"
#include "irdma_di_if.h"
#include "irdma_main.h"
#include <sys/gsb_crc32.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>

/* additional QP debugging option. Keep false unless needed */
bool irdma_upload_context = false;

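/**
 * irdma_rd32 - read a 32 bit device register
 * @dev_ctx: device context holding the register bus space tag/handle
 * @reg: register offset in bytes
 */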
inline u32
irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

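/**
 * irdma_wr32 - write a 32 bit value to a device register
 * @dev_ctx: device context holding the register bus space tag/handle
 * @reg: register offset in bytes
 * @value: value to write
 */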
inline void
irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_4(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

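/**
 * irdma_rd64 - read a 64 bit device register
 * @dev_ctx: device context holding the register bus space tag/handle
 * @reg: register offset in bytes
 */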
inline u64
irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
				 dev_ctx->mem_bus_space_handle, reg));
}

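/**
 * irdma_wr64 - write a 64 bit value to a device register
 * @dev_ctx: device context holding the register bus space tag/handle
 * @reg: register offset in bytes
 * @value: value to write
 */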
inline void
irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
{

	KASSERT(reg < dev_ctx->mem_bus_space_size,
		("irdma: register offset %#jx too large (max is %#jx)",
		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));

	bus_space_write_8(dev_ctx->mem_bus_space_tag,
			  dev_ctx->mem_bus_space_handle, reg, value);
}

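/**
 * irdma_register_qset - request an RDMA qset from the ice driver
 * @vsi: VSI the qset is associated with
 * @tc_node: WS node holding the traffic class and qs_handle
 *
 * Sends an ICE_RDMA_QSET_ALLOC request to the ice peer driver and records
 * the returned L2 scheduler node TEID.
 */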
int
irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->cnt_req = 1;
	res->res_type = ICE_RDMA_QSET_ALLOC;
	res->qsets.qs_handle = tc_node->qs_handle;
	res->qsets.tc = tc_node->traffic_class;
	res->qsets.vsi_id = vsi->vsi_idx;

	IRDMA_DI_REQ_HANDLER(peer, &req);

	tc_node->l2_sched_node_id = res->qsets.teid;
	vsi->qos[tc_node->user_pri].l2_sched_node_id =
	    res->qsets.teid;

	return 0;
}

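/**
 * irdma_unregister_qset - return an RDMA qset to the ice driver
 * @vsi: VSI the qset is associated with
 * @tc_node: WS node holding the qset's TEID and qs_handle
 */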
void
irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
	struct ice_rdma_request req = {0};
	struct ice_rdma_qset_update *res = &req.res;

	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
	res->res_allocated = 1;
	res->res_type = ICE_RDMA_QSET_FREE;
	res->qsets.vsi_id = vsi->vsi_idx;
	res->qsets.teid = tc_node->l2_sched_node_id;
	res->qsets.qs_handle = tc_node->qs_handle;

	IRDMA_DI_REQ_HANDLER(peer, &req);
}

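/**
 * hw_to_dev - retrieve the pcidev handle for a hw structure
 * @hw: hw structure embedded in the RDMA PCI function
 */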
void *
hw_to_dev(struct irdma_hw *hw)
{
	struct irdma_pci_f *rf;

	rf = container_of(hw, struct irdma_pci_f, hw);
	return rf->pcidev;
}

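/**
 * irdma_free_hash_desc - free hash descriptor
 * @desc: hash descriptor to be freed
 *
 * No-op on FreeBSD: irdma_init_hash_desc() allocates nothing.
 */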
void
irdma_free_hash_desc(void *desc)
{
	return;
}

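/**
 * irdma_init_hash_desc - initialize hash descriptor
 * @desc: hash descriptor to be initialized
 *
 * Nothing to allocate; calculate_crc32c() keeps no state on FreeBSD.
 */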
int
irdma_init_hash_desc(void **desc)
{
	return 0;
}

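/**
 * irdma_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: descriptor for hash (unused)
 * @addr: address of buffer for crc
 * @len: length of buffer
 * @val: value to be compared
 */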
int
irdma_ieq_check_mpacrc(void *desc,
		       void *addr, u32 len, u32 val)
{
	u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
	int ret_code = 0;

	if (crc != val) {
		irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
		ret_code = -EINVAL;
	}

	return ret_code;
}

/**
 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa, *tmp;
	struct sockaddr_in6 *sin6;
	u32 local_ipaddr6[4];
	u8 *mac_addr;
	char ip6buf[INET6_ADDRSTRLEN];

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, tmp) {
		sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
		if (sin6->sin6_family != AF_INET6)
			continue;

		irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
		mac_addr = IF_LLADDR(ifp);

		printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip6_sprintf(ip6buf, &sin6->sin6_addr),
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: irdma device
 * @ifp: interface network device pointer
 */
static void
irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
	struct ifaddr *ifa;
	struct sockaddr_in *sin;
	u32 ip_addr[4] = {};
	u8 *mac_addr;

	if_addr_rlock(ifp);
	IRDMA_TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		sin = (struct sockaddr_in *)ifa->ifa_addr;
		if (sin->sin_family != AF_INET)
			continue;

		ip_addr[0] = ntohl(sin->sin_addr.s_addr);
		mac_addr = IF_LLADDR(ifp);

		printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
		       __func__, __LINE__,
		       ip_addr[0] >> 24,
		       (ip_addr[0] >> 16) & 0xFF,
		       (ip_addr[0] >> 8) & 0xFF,
		       ip_addr[0] & 0xFF,
		       mac_addr[0], mac_addr[1], mac_addr[2],
		       mac_addr[3], mac_addr[4], mac_addr[5]);

		irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
				       IRDMA_ARP_ADD);
	}
	if_addr_runlock(ifp);
}

/**
 * irdma_add_ip - add ip addresses
 * @iwdev: irdma device
 *
 * Add ipv4/ipv6 addresses to the arp cache
 */
void
irdma_add_ip(struct irdma_device *iwdev)
{
	struct ifnet *ifp = iwdev->netdev;
	struct ifnet *ifv;
	int i;

	irdma_add_ipv4_addr(iwdev, ifp);
	irdma_add_ipv6_addr(iwdev, ifp);
	for (i = 0; ifp->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
		ifv = VLAN_DEVAT(ifp, i);
		if (!ifv)
			continue;
		irdma_add_ipv4_addr(iwdev, ifv);
		irdma_add_ipv6_addr(iwdev, ifv);
	}
}

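/**
 * irdma_ifaddrevent_handler - handle ifaddr_event_ext notifications
 * @arg: RDMA PCI function passed at registration time
 * @ifp: interface the address change occurred on
 * @ifa: address being added or deleted
 * @event: IFADDR_EVENT_ADD or IFADDR_EVENT_DEL
 *
 * Keeps the hw ARP table in sync with address changes on the device's
 * netdev and its VLANs; other interfaces are ignored.
 */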
static void
irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
{
	struct irdma_pci_f *rf = arg;
	struct ifnet *ifv = NULL;
	struct sockaddr_in *sin;
	struct epoch_tracker et;
	int arp_index = 0, i = 0;
	u32 ip[4] = {};

	if (!ifa || !ifa->ifa_addr || !ifp)
		return;
	if (rf->iwdev->netdev != ifp) {
		for (i = 0; rf->iwdev->netdev->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
			NET_EPOCH_ENTER(et);
			ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
			NET_EPOCH_EXIT(et);
			if (ifv == ifp)
				break;
		}
		if (ifv != ifp)
			return;
	}
	sin = (struct sockaddr_in *)ifa->ifa_addr;

	switch (event) {
	case IFADDR_EVENT_ADD:
		if (sin->sin_family == AF_INET)
			irdma_add_ipv4_addr(rf->iwdev, ifp);
		else if (sin->sin_family == AF_INET6)
			irdma_add_ipv6_addr(rf->iwdev, ifp);
		break;
	case IFADDR_EVENT_DEL:
		if (sin->sin_family == AF_INET) {
			ip[0] = ntohl(sin->sin_addr.s_addr);
		} else if (sin->sin_family == AF_INET6) {
			irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
		} else {
			break;
		}
		for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
			if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
				irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
						       rf->arp_table[arp_index].ip_addr,
						       IRDMA_ARP_DELETE);
			}
		}
		break;
	default:
		break;
	}
}

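/**
 * irdma_reg_ipaddr_event_cb - register for ifaddr_event_ext events
 * @rf: RDMA PCI function
 */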
void
irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
						       irdma_ifaddrevent_handler,
						       rf,
						       EVENTHANDLER_PRI_ANY);
}

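/**
 * irdma_dereg_ipaddr_event_cb - deregister the ifaddr_event_ext handler
 * @rf: RDMA PCI function
 */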
void
irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
}

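/**
 * irdma_get_route_ifp - find the egress interface for a destination
 * @dst_sin: destination IPv4/IPv6 address
 * @netdev: net device owned by the irdma device
 * @ifp: filled with the egress interface
 * @nexthop: filled with the next-hop address to resolve
 * @gateway: set when the route goes through a gateway
 *
 * Looks up @dst_sin in the default FIB and fails unless the route egresses
 * through @netdev or one of its VLANs.
 */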
static int
irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
		    struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
{
	struct nhop_object *nh;

	if (dst_sin->sa_family == AF_INET6)
		nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
	else
		nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
	if (!nh || (nh->nh_ifp != netdev &&
		    rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
		goto rt_not_found;
	*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
	*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
	*ifp = nh->nh_ifp;

	return 0;

rt_not_found:
	pr_err("irdma: route not found\n");
	return -ENETUNREACH;
}

/**
 * irdma_get_dst_mac - get destination mac address
 * @cm_node: connection's node
 * @dst_sin: destination address information
 * @dst_mac: mac address array to return
 */
int
irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
{
	struct ifnet *netdev = cm_node->iwdev->netdev;
#ifdef VIMAGE
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
	struct ifnet *ifp;
	struct llentry *lle;
	struct sockaddr *nexthop;
	struct epoch_tracker et;
	int err;
	bool gateway;

	NET_EPOCH_ENTER(et);
	CURVNET_SET_QUIET(vnet);
	err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
	if (err)
		goto get_route_fail;

	if (dst_sin->sa_family == AF_INET) {
		err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else if (dst_sin->sa_family == AF_INET6) {
		err = nd6_resolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
	} else {
		err = -EPROTONOSUPPORT;
	}

get_route_fail:
	CURVNET_RESTORE();
	NET_EPOCH_EXIT(et);
	if (err) {
		pr_err("failed to resolve neighbor address (err=%d)\n",
		       err);
		return -ENETUNREACH;
	}

	return 0;
}

/**
 * irdma_addr_resolve_neigh - resolve neighbor address
 * @cm_node: connection's node
 * @dst_ip: remote ip address
 * @arpindex: index of the existing arp entry, if one exists
 */
int
irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
			 u32 dst_ip, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in dst_sin = {};
	int err;
	u32 ip[4] = {};
	u8 dst_mac[MAX_ADDR_LEN];

	dst_sin.sin_len = sizeof(dst_sin);
	dst_sin.sin_family = AF_INET;
	dst_sin.sin_port = 0;
	dst_sin.sin_addr.s_addr = htonl(dst_ip);

	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
	if (err)
		return arpindex;

	ip[0] = dst_ip;

	return irdma_add_arp(iwdev->rf, ip, dst_mac);
}

/**
 * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @cm_node: connection's node
 * @dest: remote ip address
 * @arpindex: index of the existing arp entry, if one exists
 */
int
irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
			      u32 *dest, int arpindex)
{
	struct irdma_device *iwdev = cm_node->iwdev;
	struct sockaddr_in6 dst_addr = {};
	int err;
	u8 dst_mac[MAX_ADDR_LEN];

	dst_addr.sin6_family = AF_INET6;
	dst_addr.sin6_len = sizeof(dst_addr);
	dst_addr.sin6_scope_id = iwdev->netdev->if_index;

	irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
	if (err)
		return arpindex;

	return irdma_add_arp(iwdev->rf, dest, dst_mac);
}

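/**
 * irdma_resolve_neigh_lpb_chk - resolve the neighbor, checking for loopback
 * @iwdev: irdma device
 * @cm_node: connection's node
 * @cm_info: connection info with the remote address
 *
 * Returns the arp index for the remote address, resolving the neighbor
 * first unless the connection is a local loopback.
 */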
int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
			    struct irdma_cm_info *cm_info)
{
	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
	int arpindex;
	int oldarpindex;

	if ((cm_node->ipv4 &&
	     irdma_ipv4_is_lpb(vnet, cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	    (!cm_node->ipv4 &&
	     irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
		cm_node->do_lpb = true;
		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					   NULL,
					   IRDMA_ARP_RESOLVE);
	} else {
		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
					      NULL,
					      IRDMA_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = irdma_addr_resolve_neigh(cm_node,
							    cm_info->rem_addr[0],
							    oldarpindex);
		else
			arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
								 cm_info->rem_addr,
								 oldarpindex);
	}
	return arpindex;
}

/**
 * irdma_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
void
irdma_add_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_add(&hdl->list, &irdma_handlers);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
void
irdma_del_handler(struct irdma_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&irdma_handler_lock, flags);
}

/**
 * irdma_set_rf_user_cfg_params - apply user configurable settings
 * @rf: RDMA PCI function
 */
void
irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
{
	int en_rem_endpoint_trk = 0;
	int limits_sel = 4;

	rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
	rf->limits_sel = limits_sel;
	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
	/* Enable DCQCN algorithm by default */
	rf->dcqcn_ena = true;
}

/**
 * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
 * @arg1: pointer to rf
 * @arg2: unused
 * @oidp: sysctl oid structure
 * @req: sysctl request pointer
 */
static int
irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
{
	struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
	int ret;
	u8 dcqcn_ena = rf->dcqcn_ena;

	ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
	if ((ret) || (req->newptr == NULL))
		return ret;
	if (dcqcn_ena == 0)
		rf->dcqcn_ena = false;
	else
		rf->dcqcn_ena = true;

	return 0;
}

/**
 * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
 * @rf: RDMA PCI function
 *
 * Create DCQCN related sysctls for the driver.
 * dcqcn_ena is a writable setting and applies to the next QP creation or
 * context setting.
 * All other settings are of RDTUN type (read on driver load) and are
 * applicable only to CQP creation.
 */
void
irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
{
	struct sysctl_oid_list *irdma_sysctl_oid_list;

	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);

	SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
			OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
			irdma_sysctl_dcqcn_update, "A",
			"enables DCQCN algorithm for RoCEv2 on all ports, default=true");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.cc_cfg_valid, 0,
		      "set DCQCN parameters to be valid, default=false");

	rf->dcqcn_params.min_dec_factor = 1;
	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_dec_factor, 0,
		      "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
		      &rf->dcqcn_params.min_rate, 0,
		      "set minimum rate limit value, in MBits per second, default=0");

	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
		      "set number of times to stay in each stage of bandwidth recovery, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
		       "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0");

	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rai_factor, 0,
		       "set number of MSS to add to the congestion window in additive increase mode, default=0");

	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.hai_factor, 0,
		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=0");

	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
		       &rf->dcqcn_params.rreduce_mperiod, 0,
		       "set minimum time between 2 consecutive rate reductions for a single flow, default=0");
}

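/*
 * Example usage (a sketch; the exact sysctl node depends on where
 * rf->tun_info.irdma_sysctl_tree is attached for the device, shown here
 * as the hypothetical placeholder <irdma-node>):
 *
 *   # dcqcn_enable is CTLFLAG_RW and may be flipped at runtime; it takes
 *   # effect for QPs created afterwards:
 *   sysctl <irdma-node>.dcqcn_enable=0
 *
 *   # the CTLFLAG_RDTUN tunables are read at driver load, so set them via
 *   # loader.conf(5) or kenv(1) before loading the driver:
 *   kenv <irdma-node>.dcqcn_min_dec_factor=10
 */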
/**
 * irdma_dmamap_cb - callback for bus_dmamap_load
 */
static void
irdma_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}

/**
 * irdma_allocate_dma_mem - allocate dma memory
 * @hw: pointer to hw structure
 * @mem: structure holding memory information
 * @size: requested size
 * @alignment: requested alignment
 */
void *
irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
		       u64 size, u32 alignment)
{
	struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
	device_t dev = dev_ctx->dev;
	void *va;
	int ret;

	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				 alignment, 0,	/* alignment, bounds */
				 BUS_SPACE_MAXADDR,	/* lowaddr */
				 BUS_SPACE_MAXADDR,	/* highaddr */
				 NULL, NULL,	/* filter, filterarg */
				 size,	/* maxsize */
				 1,	/* nsegments */
				 size,	/* maxsegsize */
				 BUS_DMA_ALLOCNOW,	/* flags */
				 NULL,	/* lockfunc */
				 NULL,	/* lockfuncarg */
				 &mem->tag);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
			      __func__, ret);
		goto fail_0;
	}
	ret = bus_dmamem_alloc(mem->tag, (void **)&va,
			       BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
			      __func__, ret);
		goto fail_1;
	}
	ret = bus_dmamap_load(mem->tag, mem->map, va, size,
			      irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
	if (ret != 0) {
		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
			      __func__, ret);
		goto fail_2;
	}
	/* record the VA so irdma_free_dma_mem() can release it later */
	mem->va = va;
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return mem->va;
fail_2:
	bus_dmamem_free(mem->tag, va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;

	return NULL;
}

/**
 * irdma_free_dma_mem - Memory free helper fn
 * @hw: pointer to hw structure
 * @mem: ptr to mem struct to free
 */
int
irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
{
	if (!mem)
		return -EINVAL;
	bus_dmamap_sync(mem->tag, mem->map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	if (!mem->va)
		return -ENOMEM;
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);

	mem->va = NULL;

	return 0;
}

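/**
 * irdma_prm_rem_bitmapmem - free the bitmap memory attached to a chunk
 * @hw: pointer to hw structure (unused)
 * @chunk: chunk holding the bitmap allocation
 */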
inline void
irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
{
	kfree(chunk->bitmapmem.va);
}
739