xref: /freebsd/sys/dev/irdma/fbsd_kcompat.c (revision d0b2dbfa)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2021 - 2022 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include "osdep.h"
36 #include "ice_rdma.h"
37 #include "irdma_di_if.h"
38 #include "irdma_main.h"
39 #include <sys/gsb_crc32.h>
40 #include <netinet/in_fib.h>
41 #include <netinet6/in6_fib.h>
42 #include <net/route/nhop.h>
43 #include <net/if_llatbl.h>
44 
/* additional QP debugging option. Keep false unless needed */
46 bool irdma_upload_context = false;
47 
48 inline u32
49 irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg){
50 
51 	KASSERT(reg < dev_ctx->mem_bus_space_size,
52 		("irdma: register offset %#jx too large (max is %#jx)",
53 		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
54 
55 	return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
56 				 dev_ctx->mem_bus_space_handle, reg));
57 }
58 
59 inline void
60 irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
61 {
62 
63 	KASSERT(reg < dev_ctx->mem_bus_space_size,
64 		("irdma: register offset %#jx too large (max is %#jx)",
65 		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
66 
67 	bus_space_write_4(dev_ctx->mem_bus_space_tag,
68 			  dev_ctx->mem_bus_space_handle, reg, value);
69 }
70 
71 inline u64
72 irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg){
73 
74 	KASSERT(reg < dev_ctx->mem_bus_space_size,
75 		("irdma: register offset %#jx too large (max is %#jx)",
76 		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
77 
78 	return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
79 				 dev_ctx->mem_bus_space_handle, reg));
80 }
81 
82 inline void
83 irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
84 {
85 
86 	KASSERT(reg < dev_ctx->mem_bus_space_size,
87 		("irdma: register offset %#jx too large (max is %#jx)",
88 		 (uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
89 
90 	bus_space_write_8(dev_ctx->mem_bus_space_tag,
91 			  dev_ctx->mem_bus_space_handle, reg, value);
92 
93 }
94 
95 void
96 irdma_request_reset(struct irdma_pci_f *rf)
97 {
98 	struct ice_rdma_peer *peer = rf->peer_info;
99 	struct ice_rdma_request req = {0};
100 
101 	req.type = ICE_RDMA_EVENT_RESET;
102 
103 	printf("%s:%d requesting pf-reset\n", __func__, __LINE__);
104 	IRDMA_DI_REQ_HANDLER(peer, &req);
105 }
106 
107 int
108 irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
109 {
110 	struct irdma_device *iwdev = vsi->back_vsi;
111 	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
112 	struct ice_rdma_request req = {0};
113 	struct ice_rdma_qset_update *res = &req.res;
114 
115 	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
116 	res->cnt_req = 1;
117 	res->res_type = ICE_RDMA_QSET_ALLOC;
118 	res->qsets.qs_handle = tc_node->qs_handle;
119 	res->qsets.tc = tc_node->traffic_class;
120 	res->qsets.vsi_id = vsi->vsi_idx;
121 
122 	IRDMA_DI_REQ_HANDLER(peer, &req);
123 
124 	tc_node->l2_sched_node_id = res->qsets.teid;
125 	vsi->qos[tc_node->user_pri].l2_sched_node_id =
126 	    res->qsets.teid;
127 
128 	return 0;
129 }
130 
131 void
132 irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
133 {
134 	struct irdma_device *iwdev = vsi->back_vsi;
135 	struct ice_rdma_peer *peer = iwdev->rf->peer_info;
136 	struct ice_rdma_request req = {0};
137 	struct ice_rdma_qset_update *res = &req.res;
138 
139 	req.type = ICE_RDMA_EVENT_QSET_REGISTER;
140 	res->res_allocated = 1;
141 	res->res_type = ICE_RDMA_QSET_FREE;
142 	res->qsets.vsi_id = vsi->vsi_idx;
143 	res->qsets.teid = tc_node->l2_sched_node_id;
144 	res->qsets.qs_handle = tc_node->qs_handle;
145 
146 	IRDMA_DI_REQ_HANDLER(peer, &req);
147 }
148 
149 void *
150 hw_to_dev(struct irdma_hw *hw)
151 {
152 	struct irdma_pci_f *rf;
153 
154 	rf = container_of(hw, struct irdma_pci_f, hw);
155 	return rf->pcidev;
156 }
157 
/**
 * irdma_free_hash_desc - free a CRC descriptor
 * @desc: unused on FreeBSD; CRC is computed without a descriptor
 */
void
irdma_free_hash_desc(void *desc)
{
}
163 
/**
 * irdma_init_hash_desc - initialize a CRC descriptor
 * @desc: unused on FreeBSD; CRC is computed without a descriptor
 *
 * Always succeeds.
 */
int
irdma_init_hash_desc(void **desc)
{
	return (0);
}
169 
170 int
171 irdma_ieq_check_mpacrc(void *desc,
172 		       void *addr, u32 len, u32 val)
173 {
174 	u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
175 	int ret_code = 0;
176 
177 	if (crc != val) {
178 		irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
179 		ret_code = -EINVAL;
180 	}
181 	printf("%s: result crc=%x value=%x\n", __func__, crc, val);
182 	return ret_code;
183 }
184 
185 static u_int
186 irdma_add_ipv6_cb(void *arg, struct ifaddr *addr, u_int count __unused)
187 {
188 	struct irdma_device *iwdev = arg;
189 	struct sockaddr_in6 *sin6;
190 	u32 local_ipaddr6[4] = {};
191 	char ip6buf[INET6_ADDRSTRLEN];
192 	u8 *mac_addr;
193 
194 	sin6 = (struct sockaddr_in6 *)addr->ifa_addr;
195 
196 	irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
197 
198 	mac_addr = if_getlladdr(addr->ifa_ifp);
199 
200 	printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
201 	       __func__, __LINE__,
202 	       ip6_sprintf(ip6buf, &sin6->sin6_addr),
203 	       mac_addr[0], mac_addr[1], mac_addr[2],
204 	       mac_addr[3], mac_addr[4], mac_addr[5]);
205 
206 	irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
207 			       IRDMA_ARP_ADD);
208 	return (0);
209 }
210 
211 /**
212  * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
213  * @iwdev: irdma device
214  * @ifp: interface network device pointer
215  */
216 static void
217 irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
218 {
219 	if_addr_rlock(ifp);
220 	if_foreach_addr_type(ifp, AF_INET6, irdma_add_ipv6_cb, iwdev);
221 	if_addr_runlock(ifp);
222 }
223 
224 static u_int
225 irdma_add_ipv4_cb(void *arg, struct ifaddr *addr, u_int count __unused)
226 {
227 	struct irdma_device *iwdev = arg;
228 	struct sockaddr_in *sin;
229 	u32 ip_addr[4] = {};
230 	uint8_t *mac_addr;
231 
232 	sin = (struct sockaddr_in *)addr->ifa_addr;
233 
234 	ip_addr[0] = ntohl(sin->sin_addr.s_addr);
235 
236 	mac_addr = if_getlladdr(addr->ifa_ifp);
237 
238 	printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
239 	       __func__, __LINE__,
240 	       ip_addr[0] >> 24,
241 	       (ip_addr[0] >> 16) & 0xFF,
242 	       (ip_addr[0] >> 8) & 0xFF,
243 	       ip_addr[0] & 0xFF,
244 	       mac_addr[0], mac_addr[1], mac_addr[2],
245 	       mac_addr[3], mac_addr[4], mac_addr[5]);
246 
247 	irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
248 			       IRDMA_ARP_ADD);
249 	return (0);
250 }
251 
252 /**
253  * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
254  * @iwdev: irdma device
255  * @ifp: interface network device pointer
256  */
257 static void
258 irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
259 {
260 	if_addr_rlock(ifp);
261 	if_foreach_addr_type(ifp, AF_INET, irdma_add_ipv4_cb, iwdev);
262 	if_addr_runlock(ifp);
263 }
264 
265 /**
266  * irdma_add_ip - add ip addresses
267  * @iwdev: irdma device
268  *
269  * Add ipv4/ipv6 addresses to the arp cache
270  */
271 void
272 irdma_add_ip(struct irdma_device *iwdev)
273 {
274 	struct ifnet *ifp = iwdev->netdev;
275 	struct ifnet *ifv;
276 	int i;
277 
278 	irdma_add_ipv4_addr(iwdev, ifp);
279 	irdma_add_ipv6_addr(iwdev, ifp);
280 	for (i = 0; if_getvlantrunk(ifp) != NULL && i < VLAN_N_VID; ++i) {
281 		ifv = VLAN_DEVAT(ifp, i);
282 		if (!ifv)
283 			continue;
284 		irdma_add_ipv4_addr(iwdev, ifv);
285 		irdma_add_ipv6_addr(iwdev, ifv);
286 	}
287 }
288 
/*
 * irdma_ifaddrevent_handler - ifaddr_event_ext callback keeping the HW
 * ARP cache in sync with interface address changes.
 * @arg: RDMA PCI function passed at EVENTHANDLER_REGISTER time
 * @ifp: interface the address event fired on
 * @ifa: address being added or removed
 * @event: IFADDR_EVENT_ADD or IFADDR_EVENT_DEL
 *
 * Events for interfaces other than our netdev or a VLAN stacked on it
 * are ignored. On add, all addresses of the matching family are
 * (re)inserted; on delete, every ARP entry holding the removed address
 * is purged.
 */
static void
irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
{
	struct irdma_pci_f *rf = arg;
	struct ifnet *ifv = NULL;
	struct sockaddr_in *sin;
	struct epoch_tracker et;
	int arp_index = 0, i = 0;
	u32 ip[4] = {};

	if (!ifa || !ifa->ifa_addr || !ifp)
		return;
	/* Accept the event only for our netdev or one of its VLANs. */
	if (rf->iwdev->netdev != ifp) {
		for (i = 0; if_getvlantrunk(rf->iwdev->netdev) != NULL && i < VLAN_N_VID; ++i) {
			/* VLAN_DEVAT requires the net epoch. */
			NET_EPOCH_ENTER(et);
			ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
			NET_EPOCH_EXIT(et);
			if (ifv == ifp)
				break;
		}
		if (ifv != ifp)
			return;
	}
	sin = (struct sockaddr_in *)ifa->ifa_addr;

	switch (event) {
	case IFADDR_EVENT_ADD:
		if (sin->sin_family == AF_INET)
			irdma_add_ipv4_addr(rf->iwdev, ifp);
		else if (sin->sin_family == AF_INET6)
			irdma_add_ipv6_addr(rf->iwdev, ifp);
		break;
	case IFADDR_EVENT_DEL:
		/* Normalize the deleted address to host byte order. */
		if (sin->sin_family == AF_INET) {
			ip[0] = ntohl(sin->sin_addr.s_addr);
		} else if (sin->sin_family == AF_INET6) {
			irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
		} else {
			break;
		}
		/* Purge every allocated ARP entry matching the address. */
		for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
			if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
				irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
						       rf->arp_table[arp_index].ip_addr,
						       IRDMA_ARP_DELETE);
			}
		}
		break;
	default:
		break;
	}
}
341 
/*
 * irdma_reg_ipaddr_event_cb - subscribe to interface address changes.
 * @rf: RDMA PCI function
 *
 * The returned tag is stored so irdma_dereg_ipaddr_event_cb() can
 * unregister later.
 */
void
irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
						       irdma_ifaddrevent_handler,
						       rf,
						       EVENTHANDLER_PRI_ANY);
}
350 
/*
 * irdma_dereg_ipaddr_event_cb - unsubscribe from interface address
 * changes using the tag saved by irdma_reg_ipaddr_event_cb().
 * @rf: RDMA PCI function
 */
void
irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
	EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
}
356 
/*
 * irdma_get_route_ifp - resolve egress interface and nexthop for a destination
 * @dst_sin: destination sockaddr (AF_INET or AF_INET6)
 * @netdev: interface the route is expected to egress through
 * @ifp: out - egress interface from the nexthop
 * @nexthop: out - gateway sockaddr if routed, else @dst_sin itself
 * @gateway: out - true when the nexthop is a gateway
 *
 * Looks up the default FIB only. NOTE(review): caller must hold the net
 * epoch (irdma_get_dst_mac does NET_EPOCH_ENTER before calling here).
 *
 * Returns 0 on success or -ENETUNREACH when there is no route, or the
 * route does not egress via @netdev or a VLAN stacked on it.
 */
static int
irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
		    struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
{
	struct nhop_object *nh;

	if (dst_sin->sa_family == AF_INET6)
		nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
	else
		nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
	/* Route must leave through our netdev (or a VLAN on top of it). */
	if (!nh || (nh->nh_ifp != netdev &&
		    rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
		goto rt_not_found;
	*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
	*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
	*ifp = nh->nh_ifp;

	return 0;

rt_not_found:
	pr_err("irdma: route not found\n");
	return -ENETUNREACH;
}
380 
381 /**
382  * irdma_get_dst_mac - get destination mac address
383  * @cm_node: connection's node
384  * @dst_sin: destination address information
385  * @dst_mac: mac address array to return
386  */
387 int
388 irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
389 {
390 	struct ifnet *netdev = cm_node->iwdev->netdev;
391 #ifdef VIMAGE
392 	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
393 	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
394 #endif
395 	struct ifnet *ifp;
396 	struct llentry *lle;
397 	struct sockaddr *nexthop;
398 	struct epoch_tracker et;
399 	int err;
400 	bool gateway;
401 
402 	NET_EPOCH_ENTER(et);
403 	CURVNET_SET_QUIET(vnet);
404 	err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
405 	if (err)
406 		goto get_route_fail;
407 
408 	if (dst_sin->sa_family == AF_INET) {
409 		err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
410 	} else if (dst_sin->sa_family == AF_INET6) {
411 		err = nd6_resolve(ifp, LLE_SF(AF_INET6, gateway), NULL, nexthop,
412 				  dst_mac, NULL, &lle);
413 	} else {
414 		err = -EPROTONOSUPPORT;
415 	}
416 
417 get_route_fail:
418 	CURVNET_RESTORE();
419 	NET_EPOCH_EXIT(et);
420 	if (err) {
421 		pr_err("failed to resolve neighbor address (err=%d)\n",
422 		       err);
423 		return -ENETUNREACH;
424 	}
425 
426 	return 0;
427 }
428 
429 /**
430  * irdma_addr_resolve_neigh - resolve neighbor address
431  * @cm_node: connection's node
432  * @dst_ip: remote ip address
433  * @arpindex: if there is an arp entry
434  */
435 int
436 irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
437 			 u32 dst_ip, int arpindex)
438 {
439 	struct irdma_device *iwdev = cm_node->iwdev;
440 	struct sockaddr_in dst_sin = {};
441 	int err;
442 	u32 ip[4] = {};
443 	u8 dst_mac[MAX_ADDR_LEN];
444 
445 	dst_sin.sin_len = sizeof(dst_sin);
446 	dst_sin.sin_family = AF_INET;
447 	dst_sin.sin_port = 0;
448 	dst_sin.sin_addr.s_addr = htonl(dst_ip);
449 
450 	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
451 	if (err)
452 		return arpindex;
453 
454 	ip[0] = dst_ip;
455 
456 	return irdma_add_arp(iwdev->rf, ip, dst_mac);
457 }
458 
459 /**
460  * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
461  * @cm_node: connection's node
462  * @dest: remote ip address
463  * @arpindex: if there is an arp entry
464  */
465 int
466 irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
467 			      u32 *dest, int arpindex)
468 {
469 	struct irdma_device *iwdev = cm_node->iwdev;
470 	struct sockaddr_in6 dst_addr = {};
471 	int err;
472 	u8 dst_mac[MAX_ADDR_LEN];
473 
474 	dst_addr.sin6_family = AF_INET6;
475 	dst_addr.sin6_len = sizeof(dst_addr);
476 	dst_addr.sin6_scope_id = if_getindex(iwdev->netdev);
477 
478 	irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
479 	err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
480 	if (err)
481 		return arpindex;
482 
483 	return irdma_add_arp(iwdev->rf, dest, dst_mac);
484 }
485 
486 int
487 irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
488 			    struct irdma_cm_info *cm_info)
489 {
490 #ifdef VIMAGE
491 	struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
492 	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
493 #endif
494 	int arpindex;
495 	int oldarpindex;
496 	bool is_lpb = false;
497 
498 	CURVNET_SET_QUIET(vnet);
499 	is_lpb = cm_node->ipv4 ?
500 	    irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0]) :
501 	    irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr);
502 	CURVNET_RESTORE();
503 	if (is_lpb) {
504 		cm_node->do_lpb = true;
505 		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
506 					   NULL,
507 					   IRDMA_ARP_RESOLVE);
508 	} else {
509 		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
510 					      NULL,
511 					      IRDMA_ARP_RESOLVE);
512 		if (cm_node->ipv4)
513 			arpindex = irdma_addr_resolve_neigh(cm_node,
514 							    cm_info->rem_addr[0],
515 							    oldarpindex);
516 		else
517 			arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
518 								 cm_info->rem_addr,
519 								 oldarpindex);
520 	}
521 	return arpindex;
522 }
523 
524 /**
525  * irdma_add_handler - add a handler to the list
526  * @hdl: handler to be added to the handler list
527  */
528 void
529 irdma_add_handler(struct irdma_handler *hdl)
530 {
531 	unsigned long flags;
532 
533 	spin_lock_irqsave(&irdma_handler_lock, flags);
534 	list_add(&hdl->list, &irdma_handlers);
535 	spin_unlock_irqrestore(&irdma_handler_lock, flags);
536 }
537 
538 /**
539  * irdma_del_handler - delete a handler from the list
540  * @hdl: handler to be deleted from the handler list
541  */
542 void
543 irdma_del_handler(struct irdma_handler *hdl)
544 {
545 	unsigned long flags;
546 
547 	spin_lock_irqsave(&irdma_handler_lock, flags);
548 	list_del(&hdl->list);
549 	spin_unlock_irqrestore(&irdma_handler_lock, flags);
550 }
551 
552 /**
553  * irdma_set_rf_user_cfg_params - apply user configurable settings
554  * @rf: RDMA PCI function
555  */
556 void
557 irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
558 {
559 	int en_rem_endpoint_trk = 0;
560 	int limits_sel = 4;
561 
562 	rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
563 	rf->limits_sel = limits_sel;
564 	rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
565 	/* Enable DCQCN algorithm by default */
566 	rf->dcqcn_ena = true;
567 }
568 
569 /**
570  * irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
571  * @arg1: pointer to rf
572  * @arg2: unused
573  * @oidp: sysctl oid structure
574  * @req: sysctl request pointer
575  */
576 static int
577 irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
578 {
579 	struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
580 	int ret;
581 	u8 dcqcn_ena = rf->dcqcn_ena;
582 
583 	ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
584 	if ((ret) || (req->newptr == NULL))
585 		return ret;
586 	if (dcqcn_ena == 0)
587 		rf->dcqcn_ena = false;
588 	else
589 		rf->dcqcn_ena = true;
590 
591 	return 0;
592 }
593 
594 /**
595  * irdma_dcqcn_tunables_init - create tunables for dcqcn settings
596  * @rf: RDMA PCI function
597  *
598  * Create DCQCN related sysctls for the driver.
599  * dcqcn_ena is writeable settings and applicable to next QP creation or
600  * context setting.
601  * all other settings are of RDTUN type (read on driver load) and are
602  * applicable only to CQP creation.
603  */
604 void
605 irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
606 {
607 	struct sysctl_oid_list *irdma_sysctl_oid_list;
608 
609 	irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
610 
611 	SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
612 			OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
613 			irdma_sysctl_dcqcn_update, "A",
614 			"enables DCQCN algorithm for RoCEv2 on all ports, default=true");
615 
616 	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
617 		      OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
618 		      &rf->dcqcn_params.cc_cfg_valid, 0,
619 		      "set DCQCN parameters to be valid, default=false");
620 
621 	rf->dcqcn_params.min_dec_factor = 1;
622 	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
623 		      OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
624 		      &rf->dcqcn_params.min_dec_factor, 0,
625 		    "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");
626 
627 	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
628 		      OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
629 		      &rf->dcqcn_params.min_rate, 0,
630 		      "set minimum rate limit value, in MBits per second, default=0");
631 
632 	rf->dcqcn_params.dcqcn_f = 5;
633 	SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
634 		      OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
635 		      "set number of times to stay in each stage of bandwidth recovery, default=5");
636 
637 	rf->dcqcn_params.dcqcn_t = 0x37;
638 	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
639 		       OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
640 		       "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0x37");
641 
642 	rf->dcqcn_params.dcqcn_b = 0x249f0;
643 	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
644 		       OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
645 		       "set number of MSS to add to the congestion window in additive increase mode, default=0x249f0");
646 
647 	rf->dcqcn_params.rai_factor = 1;
648 	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
649 		       OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
650 		       &rf->dcqcn_params.rai_factor, 0,
651 		       "set number of MSS to add to the congestion window in additive increase mode, default=1");
652 
653 	rf->dcqcn_params.hai_factor = 5;
654 	SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
655 		       OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
656 		       &rf->dcqcn_params.hai_factor, 0,
657 		       "set number of MSS to add to the congestion window in hyperactive increase mode, default=5");
658 
659 	rf->dcqcn_params.rreduce_mperiod = 50;
660 	SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
661 		       OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
662 		       &rf->dcqcn_params.rreduce_mperiod, 0,
663 		       "set minimum time between 2 consecutive rate reductions for a single flow, default=50");
664 }
665 
666 /**
667  * irdma_dmamap_cb - callback for bus_dmamap_load
668  */
669 static void
670 irdma_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
671 {
672 	if (error)
673 		return;
674 	*(bus_addr_t *) arg = segs->ds_addr;
675 	return;
676 }
677 
678 /**
679  * irdma_allocate_dma_mem - allocate dma memory
680  * @hw: pointer to hw structure
681  * @mem: structure holding memory information
682  * @size: requested size
683  * @alignment: requested alignment
684  */
685 void *
686 irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
687 		       u64 size, u32 alignment)
688 {
689 	struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
690 	device_t dev = dev_ctx->dev;
691 	void *va;
692 	int ret;
693 
694 	ret = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
695 				 alignment, 0,	/* alignment, bounds */
696 				 BUS_SPACE_MAXADDR,	/* lowaddr */
697 				 BUS_SPACE_MAXADDR,	/* highaddr */
698 				 NULL, NULL,	/* filter, filterarg */
699 				 size,	/* maxsize */
700 				 1,	/* nsegments */
701 				 size,	/* maxsegsize */
702 				 BUS_DMA_ALLOCNOW,	/* flags */
703 				 NULL,	/* lockfunc */
704 				 NULL,	/* lockfuncarg */
705 				 &mem->tag);
706 	if (ret != 0) {
707 		device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
708 			      __func__, ret);
709 		goto fail_0;
710 	}
711 	ret = bus_dmamem_alloc(mem->tag, (void **)&va,
712 			       BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
713 	if (ret != 0) {
714 		device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
715 			      __func__, ret);
716 		goto fail_1;
717 	}
718 	ret = bus_dmamap_load(mem->tag, mem->map, va, size,
719 			      irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
720 	if (ret != 0) {
721 		device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
722 			      __func__, ret);
723 		goto fail_2;
724 	}
725 	mem->nseg = 1;
726 	mem->size = size;
727 	bus_dmamap_sync(mem->tag, mem->map,
728 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
729 
730 	return va;
731 fail_2:
732 	bus_dmamem_free(mem->tag, va, mem->map);
733 fail_1:
734 	bus_dma_tag_destroy(mem->tag);
735 fail_0:
736 	mem->map = NULL;
737 	mem->tag = NULL;
738 
739 	return NULL;
740 }
741 
742 /**
743  * irdma_free_dma_mem - Memory free helper fn
744  * @hw: pointer to hw structure
745  * @mem: ptr to mem struct to free
746  */
747 int
748 irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
749 {
750 	if (!mem)
751 		return -EINVAL;
752 	bus_dmamap_sync(mem->tag, mem->map,
753 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
754 	bus_dmamap_unload(mem->tag, mem->map);
755 	if (!mem->va)
756 		return -ENOMEM;
757 	bus_dmamem_free(mem->tag, mem->va, mem->map);
758 	bus_dma_tag_destroy(mem->tag);
759 
760 	mem->va = NULL;
761 
762 	return 0;
763 }
764 
/*
 * irdma_prm_rem_bitmapmem - release the bitmap backing store of a
 * pble-resource-manager chunk.
 * @hw: unused here; kept for API symmetry
 * @chunk: chunk whose bitmap memory is freed
 */
inline void
irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
{
	kfree(chunk->bitmapmem.va);
}
770 
/*
 * irdma_cleanup_dead_qps - free resources of QPs left on a VSI's QoS lists
 * @vsi: VSI whose per-priority QP lists are drained
 *
 * Walks each priority's qplist, skipping UDA QPs, and frees the DMA
 * memory and wrid arrays of every remaining QP. The next list element
 * is fetched *before* kfree(iwqp) so the cursor never points at freed
 * memory.
 */
void
irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi)
{
	struct irdma_sc_qp *qp = NULL;
	struct irdma_qp *iwqp;
	struct irdma_pci_f *rf;
	u8 i;

	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
		/* NULL cursor yields the first QP on the list. */
		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
		while (qp) {
			/* UDA QPs are not cleaned up here. */
			if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_UDA) {
				qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
				continue;
			}
			iwqp = qp->qp_uk.back_qp;
			rf = iwqp->iwdev->rf;
			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
			irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);

			kfree(iwqp->kqp.sq_wrid_mem);
			kfree(iwqp->kqp.rq_wrid_mem);
			/* Advance before freeing the containing iwqp. */
			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
			kfree(iwqp);
		}
	}
}
798