/* xref: /freebsd/sys/dev/mlx5/mlx5_en/mlx5_en_main.c (revision 0957b409) */
/*-
 * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#include <sys/sockio.h>
#include <machine/atomic.h>

#ifndef ETH_DRIVER_VERSION
#define	ETH_DRIVER_VERSION	"3.5.0"
#endif
#define DRIVER_RELDATE	"November 2018"

static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver "
	ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);

struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
};

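/*
 * Table mapping firmware link modes to ifmedia subtypes and baud
 * rates; entries left zeroed are unsupported and are skipped by the
 * lookups below.
 */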
static const struct {
	u32	subtype;
	u64	baudrate;
}	mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = {

	[MLX5E_1000BASE_CX_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_1000BASE_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_CX4] = {
		.subtype = IFM_10G_CX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KX4] = {
		.subtype = IFM_10G_KX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_20GBASE_KR2] = {
		.subtype = IFM_20G_KR2,
		.baudrate = IF_Gbps(20ULL),
	},
	[MLX5E_40GBASE_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_56GBASE_R4] = {
		.subtype = IFM_56G_R4,
		.baudrate = IF_Gbps(56ULL),
	},
	[MLX5E_10GBASE_CR] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_100GBASE_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100BASE_TX] = {
		.subtype = IFM_100_TX,
		.baudrate = IF_Mbps(100ULL),
	},
	[MLX5E_1000BASE_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_25GBASE_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GBASE_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GBASE_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
};

MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");

static void
mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	u32 eth_proto_oper;
	int error;
	u8 port_state;
	u8 is_er_type;
	u8 i;

	port_state = mlx5_query_vport_state(mdev,
	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		priv->media_status_last |= IFM_ACTIVE;
	} else {
		priv->media_status_last &= ~IFM_ACTIVE;
		priv->media_active_last = IFM_ETHER;
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		return;
	}

	error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (error) {
		priv->media_active_last = IFM_ETHER;
		priv->ifp->if_baudrate = 1;
		if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n",
		    __func__, error);
		return;
	}
	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);

	for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (MLX5E_PROT_MASK(i) & eth_proto_oper) {
			u32 subtype = mlx5e_mode_table[i].subtype;

			priv->ifp->if_baudrate =
			    mlx5e_mode_table[i].baudrate;

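			/*
			 * The PTYS register does not distinguish extended
			 * reach (ER) from long reach (LR) modules; use the
			 * PDDR range information to refine the subtype.
			 */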
			switch (subtype) {
			case IFM_10G_ER:
				error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
				if (error != 0) {
					if_printf(priv->ifp, "%s: query port pddr failed: %d\n",
					    __func__, error);
				}
				if (error != 0 || is_er_type == 0)
					subtype = IFM_10G_LR;
				break;
			case IFM_40G_LR4:
				error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type);
				if (error != 0) {
					if_printf(priv->ifp, "%s: query port pddr failed: %d\n",
					    __func__, error);
				}
				if (error == 0 && is_er_type != 0)
					subtype = IFM_40G_ER4;
				break;
			}
			priv->media_active_last = subtype | IFM_ETHER | IFM_FDX;
			break;
		}
	}
	if_link_state_change(priv->ifp, LINK_STATE_UP);
}

static void
mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx5e_priv *priv = dev->if_softc;

	ifmr->ifm_status = priv->media_status_last;
	ifmr->ifm_active = priv->media_active_last |
	    (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
	    (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
}

static u32
mlx5e_find_link_mode(u32 subtype)
{
	u32 i;
	u32 link_mode = 0;

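	/* map back the subtypes rewritten by mlx5e_update_carrier() */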
	switch (subtype) {
	case IFM_10G_LR:
		subtype = IFM_10G_ER;
		break;
	case IFM_40G_ER4:
		subtype = IFM_40G_LR4;
		break;
	}

	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;
		if (mlx5e_mode_table[i].subtype == subtype)
			link_mode |= MLX5E_PROT_MASK(i);
	}

	return (link_mode);
}

static int
mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
{
	return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
	    priv->params.rx_pauseframe_control,
	    priv->params.tx_pauseframe_control,
	    priv->params.rx_priority_flow_control,
	    priv->params.tx_priority_flow_control));
}

static int
mlx5e_set_port_pfc(struct mlx5e_priv *priv)
{
	int error;

	if (priv->params.rx_pauseframe_control ||
	    priv->params.tx_pauseframe_control) {
		if_printf(priv->ifp,
		    "Global pauseframes must be disabled before enabling PFC.\n");
		error = -EINVAL;
	} else {
		error = mlx5e_set_port_pause_and_pfc(priv);
	}
	return (error);
}

static int
mlx5e_media_change(struct ifnet *dev)
{
	struct mlx5e_priv *priv = dev->if_softc;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 eth_proto_cap;
	u32 link_mode;
	int was_opened;
	int locked;
	int error;

	locked = PRIV_LOCKED(priv);
	if (!locked)
		PRIV_LOCK(priv);

	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
		error = EINVAL;
		goto done;
	}
	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));

	/* query supported capabilities */
	error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
	if (error != 0) {
		if_printf(dev, "Query port media capability failed\n");
		goto done;
	}
	/* check for autoselect */
	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
		link_mode = eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Port media capability is zero\n");
			error = EINVAL;
			goto done;
		}
	} else {
		link_mode = link_mode & eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Unsupported link mode requested\n");
			error = EINVAL;
			goto done;
		}
	}
	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
		/* check if PFC is enabled */
		if (priv->params.rx_priority_flow_control ||
		    priv->params.tx_priority_flow_control) {
			if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n");
			error = EINVAL;
			goto done;
		}
	}
	/* update pauseframe control bits */
	priv->params.rx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
	priv->params.tx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;

	/* check if device is opened */
	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

	/* reconfigure the hardware */
	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
	error = -mlx5e_set_port_pause_and_pfc(priv);
	if (was_opened)
		mlx5_set_port_status(mdev, MLX5_PORT_UP);

done:
	if (!locked)
		PRIV_UNLOCK(priv);
	return (error);
}

static void
mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_carrier_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	PRIV_UNLOCK(priv);
}

/*
 * This function reads the physical port counters from the firmware
 * using the predefined layout given by the various MLX5E_PPORT_XXX()
 * macros. The output is converted from big-endian 64-bit values into
 * host-endian ones and stored in the "priv->stats.pport" structure.
 */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	u32 *in;
	u32 *out;
	const u64 *ptr;
	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	unsigned x;
	unsigned y;
	unsigned z;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	/*
	 * Get pointer to the 64-bit counter set which is located at a
	 * fixed offset in the output firmware request structure:
	 */
	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	/* read IEEE802_3 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
	     x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	/* read RFC2819 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);
	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read RFC2863 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read physical layer stats counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read per-priority counters */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);

	/* iterate all the priorities */
	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
		MLX5_SET(ppcnt_reg, in, prio_tc, z);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		/* read per priority stats counter group using predefined counter layout */
		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
			s->arg[y] = be64toh(ptr[x]);
	}
free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}

/*
 * This function is called regularly to collect all statistics
 * counters from the firmware. The values can be viewed through the
 * sysctl interface. Execution is serialized using the priv's global
 * configuration lock.
 */
static void
mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_stats_work);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_sq_stats *sq_stats;
	struct buf_ring *sq_br;
#if (__FreeBSD_version < 1100000)
	struct ifnet *ifp = priv->ifp;
#endif

	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tso_packets = 0;
	u64 tso_bytes = 0;
	u64 tx_queue_dropped = 0;
	u64 tx_defragged = 0;
	u64 tx_offload_none = 0;
	u64 lro_packets = 0;
	u64 lro_bytes = 0;
	u64 sw_lro_queued = 0;
	u64 sw_lro_flushed = 0;
	u64 rx_csum_none = 0;
	u64 rx_wqe_err = 0;
	u32 rx_out_of_buffer = 0;
	int i;
	int j;

	PRIV_LOCK(priv);
	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		goto free_out;
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		goto free_out;

	/* Collect first the SW counters and then the HW counters for consistency */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_channel *pch = priv->channel + i;
		struct mlx5e_rq *rq = &pch->rq;
		struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;

		/* collect stats from LRO */
		rq_stats->sw_lro_queued = rq->lro.lro_queued;
		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
		sw_lro_queued += rq_stats->sw_lro_queued;
		sw_lro_flushed += rq_stats->sw_lro_flushed;
		lro_packets += rq_stats->lro_packets;
		lro_bytes += rq_stats->lro_bytes;
		rx_csum_none += rq_stats->csum_none;
		rx_wqe_err += rq_stats->wqe_err;

		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &pch->sq[j].stats;
			sq_br = pch->sq[j].br;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			if (sq_br != NULL)
				tx_queue_dropped += sq_br->br_drops;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	s->tx_jumbo_packets =
	    priv->stats.port_stats_debug.p1519to2047octets +
	    priv->stats.port_stats_debug.p2048to4095octets +
	    priv->stats.port_stats_debug.p4096to8191octets +
	    priv->stats.port_stats_debug.p8192to10239octets;

	/* update counters */
	s->tso_packets = tso_packets;
	s->tso_bytes = tso_bytes;
	s->tx_queue_dropped = tx_queue_dropped;
	s->tx_defragged = tx_defragged;
	s->lro_packets = lro_packets;
	s->lro_bytes = lro_bytes;
	s->sw_lro_queued = sw_lro_queued;
	s->sw_lro_flushed = sw_lro_flushed;
	s->rx_csum_none = rx_csum_none;
	s->rx_wqe_err = rx_wqe_err;

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	/* get number of out-of-buffer drops first */
	if (mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
	    &rx_out_of_buffer))
		goto free_out;

	/* accumulate difference into a 64-bit counter */
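	/* (the u32 cast makes the subtraction wrap correctly when the 32-bit HW counter overflows) */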
	s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev);
	s->rx_out_of_buffer_prev = rx_out_of_buffer;

	/* get port statistics */
	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
		goto free_out;

#define	MLX5_GET_CTR(out, x) \
	MLX5_GET64(query_vport_counter_out, out, x)

	s->rx_error_packets =
	    MLX5_GET_CTR(out, received_errors.packets);
	s->rx_error_bytes =
	    MLX5_GET_CTR(out, received_errors.octets);
	s->tx_error_packets =
	    MLX5_GET_CTR(out, transmit_errors.packets);
	s->tx_error_bytes =
	    MLX5_GET_CTR(out, transmit_errors.octets);

	s->rx_unicast_packets =
	    MLX5_GET_CTR(out, received_eth_unicast.packets);
	s->rx_unicast_bytes =
	    MLX5_GET_CTR(out, received_eth_unicast.octets);
	s->tx_unicast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
	s->tx_unicast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

	s->rx_multicast_packets =
	    MLX5_GET_CTR(out, received_eth_multicast.packets);
	s->rx_multicast_bytes =
	    MLX5_GET_CTR(out, received_eth_multicast.octets);
	s->tx_multicast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
	s->tx_multicast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

	s->rx_broadcast_packets =
	    MLX5_GET_CTR(out, received_eth_broadcast.packets);
	s->rx_broadcast_bytes =
	    MLX5_GET_CTR(out, received_eth_broadcast.octets);
	s->tx_broadcast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
	s->tx_broadcast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	s->rx_packets =
	    s->rx_unicast_packets +
	    s->rx_multicast_packets +
	    s->rx_broadcast_packets -
	    s->rx_out_of_buffer;
	s->rx_bytes =
	    s->rx_unicast_bytes +
	    s->rx_multicast_bytes +
	    s->rx_broadcast_bytes;
	s->tx_packets =
	    s->tx_unicast_packets +
	    s->tx_multicast_packets +
	    s->tx_broadcast_packets;
	s->tx_bytes =
	    s->tx_unicast_bytes +
	    s->tx_multicast_bytes +
	    s->tx_broadcast_bytes;

	/* Update calculated offload counters */
	s->tx_csum_offload = s->tx_packets - tx_offload_none;
	s->rx_csum_good = s->rx_packets - s->rx_csum_none;

	/* Get physical port counters */
	mlx5e_update_pport_counters(priv);

#if (__FreeBSD_version < 1100000)
	/* no get_counters interface in fbsd 10 */
	ifp->if_ipackets = s->rx_packets;
	ifp->if_ierrors = s->rx_error_packets +
	    priv->stats.pport.alignment_err +
	    priv->stats.pport.check_seq_err +
	    priv->stats.pport.crc_align_errors +
	    priv->stats.pport.in_range_len_errors +
	    priv->stats.pport.jabbers +
	    priv->stats.pport.out_of_range_len +
	    priv->stats.pport.oversize_pkts +
	    priv->stats.pport.symbol_err +
	    priv->stats.pport.too_long_errors +
	    priv->stats.pport.undersize_pkts +
	    priv->stats.pport.unsupported_op_rx;
	ifp->if_iqdrops = s->rx_out_of_buffer +
	    priv->stats.pport.drop_events;
	ifp->if_opackets = s->tx_packets;
	ifp->if_oerrors = s->tx_error_packets;
	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
	ifp->if_ibytes = s->rx_bytes;
	ifp->if_obytes = s->tx_bytes;
	ifp->if_collisions =
	    priv->stats.pport.collisions;
#endif

free_out:
	kvfree(out);

	/* Update diagnostics, if any */
	if (priv->params_ethtool.diag_pci_enable ||
	    priv->params_ethtool.diag_general_enable) {
		int error = mlx5_core_get_diagnostics_full(mdev,
		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
		if (error != 0)
			if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
	}
	PRIV_UNLOCK(priv);
}

static void
mlx5e_update_stats(void *arg)
{
	struct mlx5e_priv *priv = arg;

	queue_work(priv->wq, &priv->update_stats_work);

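	/* rearm the watchdog so statistics are refreshed once per second */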
	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
}

static void
mlx5e_async_event_sub(struct mlx5e_priv *priv,
    enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	mtx_lock(&priv->async_events_mtx);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		mlx5e_async_event_sub(priv, event);
	mtx_unlock(&priv->async_events_mtx);
}

static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mtx_lock(&priv->async_events_mtx);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	mtx_unlock(&priv->async_events_mtx);
}

static void mlx5e_calibration_callout(void *arg);
static int mlx5e_calibration_duration = 20;
static int mlx5e_fast_calibration = 1;
static int mlx5e_normal_calibration = 30;

static SYSCTL_NODE(_hw_mlx5, OID_AUTO, calibr, CTLFLAG_RW, 0,
    "MLX5 timestamp calibration parameters");

SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, duration, CTLFLAG_RWTUN,
    &mlx5e_calibration_duration, 0,
    "Duration of initial calibration");
SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, fast, CTLFLAG_RWTUN,
    &mlx5e_fast_calibration, 0,
    "Recalibration interval during initial calibration");
SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, normal, CTLFLAG_RWTUN,
    &mlx5e_normal_calibration, 0,
    "Recalibration interval during normal operations");

/*
 * Ignites the calibration process.
 */
static void
mlx5e_reset_calibration_callout(struct mlx5e_priv *priv)
{

	if (priv->clbr_done == 0)
		mlx5e_calibration_callout(priv);
	else
		callout_reset_curcpu(&priv->tstmp_clbr, (priv->clbr_done <
		    mlx5e_calibration_duration ? mlx5e_fast_calibration :
		    mlx5e_normal_calibration) * hz, mlx5e_calibration_callout,
		    priv);
}

static uint64_t
mlx5e_timespec2usec(const struct timespec *ts)
{

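	/* NB: despite its name, this function returns nanoseconds */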
	return ((uint64_t)ts->tv_sec * 1000000000 + ts->tv_nsec);
}

static uint64_t
mlx5e_hw_clock(struct mlx5e_priv *priv)
{
	struct mlx5_init_seg *iseg;
	uint32_t hw_h, hw_h1, hw_l;

	iseg = priv->mdev->iseg;
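	/*
	 * Read the 64-bit hardware timer as two 32-bit halves; re-read
	 * the high word to detect a carry between the reads and retry
	 * until a consistent value is observed.
	 */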
	do {
		hw_h = ioread32be(&iseg->internal_timer_h);
		hw_l = ioread32be(&iseg->internal_timer_l);
		hw_h1 = ioread32be(&iseg->internal_timer_h);
	} while (hw_h1 != hw_h);
	return (((uint64_t)hw_h << 32) | hw_l);
}

/*
 * The calibration callout runs either in the context of the thread
 * which enables calibration, or from the callout subsystem. It takes
 * a snapshot of the system and adapter clocks, then advances the
 * pointer to the calibration point so that the RX path can read the
 * consistent data locklessly.
 */
static void
mlx5e_calibration_callout(void *arg)
{
	struct mlx5e_priv *priv;
	struct mlx5e_clbr_point *next, *curr;
	struct timespec ts;
	int clbr_curr_next;

	priv = arg;
	curr = &priv->clbr_points[priv->clbr_curr];
	clbr_curr_next = priv->clbr_curr + 1;
	if (clbr_curr_next >= nitems(priv->clbr_points))
		clbr_curr_next = 0;
	next = &priv->clbr_points[clbr_curr_next];

	next->base_prev = curr->base_curr;
	next->clbr_hw_prev = curr->clbr_hw_curr;

	next->clbr_hw_curr = mlx5e_hw_clock(priv);
	if (((next->clbr_hw_curr - curr->clbr_hw_prev) >> MLX5E_TSTMP_PREC) ==
	    0) {
		if_printf(priv->ifp, "HW failed tstmp frozen %#jx %#jx, "
		    "disabling\n", next->clbr_hw_curr, curr->clbr_hw_prev);
		priv->clbr_done = 0;
		return;
	}

	nanouptime(&ts);
	next->base_curr = mlx5e_timespec2usec(&ts);

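	/*
	 * Invalidate the previous calibration point (generation zero)
	 * before publishing the new one; the release fence and store
	 * order these updates for the lockless readers in the RX path.
	 */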
	curr->clbr_gen = 0;
	atomic_thread_fence_rel();
	priv->clbr_curr = clbr_curr_next;
	atomic_store_rel_int(&next->clbr_gen, ++(priv->clbr_gen));

	if (priv->clbr_done < mlx5e_calibration_duration)
		priv->clbr_done++;
	mlx5e_reset_calibration_callout(priv);
}

static const char *mlx5e_rq_stats_desc[] = {
	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};

static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;
	u32 nsegs, wqe_sz;

	err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	if (err != 0)
		goto done;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsize */
	    nsegs,			/* nsegments */
	    nsegs * MLX5E_MAX_RX_BYTES,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &rq->dma_tag)))
		goto done;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
	    &rq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
	if (err != 0)
		goto err_rq_wq_destroy;

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	err = -tcp_lro_init_args(&rq->lro, c->tag.m_snd_tag.ifp, TCP_LRO_ENTRIES, wq_sz);
	if (err)
		goto err_rq_wq_destroy;

	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
	for (i = 0; i != wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
#if (MLX5E_MAX_RX_SEGS == 1)
		uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
#else
		int j;
#endif

		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
		if (err != 0) {
			while (i--)
				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
			goto err_rq_mbuf_free;
		}

		/* set value for constant fields */
#if (MLX5E_MAX_RX_SEGS == 1)
		wqe->data[0].lkey = c->mkey_be;
		wqe->data[0].byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
#else
		for (j = 0; j < rq->nsegs; j++)
			wqe->data[j].lkey = c->mkey_be;
#endif
	}

	rq->ifp = c->tag.m_snd_tag.ifp;
	rq->channel = c;
	rq->ix = c->ix;

	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
	    rq->stats.arg);
	return (0);

err_rq_mbuf_free:
	free(rq->mbuf, M_MLX5EN);
	tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(rq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	int wq_sz;
	int i;

	/* destroy all sysctl nodes */
	sysctl_ctx_free(&rq->stats.ctx);

	/* free leftover LRO packets, if any */
	tcp_lro_free(&rq->lro);

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	for (i = 0; i != wq_sz; i++) {
		if (rq->mbuf[i].mbuf != NULL) {
			bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
			m_freem(rq->mbuf[i].mbuf);
		}
		bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
	}
	free(rq->mbuf, M_MLX5EN);
	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int
mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
	    sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	if (priv->counter_set_id >= 0)
		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return (err);
}

static int
mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, in, inlen);

	kvfree(in);

	return (err);
}

static void
mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_rq(mdev, rq->rqn);
}

static int
mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;
	int i;

	for (i = 0; i < 1000; i++) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return (0);

		msleep(4);
	}
	return (-ETIMEDOUT);
}

static int
mlx5e_open_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return (err);

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	c->rq.enabled = 1;

	return (0);

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return (err);
}

static void
mlx5e_close_rq(struct mlx5e_rq *rq)
{
	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	callout_stop(&rq->watchdog);
	mtx_unlock(&rq->mtx);

	callout_drain(&rq->watchdog);

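	/* move the RQ into error state; mlx5e_close_rq_wait() then polls until it is empty */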
	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}

static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = rq->channel->priv->mdev;

	/* wait till RQ is empty */
	while (!mlx5_wq_ll_is_empty(&rq->wq) &&
	       (mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
		msleep(4);
		rq->cq.mcq.comp(&rq->cq.mcq);
	}

	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}

void
mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int x;

	for (x = 0; x != wq_sz; x++)
		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
	free(sq->mbuf, M_MLX5EN);
}

int
mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int err;
	int x;

	sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);

	/* Create DMA descriptor MAPs */
	for (x = 0; x != wq_sz; x++) {
		err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
			free(sq->mbuf, M_MLX5EN);
			return (err);
		}
	}
	return (0);
}

static const char *mlx5e_sq_stats_desc[] = {
	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
};

void
mlx5e_update_sq_inline(struct mlx5e_sq *sq)
{
	sq->max_inline = sq->priv->params.tx_max_inline;
	sq->min_inline_mode = sq->priv->params.tx_min_inline_mode;

	/*
	 * Check if the trust state is DSCP, or if the inline mode is
	 * NONE, which indicates ConnectX-5 or newer hardware.
	 */
	if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP ||
	    sq->min_inline_mode == MLX5_INLINE_MODE_NONE) {
		if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert))
			sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN;
		else
			sq->min_insert_caps = MLX5E_INSERT_NON_VLAN;
	} else {
		sq->min_insert_caps = 0;
	}
}

static void
mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	int i;

	for (i = 0; i != c->num_tc; i++) {
		mtx_lock(&c->sq[i].lock);
		mlx5e_update_sq_inline(&c->sq[i]);
		mtx_unlock(&c->sq[i].lock);
	}
}

void
mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
{
	int i;

	/* check if channels are closed */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
}

static int
mlx5e_create_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sq->dma_tag)))
		goto done;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		goto err_free_dma_tag;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	sq->mkey_be = c->mkey_be;
	sq->ifp = priv->ifp;
	sq->priv = priv;
	sq->tc = tc;

	mlx5e_update_sq_inline(sq);

	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
	    sq->stats.arg);

	return (0);

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}

static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	/* destroy all sysctl nodes */
	sysctl_ctx_free(&sq->stats.ctx);

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
}

int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    int tis_num)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
	    sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return (err);
}

int
mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);

	kvfree(in);

	return (err);
}

void
mlx5e_disable_sq(struct mlx5e_sq *sq)
{

	mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
}

static int
mlx5e_open_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return (err);

	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	WRITE_ONCE(sq->running, 1);

	return (0);

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return (err);
}

static void
mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
{
	/* fill up remainder with NOPs */
	while (sq->cev_counter != 0) {
		while (!mlx5e_sq_has_room_for(sq, 1)) {
			if (can_sleep != 0) {
				mtx_unlock(&sq->lock);
				msleep(4);
				mtx_lock(&sq->lock);
			} else {
				goto done;
			}
		}
		/* send a single NOP */
		mlx5e_send_nop(sq, 1);
		atomic_thread_fence_rel();
	}
done:
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
}

void
mlx5e_sq_cev_timeout(void *arg)
{
	struct mlx5e_sq *sq = arg;

	mtx_assert(&sq->lock, MA_OWNED);

	/* check next state */
	switch (sq->cev_next_state) {
	case MLX5E_CEV_STATE_SEND_NOPS:
		/* fill TX ring with NOPs, if any */
		mlx5e_sq_send_nops_locked(sq, 0);

		/* check if completed */
		if (sq->cev_counter == 0) {
			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
			return;
		}
		break;
	default:
		/* send NOPs on next timeout */
		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
		break;
	}

	/* restart timer */
	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}

void
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
	int error;
	struct mlx5_core_dev *mdev = sq->priv->mdev;

	/*
	 * Check if already stopped.
	 *
	 * NOTE: Serialization of this function is managed by the
	 * caller ensuring the priv's state lock is locked or in case
	 * of rate limit support, a single thread manages drain and
	 * resume of SQs. The "running" variable can therefore safely
	 * be read without any locks.
	 */
	if (READ_ONCE(sq->running) == 0)
		return;

	/* don't put more packets into the SQ */
	WRITE_ONCE(sq->running, 0);

	/* serialize access to DMA rings */
	mtx_lock(&sq->lock);

	/* teardown event factor timer, if any */
	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	callout_stop(&sq->cev_callout);

	/* send dummy NOPs in order to flush the transmit ring */
	mlx5e_sq_send_nops_locked(sq, 1);
	mtx_unlock(&sq->lock);

	/* make sure it is safe to free the callout */
	callout_drain(&sq->cev_callout);

	/* wait till SQ is empty or link is down */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);

	/* error out remaining requests */
	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	if (error != 0) {
		if_printf(sq->ifp,
		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
	}

	/* wait till SQ is empty */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	       mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);
}

static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

	mlx5e_drain_sq(sq);
	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}

static int
mlx5e_create_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
	    &cq->wq_ctrl);
	if (err)
		return (err);

	mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = eq_ix;
	mcq->comp = comp;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

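	/* mark all CQEs as invalid and hardware-owned until written by the HCA */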
	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->priv = priv;

	return (0);
}

static void
mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int
mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
	    sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return (err);

	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));

	return (0);
}

static void
mlx5e_disable_cq(struct mlx5e_cq *cq)
{

	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
}

int
mlx5e_open_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	int err;

	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
	if (err)
		return (err);

	err = mlx5e_enable_cq(cq, param, eq_ix);
	if (err)
		goto err_destroy_cq;

	return (0);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return (err);
}

void
mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}

static int
mlx5e_open_tx_cqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		/* open completion queue */
		err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
		    &mlx5e_tx_cq_comp, c->ix);
		if (err)
			goto err_close_tx_cqs;
	}
	return (0);

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return (err);
}

static void
mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int
mlx5e_open_sqs(struct mlx5e_channel *c,
    struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return (0);

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq_wait(&c->sq[tc]);

	return (err);
}

static void
mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq_wait(&c->sq[tc]);
}

static void
mlx5e_chan_mtx_init(struct mlx5e_channel *c)
{
	int tc;

	mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);

	callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);

	for (tc = 0; tc < c->num_tc; tc++) {
		struct mlx5e_sq *sq = c->sq + tc;

		mtx_init(&sq->lock, "mlx5tx",
		    MTX_NETWORK_LOCK " TX", MTX_DEF);
		mtx_init(&sq->comp_lock, "mlx5comp",
		    MTX_NETWORK_LOCK " TX", MTX_DEF);

		callout_init_mtx(&sq->cev_callout, &sq->lock, 0);

		sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;

		/* ensure the TX completion event factor is not zero */
		if (sq->cev_factor == 0)
			sq->cev_factor = 1;
	}
}

static void
mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
{
	int tc;

	mtx_destroy(&c->rq.mtx);

	for (tc = 0; tc < c->num_tc; tc++) {
		mtx_destroy(&c->sq[tc].lock);
		mtx_destroy(&c->sq[tc].comp_lock);
	}
}

static int
mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
    struct mlx5e_channel_param *cparam,
    struct mlx5e_channel *c)
{
	int err;

	memset(c, 0, sizeof(*c));

	c->priv = priv;
	c->ix = ix;
	/* setup send tag */
	c->tag.m_snd_tag.ifp = priv->ifp;
	c->tag.type = IF_SND_TAG_TYPE_UNLIMITED;
	c->mkey_be = cpu_to_be32(priv->mr.key);
	c->num_tc = priv->num_tc;

	/* init mutexes */
	mlx5e_chan_mtx_init(c);

	/* open transmit completion queue */
	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_free;

	/* open receive completion queue */
	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
	    &mlx5e_rx_cq_comp, c->ix);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_rx_cq;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	/* poll receive queue initially */
	c->rq.cq.mcq.comp(&c->rq.cq.mcq);

	return (0);

err_close_sqs:
	mlx5e_close_sqs_wait(c);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_free:
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
	return (err);
}

static void
mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
}

static void
mlx5e_close_channel_wait(struct mlx5e_channel *c)
{
	mlx5e_close_rq_wait(&c->rq);
	mlx5e_close_sqs_wait(c);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
}

static int
mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
{
	u32 r, n;

	r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
	    MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
	if (r > MJUM16BYTES)
		return (-ENOMEM);

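	/* round up to the next standard mbuf cluster size */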
	if (r > MJUM9BYTES)
		r = MJUM16BYTES;
	else if (r > MJUMPAGESIZE)
		r = MJUM9BYTES;
	else if (r > MCLBYTES)
		r = MJUMPAGESIZE;
	else
		r = MCLBYTES;

	/*
	 * n + 1 must be a power of two, because stride size must be.
	 * Stride size is 16 * (n + 1), as the first segment is
	 * control.
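	 * For example, four data segments are rounded up to n = 7, so
	 * that the stride, 16 * 8 = 128 bytes, is a power of two.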
	 */
	for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
		;

	*wqe_sz = r;
	*nsegs = n;
	return (0);
}

static void
mlx5e_build_rq_param(struct mlx5e_priv *priv,
    struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 wqe_sz, nsegs;

	mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
	    nsegs * sizeof(struct mlx5_wqe_data_seg)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;
	param->wq.linear = 1;
}

static void
mlx5e_build_sq_param(struct mlx5e_priv *priv,
    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;
	param->wq.linear = 1;
}

static void
mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void
mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	/*
	 * TODO: The sysctl controlling this is a boolean for now, which
	 * means only CSUM is supported; once HASH is implemented this
	 * will need to be revisited.
	 */
1929 	if (priv->params.cqe_zipping_en) {
1930 		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
1931 		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
1932 	}
1933 
1934 	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
1935 	MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
1936 	MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
1937 
1938 	switch (priv->params.rx_cq_moderation_mode) {
1939 	case 0:
1940 		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1941 		break;
1942 	default:
1943 		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1944 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1945 		else
1946 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1947 		break;
1948 	}
1949 
1950 	mlx5e_build_common_cq_param(priv, param);
1951 }
1952 
1953 static void
1954 mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1955     struct mlx5e_cq_param *param)
1956 {
1957 	void *cqc = param->cqc;
1958 
1959 	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
1960 	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
1961 	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
1962 
1963 	switch (priv->params.tx_cq_moderation_mode) {
1964 	case 0:
1965 		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1966 		break;
1967 	default:
1968 		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1969 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
1970 		else
1971 			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
1972 		break;
1973 	}
1974 
1975 	mlx5e_build_common_cq_param(priv, param);
1976 }
1977 
1978 static void
1979 mlx5e_build_channel_param(struct mlx5e_priv *priv,
1980     struct mlx5e_channel_param *cparam)
1981 {
1982 	memset(cparam, 0, sizeof(*cparam));
1983 
1984 	mlx5e_build_rq_param(priv, &cparam->rq);
1985 	mlx5e_build_sq_param(priv, &cparam->sq);
1986 	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
1987 	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
1988 }
1989 
1990 static int
1991 mlx5e_open_channels(struct mlx5e_priv *priv)
1992 {
1993 	struct mlx5e_channel_param cparam;
1994 	int err;
1995 	int i;
1996 	int j;
1997 
1998 	mlx5e_build_channel_param(priv, &cparam);
1999 	for (i = 0; i < priv->params.num_channels; i++) {
2000 		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
2001 		if (err)
2002 			goto err_close_channels;
2003 	}
2004 
2005 	for (j = 0; j < priv->params.num_channels; j++) {
2006 		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq);
2007 		if (err)
2008 			goto err_close_channels;
2009 	}
2010 	return (0);
2011 
2012 err_close_channels:
2013 	while (i--) {
2014 		mlx5e_close_channel(&priv->channel[i]);
2015 		mlx5e_close_channel_wait(&priv->channel[i]);
2016 	}
2017 	return (err);
2018 }
2019 
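/*
 * Channels are closed in two passes: first every channel is told to
 * stop, then completion of each channel is awaited, allowing the
 * individual shutdowns to overlap.
 */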
2020 static void
2021 mlx5e_close_channels(struct mlx5e_priv *priv)
2022 {
2023 	int i;
2024 
2025 	for (i = 0; i < priv->params.num_channels; i++)
2026 		mlx5e_close_channel(&priv->channel[i]);
2027 	for (i = 0; i < priv->params.num_channels; i++)
2028 		mlx5e_close_channel_wait(&priv->channel[i]);
2029 }
2030 
2031 static int
2032 mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
2033 {
2034 
2035 	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
2036 		uint8_t cq_mode;
2037 
2038 		switch (priv->params.tx_cq_moderation_mode) {
2039 		case 0:
2040 			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2041 			break;
2042 		default:
2043 			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
2044 			break;
2045 		}
2046 
2047 		return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
2048 		    priv->params.tx_cq_moderation_usec,
2049 		    priv->params.tx_cq_moderation_pkts,
2050 		    cq_mode));
2051 	}
2052 
2053 	return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
2054 	    priv->params.tx_cq_moderation_usec,
2055 	    priv->params.tx_cq_moderation_pkts));
2056 }
2057 
2058 static int
2059 mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
2060 {
2061 
2062 	if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
2063 		uint8_t cq_mode;
2064 		int retval;
2065 
2066 		switch (priv->params.rx_cq_moderation_mode) {
2067 		case 0:
2068 			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
2069 			break;
2070 		default:
2071 			cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
2072 			break;
2073 		}
2074 
2075 		retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
2076 		    priv->params.rx_cq_moderation_usec,
2077 		    priv->params.rx_cq_moderation_pkts,
2078 		    cq_mode);
2079 
2080 		return (retval);
2081 	}
2082 
2083 	return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
2084 	    priv->params.rx_cq_moderation_usec,
2085 	    priv->params.rx_cq_moderation_pkts));
2086 }
2087 
2088 static int
2089 mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
2090 {
2091 	int err;
2092 	int i;
2093 
2094 	err = mlx5e_refresh_rq_params(priv, &c->rq);
2095 	if (err)
2096 		goto done;
2097 
2098 	for (i = 0; i != c->num_tc; i++) {
2099 		err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
2100 		if (err)
2101 			goto done;
2102 	}
2103 done:
2104 	return (err);
2105 }
2106 
2107 int
2108 mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
2109 {
2110 	int i;
2111 
2112 	/* check if channels are closed */
2113 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2114 		return (EINVAL);
2115 
2116 	for (i = 0; i < priv->params.num_channels; i++) {
2117 		int err;
2118 
2119 		err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]);
2120 		if (err)
2121 			return (err);
2122 	}
2123 	return (0);
2124 }
2125 
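/*
 * Create a TIS (transport interface send) object for the given
 * traffic class and bind it to our transport domain.
 */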
2126 static int
2127 mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
2128 {
2129 	struct mlx5_core_dev *mdev = priv->mdev;
2130 	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
2131 	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2132 
2133 	memset(in, 0, sizeof(in));
2134 
2135 	MLX5_SET(tisc, tisc, prio, tc);
2136 	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
2137 
2138 	return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
2139 }
2140 
2141 static void
2142 mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
2143 {
2144 	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
2145 }
2146 
2147 static int
2148 mlx5e_open_tises(struct mlx5e_priv *priv)
2149 {
2150 	int num_tc = priv->num_tc;
2151 	int err;
2152 	int tc;
2153 
2154 	for (tc = 0; tc < num_tc; tc++) {
2155 		err = mlx5e_open_tis(priv, tc);
2156 		if (err)
2157 			goto err_close_tises;
2158 	}
2159 
2160 	return (0);
2161 
2162 err_close_tises:
2163 	for (tc--; tc >= 0; tc--)
2164 		mlx5e_close_tis(priv, tc);
2165 
2166 	return (err);
2167 }
2168 
2169 static void
2170 mlx5e_close_tises(struct mlx5e_priv *priv)
2171 {
2172 	int num_tc = priv->num_tc;
2173 	int tc;
2174 
2175 	for (tc = 0; tc < num_tc; tc++)
2176 		mlx5e_close_tis(priv, tc);
2177 }
2178 
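/*
 * Create the RQ indirection table used for RSS. Each of the
 * 2**rx_hash_log_tbl_sz entries points to the receive queue of one
 * of the available channels.
 */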
2179 static int
2180 mlx5e_open_rqt(struct mlx5e_priv *priv)
2181 {
2182 	struct mlx5_core_dev *mdev = priv->mdev;
2183 	u32 *in;
2184 	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
2185 	void *rqtc;
2186 	int inlen;
2187 	int err;
2188 	int sz;
2189 	int i;
2190 
2191 	sz = 1 << priv->params.rx_hash_log_tbl_sz;
2192 
2193 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2194 	in = mlx5_vzalloc(inlen);
2195 	if (in == NULL)
2196 		return (-ENOMEM);
2197 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2198 
2199 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2200 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2201 
2202 	for (i = 0; i < sz; i++) {
2203 		int ix = i;
2204 #ifdef RSS
2205 		ix = rss_get_indirection_to_bucket(ix);
2206 #endif
2207 		/* ensure we don't overflow */
2208 		ix %= priv->params.num_channels;
2209 
2210 		/* apply receive side scaling stride, if any */
2211 		ix -= ix % (int)priv->params.channels_rsss;
2212 
2213 		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn);
2214 	}
2215 
2216 	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
2217 
2218 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
2219 	if (!err)
2220 		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
2221 
2222 	kvfree(in);
2223 
2224 	return (err);
2225 }
2226 
2227 static void
2228 mlx5e_close_rqt(struct mlx5e_priv *priv)
2229 {
2230 	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
2231 	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
2232 
2233 	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
2234 	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
2235 
2236 	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
2237 }
2238 
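/*
 * Fill out the TIR (transport interface receive) context, setting up
 * the optional LRO parameters and the RSS hash configuration for the
 * given traffic type, "tt".
 */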
2239 static void
2240 mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
2241 {
2242 	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2243 	__be32 *hkey;
2244 
2245 	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
2246 
2247 #define	ROUGH_MAX_L2_L3_HDR_SZ 256
2248 
2249 #define	MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2250 			  MLX5_HASH_FIELD_SEL_DST_IP)
2251 
2252 #define	MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2253 			  MLX5_HASH_FIELD_SEL_DST_IP   |\
2254 			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2255 			  MLX5_HASH_FIELD_SEL_L4_DPORT)
2256 
2257 #define	MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
2258 				 MLX5_HASH_FIELD_SEL_DST_IP   |\
2259 				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2260 
2261 	if (priv->params.hw_lro_en) {
2262 		MLX5_SET(tirc, tirc, lro_enable_mask,
2263 		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2264 		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2265 		MLX5_SET(tirc, tirc, lro_max_msg_sz,
2266 		    (priv->params.lro_wqe_sz -
2267 		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2268 		/* TODO: add the option to choose timer value dynamically */
2269 		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
2270 		    MLX5_CAP_ETH(priv->mdev,
2271 		    lro_timer_supported_periods[2]));
2272 	}
2273 
2274 	/* setup parameters for hashing TIR type, if any */
2275 	switch (tt) {
2276 	case MLX5E_TT_ANY:
2277 		MLX5_SET(tirc, tirc, disp_type,
2278 		    MLX5_TIRC_DISP_TYPE_DIRECT);
2279 		MLX5_SET(tirc, tirc, inline_rqn,
2280 		    priv->channel[0].rq.rqn);
2281 		break;
2282 	default:
2283 		MLX5_SET(tirc, tirc, disp_type,
2284 		    MLX5_TIRC_DISP_TYPE_INDIRECT);
2285 		MLX5_SET(tirc, tirc, indirect_table,
2286 		    priv->rqtn);
2287 		MLX5_SET(tirc, tirc, rx_hash_fn,
2288 		    MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
2289 		hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
2290 #ifdef RSS
2291 		/*
2292 		 * The FreeBSD RSS implementation does currently not
2293 		 * support symmetric Toeplitz hashes:
2294 		 */
2295 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
2296 		rss_getkey((uint8_t *)hkey);
2297 #else
2298 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
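		/* default key for symmetric Toeplitz hashing */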
2299 		hkey[0] = cpu_to_be32(0xD181C62C);
2300 		hkey[1] = cpu_to_be32(0xF7F4DB5B);
2301 		hkey[2] = cpu_to_be32(0x1983A2FC);
2302 		hkey[3] = cpu_to_be32(0x943E1ADB);
2303 		hkey[4] = cpu_to_be32(0xD9389E6B);
2304 		hkey[5] = cpu_to_be32(0xD1039C2C);
2305 		hkey[6] = cpu_to_be32(0xA74499AD);
2306 		hkey[7] = cpu_to_be32(0x593D56D9);
2307 		hkey[8] = cpu_to_be32(0xF3253C06);
2308 		hkey[9] = cpu_to_be32(0x2ADC1FFC);
2309 #endif
2310 		break;
2311 	}
2312 
2313 	switch (tt) {
2314 	case MLX5E_TT_IPV4_TCP:
2315 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2316 		    MLX5_L3_PROT_TYPE_IPV4);
2317 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2318 		    MLX5_L4_PROT_TYPE_TCP);
2319 #ifdef RSS
2320 		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
2321 			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2322 			    MLX5_HASH_IP);
2323 		} else
2324 #endif
2325 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2326 		    MLX5_HASH_ALL);
2327 		break;
2328 
2329 	case MLX5E_TT_IPV6_TCP:
2330 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2331 		    MLX5_L3_PROT_TYPE_IPV6);
2332 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2333 		    MLX5_L4_PROT_TYPE_TCP);
2334 #ifdef RSS
2335 		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
2336 			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2337 			    MLX5_HASH_IP);
2338 		} else
2339 #endif
2340 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2341 		    MLX5_HASH_ALL);
2342 		break;
2343 
2344 	case MLX5E_TT_IPV4_UDP:
2345 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2346 		    MLX5_L3_PROT_TYPE_IPV4);
2347 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2348 		    MLX5_L4_PROT_TYPE_UDP);
2349 #ifdef RSS
2350 		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
2351 			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2352 			    MLX5_HASH_IP);
2353 		} else
2354 #endif
2355 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2356 		    MLX5_HASH_ALL);
2357 		break;
2358 
2359 	case MLX5E_TT_IPV6_UDP:
2360 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2361 		    MLX5_L3_PROT_TYPE_IPV6);
2362 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2363 		    MLX5_L4_PROT_TYPE_UDP);
2364 #ifdef RSS
2365 		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
2366 			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2367 			    MLX5_HASH_IP);
2368 		} else
2369 #endif
2370 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2371 		    MLX5_HASH_ALL);
2372 		break;
2373 
2374 	case MLX5E_TT_IPV4_IPSEC_AH:
2375 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2376 		    MLX5_L3_PROT_TYPE_IPV4);
2377 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2378 		    MLX5_HASH_IP_IPSEC_SPI);
2379 		break;
2380 
2381 	case MLX5E_TT_IPV6_IPSEC_AH:
2382 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2383 		    MLX5_L3_PROT_TYPE_IPV6);
2384 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2385 		    MLX5_HASH_IP_IPSEC_SPI);
2386 		break;
2387 
2388 	case MLX5E_TT_IPV4_IPSEC_ESP:
2389 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2390 		    MLX5_L3_PROT_TYPE_IPV4);
2391 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2392 		    MLX5_HASH_IP_IPSEC_SPI);
2393 		break;
2394 
2395 	case MLX5E_TT_IPV6_IPSEC_ESP:
2396 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2397 		    MLX5_L3_PROT_TYPE_IPV6);
2398 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2399 		    MLX5_HASH_IP_IPSEC_SPI);
2400 		break;
2401 
2402 	case MLX5E_TT_IPV4:
2403 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2404 		    MLX5_L3_PROT_TYPE_IPV4);
2405 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2406 		    MLX5_HASH_IP);
2407 		break;
2408 
2409 	case MLX5E_TT_IPV6:
2410 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2411 		    MLX5_L3_PROT_TYPE_IPV6);
2412 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2413 		    MLX5_HASH_IP);
2414 		break;
2415 
2416 	default:
2417 		break;
2418 	}
2419 }
2420 
2421 static int
2422 mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2423 {
2424 	struct mlx5_core_dev *mdev = priv->mdev;
2425 	u32 *in;
2426 	void *tirc;
2427 	int inlen;
2428 	int err;
2429 
2430 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2431 	in = mlx5_vzalloc(inlen);
2432 	if (in == NULL)
2433 		return (-ENOMEM);
2434 	tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2435 
2436 	mlx5e_build_tir_ctx(priv, tirc, tt);
2437 
2438 	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2439 
2440 	kvfree(in);
2441 
2442 	return (err);
2443 }
2444 
2445 static void
2446 mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
2447 {
2448 	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
2449 }
2450 
2451 static int
2452 mlx5e_open_tirs(struct mlx5e_priv *priv)
2453 {
2454 	int err;
2455 	int i;
2456 
2457 	for (i = 0; i < MLX5E_NUM_TT; i++) {
2458 		err = mlx5e_open_tir(priv, i);
2459 		if (err)
2460 			goto err_close_tirs;
2461 	}
2462 
2463 	return (0);
2464 
2465 err_close_tirs:
2466 	for (i--; i >= 0; i--)
2467 		mlx5e_close_tir(priv, i);
2468 
2469 	return (err);
2470 }
2471 
2472 static void
2473 mlx5e_close_tirs(struct mlx5e_priv *priv)
2474 {
2475 	int i;
2476 
2477 	for (i = 0; i < MLX5E_NUM_TT; i++)
2478 		mlx5e_close_tir(priv, i);
2479 }
2480 
2481 /*
2482  * SW MTU does not include headers,
2483  * HW MTU includes all headers and checksums.
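 * For example, a 1500 byte SW MTU typically corresponds to a HW MTU
 * which additionally covers the Ethernet header, any VLAN tag and
 * the frame checksum.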
2484  */
2485 static int
2486 mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2487 {
2488 	struct mlx5e_priv *priv = ifp->if_softc;
2489 	struct mlx5_core_dev *mdev = priv->mdev;
2490 	int hw_mtu;
2491 	int err;
2492 
2493 	hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
2494 
2495 	err = mlx5_set_port_mtu(mdev, hw_mtu);
2496 	if (err) {
2497 		if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
2498 		    __func__, sw_mtu, err);
2499 		return (err);
2500 	}
2501 
2502 	/* Update vport context MTU */
2503 	err = mlx5_set_vport_mtu(mdev, hw_mtu);
2504 	if (err) {
2505 		if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n",
2506 		    __func__, err);
2507 	}
2508 
2509 	ifp->if_mtu = sw_mtu;
2510 
2511 	err = mlx5_query_vport_mtu(mdev, &hw_mtu);
2512 	if (err || !hw_mtu) {
2513 		/* fallback to port oper mtu */
2514 		err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2515 	}
2516 	if (err) {
2517 		if_printf(ifp, "Querying port MTU after setting the new "
2518 		    "MTU value failed\n");
2519 		return (err);
2520 	} else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2521 		err = -E2BIG;
2522 		if_printf(ifp, "Port MTU %d is smaller than "
2523 		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2524 	} else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
2525 		err = -EINVAL;
2526 		if_printf(ifp, "Port MTU %d is bigger than "
2527 		    "ifp mtu %d\n", hw_mtu, sw_mtu);
2528 	}
2529 	priv->params_ethtool.hw_mtu = hw_mtu;
2530 
2531 	return (err);
2532 }
2533 
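/*
 * Bring the interface up. Resources are created bottom up: TISes, a
 * queue counter, the channels, the RQ indirection table, TIRs, the
 * flow table and finally the VLAN rules. Teardown runs in reverse
 * order.
 */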
2534 int
2535 mlx5e_open_locked(struct ifnet *ifp)
2536 {
2537 	struct mlx5e_priv *priv = ifp->if_softc;
2538 	int err;
2539 	u16 set_id;
2540 
2541 	/* check if already opened */
2542 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2543 		return (0);
2544 
2545 #ifdef RSS
2546 	if (rss_getnumbuckets() > priv->params.num_channels) {
2547 		if_printf(ifp, "NOTE: There are more RSS buckets(%u) than "
2548 		    "channels(%u) available\n", rss_getnumbuckets(),
2549 		    priv->params.num_channels);
2550 	}
2551 #endif
2552 	err = mlx5e_open_tises(priv);
2553 	if (err) {
2554 		if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
2555 		    __func__, err);
2556 		return (err);
2557 	}
2558 	err = mlx5_vport_alloc_q_counter(priv->mdev,
2559 	    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
2560 	if (err) {
2561 		if_printf(priv->ifp,
2562 		    "%s: mlx5_vport_alloc_q_counter failed: %d\n",
2563 		    __func__, err);
2564 		goto err_close_tises;
2565 	}
2566 	/* store counter set ID */
2567 	priv->counter_set_id = set_id;
2568 
2569 	err = mlx5e_open_channels(priv);
2570 	if (err) {
2571 		if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
2572 		    __func__, err);
2573 		goto err_dalloc_q_counter;
2574 	}
2575 	err = mlx5e_open_rqt(priv);
2576 	if (err) {
2577 		if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
2578 		    __func__, err);
2579 		goto err_close_channels;
2580 	}
2581 	err = mlx5e_open_tirs(priv);
2582 	if (err) {
2583 		if_printf(ifp, "%s: mlx5e_open_tirs failed, %d\n",
2584 		    __func__, err);
2585 		goto err_close_rqt;
2586 	}
2587 	err = mlx5e_open_flow_table(priv);
2588 	if (err) {
2589 		if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
2590 		    __func__, err);
2591 		goto err_close_tirs;
2592 	}
2593 	err = mlx5e_add_all_vlan_rules(priv);
2594 	if (err) {
2595 		if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
2596 		    __func__, err);
2597 		goto err_close_flow_table;
2598 	}
2599 	set_bit(MLX5E_STATE_OPENED, &priv->state);
2600 
2601 	mlx5e_update_carrier(priv);
2602 	mlx5e_set_rx_mode_core(priv);
2603 
2604 	return (0);
2605 
2606 err_close_flow_table:
2607 	mlx5e_close_flow_table(priv);
2608 
2609 err_close_tirs:
2610 	mlx5e_close_tirs(priv);
2611 
2612 err_close_rqt:
2613 	mlx5e_close_rqt(priv);
2614 
2615 err_close_channels:
2616 	mlx5e_close_channels(priv);
2617 
2618 err_dalloc_q_counter:
2619 	mlx5_vport_dealloc_q_counter(priv->mdev,
2620 	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2621 
2622 err_close_tises:
2623 	mlx5e_close_tises(priv);
2624 
2625 	return (err);
2626 }
2627 
2628 static void
2629 mlx5e_open(void *arg)
2630 {
2631 	struct mlx5e_priv *priv = arg;
2632 
2633 	PRIV_LOCK(priv);
2634 	if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
2635 		if_printf(priv->ifp,
2636 		    "%s: Setting port status to up failed\n",
2637 		    __func__);
2638 
2639 	mlx5e_open_locked(priv->ifp);
2640 	priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2641 	PRIV_UNLOCK(priv);
2642 }
2643 
2644 int
2645 mlx5e_close_locked(struct ifnet *ifp)
2646 {
2647 	struct mlx5e_priv *priv = ifp->if_softc;
2648 
2649 	/* check if already closed */
2650 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2651 		return (0);
2652 
2653 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
2654 
2655 	mlx5e_set_rx_mode_core(priv);
2656 	mlx5e_del_all_vlan_rules(priv);
2657 	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
2658 	mlx5e_close_flow_table(priv);
2659 	mlx5e_close_tirs(priv);
2660 	mlx5e_close_rqt(priv);
2661 	mlx5e_close_channels(priv);
2662 	mlx5_vport_dealloc_q_counter(priv->mdev,
2663 	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
2664 	mlx5e_close_tises(priv);
2665 
2666 	return (0);
2667 }
2668 
2669 #if (__FreeBSD_version >= 1100000)
2670 static uint64_t
2671 mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
2672 {
2673 	struct mlx5e_priv *priv = ifp->if_softc;
2674 	u64 retval;
2675 
2676 	/* PRIV_LOCK(priv); XXX not allowed */
2677 	switch (cnt) {
2678 	case IFCOUNTER_IPACKETS:
2679 		retval = priv->stats.vport.rx_packets;
2680 		break;
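	/* sum of software and physical port receive error counters */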
2681 	case IFCOUNTER_IERRORS:
2682 		retval = priv->stats.vport.rx_error_packets +
2683 		    priv->stats.pport.alignment_err +
2684 		    priv->stats.pport.check_seq_err +
2685 		    priv->stats.pport.crc_align_errors +
2686 		    priv->stats.pport.in_range_len_errors +
2687 		    priv->stats.pport.jabbers +
2688 		    priv->stats.pport.out_of_range_len +
2689 		    priv->stats.pport.oversize_pkts +
2690 		    priv->stats.pport.symbol_err +
2691 		    priv->stats.pport.too_long_errors +
2692 		    priv->stats.pport.undersize_pkts +
2693 		    priv->stats.pport.unsupported_op_rx;
2694 		break;
2695 	case IFCOUNTER_IQDROPS:
2696 		retval = priv->stats.vport.rx_out_of_buffer +
2697 		    priv->stats.pport.drop_events;
2698 		break;
2699 	case IFCOUNTER_OPACKETS:
2700 		retval = priv->stats.vport.tx_packets;
2701 		break;
2702 	case IFCOUNTER_OERRORS:
2703 		retval = priv->stats.vport.tx_error_packets;
2704 		break;
2705 	case IFCOUNTER_IBYTES:
2706 		retval = priv->stats.vport.rx_bytes;
2707 		break;
2708 	case IFCOUNTER_OBYTES:
2709 		retval = priv->stats.vport.tx_bytes;
2710 		break;
2711 	case IFCOUNTER_IMCASTS:
2712 		retval = priv->stats.vport.rx_multicast_packets;
2713 		break;
2714 	case IFCOUNTER_OMCASTS:
2715 		retval = priv->stats.vport.tx_multicast_packets;
2716 		break;
2717 	case IFCOUNTER_OQDROPS:
2718 		retval = priv->stats.vport.tx_queue_dropped;
2719 		break;
2720 	case IFCOUNTER_COLLISIONS:
2721 		retval = priv->stats.pport.collisions;
2722 		break;
2723 	default:
2724 		retval = if_get_counter_default(ifp, cnt);
2725 		break;
2726 	}
2727 	/* PRIV_UNLOCK(priv); XXX not allowed */
2728 	return (retval);
2729 }
2730 #endif
2731 
2732 static void
2733 mlx5e_set_rx_mode(struct ifnet *ifp)
2734 {
2735 	struct mlx5e_priv *priv = ifp->if_softc;
2736 
2737 	queue_work(priv->wq, &priv->set_rx_mode_work);
2738 }
2739 
2740 static int
2741 mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2742 {
2743 	struct mlx5e_priv *priv;
2744 	struct ifreq *ifr;
2745 	struct ifi2creq i2c;
2746 	int error = 0;
2747 	int mask = 0;
2748 	int size_read = 0;
2749 	int module_status;
2750 	int module_num;
2751 	int max_mtu;
2752 	uint8_t read_addr;
2753 
2754 	priv = ifp->if_softc;
2755 
2756 	/* check if detaching */
2757 	if (priv == NULL || priv->gone != 0)
2758 		return (ENXIO);
2759 
2760 	switch (command) {
2761 	case SIOCSIFMTU:
2762 		ifr = (struct ifreq *)data;
2763 
2764 		PRIV_LOCK(priv);
2765 		mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
2766 
2767 		if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
2768 		    ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
2769 			int was_opened;
2770 
2771 			was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2772 			if (was_opened)
2773 				mlx5e_close_locked(ifp);
2774 
2775 			/* set new MTU */
2776 			mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
2777 
2778 			if (was_opened)
2779 				mlx5e_open_locked(ifp);
2780 		} else {
2781 			error = EINVAL;
2782 			if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
2783 			    MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
2784 		}
2785 		PRIV_UNLOCK(priv);
2786 		break;
2787 	case SIOCSIFFLAGS:
2788 		if ((ifp->if_flags & IFF_UP) &&
2789 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2790 			mlx5e_set_rx_mode(ifp);
2791 			break;
2792 		}
2793 		PRIV_LOCK(priv);
2794 		if (ifp->if_flags & IFF_UP) {
2795 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2796 				if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2797 					mlx5e_open_locked(ifp);
2798 				ifp->if_drv_flags |= IFF_DRV_RUNNING;
2799 				mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
2800 			}
2801 		} else {
2802 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2803 				mlx5_set_port_status(priv->mdev,
2804 				    MLX5_PORT_DOWN);
2805 				if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2806 					mlx5e_close_locked(ifp);
2807 				mlx5e_update_carrier(priv);
2808 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2809 			}
2810 		}
2811 		PRIV_UNLOCK(priv);
2812 		break;
2813 	case SIOCADDMULTI:
2814 	case SIOCDELMULTI:
2815 		mlx5e_set_rx_mode(ifp);
2816 		break;
2817 	case SIOCSIFMEDIA:
2818 	case SIOCGIFMEDIA:
2819 	case SIOCGIFXMEDIA:
2820 		ifr = (struct ifreq *)data;
2821 		error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
2822 		break;
2823 	case SIOCSIFCAP:
2824 		ifr = (struct ifreq *)data;
2825 		PRIV_LOCK(priv);
2826 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2827 
2828 		if (mask & IFCAP_TXCSUM) {
2829 			ifp->if_capenable ^= IFCAP_TXCSUM;
2830 			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2831 
2832 			if (IFCAP_TSO4 & ifp->if_capenable &&
2833 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2834 				ifp->if_capenable &= ~IFCAP_TSO4;
2835 				ifp->if_hwassist &= ~CSUM_IP_TSO;
2836 				if_printf(ifp,
2837 				    "tso4 disabled due to -txcsum.\n");
2838 			}
2839 		}
2840 		if (mask & IFCAP_TXCSUM_IPV6) {
2841 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2842 			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2843 
2844 			if (IFCAP_TSO6 & ifp->if_capenable &&
2845 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2846 				ifp->if_capenable &= ~IFCAP_TSO6;
2847 				ifp->if_hwassist &= ~CSUM_IP6_TSO;
2848 				if_printf(ifp,
2849 				    "tso6 disabled due to -txcsum6.\n");
2850 			}
2851 		}
2852 		if (mask & IFCAP_RXCSUM)
2853 			ifp->if_capenable ^= IFCAP_RXCSUM;
2854 		if (mask & IFCAP_RXCSUM_IPV6)
2855 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2856 		if (mask & IFCAP_TSO4) {
2857 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2858 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
2859 				if_printf(ifp, "enable txcsum first.\n");
2860 				error = EAGAIN;
2861 				goto out;
2862 			}
2863 			ifp->if_capenable ^= IFCAP_TSO4;
2864 			ifp->if_hwassist ^= CSUM_IP_TSO;
2865 		}
2866 		if (mask & IFCAP_TSO6) {
2867 			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2868 			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2869 				if_printf(ifp, "enable txcsum6 first.\n");
2870 				error = EAGAIN;
2871 				goto out;
2872 			}
2873 			ifp->if_capenable ^= IFCAP_TSO6;
2874 			ifp->if_hwassist ^= CSUM_IP6_TSO;
2875 		}
2876 		if (mask & IFCAP_VLAN_HWFILTER) {
2877 			if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2878 				mlx5e_disable_vlan_filter(priv);
2879 			else
2880 				mlx5e_enable_vlan_filter(priv);
2881 
2882 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2883 		}
2884 		if (mask & IFCAP_VLAN_HWTAGGING)
2885 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2886 		if (mask & IFCAP_WOL_MAGIC)
2887 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2888 
2889 		VLAN_CAPABILITIES(ifp);
2890 		/* turning off LRO also turns off HW LRO, if it is enabled */
2891 		if (mask & IFCAP_LRO) {
2892 			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2893 			bool need_restart = false;
2894 
2895 			ifp->if_capenable ^= IFCAP_LRO;
2896 
2897 			/* figure out if updating HW LRO is needed */
2898 			if (!(ifp->if_capenable & IFCAP_LRO)) {
2899 				if (priv->params.hw_lro_en) {
2900 					priv->params.hw_lro_en = false;
2901 					need_restart = true;
2902 				}
2903 			} else {
2904 				if (priv->params.hw_lro_en == false &&
2905 				    priv->params_ethtool.hw_lro != 0) {
2906 					priv->params.hw_lro_en = true;
2907 					need_restart = true;
2908 				}
2909 			}
2910 			if (was_opened && need_restart) {
2911 				mlx5e_close_locked(ifp);
2912 				mlx5e_open_locked(ifp);
2913 			}
2914 		}
2915 		if (mask & IFCAP_HWRXTSTMP) {
2916 			ifp->if_capenable ^= IFCAP_HWRXTSTMP;
2917 			if (ifp->if_capenable & IFCAP_HWRXTSTMP) {
2918 				if (priv->clbr_done == 0)
2919 					mlx5e_reset_calibration_callout(priv);
2920 			} else {
2921 				callout_drain(&priv->tstmp_clbr);
2922 				priv->clbr_done = 0;
2923 			}
2924 		}
2925 out:
2926 		PRIV_UNLOCK(priv);
2927 		break;
2928 
2929 	case SIOCGI2C:
2930 		ifr = (struct ifreq *)data;
2931 
2932 		/*
2933 		 * Copy from the user-space address ifr_data to the
2934 		 * kernel-space address i2c
2935 		 */
2936 		error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2937 		if (error)
2938 			break;
2939 
2940 		if (i2c.len > sizeof(i2c.data)) {
2941 			error = EINVAL;
2942 			break;
2943 		}
2944 
2945 		PRIV_LOCK(priv);
2946 		/* Get module_num which is required for the query_eeprom */
2947 		error = mlx5_query_module_num(priv->mdev, &module_num);
2948 		if (error) {
2949 			if_printf(ifp, "Query module num failed, eeprom "
2950 			    "reading is not supported\n");
2951 			error = EINVAL;
2952 			goto err_i2c;
2953 		}
2954 		/* Check if module is present before doing an access */
2955 		module_status = mlx5_query_module_status(priv->mdev, module_num);
2956 		if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED &&
2957 		    module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) {
2958 			error = EINVAL;
2959 			goto err_i2c;
2960 		}
2961 		/*
2962 		 * Currently 0xA0 and 0xA2 are the only addresses permitted.
2963 		 * The internal conversion is as follows:
2964 		 */
2965 		if (i2c.dev_addr == 0xA0)
2966 			read_addr = MLX5E_I2C_ADDR_LOW;
2967 		else if (i2c.dev_addr == 0xA2)
2968 			read_addr = MLX5E_I2C_ADDR_HIGH;
2969 		else {
2970 			if_printf(ifp, "Query eeprom failed, "
2971 			    "Invalid Address: %X\n", i2c.dev_addr);
2972 			error = EINVAL;
2973 			goto err_i2c;
2974 		}
2975 		error = mlx5_query_eeprom(priv->mdev,
2976 		    read_addr, MLX5E_EEPROM_LOW_PAGE,
2977 		    (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
2978 		    (uint32_t *)i2c.data, &size_read);
2979 		if (error) {
2980 			if_printf(ifp, "Query eeprom failed, eeprom "
2981 			    "reading is not supported\n");
2982 			error = EINVAL;
2983 			goto err_i2c;
2984 		}
2985 
2986 		if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
2987 			error = mlx5_query_eeprom(priv->mdev,
2988 			    read_addr, MLX5E_EEPROM_LOW_PAGE,
2989 			    (uint32_t)(i2c.offset + size_read),
2990 			    (uint32_t)(i2c.len - size_read), module_num,
2991 			    (uint32_t *)(i2c.data + size_read), &size_read);
2992 		}
2993 		if (error) {
2994 			if_printf(ifp, "Query eeprom failed, eeprom "
2995 			    "reading is not supported\n");
2996 			error = EINVAL;
2997 			goto err_i2c;
2998 		}
2999 
3000 		error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
3001 err_i2c:
3002 		PRIV_UNLOCK(priv);
3003 		break;
3004 
3005 	default:
3006 		error = ether_ioctl(ifp, command, data);
3007 		break;
3008 	}
3009 	return (error);
3010 }
3011 
3012 static int
3013 mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3014 {
3015 	/*
3016 	 * TODO: uncomment once FW really sets all these bits:
3017 	 * if (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
3018 	 *     !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
3019 	 *     !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD))
3020 	 *	return (-ENOTSUPP);
3021 	 */
3022 
3023 	/* TODO: add more must-have features */
3024 
3025 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3026 		return (-ENODEV);
3027 
3028 	return (0);
3029 }
3030 
3031 static u16
3032 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
3033 {
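	/*
	 * The blueflame register is split into two halves which are
	 * used alternately, so only half of its size is available for
	 * a single inline send.
	 */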
3034 	uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U;
3035 
3036 	bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2;
3037 
3038 	/* verify against driver hardware limit */
3039 	if (bf_buf_size > MLX5E_MAX_TX_INLINE)
3040 		bf_buf_size = MLX5E_MAX_TX_INLINE;
3041 
3042 	return (bf_buf_size);
3043 }
3044 
3045 static int
3046 mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
3047     struct mlx5e_priv *priv,
3048     int num_comp_vectors)
3049 {
3050 	int err;
3051 
3052 	/*
3053 	 * TODO: Consider link speed for setting "log_sq_size",
3054 	 * "log_rq_size" and "cq_moderation_xxx":
3055 	 */
3056 	priv->params.log_sq_size =
3057 	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
3058 	priv->params.log_rq_size =
3059 	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
3060 	priv->params.rx_cq_moderation_usec =
3061 	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
3062 	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
3063 	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
3064 	priv->params.rx_cq_moderation_mode =
3065 	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
3066 	priv->params.rx_cq_moderation_pkts =
3067 	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
3068 	priv->params.tx_cq_moderation_usec =
3069 	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
3070 	priv->params.tx_cq_moderation_pkts =
3071 	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
3072 	priv->params.min_rx_wqes =
3073 	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
3074 	priv->params.rx_hash_log_tbl_sz =
3075 	    (order_base_2(num_comp_vectors) >
3076 	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
3077 	    order_base_2(num_comp_vectors) :
3078 	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
3079 	priv->params.num_tc = 1;
3080 	priv->params.default_vlan_prio = 0;
3081 	priv->counter_set_id = -1;
3082 	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
3083 
3084 	err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
3085 	if (err)
3086 		return (err);
3087 
3088 	/*
3089 	 * HW LRO is currently defaulted to off. When that changes, the HW
3090 	 * capability "!!MLX5_CAP_ETH(mdev, lro_cap)" must be considered.
3091 	 */
3092 	priv->params.hw_lro_en = false;
3093 	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
3094 
3095 	priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression);
3096 
3097 	priv->mdev = mdev;
3098 	priv->params.num_channels = num_comp_vectors;
3099 	priv->params.channels_rsss = 1;
3100 	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
3101 	priv->queue_mapping_channel_mask =
3102 	    roundup_pow_of_two(num_comp_vectors) - 1;
3103 	priv->num_tc = priv->params.num_tc;
3104 	priv->default_vlan_prio = priv->params.default_vlan_prio;
3105 
3106 	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
3107 	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
3108 	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3109 
3110 	return (0);
3111 }
3112 
3113 static int
3114 mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
3115 		  struct mlx5_core_mr *mkey)
3116 {
3117 	struct ifnet *ifp = priv->ifp;
3118 	struct mlx5_core_dev *mdev = priv->mdev;
3119 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
3120 	void *mkc;
3121 	u32 *in;
3122 	int err;
3123 
3124 	in = mlx5_vzalloc(inlen);
3125 	if (in == NULL) {
3126 		if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
3127 		return (-ENOMEM);
3128 	}
3129 
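	/*
	 * Build a physical address (PA) memory key with local read and
	 * write access, spanning the whole address space (length64).
	 */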
3130 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
3131 	MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
3132 	MLX5_SET(mkc, mkc, lw, 1);
3133 	MLX5_SET(mkc, mkc, lr, 1);
3134 
3135 	MLX5_SET(mkc, mkc, pd, pdn);
3136 	MLX5_SET(mkc, mkc, length64, 1);
3137 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
3138 
3139 	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
3140 	if (err)
3141 		if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
3142 		    __func__, err);
3143 
3144 	kvfree(in);
3145 	return (err);
3146 }
3147 
3148 static const char *mlx5e_vport_stats_desc[] = {
3149 	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
3150 };
3151 
3152 static const char *mlx5e_pport_stats_desc[] = {
3153 	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
3154 };
3155 
3156 static void
3157 mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
3158 {
3159 	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
3160 	sx_init(&priv->state_lock, "mlx5state");
3161 	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
3162 	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
3163 }
3164 
3165 static void
3166 mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
3167 {
3168 	mtx_destroy(&priv->async_events_mtx);
3169 	sx_destroy(&priv->state_lock);
3170 }
3171 
3172 static int
3173 sysctl_firmware(SYSCTL_HANDLER_ARGS)
3174 {
3175 	/*
3176 	 * The string format is "%d.%d.%d".
3177 	 * fw_rev_{maj,min,sub} return u16, and 2^16 = 65536, so at most
3178 	 * 5 chars are needed to store each number.
3179 	 * Together with the two "." separators and the terminating NUL,
3180 	 * we need at most 18 (5*3 + 3) chars.
3181 	 */
3182 	char fw[18];
3183 	struct mlx5e_priv *priv = arg1;
3184 	int error;
3185 
3186 	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
3187 	    fw_rev_sub(priv->mdev));
3188 	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
3189 	return (error);
3190 }
3191 
3192 static void
3193 mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
3194 {
3195 	int i;
3196 
3197 	for (i = 0; i < ch->num_tc; i++)
3198 		mlx5e_drain_sq(&ch->sq[i]);
3199 }
3200 
3201 static void
3202 mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
3203 {
3204 
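	/*
	 * Write a NOP doorbell record and ring the doorbell, so the
	 * hardware sees a valid record before the queue is moved from
	 * RST back to RDY.
	 */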
3205 	sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
3206 	sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
3207 	mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
3208 	sq->doorbell.d64 = 0;
3209 }
3210 
3211 void
3212 mlx5e_resume_sq(struct mlx5e_sq *sq)
3213 {
3214 	int err;
3215 
3216 	/* check if already enabled */
3217 	if (READ_ONCE(sq->running) != 0)
3218 		return;
3219 
3220 	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
3221 	    MLX5_SQC_STATE_RST);
3222 	if (err != 0) {
3223 		if_printf(sq->ifp,
3224 		    "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
3225 	}
3226 
3227 	sq->cc = 0;
3228 	sq->pc = 0;
3229 
3230 	/* reset doorbell prior to moving from RST to RDY */
3231 	mlx5e_reset_sq_doorbell_record(sq);
3232 
3233 	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
3234 	    MLX5_SQC_STATE_RDY);
3235 	if (err != 0) {
3236 		if_printf(sq->ifp,
3237 		    "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
3238 	}
3239 
3240 	sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
3241 	WRITE_ONCE(sq->running, 1);
3242 }
3243 
3244 static void
3245 mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
3246 {
3247 	int i;
3248 
3249 	for (i = 0; i < ch->num_tc; i++)
3250 		mlx5e_resume_sq(&ch->sq[i]);
3251 }
3252 
3253 static void
3254 mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
3255 {
3256 	struct mlx5e_rq *rq = &ch->rq;
3257 	int err;
3258 
3259 	mtx_lock(&rq->mtx);
3260 	rq->enabled = 0;
3261 	callout_stop(&rq->watchdog);
3262 	mtx_unlock(&rq->mtx);
3263 
3264 	callout_drain(&rq->watchdog);
3265 
3266 	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
3267 	if (err != 0) {
3268 		if_printf(rq->ifp,
3269 		    "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err);
3270 	}
3271 
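	/* poll the completion queue until the hardware has returned all WQEs */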
3272 	while (!mlx5_wq_ll_is_empty(&rq->wq)) {
3273 		msleep(1);
3274 		rq->cq.mcq.comp(&rq->cq.mcq);
3275 	}
3276 
3277 	/*
3278 	 * Transitioning into the RST state allows the FW to track fewer
3279 	 * queues in the ERR state, reducing the receive queue flush time.
3280 	 */
3281 	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
3282 	if (err != 0) {
3283 		if_printf(rq->ifp,
3284 		    "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
3285 	}
3286 }
3287 
3288 static void
3289 mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
3290 {
3291 	struct mlx5e_rq *rq = &ch->rq;
3292 	int err;
3293 
3294 	rq->wq.wqe_ctr = 0;
3295 	mlx5_wq_ll_update_db_record(&rq->wq);
3296 	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3297 	if (err != 0) {
3298 		if_printf(rq->ifp,
3299 		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
3300 	}
3301 
3302 	rq->enabled = 1;
3303 
3304 	rq->cq.mcq.comp(&rq->cq.mcq);
3305 }
3306 
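/*
 * Disable (value != 0) or re-enable (value == 0) TX DMA on all
 * channels. This is a no-op while the interface is closed.
 */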
3307 void
3308 mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
3309 {
3310 	int i;
3311 
3312 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3313 		return;
3314 
3315 	for (i = 0; i < priv->params.num_channels; i++) {
3316 		if (value)
3317 			mlx5e_disable_tx_dma(&priv->channel[i]);
3318 		else
3319 			mlx5e_enable_tx_dma(&priv->channel[i]);
3320 	}
3321 }
3322 
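/*
 * Disable (value != 0) or re-enable (value == 0) RX DMA on all
 * channels. This is a no-op while the interface is closed.
 */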
3323 void
3324 mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
3325 {
3326 	int i;
3327 
3328 	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
3329 		return;
3330 
3331 	for (i = 0; i < priv->params.num_channels; i++) {
3332 		if (value)
3333 			mlx5e_disable_rx_dma(&priv->channel[i]);
3334 		else
3335 			mlx5e_enable_rx_dma(&priv->channel[i]);
3336 	}
3337 }
3338 
3339 static void
3340 mlx5e_add_hw_stats(struct mlx5e_priv *priv)
3341 {
3342 	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3343 	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
3344 	    sysctl_firmware, "A", "HCA firmware version");
3345 
3346 	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
3347 	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
3348 	    "Board ID");
3349 }
3350 
3351 static int
3352 mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3353 {
3354 	struct mlx5e_priv *priv = arg1;
3355 	uint32_t tx_pfc;
3356 	uint32_t value;
3357 	int error;
3358 
3359 	PRIV_LOCK(priv);
3360 
3361 	tx_pfc = priv->params.tx_priority_flow_control;
3362 
3363 	/* get current value */
3364 	value = (tx_pfc >> arg2) & 1;
3365 
3366 	error = sysctl_handle_32(oidp, &value, 0, req);
3367 
3368 	/* update the requested priority bit */
3369 	if (value != 0)
3370 		priv->params.tx_priority_flow_control |= (1 << arg2);
3371 	else
3372 		priv->params.tx_priority_flow_control &= ~(1 << arg2);
3373 
3374 	/* check if update is required */
3375 	if (error == 0 && priv->gone == 0 &&
3376 	    tx_pfc != priv->params.tx_priority_flow_control) {
3377 		error = -mlx5e_set_port_pfc(priv);
3378 		/* restore previous value */
3379 		if (error != 0)
3380 			priv->params.tx_priority_flow_control = tx_pfc;
3381 	}
3382 	PRIV_UNLOCK(priv);
3383 
3384 	return (error);
3385 }
3386 
3387 static int
3388 mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3389 {
3390 	struct mlx5e_priv *priv = arg1;
3391 	uint32_t rx_pfc;
3392 	uint32_t value;
3393 	int error;
3394 
3395 	PRIV_LOCK(priv);
3396 
3397 	rx_pfc = priv->params.rx_priority_flow_control;
3398 
3399 	/* get current value */
3400 	value = (rx_pfc >> arg2) & 1;
3401 
3402 	error = sysctl_handle_32(oidp, &value, 0, req);
3403 
3404 	/* update the requested priority bit */
3405 	if (value != 0)
3406 		priv->params.rx_priority_flow_control |= (1 << arg2);
3407 	else
3408 		priv->params.rx_priority_flow_control &= ~(1 << arg2);
3409 
3410 	/* check if update is required */
3411 	if (error == 0 && priv->gone == 0 &&
3412 	    rx_pfc != priv->params.rx_priority_flow_control) {
3413 		error = -mlx5e_set_port_pfc(priv);
3414 		/* restore previous value */
3415 		if (error != 0)
3416 			priv->params.rx_priority_flow_control = rx_pfc;
3417 	}
3418 	PRIV_UNLOCK(priv);
3419 
3420 	return (error);
3421 }
3422 
3423 static void
3424 mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
3425 {
3426 	unsigned int x;
3427 	char path[96];
3428 	int error;
#if (__FreeBSD_version < 1100000)
	int value;
#endif
3429 
3430 	/* enable pauseframes by default */
3431 	priv->params.tx_pauseframe_control = 1;
3432 	priv->params.rx_pauseframe_control = 1;
3433 
3434 	/* disable ports flow control, PFC, by default */
3435 	priv->params.tx_priority_flow_control = 0;
3436 	priv->params.rx_priority_flow_control = 0;
3437 
3438 #if (__FreeBSD_version < 1100000)
3439 	/* compute path for sysctl */
3440 	snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
3441 	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3442 
3443 	/* try to fetch tunable, if any */
3444 	TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);
3445 
3446 	/* compute path for sysctl */
3447 	snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
3448 	    device_get_unit(priv->mdev->pdev->dev.bsddev));
3449 
3450 	/* try to fetch tunable, if any */
3451 	TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
3452 
3453 	for (x = 0; x != 8; x++) {
3454 
3455 		/* compute path for sysctl */
3456 		snprintf(path, sizeof(path), "dev.mce.%d.tx_priority_flow_control_%u",
3457 		    device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3458 
3459 		/* try to fetch tunable, if any */
3460 		if (TUNABLE_INT_FETCH(path, &value) != 0 && value != 0)
3461 			priv->params.tx_priority_flow_control |= 1 << x;
3462 
3463 		/* compute path for sysctl */
3464 		snprintf(path, sizeof(path), "dev.mce.%d.rx_priority_flow_control_%u",
3465 		    device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3466 
3467 		/* try to fetch tunable, if any */
3468 		if (TUNABLE_INT_FETCH(path, &value) != 0 && value != 0)
3469 			priv->params.rx_priority_flow_control |= 1 << x;
3470 	}
3471 #endif
3472 
3473 	/* register pauseframe SYSCTLs */
3474 	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3475 	    OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
3476 	    &priv->params.tx_pauseframe_control, 0,
3477 	    "Set to enable TX pause frames. Clear to disable.");
3478 
3479 	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3480 	    OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
3481 	    &priv->params.rx_pauseframe_control, 0,
3482 	    "Set to enable RX pause frames. Clear to disable.");
3483 
3484 	/* register priority_flow control, PFC, SYSCTLs */
3485 	for (x = 0; x != 8; x++) {
3486 		snprintf(path, sizeof(path), "tx_priority_flow_control_%u", x);
3487 
3488 		SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3489 		    OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3490 		    CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_tx_priority_flow_control, "IU",
3491 		    "Set to enable TX ports flow control frames for given priority. Clear to disable.");
3492 
3493 		snprintf(path, sizeof(path), "rx_priority_flow_control_%u", x);
3494 
3495 		SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3496 		    OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3497 		    CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_rx_priority_flow_control, "IU",
3498 		    "Set to enable RX ports flow control frames for given priority. Clear to disable.");
3499 	}
3500 
3501 	PRIV_LOCK(priv);
3502 
3503 	/* range check */
3504 	priv->params.tx_pauseframe_control =
3505 	    priv->params.tx_pauseframe_control ? 1 : 0;
3506 	priv->params.rx_pauseframe_control =
3507 	    priv->params.rx_pauseframe_control ? 1 : 0;
3508 
3509 	/* update firmware */
3510 	error = mlx5e_set_port_pause_and_pfc(priv);
3511 	if (error == -EINVAL) {
3512 		if_printf(priv->ifp,
3513 		    "Global pauseframes must be disabled before enabling PFC.\n");
3514 		priv->params.rx_priority_flow_control = 0;
3515 		priv->params.tx_priority_flow_control = 0;
3516 
3517 		/* update firmware */
3518 		(void) mlx5e_set_port_pause_and_pfc(priv);
3519 	}
3520 	PRIV_UNLOCK(priv);
3521 }
3522 
3523 static int
3524 mlx5e_ul_snd_tag_alloc(struct ifnet *ifp,
3525     union if_snd_tag_alloc_params *params,
3526     struct m_snd_tag **ppmt)
3527 {
3528 	struct mlx5e_priv *priv;
3529 	struct mlx5e_channel *pch;
3530 
3531 	priv = ifp->if_softc;
3532 
3533 	if (unlikely(priv->gone || params->hdr.flowtype == M_HASHTYPE_NONE)) {
3534 		return (EOPNOTSUPP);
3535 	} else {
3536 		/* keep this code synced with mlx5e_select_queue() */
3537 		u32 ch = priv->params.num_channels;
3538 #ifdef RSS
3539 		u32 temp;
3540 
3541 		if (rss_hash2bucket(params->hdr.flowid,
3542 		    params->hdr.flowtype, &temp) == 0)
3543 			ch = temp % ch;
3544 		else
3545 #endif
3546 			ch = (params->hdr.flowid % 128) % ch;
3547 
3548 		/*
3549 		 * NOTE: The channels array is only freed at detach
3550 		 * and it is safe to return a pointer to the send tag
3551 		 * inside the channels structure as long as we
3552 		 * reference the priv.
3553 		 */
3554 		pch = priv->channel + ch;
3555 
3556 		/* check if send queue is not running */
3557 		if (unlikely(pch->sq[0].running == 0))
3558 			return (ENXIO);
3559 		mlx5e_ref_channel(priv);
3560 		*ppmt = &pch->tag.m_snd_tag;
3561 		return (0);
3562 	}
3563 }
3564 
3565 static int
3566 mlx5e_ul_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
3567 {
3568 	struct mlx5e_channel *pch =
3569 	    container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);
3570 
3571 	params->unlimited.max_rate = -1ULL;
3572 	params->unlimited.queue_level = mlx5e_sq_queue_level(&pch->sq[0]);
3573 	return (0);
3574 }
3575 
3576 static void
3577 mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt)
3578 {
3579 	struct mlx5e_channel *pch =
3580 	    container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);
3581 
3582 	mlx5e_unref_channel(pch->priv);
3583 }
3584 
3585 static int
3586 mlx5e_snd_tag_alloc(struct ifnet *ifp,
3587     union if_snd_tag_alloc_params *params,
3588     struct m_snd_tag **ppmt)
3589 {
3590 
3591 	switch (params->hdr.type) {
3592 #ifdef RATELIMIT
3593 	case IF_SND_TAG_TYPE_RATE_LIMIT:
3594 		return (mlx5e_rl_snd_tag_alloc(ifp, params, ppmt));
3595 #endif
3596 	case IF_SND_TAG_TYPE_UNLIMITED:
3597 		return (mlx5e_ul_snd_tag_alloc(ifp, params, ppmt));
3598 	default:
3599 		return (EOPNOTSUPP);
3600 	}
3601 }
3602 
3603 static int
3604 mlx5e_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
3605 {
3606 	struct mlx5e_snd_tag *tag =
3607 	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);
3608 
3609 	switch (tag->type) {
3610 #ifdef RATELIMIT
3611 	case IF_SND_TAG_TYPE_RATE_LIMIT:
3612 		return (mlx5e_rl_snd_tag_modify(pmt, params));
3613 #endif
3614 	case IF_SND_TAG_TYPE_UNLIMITED:
3615 	default:
3616 		return (EOPNOTSUPP);
3617 	}
3618 }
3619 
3620 static int
3621 mlx5e_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
3622 {
3623 	struct mlx5e_snd_tag *tag =
3624 	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);
3625 
3626 	switch (tag->type) {
3627 #ifdef RATELIMIT
3628 	case IF_SND_TAG_TYPE_RATE_LIMIT:
3629 		return (mlx5e_rl_snd_tag_query(pmt, params));
3630 #endif
3631 	case IF_SND_TAG_TYPE_UNLIMITED:
3632 		return (mlx5e_ul_snd_tag_query(pmt, params));
3633 	default:
3634 		return (EOPNOTSUPP);
3635 	}
3636 }
3637 
3638 static void
3639 mlx5e_snd_tag_free(struct m_snd_tag *pmt)
3640 {
3641 	struct mlx5e_snd_tag *tag =
3642 	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);
3643 
3644 	switch (tag->type) {
3645 #ifdef RATELIMIT
3646 	case IF_SND_TAG_TYPE_RATE_LIMIT:
3647 		mlx5e_rl_snd_tag_free(pmt);
3648 		break;
3649 #endif
3650 	case IF_SND_TAG_TYPE_UNLIMITED:
3651 		mlx5e_ul_snd_tag_free(pmt);
3652 		break;
3653 	default:
3654 		break;
3655 	}
3656 }
3657 
3658 static void *
3659 mlx5e_create_ifp(struct mlx5_core_dev *mdev)
3660 {
3661 	struct ifnet *ifp;
3662 	struct mlx5e_priv *priv;
3663 	u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
3664 	struct sysctl_oid_list *child;
3665 	int ncv = mdev->priv.eq_table.num_comp_vectors;
3666 	char unit[16];
3667 	int err;
3668 	int i;
3669 	u32 eth_proto_cap;
3670 
3671 	if (mlx5e_check_required_hca_cap(mdev)) {
3672 		mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
3673 		return (NULL);
3674 	}
3675 	/*
3676 	 * Try to allocate the priv and make room for worst-case
3677 	 * number of channel structures:
3678 	 */
3679 	priv = malloc(sizeof(*priv) +
3680 	    (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
3681 	    M_MLX5EN, M_WAITOK | M_ZERO);
3682 	mlx5e_priv_mtx_init(priv);
3683 
3684 	ifp = priv->ifp = if_alloc(IFT_ETHER);
3685 	if (ifp == NULL) {
3686 		mlx5_core_err(mdev, "if_alloc() failed\n");
3687 		goto err_free_priv;
3688 	}
3689 	ifp->if_softc = priv;
3690 	if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
3691 	ifp->if_mtu = ETHERMTU;
3692 	ifp->if_init = mlx5e_open;
3693 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3694 	ifp->if_ioctl = mlx5e_ioctl;
3695 	ifp->if_transmit = mlx5e_xmit;
3696 	ifp->if_qflush = if_qflush;
3697 #if (__FreeBSD_version >= 1100000)
3698 	ifp->if_get_counter = mlx5e_get_counter;
3699 #endif
3700 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
3701 	/*
3702 	 * Set driver features
3703 	 */
3704 	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
3705 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
3706 	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
3707 	ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
3708 	ifp->if_capabilities |= IFCAP_LRO;
3709 	ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
3710 	ifp->if_capabilities |= IFCAP_HWSTATS | IFCAP_HWRXTSTMP;
3711 	ifp->if_capabilities |= IFCAP_TXRTLMT;
3712 	ifp->if_snd_tag_alloc = mlx5e_snd_tag_alloc;
3713 	ifp->if_snd_tag_free = mlx5e_snd_tag_free;
3714 	ifp->if_snd_tag_modify = mlx5e_snd_tag_modify;
3715 	ifp->if_snd_tag_query = mlx5e_snd_tag_query;
3716 
3717 	/* set TSO limits so that we don't have to drop TX packets */
3718 	ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
3719 	ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
3720 	ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
3721 
3722 	ifp->if_capenable = ifp->if_capabilities;
3723 	ifp->if_hwassist = 0;
3724 	if (ifp->if_capenable & IFCAP_TSO)
3725 		ifp->if_hwassist |= CSUM_TSO;
3726 	if (ifp->if_capenable & IFCAP_TXCSUM)
3727 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3728 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3729 		ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
3730 
3731 	/* ifnet sysctl tree */
3732 	sysctl_ctx_init(&priv->sysctl_ctx);
3733 	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
3734 	    OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
3735 	if (priv->sysctl_ifnet == NULL) {
3736 		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3737 		goto err_free_sysctl;
3738 	}
3739 	snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
3740 	priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3741 	    OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
3742 	if (priv->sysctl_ifnet == NULL) {
3743 		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3744 		goto err_free_sysctl;
3745 	}
3746 
3747 	/* HW sysctl tree */
3748 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
3749 	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
3750 	    OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
3751 	if (priv->sysctl_hw == NULL) {
3752 		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3753 		goto err_free_sysctl;
3754 	}
3755 
3756 	err = mlx5e_build_ifp_priv(mdev, priv, ncv);
3757 	if (err) {
3758 		mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err);
3759 		goto err_free_sysctl;
3760 	}
3761 
3762 	snprintf(unit, sizeof(unit), "mce%u_wq",
3763 	    device_get_unit(mdev->pdev->dev.bsddev));
3764 	priv->wq = alloc_workqueue(unit, 0, 1);
3765 	if (priv->wq == NULL) {
3766 		if_printf(ifp, "%s: alloc_workqueue failed\n", __func__);
3767 		goto err_free_sysctl;
3768 	}
3769 
3770 	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
3771 	if (err) {
3772 		if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
3773 		    __func__, err);
3774 		goto err_free_wq;
3775 	}
3776 	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
3777 	if (err) {
3778 		if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
3779 		    __func__, err);
3780 		goto err_unmap_free_uar;
3781 	}
3782 	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
3783 	if (err) {
3784 		if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
3785 		    __func__, err);
3786 		goto err_dealloc_pd;
3787 	}
3788 	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
3789 	if (err) {
3790 		if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
3791 		    __func__, err);
3792 		goto err_dealloc_transport_domain;
3793 	}
3794 	mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
3795 
3796 	/* check if we should generate a random MAC address */
3797 	if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
3798 	    is_zero_ether_addr(dev_addr)) {
3799 		random_ether_addr(dev_addr);
3800 		if_printf(ifp, "Assigned random MAC address\n");
3801 	}
3802 #ifdef RATELIMIT
3803 	err = mlx5e_rl_init(priv);
3804 	if (err) {
3805 		if_printf(ifp, "%s: mlx5e_rl_init failed, %d\n",
3806 		    __func__, err);
3807 		goto err_create_mkey;
3808 	}
3809 #endif
3810 
3811 	/* set default MTU */
3812 	mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
3813 
3814 	/* Set default media status */
3815 	priv->media_status_last = IFM_AVALID;
3816 	priv->media_active_last = IFM_ETHER | IFM_AUTO |
3817 	    IFM_ETH_RXPAUSE | IFM_FDX;
3818 
3819 	/* set up the default pause-frame configuration */
3820 	mlx5e_setup_pauseframes(priv);
3821 
3822 	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
3823 	if (err) {
3824 		eth_proto_cap = 0;
3825 		if_printf(ifp, "%s: Query port media capability failed, %d\n",
3826 		    __func__, err);
3827 	}
3828 
3829 	/* Set up the supported media types */
3830 	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
3831 	    mlx5e_media_change, mlx5e_media_status);
3832 
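	/*
	 * Add each link mode the port reports as capable, both as a
	 * plain media word and with full-duplex and pause flags set:
	 */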
3833 	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
3834 		if (mlx5e_mode_table[i].baudrate == 0)
3835 			continue;
3836 		if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
3837 			ifmedia_add(&priv->media,
3838 			    mlx5e_mode_table[i].subtype |
3839 			    IFM_ETHER, 0, NULL);
3840 			ifmedia_add(&priv->media,
3841 			    mlx5e_mode_table[i].subtype |
3842 			    IFM_ETHER | IFM_FDX |
3843 			    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3844 		}
3845 	}
3846 
3847 	/* Additional supported media types */
3848 	ifmedia_add(&priv->media, IFM_10G_LR | IFM_ETHER, 0, NULL);
3849 	ifmedia_add(&priv->media, IFM_10G_LR |
3850 	    IFM_ETHER | IFM_FDX |
3851 	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3852 
3853 	ifmedia_add(&priv->media, IFM_40G_ER4 | IFM_ETHER, 0, NULL);
3854 	ifmedia_add(&priv->media, IFM_40G_ER4 |
3855 	    IFM_ETHER | IFM_FDX |
3856 	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3857 
3858 	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3859 	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3860 	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3861 
3862 	/* Set autoselect by default */
3863 	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3864 	    IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
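	/* attach the interface to the network stack */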
3865 	ether_ifattach(ifp, dev_addr);
3866 
3867 	/* Register for VLAN events */
3868 	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
3869 	    mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
3870 	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
3871 	    mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
3872 
3873 	/* Link is down by default */
3874 	if_link_state_change(ifp, LINK_STATE_DOWN);
3875 
3876 	mlx5e_enable_async_events(priv);
3877 
3878 	mlx5e_add_hw_stats(priv);
3879 
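	/* create sysctl nodes for the virtual and physical port counters */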
3880 	mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3881 	    "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
3882 	    priv->stats.vport.arg);
3883 
3884 	mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3885 	    "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
3886 	    priv->stats.pport.arg);
3887 
3888 	mlx5e_create_ethtool(priv);
3889 
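	/* fetch an initial snapshot of the counters */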
3890 	mtx_lock(&priv->async_events_mtx);
3891 	mlx5e_update_stats(priv);
3892 	mtx_unlock(&priv->async_events_mtx);
3893 
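	/* expose the RX timestamp calibration state and arm its callout */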
3894 	SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3895 	    OID_AUTO, "rx_clbr_done", CTLFLAG_RD,
3896 	    &priv->clbr_done, 0,
3897 	    "RX timestamps calibration state");
3898 	callout_init(&priv->tstmp_clbr, CALLOUT_DIRECT);
3899 	mlx5e_reset_calibration_callout(priv);
3900 
3901 	return (priv);
3902 
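	/*
	 * Error unwind; each label releases what was allocated before
	 * the corresponding failure point, in reverse order:
	 */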
3903 #ifdef RATELIMIT
3904 err_create_mkey:
3905 	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
3906 #endif
3907 err_dealloc_transport_domain:
3908 	mlx5_dealloc_transport_domain(mdev, priv->tdn);
3909 
3910 err_dealloc_pd:
3911 	mlx5_core_dealloc_pd(mdev, priv->pdn);
3912 
3913 err_unmap_free_uar:
3914 	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
3915 
3916 err_free_wq:
3917 	destroy_workqueue(priv->wq);
3918 
3919 err_free_sysctl:
3920 	sysctl_ctx_free(&priv->sysctl_ctx);
3921 	if (priv->sysctl_debug)
3922 		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
3923 	if_free(ifp);
3924 
3925 err_free_priv:
3926 	mlx5e_priv_mtx_destroy(priv);
3927 	free(priv, M_MLX5EN);
3928 	return (NULL);
3929 }
3930 
3931 static void
3932 mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
3933 {
3934 	struct mlx5e_priv *priv = vpriv;
3935 	struct ifnet *ifp = priv->ifp;
3936 
3937 	/* mark the device as gone to reject further IOCTLs */
3938 	priv->gone = 1;
3939 
3940 	/* XXX wait one second to let in-flight IOCTL handlers complete */
3941 	pause("W", hz);
3942 
3943 #ifdef RATELIMIT
3944 	/*
3945 	 * The kernel may still hold references into the ratelimit
3946 	 * channels through its m_snd_tag structures; all of
3947 	 * these must be released before detaching:
3948 	 */
3949 	while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) {
3950 		if_printf(priv->ifp, "Waiting for all ratelimit connections "
3951 		    "to terminate\n");
3952 		pause("W", hz);
3953 	}
3954 #endif
3955 	/* stop the watchdog timer */
3956 	callout_drain(&priv->watchdog);
3957 
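	/* stop the RX timestamp calibration callout */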
3958 	callout_drain(&priv->tstmp_clbr);
3959 
3960 	if (priv->vlan_attach != NULL)
3961 		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
3962 	if (priv->vlan_detach != NULL)
3963 		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
3964 
3965 	/* make sure the device gets closed */
3966 	PRIV_LOCK(priv);
3967 	mlx5e_close_locked(ifp);
3968 	PRIV_UNLOCK(priv);
3969 
3970 	/* wait for all unlimited send tags to go away */
3971 	while (priv->channel_refs != 0) {
3972 		if_printf(priv->ifp, "Waiting for all unlimited connections "
3973 		    "to terminate\n");
3974 		pause("W", hz);
3975 	}
3976 
3977 	/* unregister device */
3978 	ifmedia_removeall(&priv->media);
3979 	ether_ifdetach(ifp);
3980 	if_free(ifp);
3981 
3982 #ifdef RATELIMIT
3983 	mlx5e_rl_cleanup(priv);
3984 #endif
3985 	/* destroy all remaining sysctl nodes */
3986 	sysctl_ctx_free(&priv->stats.vport.ctx);
3987 	sysctl_ctx_free(&priv->stats.pport.ctx);
3988 	sysctl_ctx_free(&priv->sysctl_ctx);
3989 	if (priv->sysctl_debug)
3990 		sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
3991 
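	/* release firmware resources in reverse order of allocation */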
3992 	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
3993 	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
3994 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
3995 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
3996 	mlx5e_disable_async_events(priv);
3997 	destroy_workqueue(priv->wq);
3998 	mlx5e_priv_mtx_destroy(priv);
3999 	free(priv, M_MLX5EN);
4000 }
4001 
4002 static void *
4003 mlx5e_get_ifp(void *vpriv)
4004 {
4005 	struct mlx5e_priv *priv = vpriv;
4006 
4007 	return (priv->ifp);
4008 }
4009 
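/*
 * Callbacks through which the mlx5 core driver attaches and detaches
 * an Ethernet instance for each supported port:
 */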
4010 static struct mlx5_interface mlx5e_interface = {
4011 	.add = mlx5e_create_ifp,
4012 	.remove = mlx5e_destroy_ifp,
4013 	.event = mlx5e_async_event,
4014 	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
4015 	.get_dev = mlx5e_get_ifp,
4016 };
4017 
4018 void
4019 mlx5e_init(void)
4020 {
4021 	mlx5_register_interface(&mlx5e_interface);
4022 }
4023 
4024 void
4025 mlx5e_cleanup(void)
4026 {
4027 	mlx5_unregister_interface(&mlx5e_interface);
4028 }
4029 
4030 static void
4031 mlx5e_show_version(void __unused *arg)
4032 {
4033 
4034 	printf("%s", mlx5e_version);
4035 }
4036 SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL);
4037 
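/* hook interface registration into module load and unload */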
4038 module_init_order(mlx5e_init, SI_ORDER_THIRD);
4039 module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);
4040 
4041 #if (__FreeBSD_version >= 1100000)
4042 MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
4043 #endif
4044 MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
4045 MODULE_VERSION(mlx5en, 1);
4046