/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2023 Dmitry Salychev
 * Copyright © 2022 Mathew McBride
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * The DPAA2 Network Interface (DPNI) driver.
 *
 * The DPNI object is a network interface that is configurable to support a wide
 * range of features from a very basic Ethernet interface up to a
 * high-functioning network interface. The DPNI supports features that are
 * expected by standard network stacks, from basic features to offloads.
 *
 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
 * functions are provided for standard network protocols (L2, L3, L4, etc.).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>

#include "opt_acpi.h"
#include "opt_platform.h"

#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"

#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"

#define BIT(x)			(1ul << (x))
#define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
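
/*
 * NOTE: WRIOP_VERSION() packs (major, minor, revision) into a single integer,
 *	 so WRIOP hardware revisions can be compared numerically, e.g.
 *	 WRIOP_VERSION(1, 0, 0) > WRIOP_VERSION(0, 0, 0).
 */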

/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)

#define	ALIGN_UP(x, y)		roundup2((x), (y))
#define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)

#define DPNI_LOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__sc)->lock);		\
} while (0)
#define	DPNI_UNLOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
	mtx_unlock(&(__sc)->lock);		\
} while (0)

#define TX_LOCK(__tx) do {			\
	mtx_assert(&(__tx)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__tx)->lock);		\
} while (0)
#define	TX_UNLOCK(__tx) do {			\
	mtx_assert(&(__tx)->lock, MA_OWNED);	\
	mtx_unlock(&(__tx)->lock);		\
} while (0)

#define DPAA2_TX_RING(sc, chan, tc)				\
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
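/*
 * NOTE: For example, DPAA2_TX_RING(sc, 0, 0) refers to the Tx ring of traffic
 *	 class 0 on the first channel.
 */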

#define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
#define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */

/* Default maximum frame length. */
#define DPAA2_ETH_MFL		(ETHER_MAX_LEN - ETHER_CRC_LEN)

/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR		7
#define DPNI_VER_MINOR		0

/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
#define BUF_ALIGN		64
#define BUF_SWA_SIZE		64  /* SW annotation size */
#define BUF_RX_HWA_SIZE		64  /* HW annotation size */
#define BUF_TX_HWA_SIZE		128 /* HW annotation size */
#define BUF_SIZE		(MJUM9BYTES)

#define DPAA2_TX_BUFRING_SZ	(4096u)
#define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
#define DPAA2_TX_SEG_SZ		(4096u)
#define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ		(PAGE_SIZE) /* bytes */

/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE	256

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE 256

/* Channel storage buffer configuration. */
#define ETH_STORE_FRAMES	16u
#define ETH_STORE_SIZE		((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
#define ETH_STORE_ALIGN		64u

/* Buffers layout options. */
#define BUF_LOPT_TIMESTAMP	0x1
#define BUF_LOPT_PARSER_RESULT	0x2
#define BUF_LOPT_FRAME_STATUS	0x4
#define BUF_LOPT_PRIV_DATA_SZ	0x8
#define BUF_LOPT_DATA_ALIGN	0x10
#define BUF_LOPT_DATA_HEAD_ROOM	0x20
#define BUF_LOPT_DATA_TAIL_ROOM	0x40

#define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT	(60)
#define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT	(49)
#define DPAA2_NI_TX_IDX_MASK	(0x7u)
#define DPAA2_NI_TX_IDX_SHIFT	(57)
#define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)
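
/*
 * NOTE: The masks and shifts above describe how metadata is packed into the
 *	 upper bits of a 64-bit QBMan buffer "address" on top of the 49-bit
 *	 physical address (bits 48:0): a channel index at bit 60, a Tx ring
 *	 index at bit 57 and a buffer index at bit 49.
 */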

#define DPAA2_NI_FD_FMT_MASK	(0x3u)
#define DPAA2_NI_FD_FMT_SHIFT	(12)
#define DPAA2_NI_FD_ERR_MASK	(0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT	(0)
#define DPAA2_NI_FD_SL_MASK	(0x1u)
#define DPAA2_NI_FD_SL_SHIFT	(14)
#define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)

/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10

/* Unique IDs for the supported Rx classification header fields. */
#define DPAA2_ETH_DIST_ETHDST	BIT(0)
#define DPAA2_ETH_DIST_ETHSRC	BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
#define DPAA2_ETH_DIST_VLAN	BIT(3)
#define DPAA2_ETH_DIST_IPSRC	BIT(4)
#define DPAA2_ETH_DIST_IPDST	BIT(5)
#define DPAA2_ETH_DIST_IPPROTO	BIT(6)
#define DPAA2_ETH_DIST_L4SRC	BIT(7)
#define DPAA2_ETH_DIST_L4DST	BIT(8)
#define DPAA2_ETH_DIST_ALL	(~0ULL)

/* L3-L4 network traffic flow hash options. */
#define	RXH_L2DA		(1 << 1)
#define	RXH_VLAN		(1 << 2)
#define	RXH_L3_PROTO		(1 << 3)
#define	RXH_IP_SRC		(1 << 4)
#define	RXH_IP_DST		(1 << 5)
#define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
#define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define	RXH_DISCARD		(1 << 31)

/*
 * Default Rx hash options (a 4-tuple of IP src/dst address and L4 src/dst
 * port), set during attach.
 */
#define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)

MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");

/* DPAA2 Network Interface resource specification. */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPNI.
	 */
#define MCP_RES_NUM	(1u)
#define MCP_RID_OFF	(0u)
#define MCP_RID(rid)	((rid) + MCP_RID_OFF)
	/* --- */
	{ DPAA2_DEV_MCP, MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *	 availability interrupts, the DPCONs are used to identify the
	 *	 network interface that has produced ingress data to that core.
	 */
#define IO_RES_NUM	(16u)
#define IO_RID_OFF	(MCP_RID_OFF + MCP_RES_NUM)
#define IO_RID(rid)	((rid) + IO_RID_OFF)
	/* --- */
	{ DPAA2_DEV_IO,  IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO,  IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
#define BP_RES_NUM	(1u)
#define BP_RID_OFF	(IO_RID_OFF + IO_RES_NUM)
#define BP_RID(rid)	((rid) + BP_RID_OFF)
	/* --- */
	{ DPAA2_DEV_BP,  BP_RID(0),   RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic
	 *	 is distributed.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *	 different network interfaces arriving on the same core, the
	 *	 DPCONs must be private to the DPNIs.
	 */
#define CON_RES_NUM	(16u)
#define CON_RID_OFF	(BP_RID_OFF + BP_RES_NUM)
#define CON_RID(rid)	((rid) + CON_RID_OFF)
	/* --- */
	{ DPAA2_DEV_CON, CON_RID(0),   RF_ACTIVE },
	{ DPAA2_DEV_CON, CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },
	/* --- */
	RESOURCE_SPEC_END
};
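
/*
 * NOTE: With the offsets above, the flat rid space laid out by this spec is:
 *	 rid 0 (DPMCP), rids 1-16 (DPIO), rid 17 (DPBP) and rids 18-33 (DPCON).
 */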

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from the L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};
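
/*
 * NOTE: Each entry above contributes a (cls_prot, cls_field) extraction of
 *	 "size" bytes to the key generation profile used for Rx hash
 *	 distribution (see dpaa2_ni_set_dist_key() and
 *	 dpaa2_ni_prepare_key_cfg()).
 */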

static struct dpni_stat {
	int	 page;
	int	 cnt;
	char	*name;
	char	*desc;
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
	   				"filtering" },
	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
	   				"depletion in DPNI buffer pools" },
};

/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);

/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *,
    enum dpaa2_ni_queue_type);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);

/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);

/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t);
static int dpaa2_ni_set_pause_frame(device_t);
static int dpaa2_ni_set_qos_table(device_t);
static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);

/* Buffers and buffer pools */
static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t);
static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *);
static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *);
static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *,
    struct dpaa2_ni_channel *);

/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
    struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);

/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *,
    struct dpaa2_dq **);

/* Network interface routines */
static void dpaa2_ni_init(void *);
static int  dpaa2_ni_transmit(if_t, struct mbuf *);
static void dpaa2_ni_qflush(if_t);
static int  dpaa2_ni_ioctl(if_t, u_long, caddr_t);
static int  dpaa2_ni_update_mac_filters(if_t);
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);

/* Interrupt handlers */
static void dpaa2_ni_intr(void *);

/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int  dpaa2_ni_media_change(if_t);
static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);

/* DMA mapping callback */
static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int);

/* Tx/Rx routines. */
static void dpaa2_ni_poll(void *);
static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_bp_task(void *, int);

/* Tx/Rx subroutines */
static int  dpaa2_ni_consume_frames(struct dpaa2_ni_channel *,
    struct dpaa2_ni_fq **, uint32_t *);
static int  dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int  dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int  dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);

/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);

static int
dpaa2_ni_probe(device_t dev)
{
	/* The DPNI device is added by the parent resource container itself. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->bp_dmat = NULL;
	sc->st_dmat = NULL;
	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.type = DPAA2_BUF_STORE;
	sc->qos_kcfg.store.dmap = NULL;
	sc->qos_kcfg.store.paddr = 0;
	sc->qos_kcfg.store.vaddr = NULL;

	sc->rxd_kcfg.type = DPAA2_BUF_STORE;
	sc->rxd_kcfg.store.dmap = NULL;
	sc->rxd_kcfg.store.paddr = 0;
	sc->rxd_kcfg.store.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: failed to allocate network interface\n",
		    __func__);
		goto err_exit;
	}
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
	 *          (BPSCN) returned as a result to the VDQ command instead.
	 *          It is similar to CDAN processed in dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc);
	bzero(tq_name, sizeof (tq_name));
	snprintf(tq_name, sizeof (tq_name), "%s_tqbp",
	    device_get_nameunit(dev));
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	if (sc->bp_taskq == NULL) {
		device_printf(dev, "%s: failed to allocate task queue: %s\n",
		    __func__, tq_name);
		goto close_ni;
	}
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}

static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);

	DPNI_LOCK(sc);
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = ifmr->ifm_active =
	    sc->fixed_ifmedia.ifm_cur->ifm_media;

	/*
	 * In non-PHY use cases, we need to signal link state up; otherwise
	 * certain things that require a link event (e.g. the async DHCP
	 * client run by devd) do not happen.
	 */
	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other)
	 * and reset if down. This is different from DPAA2_MAC_LINK_TYPE_PHY:
	 * here the MC firmware sets the status, instead of us telling the MC
	 * what it is.
	 */
	DPNI_UNLOCK(sc);

	return;
}

static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
		     dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
}

static int
dpaa2_ni_detach(device_t dev)
{
	/* TBD */
	return (0);
}

/**
 * @brief Configure DPAA2 network interface object.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token, ni_token, mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Check if we can work with this DPNI object. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
	    &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		goto close_ni;
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		goto close_ni;
	}

	/* Reset the DPNI object. */
	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		goto close_ni;
	}

	/* Obtain attributes of the DPNI object. */
	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		goto close_ni;
	}
	if (bootverbose) {
790 		device_printf(dev, "\toptions=0x%#x queues=%d tx_channels=%d "
791 		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
792 		    sc->attr.num.channels, sc->attr.wriop_ver);
793 		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
794 		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
795 		    sc->attr.num.cgs);
796 		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
797 		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
798 		    sc->attr.entries.qos, sc->attr.entries.fs);
799 		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
800 		    sc->attr.key_size.qos, sc->attr.key_size.fs);
801 	}
802 
803 	/* Configure buffer layouts of the DPNI queues. */
804 	error = dpaa2_ni_set_buf_layout(dev);
805 	if (error) {
806 		device_printf(dev, "%s: failed to configure buffer layout\n",
807 		    __func__);
808 		goto close_ni;
809 	}
810 
811 	/* Configure DMA resources. */
812 	error = dpaa2_ni_setup_dma(sc);
813 	if (error) {
814 		device_printf(dev, "%s: failed to setup DMA\n", __func__);
815 		goto close_ni;
816 	}
817 
818 	/* Setup link between DPNI and an object it's connected to. */
819 	ep1_desc.obj_id = dinfo->id;
820 	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
821 	ep1_desc.type = dinfo->dtype;
822 
823 	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
824 	    &ep1_desc, &ep2_desc, &link);
825 	if (error) {
826 		device_printf(dev, "%s: failed to obtain an object DPNI is "
827 		    "connected to: error=%d\n", __func__, error);
828 	} else {
829 		device_printf(dev, "connected to %s (id=%d)\n",
830 		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);
831 
832 		error = dpaa2_ni_set_mac_addr(dev);
833 		if (error) {
834 			device_printf(dev, "%s: failed to set MAC address: "
835 			    "error=%d\n", __func__, error);
836 		}
837 
838 		if (ep2_desc.type == DPAA2_DEV_MAC) {
839 			/*
840 			 * This is the simplest case when DPNI is connected to
841 			 * DPMAC directly.
842 			 */
843 			sc->mac.dpmac_id = ep2_desc.obj_id;
844 
845 			link_type = DPAA2_MAC_LINK_TYPE_NONE;
846 
847 			/*
848 			 * Need to determine if DPMAC type is PHY (attached to
849 			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
850 			 * link state managed by MC firmware).
851 			 */
852 			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
853 			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
854 			    &mac_token);
855 			/*
856 			 * Under VFIO, the DPMAC might be sitting in another
857 			 * container (DPRC) we don't have access to.
858 			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
859 			 * the case.
860 			 */
861 			if (error) {
862 				device_printf(dev, "%s: failed to open "
863 				    "connected DPMAC: %d (assuming in other DPRC)\n", __func__,
864 				    sc->mac.dpmac_id);
865 				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
866 			} else {
867 				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
868 				    &cmd, &attr);
869 				if (error) {
870 					device_printf(dev, "%s: failed to get "
871 					    "DPMAC attributes: id=%d, "
872 					    "error=%d\n", __func__, dinfo->id,
873 					    error);
874 				} else {
875 					link_type = attr.link_type;
876 				}
877 			}
878 			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
879 
880 			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
881 				device_printf(dev, "connected DPMAC is in FIXED "
882 				    "mode\n");
883 				dpaa2_ni_setup_fixed_link(sc);
884 			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
885 				device_printf(dev, "connected DPMAC is in PHY "
886 				    "mode\n");
887 				error = DPAA2_MC_GET_PHY_DEV(dev,
888 				    &sc->mac.phy_dev, sc->mac.dpmac_id);
889 				if (error == 0) {
890 					error = MEMAC_MDIO_SET_NI_DEV(
891 					    sc->mac.phy_dev, dev);
892 					if (error != 0) {
893 						device_printf(dev, "%s: failed "
894 						    "to set dpni dev on memac "
895 						    "mdio dev %s: error=%d\n",
896 						    __func__,
897 						    device_get_nameunit(
898 						    sc->mac.phy_dev), error);
899 					}
900 				}
901 				if (error == 0) {
902 					error = MEMAC_MDIO_GET_PHY_LOC(
903 					    sc->mac.phy_dev, &sc->mac.phy_loc);
904 					if (error == ENODEV) {
905 						error = 0;
906 					}
907 					if (error != 0) {
908 						device_printf(dev, "%s: failed "
909 						    "to get phy location from "
910 						    "memac mdio dev %s: error=%d\n",
911 						    __func__, device_get_nameunit(
912 						    sc->mac.phy_dev), error);
913 					}
914 				}
915 				if (error == 0) {
916 					error = mii_attach(sc->mac.phy_dev,
917 					    &sc->miibus, sc->ifp,
918 					    dpaa2_ni_media_change,
919 					    dpaa2_ni_media_status,
920 					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
921 					    MII_OFFSET_ANY, 0);
922 					if (error != 0) {
923 						device_printf(dev, "%s: failed "
924 						    "to attach to miibus: "
925 						    "error=%d\n",
926 						    __func__, error);
927 					}
928 				}
929 				if (error == 0) {
930 					sc->mii = device_get_softc(sc->miibus);
931 				}
932 			} else {
933 				device_printf(dev, "%s: DPMAC link type is not "
934 				    "supported\n", __func__);
935 			}
936 		} else if (ep2_desc.type == DPAA2_DEV_NI ||
937 			   ep2_desc.type == DPAA2_DEV_MUX ||
938 			   ep2_desc.type == DPAA2_DEV_SW) {
939 			dpaa2_ni_setup_fixed_link(sc);
940 		}
941 	}
942 
943 	/* Select mode to enqueue frames. */
944 	/* ... TBD ... */
945 
946 	/*
947 	 * Update link configuration to enable Rx/Tx pause frames support.
948 	 *
949 	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
950 	 *       in link configuration. It might be necessary to attach miibus
951 	 *       and PHY before this point.
952 	 */
953 	error = dpaa2_ni_set_pause_frame(dev);
954 	if (error) {
955 		device_printf(dev, "%s: failed to configure Rx/Tx pause "
956 		    "frames\n", __func__);
957 		goto close_ni;
958 	}
959 
960 	/* Configure ingress traffic classification. */
961 	error = dpaa2_ni_set_qos_table(dev);
962 	if (error) {
963 		device_printf(dev, "%s: failed to configure QoS table: "
964 		    "error=%d\n", __func__, error);
965 		goto close_ni;
966 	}
967 
968 	/* Add broadcast physical address to the MAC filtering table. */
969 	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
970 	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
971 	    ni_token), eth_bca);
972 	if (error) {
973 		device_printf(dev, "%s: failed to add broadcast physical "
974 		    "address to the MAC filtering table\n", __func__);
975 		goto close_ni;
976 	}
977 
978 	/* Set the maximum allowed length for received frames. */
979 	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
980 	if (error) {
981 		device_printf(dev, "%s: failed to set maximum length for "
982 		    "received frames\n", __func__);
983 		goto close_ni;
984 	}
985 
986 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
987 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
988 	return (0);
989 
990 close_ni:
991 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
992 close_rc:
993 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
994 err_exit:
995 	return (error);
996 }
997 
998 /**
 * @brief Configure QBMan channels and register data availability notifications.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t io_dev, con_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_channel *channel;
	struct dpaa2_con_softc *consc;
	struct dpaa2_con_notif_cfg notif_cfg;
	struct dpaa2_devinfo *rc_info = device_get_ivars(pdev);
	struct dpaa2_devinfo *io_info;
	struct dpaa2_devinfo *con_info;
	struct dpaa2_io_notif_ctx *ctx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	struct sysctl_ctx_list *sysctl_ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *parent;
	uint32_t i, num_chan;
	uint16_t rc_token, con_token;
	int error;

	/* Calculate the number of channels based on the allocated resources. */
	for (i = 0; i < IO_RES_NUM; i++) {
		if (!sc->res[IO_RID(i)]) {
			break;
		}
	}
	num_chan = i;
	for (i = 0; i < CON_RES_NUM; i++) {
		if (!sc->res[CON_RID(i)]) {
			break;
		}
	}
	num_chan = i < num_chan ? i : num_chan;
	sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS
	    ? DPAA2_NI_MAX_CHANNELS : num_chan;
	sc->chan_n = sc->chan_n > sc->attr.num.queues
	    ? sc->attr.num.queues : sc->chan_n;

	device_printf(dev, "channels=%d\n", sc->chan_n);

	sysctl_ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
	parent = SYSCTL_CHILDREN(node);

	/* Setup channels for the portal. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]);
		io_info = device_get_ivars(io_dev);

		con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]);
		consc = device_get_softc(con_dev);
		con_info = device_get_ivars(con_dev);

		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rc_info->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__,
			    rc_info->id, error);
			return (error);
		}
		error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, con_info->id,
		    &con_token);
		if (error) {
			device_printf(dev, "%s: failed to open DPCON: id=%d, "
			    "error=%d\n", __func__, con_info->id, error);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
		if (error) {
			device_printf(dev, "%s: failed to enable channel: "
			    "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id,
			    consc->attr.chan_id);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI,
		    M_WAITOK | M_ZERO);
		if (!channel) {
			device_printf(dev, "%s: failed to allocate a channel\n",
			    __func__);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (ENOMEM);
		}

		sc->channels[i] = channel;

		channel->id = consc->attr.chan_id;
		channel->flowid = i;
		channel->ni_dev = dev;
		channel->io_dev = io_dev;
		channel->con_dev = con_dev;
		channel->recycled_n = 0;
		channel->tx_frames = 0; /* for debug purposes */
		channel->tx_dropped = 0; /* for debug purposes */
		channel->rxq_n = 0;

		buf = &channel->store;
		buf->type = DPAA2_BUF_STORE;
		buf->store.dmat = NULL;
		buf->store.dmap = NULL;
		buf->store.paddr = 0;
		buf->store.vaddr = NULL;

		/* Setup WQ channel notification context. */
		ctx = &channel->ctx;
		ctx->qman_ctx = (uint64_t) ctx;
		ctx->cdan_en = true;
		ctx->fq_chan_id = channel->id;
		ctx->io_dev = channel->io_dev;
		ctx->channel = channel;
		ctx->poll = dpaa2_ni_poll;

		/* Register the new notification context. */
		error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx);
		if (error) {
			device_printf(dev, "%s: failed to register notification "
			    "context\n", __func__);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		/* Register DPCON notification with Management Complex. */
		notif_cfg.dpio_id = io_info->id;
		notif_cfg.prior = 0;
		notif_cfg.qman_ctx = ctx->qman_ctx;
		error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
		if (error) {
			device_printf(dev, "%s: failed to set DPCON "
			    "notification: dpcon_id=%d, chan_id=%d\n", __func__,
			    con_info->id, consc->attr.chan_id);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		/* Allocate an initial # of Rx buffers and the channel storage. */
		error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT);
		if (error) {
			device_printf(dev, "%s: failed to seed buffer pool\n",
			    __func__);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}
		error = dpaa2_ni_seed_chan_storage(sc, channel);
		if (error) {
			device_printf(dev, "%s: failed to seed channel "
			    "storage\n", __func__);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		/* Prepare queues for this channel. */
		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF);
		if (error) {
			device_printf(dev, "%s: failed to prepare TxConf "
			    "queue: error=%d\n", __func__, error);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}
		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX);
		if (error) {
			device_printf(dev, "%s: failed to prepare Rx queue: "
			    "error=%d\n", __func__, error);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		if (bootverbose) {
			device_printf(dev, "channel: dpio_id=%d "
			    "dpcon_id=%d chan_id=%d, priorities=%d\n",
			    io_info->id, con_info->id, channel->id,
			    consc->attr.prior_num);
		}

		(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}

	/* There is exactly one Rx error queue per DPNI. */
	error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief Performs an initial configuration of the frame queues.
 */
static int
dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan,
    enum dpaa2_ni_queue_type queue_type)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_fq *fq;

	switch (queue_type) {
	case DPAA2_NI_QUEUE_TX_CONF:
		/* One queue per channel. */
		fq = &chan->txc_queue;

		fq->consume = dpaa2_ni_tx_conf;
		fq->chan = chan;
		fq->flowid = chan->flowid;
		fq->tc = 0; /* ignored */
		fq->type = queue_type;

		break;
	case DPAA2_NI_QUEUE_RX:
		KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS,
		    ("too many Rx traffic classes: rx_tcs=%d\n",
		    sc->attr.num.rx_tcs));

		/* One queue per Rx traffic class within a channel. */
		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
			fq = &chan->rx_queues[i];

			fq->consume = dpaa2_ni_rx;
			fq->chan = chan;
			fq->flowid = chan->flowid;
			fq->tc = (uint8_t) i;
			fq->type = queue_type;

			chan->rxq_n++;
		}
		break;
	case DPAA2_NI_QUEUE_RX_ERR:
		/* One queue per network interface. */
		fq = &sc->rxe_queue;

		fq->consume = dpaa2_ni_rx_err;
		fq->chan = chan;
		fq->flowid = 0; /* ignored */
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	default:
		device_printf(dev, "%s: unexpected frame queue type: %d\n",
		    __func__, queue_type);
		return (EINVAL);
	}

	return (0);
}

/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_ni_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure the buffer pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/* Setup ingress traffic distribution. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
 *	 option hasn't been set for the DPNI and the number of DPNI queues is
 *	 greater than 1.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the default
	 * hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}

static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
1483 		    fq->fqid, (uint64_t) fq);
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	uint32_t tx_rings_n = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			goto close_ni;
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate Tx ring buffer. */
		tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF,
		    M_NOWAIT, &tx->lock);
		if (tx->idx_br == NULL) {
1572 			device_printf(dev, "%s: failed to setup Tx ring buffer"
1573 			    " (2) fqid=%d\n", __func__, tx->fqid);
			goto close_ni;
		}

		/* Configure Tx buffers. */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = &tx->buf[j];
			buf->type = DPAA2_BUF_TX;
			buf->tx.dmat = buf->tx.sgt_dmat = NULL;
			buf->tx.dmap = buf->tx.sgt_dmap = NULL;
			buf->tx.paddr = buf->tx.sgt_paddr = 0;
			buf->tx.vaddr = buf->tx.sgt_vaddr = NULL;
			buf->tx.m = NULL;
			buf->tx.idx = j;

			error = dpaa2_ni_seed_txbuf(sc, buf);

			/* Add the index of the free Tx buffer to the ring. */
			buf_ring_enqueue(tx->idx_br, (void *) j);
		}

		tx_rings_n++;
	}

	/* All Tx queues which belong to the same flowid have the same qdbin. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1705 err_exit:
1706 	return (error);
1707 }
1708 
1709 /**
1710  * @brief Configure DPNI object to generate interrupts.
1711  */
1712 static int
1713 dpaa2_ni_setup_irqs(device_t dev)
1714 {
1715 	device_t pdev = device_get_parent(dev);
1716 	device_t child = dev;
1717 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1718 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1719 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1720 	struct dpaa2_cmd cmd;
1721 	uint16_t rc_token, ni_token;
1722 	int error;
1723 
1724 	DPAA2_CMD_INIT(&cmd);
1725 
1726 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1727 	if (error) {
1728 		device_printf(dev, "%s: failed to open resource container: "
1729 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1730 		goto err_exit;
1731 	}
1732 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1733 	if (error) {
1734 		device_printf(dev, "%s: failed to open network interface: "
1735 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1736 		goto close_rc;
1737 	}
1738 
1739 	/* Configure IRQs. */
1740 	error = dpaa2_ni_setup_msi(sc);
1741 	if (error) {
1742 		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
1743 		goto close_ni;
1744 	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		error = ENXIO;
		goto close_ni;
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to set up IRQ resource\n",
		    __func__);
		error = ENXIO;
		goto close_ni;
	}
1757 
1758 	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
1759 	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
1760 	if (error) {
1761 		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
1762 		    __func__);
1763 		goto close_ni;
1764 	}
1765 
1766 	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
1767 	    true);
1768 	if (error) {
1769 		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
1770 		goto close_ni;
1771 	}
1772 
1773 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1774 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1775 	return (0);
1776 
1777 close_ni:
1778 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1779 close_rc:
1780 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1781 err_exit:
1782 	return (error);
1783 }
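
/*
 * A sketch of the matching teardown (illustrative only; the detach path is
 * outside this excerpt). Resources set up in dpaa2_ni_setup_irqs() would be
 * released with the usual newbus/PCI calls:
 *
 *	if (sc->intr != NULL)
 *		(void)bus_teardown_intr(dev, sc->irq_res, sc->intr);
 *	if (sc->irq_res != NULL)
 *		(void)bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[0],
 *		    sc->irq_res);
 *	(void)pci_release_msi(dev);
 */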
1784 
1785 /**
1786  * @brief Allocate MSI interrupts for DPNI.
1787  */
1788 static int
1789 dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
1790 {
1791 	int val;
1792 
1793 	val = pci_msi_count(sc->dev);
1794 	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
1797 	val = MIN(val, DPAA2_NI_MSI_COUNT);
1798 
1799 	if (pci_alloc_msi(sc->dev, &val) != 0)
1800 		return (EINVAL);
1801 
1802 	for (int i = 0; i < val; i++)
1803 		sc->irq_rid[i] = i + 1;
1804 
1805 	return (0);
1806 }
1807 
1808 /**
1809  * @brief Update DPNI according to the updated interface capabilities.
1810  */
1811 static int
1812 dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
1813 {
1814 	const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM;
1815 	const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM;
1816 	device_t pdev = device_get_parent(sc->dev);
1817 	device_t dev = sc->dev;
1818 	device_t child = dev;
1819 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1820 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1821 	struct dpaa2_cmd cmd;
1822 	uint16_t rc_token, ni_token;
1823 	int error;
1824 
1825 	DPAA2_CMD_INIT(&cmd);
1826 
1827 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1828 	if (error) {
1829 		device_printf(dev, "%s: failed to open resource container: "
1830 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1831 		goto err_exit;
1832 	}
1833 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1834 	if (error) {
1835 		device_printf(dev, "%s: failed to open network interface: "
1836 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1837 		goto close_rc;
1838 	}
1839 
	/* Set up checksum validation. */
1841 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1842 	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
1843 	if (error) {
1844 		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
1845 		    __func__, en_rxcsum ? "enable" : "disable");
1846 		goto close_ni;
1847 	}
1848 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1849 	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
1850 	if (error) {
1851 		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
1852 		    __func__, en_rxcsum ? "enable" : "disable");
1853 		goto close_ni;
1854 	}
1855 
	/* Set up checksum generation. */
1857 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1858 	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
1859 	if (error) {
1860 		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
1861 		    __func__, en_txcsum ? "enable" : "disable");
1862 		goto close_ni;
1863 	}
1864 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1865 	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
1866 	if (error) {
1867 		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
1868 		    __func__, en_txcsum ? "enable" : "disable");
1869 		goto close_ni;
1870 	}
1871 
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1873 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1874 	return (0);
1875 
1876 close_ni:
1877 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1878 close_rc:
1879 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1880 err_exit:
1881 	return (error);
1882 }
1883 
1884 /**
1885  * @brief Update DPNI according to the updated interface flags.
1886  */
1887 static int
1888 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1889 {
1890 	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
1891 	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
1892 	device_t pdev = device_get_parent(sc->dev);
1893 	device_t dev = sc->dev;
1894 	device_t child = dev;
1895 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1896 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1897 	struct dpaa2_cmd cmd;
1898 	uint16_t rc_token, ni_token;
1899 	int error;
1900 
1901 	DPAA2_CMD_INIT(&cmd);
1902 
1903 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1904 	if (error) {
1905 		device_printf(dev, "%s: failed to open resource container: "
1906 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1907 		goto err_exit;
1908 	}
1909 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1910 	if (error) {
1911 		device_printf(dev, "%s: failed to open network interface: "
1912 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1913 		goto close_rc;
1914 	}
1915 
	/* Promiscuous mode implies multicast promiscuous mode as well. */
	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
	    en_promisc || en_allmulti);
	if (error) {
		device_printf(dev, "%s: failed to %s multicast promiscuous "
		    "mode\n", __func__,
		    (en_promisc || en_allmulti) ? "enable" : "disable");
		goto close_ni;
	}
1923 
1924 	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
1925 	if (error) {
1926 		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1927 		    __func__, en_promisc ? "enable" : "disable");
1928 		goto close_ni;
1929 	}
1930 
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1932 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1933 	return (0);
1934 
1935 close_ni:
1936 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1937 close_rc:
1938 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1939 err_exit:
1940 	return (error);
1941 }
1942 
1943 static int
1944 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1945 {
1946 	struct sysctl_ctx_list *ctx;
1947 	struct sysctl_oid *node, *node2;
1948 	struct sysctl_oid_list *parent, *parent2;
	char cbuf[128];
1951 
1952 	ctx = device_get_sysctl_ctx(sc->dev);
1953 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1954 
1955 	/* Add DPNI statistics. */
1956 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1957 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1958 	parent = SYSCTL_CHILDREN(node);
	for (int i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1960 		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1961 		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1962 		    "IU", dpni_stat_sysctls[i].desc);
1963 	}
1964 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1965 	    CTLFLAG_RD, &sc->rx_anomaly_frames,
1966 	    "Rx frames in the buffers outside of the buffer pools");
1967 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1968 	    CTLFLAG_RD, &sc->rx_single_buf_frames,
1969 	    "Rx frames in single buffers");
1970 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1971 	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
1972 	    "Rx frames in scatter/gather list");
1973 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1974 	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
1975 	    "Enqueue rejected by QMan");
1976 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1977 	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1978 	    "QMan IEOI error");
1979 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1980 	    CTLFLAG_RD, &sc->tx_single_buf_frames,
1981 	    "Tx single buffer frames");
1982 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1983 	    CTLFLAG_RD, &sc->tx_sg_frames,
1984 	    "Tx S/G frames");
1985 
1986 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1987 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1988 	    "IU", "number of Rx buffers in the buffer pool");
1989 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1990 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1991 	    "IU", "number of free Rx buffers in the buffer pool");
1992 
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1994 
1995 	/* Add channels statistics. */
1996 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1997 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1998 	parent = SYSCTL_CHILDREN(node);
1999 	for (int i = 0; i < sc->chan_n; i++) {
2000 		snprintf(cbuf, sizeof(cbuf), "%d", i);
2001 
2002 		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
2003 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
2004 		parent2 = SYSCTL_CHILDREN(node2);
2005 
2006 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
2007 		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
2008 		    "Tx frames counter");
2009 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
2010 		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
2011 		    "Tx dropped counter");
2012 	}
2013 
2014 	return (0);
2015 }
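
/*
 * Example (node names are hypothetical): the OIDs registered above live under
 * the device's sysctl tree and can be inspected with sysctl(8), e.g. for the
 * first DPNI instance:
 *
 *	# sysctl dev.dpaa2_ni.0.stats
 *	# sysctl dev.dpaa2_ni.0.buf_free
 *	# sysctl dev.dpaa2_ni.0.channels.0.tx_frames
 *
 * The exact prefix depends on the device name assigned by the bus.
 */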
2016 
2017 static int
2018 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
2019 {
2020 	device_t dev = sc->dev;
2021 	int error;
2022 
2023 	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
2024 	    ("unexpected buffer alignment: %d\n", sc->buf_align));
2025 
2026 	/* DMA tag to allocate buffers for Rx buffer pool. */
2027 	error = bus_dma_tag_create(
2028 	    bus_get_dma_tag(dev),
2029 	    sc->buf_align, 0,		/* alignment, boundary */
2030 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2031 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2032 	    NULL, NULL,			/* filter, filterarg */
2033 	    BUF_SIZE, 1,		/* maxsize, nsegments */
2034 	    BUF_SIZE, 0,		/* maxsegsize, flags */
2035 	    NULL, NULL,			/* lockfunc, lockarg */
2036 	    &sc->bp_dmat);
2037 	if (error) {
2038 		device_printf(dev, "%s: failed to create DMA tag for buffer "
2039 		    "pool\n", __func__);
2040 		return (error);
2041 	}
2042 
2043 	/* DMA tag to map Tx mbufs. */
2044 	error = bus_dma_tag_create(
2045 	    bus_get_dma_tag(dev),
2046 	    sc->buf_align, 0,		/* alignment, boundary */
2047 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2048 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2049 	    NULL, NULL,			/* filter, filterarg */
2050 	    DPAA2_TX_SEGS_MAXSZ,	/* maxsize */
2051 	    DPAA2_TX_SEGLIMIT,		/* nsegments */
2052 	    DPAA2_TX_SEG_SZ, 0,		/* maxsegsize, flags */
2053 	    NULL, NULL,			/* lockfunc, lockarg */
2054 	    &sc->tx_dmat);
2055 	if (error) {
2056 		device_printf(dev, "%s: failed to create DMA tag for Tx "
2057 		    "buffers\n", __func__);
2058 		return (error);
2059 	}
2060 
2061 	/* DMA tag to allocate channel storage. */
2062 	error = bus_dma_tag_create(
2063 	    bus_get_dma_tag(dev),
2064 	    ETH_STORE_ALIGN, 0,		/* alignment, boundary */
2065 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2066 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2067 	    NULL, NULL,			/* filter, filterarg */
2068 	    ETH_STORE_SIZE, 1,		/* maxsize, nsegments */
2069 	    ETH_STORE_SIZE, 0,		/* maxsegsize, flags */
2070 	    NULL, NULL,			/* lockfunc, lockarg */
2071 	    &sc->st_dmat);
2072 	if (error) {
2073 		device_printf(dev, "%s: failed to create DMA tag for channel "
2074 		    "storage\n", __func__);
2075 		return (error);
2076 	}
2077 
2078 	/* DMA tag for Rx distribution key. */
2079 	error = bus_dma_tag_create(
2080 	    bus_get_dma_tag(dev),
2081 	    PAGE_SIZE, 0,		/* alignment, boundary */
2082 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2083 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2084 	    NULL, NULL,			/* filter, filterarg */
2085 	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
2086 	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
2087 	    NULL, NULL,			/* lockfunc, lockarg */
2088 	    &sc->rxd_dmat);
2089 	if (error) {
2090 		device_printf(dev, "%s: failed to create DMA tag for Rx "
2091 		    "distribution key\n", __func__);
2092 		return (error);
2093 	}
2094 
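	/* DMA tag for the QoS key configuration buffer. */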
2095 	error = bus_dma_tag_create(
2096 	    bus_get_dma_tag(dev),
2097 	    PAGE_SIZE, 0,		/* alignment, boundary */
2098 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2099 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2100 	    NULL, NULL,			/* filter, filterarg */
2101 	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
2102 	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
2103 	    NULL, NULL,			/* lockfunc, lockarg */
2104 	    &sc->qos_dmat);
2105 	if (error) {
2106 		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
2107 		    __func__);
2108 		return (error);
2109 	}
2110 
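	/* DMA tag for Tx scatter/gather tables. */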
2111 	error = bus_dma_tag_create(
2112 	    bus_get_dma_tag(dev),
2113 	    PAGE_SIZE, 0,		/* alignment, boundary */
2114 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2115 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2116 	    NULL, NULL,			/* filter, filterarg */
2117 	    DPAA2_TX_SGT_SZ, 1,		/* maxsize, nsegments */
2118 	    DPAA2_TX_SGT_SZ, 0,		/* maxsegsize, flags */
2119 	    NULL, NULL,			/* lockfunc, lockarg */
2120 	    &sc->sgt_dmat);
2121 	if (error) {
2122 		device_printf(dev, "%s: failed to create DMA tag for S/G "
2123 		    "tables\n", __func__);
2124 		return (error);
2125 	}
2126 
2127 	return (0);
2128 }
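
/*
 * A minimal sketch of how one of the tags above is consumed (see
 * dpaa2_ni_set_qos_table() below for an in-tree caller); variable names are
 * illustrative:
 *
 *	void *vaddr;
 *	bus_addr_t paddr = 0;
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamem_alloc(sc->qos_dmat, &vaddr,
 *	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &map) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamap_load(sc->qos_dmat, map, vaddr, ETH_QOS_KCFG_BUF_SIZE,
 *	    dpaa2_ni_dmamap_cb, &paddr, BUS_DMA_NOWAIT) != 0) {
 *		bus_dmamem_free(sc->qos_dmat, vaddr, map);
 *		return (ENXIO);
 *	}
 */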
2129 
2130 /**
2131  * @brief Configure buffer layouts of the different DPNI queues.
2132  */
2133 static int
2134 dpaa2_ni_set_buf_layout(device_t dev)
2135 {
2136 	device_t pdev = device_get_parent(dev);
2137 	device_t child = dev;
2138 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2139 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2140 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2141 	struct dpaa2_ni_buf_layout buf_layout = {0};
2142 	struct dpaa2_cmd cmd;
2143 	uint16_t rc_token, ni_token;
2144 	int error;
2145 
2146 	DPAA2_CMD_INIT(&cmd);
2147 
2148 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2149 	if (error) {
2150 		device_printf(dev, "%s: failed to open resource container: "
2151 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2152 		goto err_exit;
2153 	}
2154 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2155 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2158 		goto close_rc;
2159 	}
2160 
2161 	/*
2162 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
2163 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
2164 	 * on the WRIOP version.
2165 	 */
2166 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
2167 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
2168 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
2169 
	/* Round the buffer size down to the selected alignment. */
	sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align);
2175 
2176 	if (bootverbose) {
2177 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
2178 		    sc->buf_sz, sc->buf_align);
2179 	}
2180 
2181 	/*
2182 	 *    Frame Descriptor       Tx buffer layout
2183 	 *
2184 	 *                ADDR -> |---------------------|
2185 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
2186 	 *                        |---------------------|
2187 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
2188 	 *                        |---------------------|
2189 	 *                        |    DATA HEADROOM    |
2190 	 *       ADDR + OFFSET -> |---------------------|
2191 	 *                        |                     |
2192 	 *                        |                     |
2193 	 *                        |     FRAME DATA      |
2194 	 *                        |                     |
2195 	 *                        |                     |
2196 	 *                        |---------------------|
2197 	 *                        |    DATA TAILROOM    |
2198 	 *                        |---------------------|
2199 	 *
2200 	 * NOTE: It's for a single buffer frame only.
2201 	 */
2202 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
2203 	buf_layout.pd_size = BUF_SWA_SIZE;
2204 	buf_layout.pass_timestamp = true;
2205 	buf_layout.pass_frame_status = true;
2206 	buf_layout.options =
2207 	    BUF_LOPT_PRIV_DATA_SZ |
2208 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
2209 	    BUF_LOPT_FRAME_STATUS;
2210 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2211 	if (error) {
2212 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
2213 		    __func__);
2214 		goto close_ni;
2215 	}
2216 
2217 	/* Tx-confirmation buffer layout */
2218 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
2219 	buf_layout.options =
2220 	    BUF_LOPT_TIMESTAMP |
2221 	    BUF_LOPT_FRAME_STATUS;
2222 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2223 	if (error) {
2224 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
2225 		    __func__);
2226 		goto close_ni;
2227 	}
2228 
2229 	/*
2230 	 * Driver should reserve the amount of space indicated by this command
2231 	 * as headroom in all Tx frames.
2232 	 */
2233 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
2234 	if (error) {
2235 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
2236 		    __func__);
2237 		goto close_ni;
2238 	}
2239 
2240 	if (bootverbose) {
2241 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
2242 	}
2243 	if ((sc->tx_data_off % 64) != 0) {
		device_printf(dev, "Tx data offset (%d) is not a multiple "
		    "of 64 bytes\n", sc->tx_data_off);
2246 	}
2247 
2248 	/*
2249 	 *    Frame Descriptor       Rx buffer layout
2250 	 *
2251 	 *                ADDR -> |---------------------|
2252 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
2253 	 *                        |---------------------|
2254 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
2255 	 *                        |---------------------|
2256 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
2257 	 *       ADDR + OFFSET -> |---------------------|
2258 	 *                        |                     |
2259 	 *                        |                     |
2260 	 *                        |     FRAME DATA      |
2261 	 *                        |                     |
2262 	 *                        |                     |
2263 	 *                        |---------------------|
2264 	 *                        |    DATA TAILROOM    | 0 bytes
2265 	 *                        |---------------------|
2266 	 *
2267 	 * NOTE: It's for a single buffer frame only.
2268 	 */
2269 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
2270 	buf_layout.pd_size = BUF_SWA_SIZE;
2271 	buf_layout.fd_align = sc->buf_align;
2272 	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
2273 	buf_layout.tail_size = 0;
2274 	buf_layout.pass_frame_status = true;
2275 	buf_layout.pass_parser_result = true;
2276 	buf_layout.pass_timestamp = true;
2277 	buf_layout.options =
2278 	    BUF_LOPT_PRIV_DATA_SZ |
2279 	    BUF_LOPT_DATA_ALIGN |
2280 	    BUF_LOPT_DATA_HEAD_ROOM |
2281 	    BUF_LOPT_DATA_TAIL_ROOM |
2282 	    BUF_LOPT_FRAME_STATUS |
2283 	    BUF_LOPT_PARSER_RESULT |
2284 	    BUF_LOPT_TIMESTAMP;
2285 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2286 	if (error) {
2287 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
2288 		    __func__);
2289 		goto close_ni;
2290 	}
2291 
2292 	error = 0;
2293 close_ni:
2294 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2295 close_rc:
2296 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2297 err_exit:
2298 	return (error);
2299 }
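
/*
 * Worked example for the Rx headroom computed above (numbers are illustrative,
 * not taken from real hardware): with tx_data_off = 192, BUF_SWA_SIZE = 64 and
 * BUF_RX_HWA_SIZE = 64,
 *
 *	head_size = tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE
 *	          = 192 - 64 - 64 = 64 bytes of data headroom,
 *
 * so frame data starts at ADDR + 192, matching the Rx layout diagram.
 */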
2300 
2301 /**
2302  * @brief Enable Rx/Tx pause frames.
2303  *
 * NOTE: With Rx pause enabled, the DPNI stops transmitting when it receives
 *       a pause frame; with Tx pause enabled, the DPNI itself generates
 *       pause frames when congested.
2306  */
2307 static int
2308 dpaa2_ni_set_pause_frame(device_t dev)
2309 {
2310 	device_t pdev = device_get_parent(dev);
2311 	device_t child = dev;
2312 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2313 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2314 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2315 	struct dpaa2_ni_link_cfg link_cfg = {0};
2316 	struct dpaa2_cmd cmd;
2317 	uint16_t rc_token, ni_token;
2318 	int error;
2319 
2320 	DPAA2_CMD_INIT(&cmd);
2321 
2322 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2323 	if (error) {
2324 		device_printf(dev, "%s: failed to open resource container: "
2325 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2326 		goto err_exit;
2327 	}
2328 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2329 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2332 		goto close_rc;
2333 	}
2334 
2335 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2336 	if (error) {
2337 		device_printf(dev, "%s: failed to obtain link configuration: "
2338 		    "error=%d\n", __func__, error);
2339 		goto close_ni;
2340 	}
2341 
2342 	/* Enable both Rx and Tx pause frames by default. */
2343 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2344 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2345 
2346 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2347 	if (error) {
2348 		device_printf(dev, "%s: failed to set link configuration: "
2349 		    "error=%d\n", __func__, error);
2350 		goto close_ni;
2351 	}
2352 
2353 	sc->link_options = link_cfg.options;
2354 	error = 0;
2355 close_ni:
2356 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2357 close_rc:
2358 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2359 err_exit:
2360 	return (error);
2361 }
2362 
2363 /**
2364  * @brief Configure QoS table to determine the traffic class for the received
2365  * frame.
2366  */
2367 static int
2368 dpaa2_ni_set_qos_table(device_t dev)
2369 {
2370 	device_t pdev = device_get_parent(dev);
2371 	device_t child = dev;
2372 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2373 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2374 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2375 	struct dpaa2_ni_qos_table tbl;
2376 	struct dpaa2_buf *buf = &sc->qos_kcfg;
2377 	struct dpaa2_cmd cmd;
2378 	uint16_t rc_token, ni_token;
2379 	int error;
2380 
2381 	if (sc->attr.num.rx_tcs == 1 ||
2382 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2383 		if (bootverbose) {
2384 			device_printf(dev, "Ingress traffic classification is "
2385 			    "not supported\n");
2386 		}
2387 		return (0);
2388 	}
2389 
2390 	/*
2391 	 * Allocate a buffer visible to the device to hold the QoS table key
2392 	 * configuration.
2393 	 */
2394 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
2395 	    __func__));
2396 	if (__predict_true(buf->store.dmat == NULL)) {
2397 		buf->store.dmat = sc->qos_dmat;
2398 	}
2399 
2400 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
2401 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
2402 	if (error) {
2403 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2404 		    "configuration\n", __func__);
2405 		goto err_exit;
2406 	}
2407 
2408 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
2409 	    buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb,
2410 	    &buf->store.paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "%s: failed to map QoS key configuration "
		    "buffer into bus space\n", __func__);
		bus_dmamem_free(buf->store.dmat, buf->store.vaddr,
		    buf->store.dmap);
		goto err_exit;
	}
2416 
2417 	DPAA2_CMD_INIT(&cmd);
2418 
2419 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2420 	if (error) {
2421 		device_printf(dev, "%s: failed to open resource container: "
2422 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2423 		goto err_exit;
2424 	}
2425 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2426 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2429 		goto close_rc;
2430 	}
2431 
2432 	tbl.default_tc = 0;
2433 	tbl.discard_on_miss = false;
2434 	tbl.keep_entries = false;
2435 	tbl.kcfg_busaddr = buf->store.paddr;
2436 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2437 	if (error) {
2438 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
2439 		goto close_ni;
2440 	}
2441 
2442 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2443 	if (error) {
2444 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2445 		goto close_ni;
2446 	}
2447 
2448 	error = 0;
2449 close_ni:
2450 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2451 close_rc:
2452 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2453 err_exit:
2454 	return (error);
2455 }
2456 
2457 static int
2458 dpaa2_ni_set_mac_addr(device_t dev)
2459 {
2460 	device_t pdev = device_get_parent(dev);
2461 	device_t child = dev;
2462 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2463 	if_t ifp = sc->ifp;
2464 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2465 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2466 	struct dpaa2_cmd cmd;
2467 	struct ether_addr rnd_mac_addr;
2468 	uint16_t rc_token, ni_token;
2469 	uint8_t mac_addr[ETHER_ADDR_LEN];
2470 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2471 	int error;
2472 
2473 	DPAA2_CMD_INIT(&cmd);
2474 
2475 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2476 	if (error) {
2477 		device_printf(dev, "%s: failed to open resource container: "
2478 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2479 		goto err_exit;
2480 	}
2481 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2482 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2485 		goto close_rc;
2486 	}
2487 
2488 	/*
2489 	 * Get the MAC address associated with the physical port, if the DPNI is
2490 	 * connected to a DPMAC directly associated with one of the physical
2491 	 * ports.
2492 	 */
2493 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2494 	if (error) {
2495 		device_printf(dev, "%s: failed to obtain the MAC address "
2496 		    "associated with the physical port\n", __func__);
2497 		goto close_ni;
2498 	}
2499 
2500 	/* Get primary MAC address from the DPNI attributes. */
2501 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2502 	if (error) {
2503 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
2504 		    __func__);
2505 		goto close_ni;
2506 	}
2507 
	if (!ETHER_IS_ZERO(mac_addr)) {
		/* Set MAC address of the physical port as DPNI's primary one. */
		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
		    mac_addr);
		if (error) {
			device_printf(dev, "%s: failed to set primary MAC "
			    "address\n", __func__);
			goto close_ni;
		}
		memcpy(sc->mac.addr, mac_addr, ETHER_ADDR_LEN);
	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
		/* Generate a random MAC address as DPNI's primary one. */
		ether_gen_addr(ifp, &rnd_mac_addr);
		memcpy(mac_addr, rnd_mac_addr.octet, ETHER_ADDR_LEN);

		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
		    mac_addr);
		if (error) {
			device_printf(dev, "%s: failed to set random primary "
			    "MAC address\n", __func__);
			goto close_ni;
		}
		memcpy(sc->mac.addr, mac_addr, ETHER_ADDR_LEN);
	} else {
		memcpy(sc->mac.addr, dpni_mac_addr, ETHER_ADDR_LEN);
	}
2542 
2543 	error = 0;
2544 close_ni:
2545 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2546 close_rc:
2547 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2548 err_exit:
2549 	return (error);
2550 }
2551 
2552 static void
2553 dpaa2_ni_miibus_statchg(device_t dev)
2554 {
2555 	device_t pdev = device_get_parent(dev);
2556 	device_t child = dev;
2557 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2558 	struct dpaa2_mac_link_state mac_link = { 0 };
2559 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2560 	struct dpaa2_cmd cmd;
2561 	uint16_t rc_token, mac_token;
2562 	int error, link_state;
2563 
2564 	if (sc->fixed_link || sc->mii == NULL) {
2565 		return;
2566 	}
2567 
2568 	/*
2569 	 * Note: ifp link state will only be changed AFTER we are called so we
2570 	 * cannot rely on ifp->if_linkstate here.
2571 	 */
2572 	if (sc->mii->mii_media_status & IFM_AVALID) {
2573 		if (sc->mii->mii_media_status & IFM_ACTIVE) {
2574 			link_state = LINK_STATE_UP;
2575 		} else {
2576 			link_state = LINK_STATE_DOWN;
2577 		}
2578 	} else {
2579 		link_state = LINK_STATE_UNKNOWN;
2580 	}
2581 
2582 	if (link_state != sc->link_state) {
2583 		sc->link_state = link_state;
2584 
2585 		DPAA2_CMD_INIT(&cmd);
2586 
2587 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2588 		    &rc_token);
2589 		if (error) {
2590 			device_printf(dev, "%s: failed to open resource "
2591 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
2592 			    error);
2593 			goto err_exit;
2594 		}
2595 		error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
2596 		    &mac_token);
2597 		if (error) {
2598 			device_printf(sc->dev, "%s: failed to open DPMAC: "
2599 			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2600 			    error);
2601 			goto close_rc;
2602 		}
2603 
2604 		if (link_state == LINK_STATE_UP ||
2605 		    link_state == LINK_STATE_DOWN) {
2606 			/* Update DPMAC link state. */
2607 			mac_link.supported = sc->mii->mii_media.ifm_media;
2608 			mac_link.advert = sc->mii->mii_media.ifm_media;
			/*
			 * TODO: Derive the rate from the media word (e.g.
			 * via ifmedia_baudrate()) instead of assuming 1G.
			 */
			mac_link.rate = 1000;
2610 			mac_link.options =
2611 			    DPAA2_MAC_LINK_OPT_AUTONEG |
2612 			    DPAA2_MAC_LINK_OPT_PAUSE;
			mac_link.up = (link_state == LINK_STATE_UP);
2614 			mac_link.state_valid = true;
2615 
2616 			/* Inform DPMAC about link state. */
2617 			error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
2618 			    &mac_link);
2619 			if (error) {
2620 				device_printf(sc->dev, "%s: failed to set DPMAC "
2621 				    "link state: id=%d, error=%d\n", __func__,
2622 				    sc->mac.dpmac_id, error);
2623 			}
2624 		}
2625 		(void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
2626 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2627 		    rc_token));
2628 	}
2629 
2630 	return;
2631 
2632 close_rc:
2633 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2634 err_exit:
2635 	return;
2636 }
2637 
2638 /**
2639  * @brief Callback function to process media change request.
2640  */
2641 static int
2642 dpaa2_ni_media_change(if_t ifp)
2643 {
2644 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2645 
2646 	DPNI_LOCK(sc);
2647 	if (sc->mii) {
2648 		mii_mediachg(sc->mii);
2649 		sc->media_status = sc->mii->mii_media.ifm_media;
2650 	} else if (sc->fixed_link) {
2651 		if_printf(ifp, "%s: can't change media in fixed mode\n",
2652 		    __func__);
2653 	}
2654 	DPNI_UNLOCK(sc);
2655 
2656 	return (0);
2657 }
2658 
2659 /**
2660  * @brief Callback function to process media status request.
2661  */
2662 static void
2663 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2664 {
2665 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2666 
2667 	DPNI_LOCK(sc);
2668 	if (sc->mii) {
2669 		mii_pollstat(sc->mii);
2670 		ifmr->ifm_active = sc->mii->mii_media_active;
2671 		ifmr->ifm_status = sc->mii->mii_media_status;
2672 	}
2673 	DPNI_UNLOCK(sc);
2674 }
2675 
2676 /**
2677  * @brief Callout function to check and update media status.
2678  */
2679 static void
2680 dpaa2_ni_media_tick(void *arg)
2681 {
2682 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2683 
2684 	/* Check for media type change */
2685 	if (sc->mii) {
2686 		mii_tick(sc->mii);
2687 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
			if_printf(sc->ifp, "%s: media type changed "
			    "(ifm_media=%x)\n", __func__,
			    sc->mii->mii_media.ifm_media);
2690 			dpaa2_ni_media_change(sc->ifp);
2691 		}
2692 	}
2693 
2694 	/* Schedule another timeout one second from now */
2695 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2696 }
2697 
2698 static void
2699 dpaa2_ni_init(void *arg)
2700 {
2701 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2702 	if_t ifp = sc->ifp;
2703 	device_t pdev = device_get_parent(sc->dev);
2704 	device_t dev = sc->dev;
2705 	device_t child = dev;
2706 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2707 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2708 	struct dpaa2_cmd cmd;
2709 	uint16_t rc_token, ni_token;
2710 	int error;
2711 
2712 	DPNI_LOCK(sc);
2713 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2714 		DPNI_UNLOCK(sc);
2715 		return;
2716 	}
2717 	DPNI_UNLOCK(sc);
2718 
2719 	DPAA2_CMD_INIT(&cmd);
2720 
2721 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2722 	if (error) {
2723 		device_printf(dev, "%s: failed to open resource container: "
2724 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2725 		goto err_exit;
2726 	}
2727 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2728 	if (error) {
2729 		device_printf(dev, "%s: failed to open network interface: "
2730 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2731 		goto close_rc;
2732 	}
2733 
2734 	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
2735 	if (error) {
2736 		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2737 		    __func__, error);
2738 	}
2739 
2740 	DPNI_LOCK(sc);
2741 	if (sc->mii) {
2742 		mii_mediachg(sc->mii);
2743 	}
2744 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2745 
2746 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2747 	DPNI_UNLOCK(sc);
2748 
	/* Force a link-state update to initialize things. */
2750 	dpaa2_ni_miibus_statchg(dev);
2751 
2752 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2753 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2754 	return;
2755 
2756 close_rc:
2757 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2758 err_exit:
2759 	return;
2760 }
2761 
2762 static int
2763 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2764 {
2765 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2766 	struct dpaa2_ni_channel	*chan;
2767 	struct dpaa2_ni_tx_ring *tx;
2768 	uint32_t fqid;
2769 	bool found = false;
2770 	int chan_n = 0;
2771 
	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		m_freem(m);
		return (ENETDOWN);
	}
2774 
2775 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2776 		fqid = m->m_pkthdr.flowid;
2777 		for (int i = 0; i < sc->chan_n; i++) {
2778 			chan = sc->channels[i];
2779 			for (int j = 0; j < chan->rxq_n; j++) {
2780 				if (fqid == chan->rx_queues[j].fqid) {
2781 					chan_n = chan->flowid;
2782 					found = true;
2783 					break;
2784 				}
2785 			}
2786 			if (found) {
2787 				break;
2788 			}
2789 		}
2790 	}
2791 	tx = DPAA2_TX_RING(sc, chan_n, 0);
2792 
2793 	TX_LOCK(tx);
2794 	dpaa2_ni_tx_locked(sc, tx, m);
2795 	TX_UNLOCK(tx);
2796 
2797 	return (0);
2798 }
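
/*
 * Channel selection example for dpaa2_ni_transmit() (hypothetical values):
 * dpaa2_ni_rx() stores the Rx FQID in m->m_pkthdr.flowid, so a frame whose
 * flowid matches, say, chan->rx_queues[1].fqid on the channel with flowid 2
 * is sent via DPAA2_TX_RING(sc, 2, 0); frames without a flow hash fall back
 * to channel 0.
 */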
2799 
2800 static void
2801 dpaa2_ni_qflush(if_t ifp)
2802 {
2803 	/* TODO: Find a way to drain Tx queues in QBMan. */
2804 	if_qflush(ifp);
2805 }
2806 
2807 static int
2808 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
2809 {
2810 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2811 	struct ifreq *ifr = (struct ifreq *) data;
2812 	device_t pdev = device_get_parent(sc->dev);
2813 	device_t dev = sc->dev;
2814 	device_t child = dev;
2815 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2816 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2817 	struct dpaa2_cmd cmd;
2818 	uint32_t changed = 0;
2819 	uint16_t rc_token, ni_token;
2820 	int mtu, error, rc = 0;
2821 
2822 	DPAA2_CMD_INIT(&cmd);
2823 
2824 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2825 	if (error) {
2826 		device_printf(dev, "%s: failed to open resource container: "
2827 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2828 		goto err_exit;
2829 	}
2830 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2831 	if (error) {
2832 		device_printf(dev, "%s: failed to open network interface: "
2833 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2834 		goto close_rc;
2835 	}
2836 
2837 	switch (c) {
2838 	case SIOCSIFMTU:
2839 		DPNI_LOCK(sc);
2840 		mtu = ifr->ifr_mtu;
2841 		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2842 			DPNI_UNLOCK(sc);
2843 			error = EINVAL;
2844 			goto close_ni;
2845 		}
2846 		if_setmtu(ifp, mtu);
2847 		DPNI_UNLOCK(sc);
2848 
2849 		/* Update maximum frame length. */
2850 		error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd,
2851 		    mtu + ETHER_HDR_LEN);
2852 		if (error) {
2853 			device_printf(dev, "%s: failed to update maximum frame "
2854 			    "length: error=%d\n", __func__, error);
2855 			goto close_ni;
2856 		}
2857 		break;
2858 	case SIOCSIFCAP:
2859 		changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2860 		if (changed & IFCAP_HWCSUM) {
2861 			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) {
2862 				if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
2863 			} else {
2864 				if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
2865 			}
2866 		}
2867 		rc = dpaa2_ni_setup_if_caps(sc);
2868 		if (rc) {
			device_printf(dev, "%s: failed to update interface "
			    "capabilities: error=%d\n", __func__, rc);
2871 			rc = ENXIO;
2872 		}
2873 		break;
2874 	case SIOCSIFFLAGS:
2875 		DPNI_LOCK(sc);
2876 		if (if_getflags(ifp) & IFF_UP) {
2877 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2878 				changed = if_getflags(ifp) ^ sc->if_flags;
2879 				if (changed & IFF_PROMISC ||
2880 				    changed & IFF_ALLMULTI) {
2881 					rc = dpaa2_ni_setup_if_flags(sc);
2882 				}
2883 			} else {
2884 				DPNI_UNLOCK(sc);
2885 				dpaa2_ni_init(sc);
2886 				DPNI_LOCK(sc);
2887 			}
2888 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2889 			/* FIXME: Disable DPNI. See dpaa2_ni_init(). */
2890 		}
2891 
2892 		sc->if_flags = if_getflags(ifp);
2893 		DPNI_UNLOCK(sc);
2894 		break;
2895 	case SIOCADDMULTI:
2896 	case SIOCDELMULTI:
2897 		DPNI_LOCK(sc);
2898 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2899 			DPNI_UNLOCK(sc);
2900 			rc = dpaa2_ni_update_mac_filters(ifp);
2901 			if (rc) {
2902 				device_printf(dev, "%s: failed to update MAC "
2903 				    "filters: error=%d\n", __func__, rc);
2904 			}
2905 			DPNI_LOCK(sc);
2906 		}
2907 		DPNI_UNLOCK(sc);
2908 		break;
2909 	case SIOCGIFMEDIA:
2910 	case SIOCSIFMEDIA:
		if (sc->mii) {
			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
		} else if (sc->fixed_link) {
			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
		}
2916 		break;
2917 	default:
2918 		rc = ether_ioctl(ifp, c, data);
2919 		break;
2920 	}
2921 
2922 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2923 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2924 	return (rc);
2925 
2926 close_ni:
2927 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2928 close_rc:
2929 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2930 err_exit:
2931 	return (error);
2932 }
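
/*
 * Example (illustrative, assuming the interface is named "dpni0"): the ioctl
 * paths above are exercised by ifconfig(8):
 *
 *	# ifconfig dpni0 mtu 9000	(SIOCSIFMTU, updates the MFL)
 *	# ifconfig dpni0 promisc	(SIOCSIFFLAGS, dpaa2_ni_setup_if_flags())
 *	# ifconfig dpni0 -rxcsum	(SIOCSIFCAP, dpaa2_ni_setup_if_caps())
 */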
2933 
2934 static int
2935 dpaa2_ni_update_mac_filters(if_t ifp)
2936 {
2937 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2938 	struct dpaa2_ni_mcaddr_ctx ctx;
2939 	device_t pdev = device_get_parent(sc->dev);
2940 	device_t dev = sc->dev;
2941 	device_t child = dev;
2942 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2943 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2944 	struct dpaa2_cmd cmd;
2945 	uint16_t rc_token, ni_token;
2946 	int error;
2947 
2948 	DPAA2_CMD_INIT(&cmd);
2949 
2950 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2951 	if (error) {
2952 		device_printf(dev, "%s: failed to open resource container: "
2953 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2954 		goto err_exit;
2955 	}
2956 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2957 	if (error) {
2958 		device_printf(dev, "%s: failed to open network interface: "
2959 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2960 		goto close_rc;
2961 	}
2962 
2963 	/* Remove all multicast MAC filters. */
2964 	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
2965 	if (error) {
2966 		device_printf(dev, "%s: failed to clear multicast MAC filters: "
2967 		    "error=%d\n", __func__, error);
2968 		goto close_ni;
2969 	}
2970 
2971 	ctx.ifp = ifp;
2972 	ctx.error = 0;
2973 	ctx.nent = 0;
2974 
2975 	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2976 
2977 	error = ctx.error;
2978 close_ni:
2979 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2980 close_rc:
2981 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2982 err_exit:
2983 	return (error);
2984 }
2985 
2986 static u_int
2987 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2988 {
2989 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2990 	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
2991 	device_t pdev = device_get_parent(sc->dev);
2992 	device_t dev = sc->dev;
2993 	device_t child = dev;
2994 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2995 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2996 	struct dpaa2_cmd cmd;
2997 	uint16_t rc_token, ni_token;
2998 	int error;
2999 
3000 	if (ctx->error != 0) {
3001 		return (0);
3002 	}
3003 
3004 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
3005 		DPAA2_CMD_INIT(&cmd);
3006 
3007 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
3008 		    &rc_token);
3009 		if (error) {
3010 			device_printf(dev, "%s: failed to open resource "
3011 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
3012 			    error);
3013 			return (0);
3014 		}
3015 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
3016 		    &ni_token);
3017 		if (error) {
3018 			device_printf(dev, "%s: failed to open network interface: "
3019 			    "id=%d, error=%d\n", __func__, dinfo->id, error);
3020 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3021 			    rc_token));
3022 			return (0);
3023 		}
3024 
3025 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
3026 		    LLADDR(sdl));
3027 
3028 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3029 		    ni_token));
3030 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3031 		    rc_token));
3032 
3033 		if (ctx->error != 0) {
			device_printf(dev, "%s: can't add more than %d MAC "
			    "addresses, switching to the multicast promiscuous "
			    "mode\n", __func__, ctx->nent);
3037 
3038 			/* Enable multicast promiscuous mode. */
3039 			DPNI_LOCK(sc);
3040 			if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
3041 			sc->if_flags |= IFF_ALLMULTI;
3042 			ctx->error = dpaa2_ni_setup_if_flags(sc);
3043 			DPNI_UNLOCK(sc);
3044 
3045 			return (0);
3046 		}
3047 		ctx->nent++;
3048 	}
3049 
3050 	return (1);
3051 }
3052 
3053 static void
3054 dpaa2_ni_intr(void *arg)
3055 {
3056 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
3057 	device_t pdev = device_get_parent(sc->dev);
3058 	device_t dev = sc->dev;
3059 	device_t child = dev;
3060 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3061 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3062 	struct dpaa2_cmd cmd;
3063 	uint32_t status = ~0u; /* clear all IRQ status bits */
3064 	uint16_t rc_token, ni_token;
3065 	int error;
3066 
3067 	DPAA2_CMD_INIT(&cmd);
3068 
3069 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3070 	if (error) {
3071 		device_printf(dev, "%s: failed to open resource container: "
3072 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
3073 		goto err_exit;
3074 	}
3075 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3076 	if (error) {
3077 		device_printf(dev, "%s: failed to open network interface: "
3078 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
3079 		goto close_rc;
3080 	}
3081 
3082 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
3083 	    &status);
3084 	if (error) {
3085 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
3086 		    "error=%d\n", __func__, error);
3087 	}
3088 
3089 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3090 close_rc:
3091 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3092 err_exit:
3093 	return;
3094 }
3095 
3096 /**
3097  * @brief Callback to obtain a physical address of the only DMA segment mapped.
3098  */
3099 static void
3100 dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3101 {
3102 	if (error == 0) {
3103 		KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg));
3104 		*(bus_addr_t *) arg = segs[0].ds_addr;
3105 	}
3106 }
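
/*
 * NOTE: with BUS_DMA_NOWAIT the load is never deferred, so the callback above
 * has already run, and the address written through "arg" is valid, by the
 * time bus_dmamap_load() returns zero. A minimal sketch (names illustrative):
 *
 *	bus_addr_t paddr = 0;
 *
 *	if (bus_dmamap_load(dmat, dmap, vaddr, size, dpaa2_ni_dmamap_cb,
 *	    &paddr, BUS_DMA_NOWAIT) == 0)
 *		KASSERT(paddr != 0, ("%s: no address", __func__));
 */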
3107 
3108 /**
3109  * @brief Release new buffers to the buffer pool if necessary.
3110  */
3111 static void
3112 dpaa2_ni_bp_task(void *arg, int count)
3113 {
3114 	device_t bp_dev;
3115 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
3116 	struct dpaa2_bp_softc *bpsc;
3117 	struct dpaa2_bp_conf bp_conf;
3118 	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3119 	int error;
3120 
3121 	/* There's only one buffer pool for now. */
3122 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3123 	bpsc = device_get_softc(bp_dev);
3124 
3125 	/* Get state of the buffer pool. */
3126 	error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid,
3127 	    &bp_conf);
3128 	if (error) {
3129 		device_printf(sc->dev, "%s: failed to query buffer pool "
3130 		    "configuration: error=%d\n", __func__, error);
3131 		return;
3132 	}
3133 
	/* Double the number of allocated buffers if fewer than 25% are free. */
3135 	if (bp_conf.free_bufn < (buf_num >> 2)) {
3136 		(void)dpaa2_ni_seed_buf_pool(sc, buf_num);
3137 		DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn);
3138 	}
3139 }
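
/*
 * Worked example for the low-water check above (illustrative numbers): with
 * buf_num = 512 allocated buffers, "buf_num >> 2" gives a threshold of 128;
 * once QBMan reports fewer than 128 free buffers, another 512 are seeded,
 * roughly doubling the pool before it can run dry.
 */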
3140 
3141 /**
3142  * @brief Poll frames from a specific channel when CDAN is received.
3143  *
3144  * NOTE: To be called from the DPIO interrupt handler.
3145  */
3146 static void
3147 dpaa2_ni_poll(void *arg)
3148 {
3149 	struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg;
3150 	struct dpaa2_io_softc *iosc;
3151 	struct dpaa2_swp *swp;
3152 	struct dpaa2_ni_fq *fq;
3153 	int error, consumed = 0;
3154 
3155 	KASSERT(chan != NULL, ("%s: channel is NULL", __func__));
3156 
3157 	iosc = device_get_softc(chan->io_dev);
3158 	swp = iosc->swp;
3159 
3160 	do {
3161 		error = dpaa2_swp_pull(swp, chan->id, &chan->store,
3162 		    ETH_STORE_FRAMES);
3163 		if (error) {
3164 			device_printf(chan->ni_dev, "%s: failed to pull frames: "
3165 			    "chan_id=%d, error=%d\n", __func__, chan->id, error);
3166 			break;
3167 		}
3168 
		/*
		 * TODO: Combine frames from the same Rx queue returned in
		 * response to the current VDQ command into a chain (linked
		 * with m_nextpkt) to amortize the FQ lock.
		 */
3174 		error = dpaa2_ni_consume_frames(chan, &fq, &consumed);
3175 		if (error == ENOENT) {
3176 			break;
3177 		}
3178 		if (error == ETIMEDOUT) {
			device_printf(chan->ni_dev, "%s: timed out while "
			    "consuming frames: chan_id=%d\n", __func__,
			    chan->id);
3181 		}
3182 	} while (true);
3183 
3184 	/* Re-arm channel to generate CDAN. */
3185 	error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx);
3186 	if (error) {
3187 		device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, "
3188 		    "error=%d\n", __func__, chan->id, error);
3189 	}
3190 }
3191 
3192 /**
3193  * @brief Transmit mbufs.
3194  */
3195 static void
3196 dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3197     struct mbuf *m)
3198 {
3199 	struct dpaa2_ni_fq *fq = tx->fq;
3200 	struct dpaa2_buf *buf;
3201 	struct dpaa2_fd fd;
3202 	struct mbuf *m_d;
3203 	bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT];
3204 	uint64_t idx;
3205 	void *pidx;
3206 	int error, rc, txnsegs;
3207 
3208 	/* Obtain an index of a Tx buffer. */
3209 	pidx = buf_ring_dequeue_sc(tx->idx_br);
3210 	if (__predict_false(pidx == NULL)) {
3211 		/* TODO: Do not give up easily. */
3212 		m_freem(m);
3213 		return;
3214 	} else {
3215 		idx = (uint64_t) pidx;
3216 		buf = &tx->buf[idx];
3217 		buf->tx.m = m;
3218 		buf->tx.sgt_paddr = 0;
3219 	}
3220 
3221 	/* Load mbuf to transmit. */
3222 	error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m,
3223 	    txsegs, &txnsegs, BUS_DMA_NOWAIT);
3224 	if (__predict_false(error != 0)) {
3225 		/* Too many fragments, trying to defragment... */
3226 		m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
3227 		if (m_d == NULL) {
3228 			device_printf(sc->dev, "%s: mbuf "
3229 			    "defragmentation failed\n", __func__);
3230 			fq->chan->tx_dropped++;
3231 			goto err;
3232 		}
3233 
3234 		buf->tx.m = m = m_d;
3235 		error = bus_dmamap_load_mbuf_sg(buf->tx.dmat,
3236 		    buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT);
3237 		if (__predict_false(error != 0)) {
3238 			device_printf(sc->dev, "%s: failed to load "
3239 			    "mbuf: error=%d\n", __func__, error);
3240 			fq->chan->tx_dropped++;
3241 			goto err;
3242 		}
3243 	}
3244 
3245 	/* Build frame descriptor. */
3246 	error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd);
3247 	if (__predict_false(error != 0)) {
3248 		device_printf(sc->dev, "%s: failed to build frame "
3249 		    "descriptor: error=%d\n", __func__, error);
3250 		fq->chan->tx_dropped++;
3251 		goto err_unload;
3252 	}
3253 
3254 	/* TODO: Enqueue several frames in a single command. */
3255 	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
3256 		/* TODO: Return error codes instead of # of frames. */
3257 		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid,
3258 		    &fd, 1);
3259 		if (rc == 1) {
3260 			break;
3261 		}
3262 	}
3263 
3264 	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_PREWRITE);
3265 	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_PREWRITE);
3266 
3267 	if (rc != 1) {
3268 		fq->chan->tx_dropped++;
3269 		goto err_unload;
3270 	} else {
3271 		fq->chan->tx_frames++;
3272 	}
3273 	return;
3274 
3275 err_unload:
3276 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
3277 	if (buf->tx.sgt_paddr != 0) {
3278 		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
3279 	}
3280 err:
3281 	m_freem(buf->tx.m);
3282 	buf_ring_enqueue(tx->idx_br, pidx);
3283 }
3284 
3285 static int
3286 dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src,
3287     uint32_t *consumed)
3288 {
3289 	struct dpaa2_ni_fq *fq = NULL;
3290 	struct dpaa2_dq *dq;
3291 	struct dpaa2_fd *fd;
3292 	int rc, frames = 0;
3293 
3294 	do {
3295 		rc = dpaa2_ni_chan_storage_next(chan, &dq);
3296 		if (rc == EINPROGRESS) {
3297 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3298 				fd = &dq->fdr.fd;
3299 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3300 				fq->consume(chan, fq, fd);
3301 				frames++;
3302 			}
3303 		} else if (rc == EALREADY || rc == ENOENT) {
3304 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3305 				fd = &dq->fdr.fd;
3306 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3307 				fq->consume(chan, fq, fd);
3308 				frames++;
3309 			}
3310 			break;
3311 		} else {
3312 			KASSERT(false, ("%s: should not reach here", __func__));
3313 		}
3314 	} while (true);
3315 
3316 	KASSERT(chan->store_idx < chan->store_sz,
3317 	    ("channel store idx >= size: store_idx=%d, store_sz=%d",
3318 	    chan->store_idx, chan->store_sz));
3319 
3320 	/*
3321 	 * A dequeue operation pulls frames from a single queue into the store.
3322 	 * Return the frame queue and the number of consumed frames to the caller.
3323 	 */
3324 	if (src != NULL)
3325 		*src = fq;
3326 	if (consumed != NULL)
3327 		*consumed = frames;
3328 
3329 	return (rc);
3330 }
3331 
3332 /**
3333  * @brief Receive frames.
3334  */
3335 static int
3336 dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
3337     struct dpaa2_fd *fd)
3338 {
3339 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
3340 	struct dpaa2_bp_softc *bpsc;
3341 	struct dpaa2_buf *buf;
3342 	struct dpaa2_fa *fa;
3343 	if_t ifp = sc->ifp;
3344 	struct mbuf *m;
3345 	device_t bp_dev;
3346 	bus_addr_t paddr = (bus_addr_t) fd->addr;
3347 	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
3348 	void *buf_data;
3349 	int buf_len, error, released_n = 0;
3350 
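	/*
	 * The frame annotation at the start of the buffer (written by
	 * dpaa2_ni_seed_rxbuf()) points back to the originating dpaa2_buf.
	 */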
3351 	fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
3352 	buf = fa->buf;
3353 
3354 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3355 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3356 	if (__predict_false(paddr != buf->rx.paddr)) {
3357 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3358 		    __func__, paddr, buf->rx.paddr);
3359 	}
3360 
3361 	/* Update statistics. */
3362 	switch (dpaa2_ni_fd_err(fd)) {
3363 	case 1: /* Enqueue rejected by QMan */
3364 		sc->rx_enq_rej_frames++;
3365 		break;
3366 	case 2: /* QMan IEOI error */
3367 		sc->rx_ieoi_err_frames++;
3368 		break;
3369 	default:
3370 		break;
3371 	}
3372 	switch (dpaa2_ni_fd_format(fd)) {
3373 	case DPAA2_FD_SINGLE:
3374 		sc->rx_single_buf_frames++;
3375 		break;
3376 	case DPAA2_FD_SG:
3377 		sc->rx_sg_buf_frames++;
3378 		break;
3379 	default:
3380 		break;
3381 	}
3382 
3383 	m = buf->rx.m;
3384 	buf->rx.m = NULL;
3385 	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_POSTREAD);
3386 	bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
3387 
3388 	buf_len = dpaa2_ni_fd_data_len(fd);
3389 	buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd);
3390 
3391 	/* Prefetch mbuf data. */
3392 	__builtin_prefetch(buf_data);
3393 
3394 	/* Write mbuf fields without reading them first (avoid cache misses). */
3395 	m->m_flags |= M_PKTHDR;
3396 	m->m_data = buf_data;
3397 	m->m_len = buf_len;
3398 	m->m_pkthdr.len = buf_len;
3399 	m->m_pkthdr.rcvif = ifp;
3400 	m->m_pkthdr.flowid = fq->fqid;
3401 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3402 
3403 	if_input(ifp, m);
3404 
3405 	/* Keep the buffer for recycling. */
3406 	chan->recycled[chan->recycled_n++] = buf;
3407 	KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD,
3408 	    ("%s: too many buffers to recycle", __func__));
3409 
3410 	/* Re-seed and release recycled buffers back to the pool. */
3411 	if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
3412 		/* Release new buffers to the pool if needed. */
3413 		taskqueue_enqueue(sc->bp_taskq, &sc->bp_task);
3414 
3415 		for (int i = 0; i < chan->recycled_n; i++) {
3416 			buf = chan->recycled[i];
3417 
3418 			/* Seed recycled buffer. */
3419 			error = dpaa2_ni_seed_rxbuf(sc, buf);
3420 			KASSERT(error == 0, ("%s: failed to seed recycled "
3421 			    "buffer: error=%d", __func__, error));
3422 			if (__predict_false(error != 0)) {
3423 				device_printf(sc->dev, "%s: failed to seed "
3424 				    "recycled buffer: error=%d\n", __func__,
3425 				    error);
3426 				continue;
3427 			}
3428 
3429 			/* Prepare buffer to be released in a single command. */
3430 			released[released_n++] = buf->rx.paddr;
3431 		}
3432 
3433 		/* There's only one buffer pool for now. */
3434 		bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3435 		bpsc = device_get_softc(bp_dev);
3436 
3437 		error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid,
3438 		    released, released_n);
3439 		if (__predict_false(error != 0)) {
3440 			device_printf(sc->dev, "%s: failed to release buffers "
3441 			    "to the pool: error=%d\n", __func__, error);
3442 			return (error);
3443 		}
3444 
3445 		/* Be ready to recycle the next batch of buffers. */
3446 		chan->recycled_n = 0;
3447 	}
3448 
3449 	return (0);
3450 }
3451 
3452 /**
3453  * @brief Receive Rx error frames.
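 *
 * The frame is not passed up the stack; its buffer is released straight
 * back to the QBMan buffer pool.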
3454  */
3455 static int
3456 dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
3457     struct dpaa2_fd *fd)
3458 {
3459 	device_t bp_dev;
3460 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
3461 	struct dpaa2_bp_softc *bpsc;
3462 	struct dpaa2_buf *buf;
3463 	struct dpaa2_fa *fa;
3464 	bus_addr_t paddr = (bus_addr_t) fd->addr;
3465 	int error;
3466 
3467 	fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
3468 	buf = fa->buf;
3469 
3470 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3471 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3472 	if (__predict_false(paddr != buf->rx.paddr)) {
3473 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3474 		    __func__, paddr, buf->rx.paddr);
3475 	}
3476 
3477 	/* There's only one buffer pool for now. */
3478 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3479 	bpsc = device_get_softc(bp_dev);
3480 
3481 	/* Release buffer to QBMan buffer pool. */
3482 	error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1);
3483 	if (error != 0) {
3484 		device_printf(sc->dev, "%s: failed to release frame buffer to "
3485 		    "the pool: error=%d\n", __func__, error);
3486 		return (error);
3487 	}
3488 
3489 	return (0);
3490 }
3491 
3492 /**
3493  * @brief Receive Tx confirmation frames.
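 *
 * Unmaps the DMA resources of the transmitted frame, frees its mbuf and
 * returns the Tx buffer index to the ring for reuse.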
3494  */
3495 static int
3496 dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
3497     struct dpaa2_fd *fd)
3498 {
3499 	struct dpaa2_ni_tx_ring *tx;
3500 	struct dpaa2_buf *buf;
3501 	struct dpaa2_fa *fa;
3502 	bus_addr_t paddr = (bus_addr_t) fd->addr;
3503 
3504 	fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
3505 	buf = fa->buf;
3506 	tx = fa->tx;
3507 
3508 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3509 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3510 	if (paddr != buf->tx.paddr) {
3511 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3512 		    __func__, paddr, buf->tx.paddr);
3513 	}
3514 
3515 	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_POSTWRITE);
3516 	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_POSTWRITE);
3517 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
3518 	bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
3519 	m_freem(buf->tx.m);
3520 
3521 	/* Return Tx buffer index back to the ring. */
3522 	buf_ring_enqueue(tx->idx_br, (void *) buf->tx.idx);
3523 
3524 	return (0);
3525 }
3526 
3527 /**
3528  * @brief Compare versions of the DPAA2 network interface API.
3529  */
3530 static int
3531 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3532     uint16_t minor)
3533 {
3534 	if (sc->api_major == major)
3535 		return (sc->api_minor - minor);
3536 	return (sc->api_major - major);
3537 }
3538 
3539 /**
3540  * @brief Allocate Rx buffers visible to QBMan and release them to the pool.
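 *
 * Buffers are seeded and released in batches of DPAA2_SWP_BUFS_PER_CMD,
 * with the total bounded by DPAA2_NI_BUFS_MAX.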
3541  */
3542 static int
3543 dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn)
3544 {
3545 	device_t bp_dev;
3546 	struct dpaa2_bp_softc *bpsc;
3547 	struct dpaa2_buf *buf;
3548 	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
3549 	const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num);
3550 	int i, error, bufn = 0;
3551 
3552 	KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not "
3553 	    "created?", __func__));
3554 
3555 	/* There's only one buffer pool for now. */
3556 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3557 	bpsc = device_get_softc(bp_dev);
3558 
3559 	/* Limit # of buffers released to the pool. */
3560 	if (allocated + seedn > DPAA2_NI_BUFS_MAX)
3561 		seedn = DPAA2_NI_BUFS_MAX - allocated;
3562 
3563 	/* Release "seedn" buffers to the pool. */
3564 	for (i = allocated; i < (allocated + seedn); i++) {
3565 		/* Release a full batch once enough buffers are ready for one command. */
3566 		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
3567 			error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3568 			    bpsc->attr.bpid, paddr, bufn);
3569 			if (error) {
3570 				device_printf(sc->dev, "%s: failed to release "
3571 				    "buffers to the pool (1)\n", __func__);
3572 				return (error);
3573 			}
3574 			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3575 			bufn = 0;
3576 		}
3577 
3578 		buf = &sc->buf[i];
3579 		buf->type = DPAA2_BUF_RX;
3580 		buf->rx.m = NULL;
3581 		buf->rx.dmap = NULL;
3582 		buf->rx.paddr = 0;
3583 		buf->rx.vaddr = NULL;
3584 		error = dpaa2_ni_seed_rxbuf(sc, buf);
3585 		if (error != 0) {
3586 			break;
3587 		}
3588 		paddr[bufn] = buf->rx.paddr;
3589 		bufn++;
3590 	}
3591 
3592 	/* Release if there are buffers left. */
3593 	if (bufn > 0) {
3594 		error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3595 		    bpsc->attr.bpid, paddr, bufn);
3596 		if (error) {
3597 			device_printf(sc->dev, "%s: failed to release "
3598 			    "buffers to the pool (2)\n", __func__);
3599 			return (error);
3600 		}
3601 		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3602 	}
3603 
3604 	return (0);
3605 }
3606 
3607 /**
3608  * @brief Prepare Rx buffer to be released to the buffer pool.
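 *
 * Besides DMA-mapping an mbuf, this writes a frame annotation (magic number
 * plus a back-pointer to the dpaa2_buf) at the head of the buffer so the Rx
 * path can recover the buffer from a frame descriptor address.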
3609  */
3610 static int
3611 dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf)
3612 {
3613 	struct mbuf *m;
3614 	struct dpaa2_fa *fa;
3615 	bus_dmamap_t dmap;
3616 	bus_dma_segment_t segs;
3617 	int error, nsegs;
3618 
3619 	KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not "
3620 	    "allocated?", __func__));
3621 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3622 
3623 	/* Keep DMA tag for this buffer. */
3624 	if (__predict_false(buf->rx.dmat == NULL))
3625 		buf->rx.dmat = sc->bp_dmat;
3626 
3627 	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3628 	if (__predict_false(buf->rx.dmap == NULL)) {
3629 		error = bus_dmamap_create(buf->rx.dmat, 0, &dmap);
3630 		if (error) {
3631 			device_printf(sc->dev, "%s: failed to create DMA map "
3632 			    "for buffer: error=%d\n", __func__, error);
3633 			return (error);
3634 		}
3635 		buf->rx.dmap = dmap;
3636 	}
3637 
3638 	/* Allocate mbuf if needed. */
3639 	if (__predict_false(buf->rx.m == NULL)) {
3640 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE);
3641 		if (__predict_false(m == NULL)) {
3642 			device_printf(sc->dev, "%s: failed to allocate mbuf for "
3643 			    "buffer\n", __func__);
3644 			return (ENOMEM);
3645 		}
3646 		m->m_len = m->m_ext.ext_size;
3647 		m->m_pkthdr.len = m->m_ext.ext_size;
3648 		buf->rx.m = m;
3649 	} else
3650 		m = buf->rx.m;
3651 
3652 	error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap,
3653 	    m, &segs, &nsegs, BUS_DMA_NOWAIT);
3654 	KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs));
3655 	KASSERT(error == 0, ("failed to map mbuf: error=%d", error));
3656 	if (__predict_false(error != 0 || nsegs != 1)) {
3657 		device_printf(sc->dev, "%s: failed to map mbuf: error=%d, "
3658 		    "nsegs=%d\n", __func__, error, nsegs);
3659 		bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
3660 		m_freem(m);
3661 		return (error);
3662 	}
3663 	buf->rx.paddr = segs.ds_addr;
3664 	buf->rx.vaddr = m->m_data;
3665 
3666 	/* Populate frame annotation for future use. */
3667 	fa = (struct dpaa2_fa *) m->m_data;
3668 	fa->magic = DPAA2_MAGIC;
3669 	fa->buf = buf;
3670 
3671 	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_PREREAD);
3672 
3673 	return (0);
3674 }
3675 
3676 /**
3677  * @brief Prepare Tx buffer to be added to the Tx ring.
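 *
 * The DMA map and the scatter/gather table storage are created lazily, on
 * the first call for the given buffer.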
3678  */
3679 static int
3680 dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf)
3681 {
3682 	bus_dmamap_t dmap;
3683 	int error;
3684 
3685 	KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?",
3686 	    __func__));
3687 	KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?",
3688 	    __func__));
3689 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3690 
3691 	/* Keep DMA tags for this buffer. */
3692 	if (__predict_true(buf->tx.dmat == NULL))
3693 		buf->tx.dmat = sc->tx_dmat;
3694 	if (__predict_true(buf->tx.sgt_dmat == NULL))
3695 		buf->tx.sgt_dmat = sc->sgt_dmat;
3696 
3697 	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3698 	if (__predict_true(buf->tx.dmap == NULL)) {
3699 		error = bus_dmamap_create(buf->tx.dmat, 0, &dmap);
3700 		if (error != 0) {
3701 			device_printf(sc->dev, "%s: failed to create "
3702 			    "Tx DMA map: error=%d\n", __func__, error);
3703 			return (error);
3704 		}
3705 		buf->tx.dmap = dmap;
3706 	}
3707 
3708 	/* Allocate a buffer to store the scatter/gather table. */
3709 	if (__predict_true(buf->tx.sgt_vaddr == NULL)) {
3710 		error = bus_dmamem_alloc(buf->tx.sgt_dmat,
3711 		    &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT,
3712 		    &buf->tx.sgt_dmap);
3713 		if (error != 0) {
3714 			device_printf(sc->dev, "%s: failed to allocate "
3715 			    "S/G table: error=%d\n", __func__, error);
3716 			return (error);
3717 		}
3718 	}
3719 
3720 	return (0);
3721 }
3722 
3723 /**
3724  * @brief Allocate channel storage visible to QBMan.
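 *
 * The storage is the memory QBMan writes dequeue responses into; it holds
 * up to ETH_STORE_FRAMES responses.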
3725  */
3726 static int
3727 dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc,
3728     struct dpaa2_ni_channel *chan)
3729 {
3730 	struct dpaa2_buf *buf = &chan->store;
3731 	int error;
3732 
3733 	KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not "
3734 	    "allocated?", __func__));
3735 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer",
3736 	    __func__));
3737 
3738 	/* Keep DMA tag for this buffer. */
3739 	if (__predict_false(buf->store.dmat == NULL)) {
3740 		buf->store.dmat = sc->st_dmat;
3741 	}
3742 
3743 	if (__predict_false(buf->store.vaddr == NULL)) {
3744 		error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
3745 		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
3746 		if (error) {
3747 			device_printf(sc->dev, "%s: failed to allocate channel "
3748 			    "storage\n", __func__);
3749 			return (error);
3750 		}
3751 	}
3752 
3753 	if (__predict_false(buf->store.paddr == 0)) {
3754 		error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
3755 		    buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb,
3756 		    &buf->store.paddr, BUS_DMA_NOWAIT);
3757 		if (error) {
3758 			device_printf(sc->dev, "%s: failed to map channel "
3759 			    "storage\n", __func__);
3760 			return (error);
3761 		}
3762 	}
3763 
3764 	chan->store_sz = ETH_STORE_FRAMES;
3765 	chan->store_idx = 0;
3766 
3767 	return (0);
3768 }
3769 
3770 /**
3771  * @brief Build a DPAA2 frame descriptor.
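 *
 * The frame is always described via a scatter/gather table; the same buffer
 * carries the frame annotation at offset 0, ahead of the table itself.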
3772  */
3773 static int
3774 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3775     struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs,
3776     struct dpaa2_fd *fd)
3777 {
3778 	struct dpaa2_sg_entry *sgt;
3779 	struct dpaa2_fa *fa;
3780 	int i, error;
3781 
3782 	KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, "
3783 	    "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT));
3784 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3785 	KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?",
3786 	    __func__));
3787 
3788 	/* Reset frame descriptor fields. */
3789 	memset(fd, 0, sizeof(*fd));
3790 
3791 	if (__predict_true(txnsegs <= DPAA2_TX_SEGLIMIT)) {
3792 		/* Populate S/G table. */
3793 		sgt = (struct dpaa2_sg_entry *) buf->tx.sgt_vaddr +
3794 		    sc->tx_data_off;
3795 		for (i = 0; i < txnsegs; i++) {
3796 			sgt[i].addr = (uint64_t) txsegs[i].ds_addr;
3797 			sgt[i].len = (uint32_t) txsegs[i].ds_len;
3798 			sgt[i].offset_fmt = 0u;
3799 		}
3800 		sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3801 
3802 		KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0",
3803 		    __func__, buf->tx.sgt_paddr));
3804 
3805 		/* Load S/G table. */
3806 		error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
3807 		    buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb,
3808 		    &buf->tx.sgt_paddr, BUS_DMA_NOWAIT);
3809 		if (__predict_false(error != 0)) {
3810 			device_printf(sc->dev, "%s: failed to map S/G table: "
3811 			    "error=%d\n", __func__, error);
3812 			return (error);
3813 		}
3814 
3815 		buf->tx.paddr = buf->tx.sgt_paddr;
3816 		buf->tx.vaddr = buf->tx.sgt_vaddr;
3817 		sc->tx_sg_frames++; /* for sysctl(9) */
3818 	} else {
3819 		return (EINVAL);
3820 	}
3821 
3822 	fa = (struct dpaa2_fa *) buf->tx.sgt_vaddr;
3823 	fa->magic = DPAA2_MAGIC;
3824 	fa->buf = buf;
3825 	fa->tx = tx;
3826 
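	/*
	 * A note on the magic values below (derived from the FD accessors at
	 * the bottom of this file; the PTA claim is an assumption): FMT=0x2
	 * in offset_fmt_sl marks a scatter/gather frame and the low bits hold
	 * the data offset, while ctrl bit 0x00800000 is believed to request
	 * pass-through annotation (PTA).
	 */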
3827 	fd->addr = buf->tx.paddr;
3828 	fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len;
3829 	fd->bpid_ivp_bmt = 0;
3830 	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
3831 	fd->ctrl = 0x00800000u;
3832 
3833 	return (0);
3834 }
3835 
3836 static int
3837 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3838 {
3839 	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3840 }
3841 
3842 static uint32_t
3843 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3844 {
3845 	if (dpaa2_ni_fd_short_len(fd))
3846 		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3847 
3848 	return (fd->data_length);
3849 }
3850 
3851 static int
3852 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3853 {
3854 	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3855 	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3856 }
3857 
3858 static bool
3859 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3860 {
3861 	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3862 	    & DPAA2_NI_FD_SL_MASK) == 1);
3863 }
3864 
3865 static int
3866 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3867 {
3868 	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3869 }
3870 
3871 /**
3872  * @brief Collect statistics of the network interface.
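 *
 * A sysctl(9) handler: opens the resource container and the network
 * interface, fetches one page of DPNI counters and reports a single
 * counter from it.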
3873  */
3874 static int
3875 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
3876 {
3877 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3878 	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
3879 	device_t pdev = device_get_parent(sc->dev);
3880 	device_t dev = sc->dev;
3881 	device_t child = dev;
3882 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3883 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3884 	struct dpaa2_cmd cmd;
3885 	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
3886 	uint64_t result = 0;
3887 	uint16_t rc_token, ni_token;
3888 	int error;
3889 
3890 	DPAA2_CMD_INIT(&cmd);
3891 
3892 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3893 	if (error) {
3894 		device_printf(dev, "%s: failed to open resource container: "
3895 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
3896 		goto exit;
3897 	}
3898 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3899 	if (error) {
3900 		device_printf(dev, "%s: failed to open network interface: "
3901 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
3902 		goto close_rc;
3903 	}
3904 
3905 	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
3906 	if (!error) {
3907 		result = cnt[stat->cnt];
3908 	}
3909 
3910 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3911 close_rc:
3912 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3913 exit:
3914 	return (sysctl_handle_64(oidp, &result, 0, req));
3915 }
3916 
3917 static int
3918 dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
3919 {
3920 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3921 	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3922 
3923 	return (sysctl_handle_32(oidp, &buf_num, 0, req));
3924 }
3925 
3926 static int
3927 dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
3928 {
3929 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3930 	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
3931 
3932 	return (sysctl_handle_32(oidp, &buf_free, 0, req));
3933 }
3934 
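/**
 * @brief Enable Rx traffic distribution (hashing) based on RXH_ flags.
 *
 * Returns EOPNOTSUPP if the interface has only one Rx queue.
 */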
3935 static int
3936 dpaa2_ni_set_hash(device_t dev, uint64_t flags)
3937 {
3938 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3939 	uint64_t key = 0;
3940 	int i;
3941 
3942 	if (sc->attr.num.queues < 2) {
3943 		return (EOPNOTSUPP);
3944 	}
3945 
3946 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3947 		if (dist_fields[i].rxnfc_field & flags) {
3948 			key |= dist_fields[i].id;
3949 		}
3950 	}
3951 
3952 	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
3953 }
3954 
3955 /**
3956  * @brief Set the Rx distribution (hash or flow classification) key;
3957  * "flags" is a combination of RXH_ bits.
3958  */
3959 static int
3960 dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
3961 {
3962 	device_t pdev = device_get_parent(dev);
3963 	device_t child = dev;
3964 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3965 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3966 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3967 	struct dpkg_profile_cfg cls_cfg;
3968 	struct dpkg_extract *key;
3969 	struct dpaa2_buf *buf = &sc->rxd_kcfg;
3970 	struct dpaa2_cmd cmd;
3971 	uint16_t rc_token, ni_token;
3972 	int i, error = 0;
3973 
3974 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
3975 	    __func__));
3976 	if (__predict_true(buf->store.dmat == NULL)) {
3977 		buf->store.dmat = sc->rxd_dmat;
3978 	}
3979 
3980 	memset(&cls_cfg, 0, sizeof(cls_cfg));
3981 
3982 	/* Configure extracts according to the given flags. */
3983 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3984 		key = &cls_cfg.extracts[cls_cfg.num_extracts];
3985 
3986 		if (!(flags & dist_fields[i].id)) {
3987 			continue;
3988 		}
3989 
3990 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3991 			device_printf(dev, "%s: failed to add key extraction "
3992 			    "rule\n", __func__);
3993 			return (E2BIG);
3994 		}
3995 
3996 		key->type = DPKG_EXTRACT_FROM_HDR;
3997 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3998 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
3999 		key->extract.from_hdr.field = dist_fields[i].cls_field;
4000 		cls_cfg.num_extracts++;
4001 	}
4002 
4003 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
4004 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
4005 	if (error != 0) {
4006 		device_printf(dev, "%s: failed to allocate a buffer for Rx "
4007 		    "traffic distribution key configuration\n", __func__);
4008 		return (error);
4009 	}
4010 
4011 	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr);
4012 	if (error != 0) {
4013 		device_printf(dev, "%s: failed to prepare key configuration: "
4014 		    "error=%d\n", __func__, error);
4015 		return (error);
4016 	}
4017 
4018 	/* Prepare for setting the Rx distribution. */
4019 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
4020 	    buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb,
4021 	    &buf->store.paddr, BUS_DMA_NOWAIT);
4022 	if (error != 0) {
4023 		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
4024 		    "traffic distribution key configuration\n", __func__);
4025 		return (error);
4026 	}
4027 
4028 	if (type == DPAA2_NI_DIST_MODE_HASH) {
4029 		DPAA2_CMD_INIT(&cmd);
4030 
4031 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
4032 		    &rc_token);
4033 		if (error) {
4034 			device_printf(dev, "%s: failed to open resource "
4035 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
4036 			    error);
4037 			goto err_exit;
4038 		}
4039 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
4040 		    &ni_token);
4041 		if (error) {
4042 			device_printf(dev, "%s: failed to open network "
4043 			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
4044 			    error);
4045 			goto close_rc;
4046 		}
4047 
4048 		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
4049 		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH,
4050 		    buf->store.paddr);
4051 		if (error != 0) {
4052 			device_printf(dev, "%s: failed to set distribution mode "
4053 			    "and size for the traffic class\n", __func__);
4054 		}
4055 
4056 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
4057 		    ni_token));
4058 close_rc:
4059 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
4060 		    rc_token));
4061 	}
4062 
4063 err_exit:
4064 	return (error);
4065 }
4066 
4067 /**
4068  * @brief Prepares extract parameters.
4069  *
4070  * cfg:		Full key generation profile to serialize.
4071  * key_cfg_buf:	Zeroed 256-byte buffer to be mapped for DMA.
4072  */
4073 static int
4074 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
4075 {
4076 	struct dpni_ext_set_rx_tc_dist *dpni_ext;
4077 	struct dpni_dist_extract *extr;
4078 	int i, j;
4079 
4080 	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
4081 		return (EINVAL);
4082 
4083 	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
4084 	dpni_ext->num_extracts = cfg->num_extracts;
4085 
4086 	for (i = 0; i < cfg->num_extracts; i++) {
4087 		extr = &dpni_ext->extracts[i];
4088 
4089 		switch (cfg->extracts[i].type) {
4090 		case DPKG_EXTRACT_FROM_HDR:
4091 			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
4092 			extr->efh_type =
4093 			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
4094 			extr->size = cfg->extracts[i].extract.from_hdr.size;
4095 			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
4096 			extr->field = cfg->extracts[i].extract.from_hdr.field;
4097 			extr->hdr_index =
4098 				cfg->extracts[i].extract.from_hdr.hdr_index;
4099 			break;
4100 		case DPKG_EXTRACT_FROM_DATA:
4101 			extr->size = cfg->extracts[i].extract.from_data.size;
4102 			extr->offset =
4103 				cfg->extracts[i].extract.from_data.offset;
4104 			break;
4105 		case DPKG_EXTRACT_FROM_PARSE:
4106 			extr->size = cfg->extracts[i].extract.from_parse.size;
4107 			extr->offset =
4108 				cfg->extracts[i].extract.from_parse.offset;
4109 			break;
4110 		default:
4111 			return (EINVAL);
4112 		}
4113 
4114 		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
4115 		extr->extract_type = cfg->extracts[i].type & 0x0Fu;
4116 
4117 		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
4118 			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
4119 			extr->masks[j].offset =
4120 				cfg->extracts[i].masks[j].offset;
4121 		}
4122 	}
4123 
4124 	return (0);
4125 }
4126 
4127 /**
4128  * @brief Obtain the next dequeue response from the channel storage.
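 *
 * Returns EINPROGRESS while more responses are expected, EALREADY once the
 * VDQ command has expired, or ENOENT when the frame queue is empty; the
 * store index is rewound in the latter two cases.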
4129  */
4130 static int
4131 dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq)
4132 {
4133 	struct dpaa2_buf *buf = &chan->store;
4134 	struct dpaa2_dq *msgs = buf->store.vaddr;
4135 	struct dpaa2_dq *msg = &msgs[chan->store_idx];
4136 	int rc = EINPROGRESS;
4137 
4138 	chan->store_idx++;
4139 
4140 	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
4141 		rc = EALREADY; /* VDQ command is expired */
4142 		chan->store_idx = 0;
4143 		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME))
4144 			msg = NULL; /* Null response, FD is invalid */
4145 	}
4146 	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
4147 		rc = ENOENT; /* FQ is empty */
4148 		chan->store_idx = 0;
4149 	}
4150 
4151 	if (dq != NULL)
4152 		*dq = msg;
4153 
4154 	return (rc);
4155 }
4156 
4157 static device_method_t dpaa2_ni_methods[] = {
4158 	/* Device interface */
4159 	DEVMETHOD(device_probe,		dpaa2_ni_probe),
4160 	DEVMETHOD(device_attach,	dpaa2_ni_attach),
4161 	DEVMETHOD(device_detach,	dpaa2_ni_detach),
4162 
4163 	/* mii via memac_mdio */
4164 	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),
4165 
4166 	DEVMETHOD_END
4167 };
4168 
4169 static driver_t dpaa2_ni_driver = {
4170 	"dpaa2_ni",
4171 	dpaa2_ni_methods,
4172 	sizeof(struct dpaa2_ni_softc),
4173 };
4174 
4175 DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
4176 DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);
4177 
4178 MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
4179 #ifdef DEV_ACPI
4180 MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
4181 #endif
4182 #ifdef FDT
4183 MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
4184 #endif
4185