1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright © 2021-2023 Dmitry Salychev
5 * Copyright © 2022 Mathew McBride
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 /*
31 * The DPAA2 Network Interface (DPNI) driver.
32 *
33 * The DPNI object is a network interface configurable to support a wide range
34 * of features, from a basic Ethernet interface up to a fully featured one. It
35 * provides the functionality expected by standard network stacks, from basic
36 * operation to hardware offloads.
37 *
38 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
39 * functions are provided for standard network protocols (L2, L3, L4, etc.).
40 */
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/bus.h>
46 #include <sys/rman.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/mbuf.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sysctl.h>
56 #include <sys/buf_ring.h>
57 #include <sys/smp.h>
58 #include <sys/proc.h>
59
60 #include <vm/vm.h>
61 #include <vm/pmap.h>
62
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <machine/atomic.h>
66 #include <machine/vmparam.h>
67
68 #include <net/ethernet.h>
69 #include <net/bpf.h>
70 #include <net/if.h>
71 #include <net/if_dl.h>
72 #include <net/if_media.h>
73 #include <net/if_types.h>
74 #include <net/if_var.h>
75
76 #include <dev/pci/pcivar.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79 #include <dev/mdio/mdio.h>
80
81 #include "opt_acpi.h"
82 #include "opt_platform.h"
83
84 #include "pcib_if.h"
85 #include "pci_if.h"
86 #include "miibus_if.h"
87 #include "memac_mdio_if.h"
88
89 #include "dpaa2_types.h"
90 #include "dpaa2_mc.h"
91 #include "dpaa2_mc_if.h"
92 #include "dpaa2_mcp.h"
93 #include "dpaa2_swp.h"
94 #include "dpaa2_swp_if.h"
95 #include "dpaa2_cmd_if.h"
96 #include "dpaa2_ni.h"
97 #include "dpaa2_channel.h"
98 #include "dpaa2_buf.h"
99
100 #define BIT(x) (1ul << (x))
101 #define WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
102 #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
103
104 /* Frame Dequeue Response status bits. */
105 #define IS_NULL_RESPONSE(stat) ((((stat) >> 4) & 1) == 0)
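/* NOTE: Bit 4 of the dequeue response status is set only when a valid frame is
 * present in the response. */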
106
107 #define ALIGN_UP(x, y) roundup2((x), (y))
108 #define ALIGN_DOWN(x, y) rounddown2((x), (y))
109 #define CACHE_LINE_ALIGN(x) ALIGN_UP((x), CACHE_LINE_SIZE)
110
111 #define DPNI_LOCK(__sc) do { \
112 mtx_assert(&(__sc)->lock, MA_NOTOWNED); \
113 mtx_lock(&(__sc)->lock); \
114 } while (0)
115 #define DPNI_UNLOCK(__sc) do { \
116 mtx_assert(&(__sc)->lock, MA_OWNED); \
117 mtx_unlock(&(__sc)->lock); \
118 } while (0)
119 #define DPNI_LOCK_ASSERT(__sc) do { \
120 mtx_assert(&(__sc)->lock, MA_OWNED); \
121 } while (0)
122
123 #define DPAA2_TX_RING(sc, chan, tc) \
124 (&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
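/*
 * NOTE: Tx rings are kept per channel: each channel's Tx confirmation queue
 * carries one ring per Tx traffic class (see dpaa2_ni_setup_tx_flow()).
 */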
125
126 MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
127
128 /*
129 * How many times the channel cleanup routine will be repeated if the Rx or Tx
130 * budget has been depleted.
131 */
132 #define DPAA2_CLEAN_BUDGET 64 /* sysctl(9)? */
133 /* TX/RX budget for the channel cleanup task */
134 #define DPAA2_TX_BUDGET 128 /* sysctl(9)? */
135 #define DPAA2_RX_BUDGET 256 /* sysctl(9)? */
136
137 #define DPNI_IRQ_INDEX 0 /* Index of the only DPNI IRQ. */
138 #define DPNI_IRQ_LINK_CHANGED 1 /* Link state changed */
139 #define DPNI_IRQ_EP_CHANGED 2 /* DPAA2 endpoint dis/connected */
140
141 /* Default maximum frame length. */
142 #define DPAA2_ETH_MFL (ETHER_MAX_LEN - ETHER_CRC_LEN)
143
144 /* Minimally supported version of the DPNI API. */
145 #define DPNI_VER_MAJOR 7
146 #define DPNI_VER_MINOR 0
147
148 /* Rx/Tx buffers configuration. */
149 #define BUF_ALIGN_V1 256 /* WRIOP v1.0.0 limitation */
150 #define BUF_ALIGN 64
151 #define BUF_SWA_SIZE 64 /* SW annotation size */
152 #define BUF_RX_HWA_SIZE 64 /* HW annotation size */
153 #define BUF_TX_HWA_SIZE 128 /* HW annotation size */
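/*
 * NOTE: The software and hardware annotation areas above are reserved in front
 * of the frame data in each buffer; their sizes are used when the DPNI buffer
 * layout is programmed (see dpaa2_ni_set_buf_layout()).
 */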
154
155 #define DPAA2_RX_BUFRING_SZ (4096u)
156 #define DPAA2_RXE_BUFRING_SZ (1024u)
157 #define DPAA2_TXC_BUFRING_SZ (4096u)
158 #define DPAA2_TX_SEGLIMIT (16u) /* arbitrary number */
159 #define DPAA2_TX_SEG_SZ (PAGE_SIZE)
160 #define DPAA2_TX_SEGS_MAXSZ (DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
161 #define DPAA2_TX_SGT_SZ (PAGE_SIZE) /* bytes */
162
163 /* Size of a buffer to keep a QoS table key configuration. */
164 #define ETH_QOS_KCFG_BUF_SIZE (PAGE_SIZE)
165
166 /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
167 #define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)
168
169 /* Buffers layout options. */
170 #define BUF_LOPT_TIMESTAMP 0x1
171 #define BUF_LOPT_PARSER_RESULT 0x2
172 #define BUF_LOPT_FRAME_STATUS 0x4
173 #define BUF_LOPT_PRIV_DATA_SZ 0x8
174 #define BUF_LOPT_DATA_ALIGN 0x10
175 #define BUF_LOPT_DATA_HEAD_ROOM 0x20
176 #define BUF_LOPT_DATA_TAIL_ROOM 0x40
177
178 #define DPAA2_NI_BUF_ADDR_MASK (0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
179 #define DPAA2_NI_BUF_CHAN_MASK (0xFu)
180 #define DPAA2_NI_BUF_CHAN_SHIFT (60)
181 #define DPAA2_NI_BUF_IDX_MASK (0x7FFFu)
182 #define DPAA2_NI_BUF_IDX_SHIFT (49)
183 #define DPAA2_NI_TX_IDX_MASK (0x7u)
184 #define DPAA2_NI_TX_IDX_SHIFT (57)
185 #define DPAA2_NI_TXBUF_IDX_MASK (0xFFu)
186 #define DPAA2_NI_TXBUF_IDX_SHIFT (49)
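/*
 * NOTE: Frame descriptors carry at most 49-bit buffer addresses, so the upper
 * bits of the 64-bit address field are free; the driver reuses them to encode
 * the channel, buffer and Tx ring indices defined above.
 */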
187
188 #define DPAA2_NI_FD_FMT_MASK (0x3u)
189 #define DPAA2_NI_FD_FMT_SHIFT (12)
190 #define DPAA2_NI_FD_ERR_MASK (0xFFu)
191 #define DPAA2_NI_FD_ERR_SHIFT (0)
192 #define DPAA2_NI_FD_SL_MASK (0x1u)
193 #define DPAA2_NI_FD_SL_SHIFT (14)
194 #define DPAA2_NI_FD_LEN_MASK (0x3FFFFu)
195 #define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
196
197 /* Enables TCAM for Flow Steering and QoS look-ups. */
198 #define DPNI_OPT_HAS_KEY_MASKING 0x10
199
200 /* Unique IDs for the supported Rx classification header fields. */
201 #define DPAA2_ETH_DIST_ETHDST BIT(0)
202 #define DPAA2_ETH_DIST_ETHSRC BIT(1)
203 #define DPAA2_ETH_DIST_ETHTYPE BIT(2)
204 #define DPAA2_ETH_DIST_VLAN BIT(3)
205 #define DPAA2_ETH_DIST_IPSRC BIT(4)
206 #define DPAA2_ETH_DIST_IPDST BIT(5)
207 #define DPAA2_ETH_DIST_IPPROTO BIT(6)
208 #define DPAA2_ETH_DIST_L4SRC BIT(7)
209 #define DPAA2_ETH_DIST_L4DST BIT(8)
210 #define DPAA2_ETH_DIST_ALL (~0ULL)
211
212 /* L3-L4 network traffic flow hash options. */
213 #define RXH_L2DA (1 << 1)
214 #define RXH_VLAN (1 << 2)
215 #define RXH_L3_PROTO (1 << 3)
216 #define RXH_IP_SRC (1 << 4)
217 #define RXH_IP_DST (1 << 5)
218 #define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
219 #define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
220 #define RXH_DISCARD (1 << 31)
221
222 /* Default Rx hash options, set during attaching. */
223 #define DPAA2_RXH_DEFAULT (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
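/* NOTE: The default corresponds to hashing on the 4-tuple, i.e. IP source and
 * destination addresses plus L4 source and destination ports. */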
224
225 MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
226
227 /*
228 * DPAA2 Network Interface resource specification.
229 *
230 * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
231 * the specification!
232 */
233 struct resource_spec dpaa2_ni_spec[] = {
234 /*
235 * DPMCP resources.
236 *
237 * NOTE: MC command portals (MCPs) are used to send commands to, and
238 * receive responses from, the MC firmware. One portal per DPNI.
239 */
240 { DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
241 /*
242 * DPIO resources (software portals).
243 *
244 * NOTE: One per running core. While DPIOs are the source of data
245 * availability interrupts, the DPCONs are used to identify the
246 * network interface that has produced ingress data to that core.
247 */
248 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(0), RF_ACTIVE | RF_SHAREABLE },
249 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
250 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
251 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
252 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
253 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
254 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
255 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
256 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
257 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
258 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
259 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
260 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
261 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
262 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
263 { DPAA2_DEV_IO, DPAA2_NI_IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
264 /*
265 * DPBP resources (buffer pools).
266 *
267 * NOTE: One per network interface.
268 */
269 { DPAA2_DEV_BP, DPAA2_NI_BP_RID(0), RF_ACTIVE },
270 /*
271 * DPCON resources (channels).
272 *
273 	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic is
274 	 *       distributed.
275 * NOTE: Since it is necessary to distinguish between traffic from
276 * different network interfaces arriving on the same core, the
277 * DPCONs must be private to the DPNIs.
278 */
279 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(0), RF_ACTIVE },
280 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(1), RF_ACTIVE | RF_OPTIONAL },
281 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(2), RF_ACTIVE | RF_OPTIONAL },
282 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(3), RF_ACTIVE | RF_OPTIONAL },
283 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(4), RF_ACTIVE | RF_OPTIONAL },
284 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(5), RF_ACTIVE | RF_OPTIONAL },
285 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(6), RF_ACTIVE | RF_OPTIONAL },
286 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(7), RF_ACTIVE | RF_OPTIONAL },
287 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(8), RF_ACTIVE | RF_OPTIONAL },
288 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(9), RF_ACTIVE | RF_OPTIONAL },
289 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(10), RF_ACTIVE | RF_OPTIONAL },
290 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(11), RF_ACTIVE | RF_OPTIONAL },
291 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(12), RF_ACTIVE | RF_OPTIONAL },
292 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(13), RF_ACTIVE | RF_OPTIONAL },
293 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(14), RF_ACTIVE | RF_OPTIONAL },
294 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(15), RF_ACTIVE | RF_OPTIONAL },
295
296 RESOURCE_SPEC_END
297 };
298
299 /* Supported header fields for Rx hash distribution key */
300 static const struct dpaa2_eth_dist_fields dist_fields[] = {
301 {
302 /* L2 header */
303 .rxnfc_field = RXH_L2DA,
304 .cls_prot = NET_PROT_ETH,
305 .cls_field = NH_FLD_ETH_DA,
306 .id = DPAA2_ETH_DIST_ETHDST,
307 .size = 6,
308 }, {
309 .cls_prot = NET_PROT_ETH,
310 .cls_field = NH_FLD_ETH_SA,
311 .id = DPAA2_ETH_DIST_ETHSRC,
312 .size = 6,
313 }, {
314 /* This is the last ethertype field parsed:
315 * depending on frame format, it can be the MAC ethertype
316 * or the VLAN etype.
317 */
318 .cls_prot = NET_PROT_ETH,
319 .cls_field = NH_FLD_ETH_TYPE,
320 .id = DPAA2_ETH_DIST_ETHTYPE,
321 .size = 2,
322 }, {
323 /* VLAN header */
324 .rxnfc_field = RXH_VLAN,
325 .cls_prot = NET_PROT_VLAN,
326 .cls_field = NH_FLD_VLAN_TCI,
327 .id = DPAA2_ETH_DIST_VLAN,
328 .size = 2,
329 }, {
330 /* IP header */
331 .rxnfc_field = RXH_IP_SRC,
332 .cls_prot = NET_PROT_IP,
333 .cls_field = NH_FLD_IP_SRC,
334 .id = DPAA2_ETH_DIST_IPSRC,
335 .size = 4,
336 }, {
337 .rxnfc_field = RXH_IP_DST,
338 .cls_prot = NET_PROT_IP,
339 .cls_field = NH_FLD_IP_DST,
340 .id = DPAA2_ETH_DIST_IPDST,
341 .size = 4,
342 }, {
343 .rxnfc_field = RXH_L3_PROTO,
344 .cls_prot = NET_PROT_IP,
345 .cls_field = NH_FLD_IP_PROTO,
346 .id = DPAA2_ETH_DIST_IPPROTO,
347 .size = 1,
348 }, {
349 /* Using UDP ports, this is functionally equivalent to raw
350 * byte pairs from L4 header.
351 */
352 .rxnfc_field = RXH_L4_B_0_1,
353 .cls_prot = NET_PROT_UDP,
354 .cls_field = NH_FLD_UDP_PORT_SRC,
355 .id = DPAA2_ETH_DIST_L4SRC,
356 .size = 2,
357 }, {
358 .rxnfc_field = RXH_L4_B_2_3,
359 .cls_prot = NET_PROT_UDP,
360 .cls_field = NH_FLD_UDP_PORT_DST,
361 .id = DPAA2_ETH_DIST_L4DST,
362 .size = 2,
363 },
364 };
365
366 static struct dpni_stat {
367 int page;
368 int cnt;
369 char *name;
370 char *desc;
371 } dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
372 /* PAGE, COUNTER, NAME, DESCRIPTION */
373 { 0, 0, "in_all_frames", "All accepted ingress frames" },
374 { 0, 1, "in_all_bytes", "Bytes in all accepted ingress frames" },
375 { 0, 2, "in_multi_frames", "Multicast accepted ingress frames" },
376 { 1, 0, "eg_all_frames", "All egress frames transmitted" },
377 { 1, 1, "eg_all_bytes", "Bytes in all frames transmitted" },
378 { 1, 2, "eg_multi_frames", "Multicast egress frames transmitted" },
379 { 2, 0, "in_filtered_frames", "All ingress frames discarded due to "
380 "filtering" },
381 { 2, 1, "in_discarded_frames", "All frames discarded due to errors" },
382 { 2, 2, "in_nobuf_discards", "Discards on ingress side due to buffer "
383 "depletion in DPNI buffer pools" },
384 };
385
386 struct dpaa2_ni_rx_ctx {
387 struct mbuf *head;
388 struct mbuf *tail;
389 int cnt;
390 bool last;
391 };
392
393 /* Device interface */
394 static int dpaa2_ni_probe(device_t);
395 static int dpaa2_ni_attach(device_t);
396 static int dpaa2_ni_detach(device_t);
397
398 /* DPAA2 network interface setup and configuration */
399 static int dpaa2_ni_setup(device_t);
400 static int dpaa2_ni_setup_channels(device_t);
401 static int dpaa2_ni_bind(device_t);
402 static int dpaa2_ni_setup_rx_dist(device_t);
403 static int dpaa2_ni_setup_irqs(device_t);
404 static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
405 static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
406 static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
407 static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
408 static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);
409
410 /* Tx/Rx flow configuration */
411 static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
412 static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
413 static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);
414
415 /* Configuration subroutines */
416 static int dpaa2_ni_set_buf_layout(device_t);
417 static int dpaa2_ni_set_pause_frame(device_t);
418 static int dpaa2_ni_set_qos_table(device_t);
419 static int dpaa2_ni_set_mac_addr(device_t);
420 static int dpaa2_ni_set_hash(device_t, uint64_t);
421 static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
422
423 /* Frame descriptor routines */
424 static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
425 struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
426 static int dpaa2_ni_fd_err(struct dpaa2_fd *);
427 static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
428 static int dpaa2_ni_fd_format(struct dpaa2_fd *);
429 static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
430 static int dpaa2_ni_fd_offset(struct dpaa2_fd *);
431
432 /* Various subroutines */
433 static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
434 static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
435
436 /* Network interface routines */
437 static void dpaa2_ni_init(void *);
438 static int dpaa2_ni_transmit(if_t, struct mbuf *);
439 static void dpaa2_ni_qflush(if_t);
440 static int dpaa2_ni_ioctl(if_t, u_long, caddr_t);
441 static int dpaa2_ni_update_mac_filters(if_t);
442 static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);
443
444 /* Interrupt handlers */
445 static void dpaa2_ni_intr(void *);
446
447 /* MII handlers */
448 static void dpaa2_ni_miibus_statchg(device_t);
449 static int dpaa2_ni_media_change(if_t);
450 static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
451 static void dpaa2_ni_media_tick(void *);
452
453 /* Tx/Rx routines. */
454 static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
455 static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
456 static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
457 struct dpaa2_ni_tx_ring *, struct mbuf *);
458 static void dpaa2_ni_cleanup_task(void *, int);
459
460 /* Tx/Rx subroutines */
461 static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
462 uint32_t *);
463 static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
464 struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
465 static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
466 struct dpaa2_fd *);
467 static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
468 struct dpaa2_fd *);
469
470 /* sysctl(9) */
471 static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
472 static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
473 static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);
474
475 static int
476 dpaa2_ni_probe(device_t dev)
477 {
478 /* DPNI device will be added by a parent resource container itself. */
479 device_set_desc(dev, "DPAA2 Network Interface");
480 return (BUS_PROBE_DEFAULT);
481 }
482
483 static int
484 dpaa2_ni_attach(device_t dev)
485 {
486 device_t pdev = device_get_parent(dev);
487 device_t child = dev;
488 device_t mcp_dev;
489 struct dpaa2_ni_softc *sc = device_get_softc(dev);
490 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
491 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
492 struct dpaa2_devinfo *mcp_dinfo;
493 struct dpaa2_cmd cmd;
494 uint16_t rc_token, ni_token;
495 if_t ifp;
496 char tq_name[32];
497 int error;
498
499 sc->dev = dev;
500 sc->ifp = NULL;
501 sc->miibus = NULL;
502 sc->mii = NULL;
503 sc->media_status = 0;
504 sc->if_flags = 0;
505 sc->link_state = LINK_STATE_UNKNOWN;
506 sc->buf_align = 0;
507
508 /* For debug purposes only! */
509 sc->rx_anomaly_frames = 0;
510 sc->rx_single_buf_frames = 0;
511 sc->rx_sg_buf_frames = 0;
512 sc->rx_enq_rej_frames = 0;
513 sc->rx_ieoi_err_frames = 0;
514 sc->tx_single_buf_frames = 0;
515 sc->tx_sg_frames = 0;
516
517 DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
518 DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);
519
520 sc->rxd_dmat = NULL;
521 sc->qos_dmat = NULL;
522
523 sc->qos_kcfg.dmap = NULL;
524 sc->qos_kcfg.paddr = 0;
525 sc->qos_kcfg.vaddr = NULL;
526
527 sc->rxd_kcfg.dmap = NULL;
528 sc->rxd_kcfg.paddr = 0;
529 sc->rxd_kcfg.vaddr = NULL;
530
531 sc->mac.dpmac_id = 0;
532 sc->mac.phy_dev = NULL;
533 memset(sc->mac.addr, 0, ETHER_ADDR_LEN);
534
535 error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
536 if (error) {
537 device_printf(dev, "%s: failed to allocate resources: "
538 "error=%d\n", __func__, error);
539 goto err_exit;
540 }
541
542 /* Obtain MC portal. */
543 mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
544 mcp_dinfo = device_get_ivars(mcp_dev);
545 dinfo->portal = mcp_dinfo->portal;
546
547 mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);
548
549 /* Allocate network interface */
550 ifp = if_alloc(IFT_ETHER);
551 sc->ifp = ifp;
552 if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));
553
554 if_setsoftc(ifp, sc);
555 if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
556 if_setinitfn(ifp, dpaa2_ni_init);
557 if_setioctlfn(ifp, dpaa2_ni_ioctl);
558 if_settransmitfn(ifp, dpaa2_ni_transmit);
559 if_setqflushfn(ifp, dpaa2_ni_qflush);
560
561 if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
562 if_setcapenable(ifp, if_getcapabilities(ifp));
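	/*
	 * NOTE: The checksum offloads matching these capabilities are
	 * programmed into the DPNI separately, in dpaa2_ni_setup_if_caps().
	 */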
563
564 DPAA2_CMD_INIT(&cmd);
565
566 /* Open resource container and network interface object. */
567 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
568 if (error) {
569 device_printf(dev, "%s: failed to open resource container: "
570 "id=%d, error=%d\n", __func__, rcinfo->id, error);
571 goto err_exit;
572 }
573 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
574 if (error) {
575 device_printf(dev, "%s: failed to open network interface: "
576 "id=%d, error=%d\n", __func__, dinfo->id, error);
577 goto close_rc;
578 }
579
580 bzero(tq_name, sizeof(tq_name));
581 snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));
582
583 /*
584 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
585 * (BPSCN) returned as a result to the VDQ command instead.
586 * It is similar to CDAN processed in dpaa2_io_intr().
587 */
588 /* Create a taskqueue thread to release new buffers to the pool. */
589 sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
590 taskqueue_thread_enqueue, &sc->bp_taskq);
591 if (sc->bp_taskq == NULL) {
592 device_printf(dev, "%s: failed to allocate task queue: %s\n",
593 __func__, tq_name);
594 goto close_ni;
595 }
596 taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);
597
598 /* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
599 /* taskqueue_thread_enqueue, &sc->cleanup_taskq); */
600 /* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
601 /* "dpaa2_ch cleanup"); */
602
603 error = dpaa2_ni_setup(dev);
604 if (error) {
605 device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
606 __func__, error);
607 goto close_ni;
608 }
609 error = dpaa2_ni_setup_channels(dev);
610 if (error) {
611 device_printf(dev, "%s: failed to setup QBMan channels: "
612 "error=%d\n", __func__, error);
613 goto close_ni;
614 }
615
616 error = dpaa2_ni_bind(dev);
617 if (error) {
618 device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
619 __func__, error);
620 goto close_ni;
621 }
622 error = dpaa2_ni_setup_irqs(dev);
623 if (error) {
624 device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
625 __func__, error);
626 goto close_ni;
627 }
628 error = dpaa2_ni_setup_sysctls(sc);
629 if (error) {
630 device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
631 __func__, error);
632 goto close_ni;
633 }
634
635 ether_ifattach(sc->ifp, sc->mac.addr);
636 callout_init(&sc->mii_callout, 0);
637
638 return (0);
639
640 close_ni:
641 DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
642 close_rc:
643 DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
644 err_exit:
645 return (ENXIO);
646 }
647
648 static void
649 dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
650 {
651 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
652
653 DPNI_LOCK(sc);
654 ifmr->ifm_count = 0;
655 ifmr->ifm_mask = 0;
656 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
657 ifmr->ifm_current = ifmr->ifm_active =
658 sc->fixed_ifmedia.ifm_cur->ifm_media;
659
660 /*
661 	 * In non-PHY use cases we need to signal link state up; otherwise
662 	 * certain things requiring a link event from devd (e.g. an async DHCP
663 	 * client) do not happen.
664 */
665 if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
666 if_link_state_change(ifp, LINK_STATE_UP);
667 }
668
669 /*
670 * TODO: Check the status of the link partner (DPMAC, DPNI or other) and
671 * reset if down. This is different to the DPAA2_MAC_LINK_TYPE_PHY as
672 * the MC firmware sets the status, instead of us telling the MC what
673 * it is.
674 */
675 DPNI_UNLOCK(sc);
676
677 return;
678 }
679
680 static void
681 dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
682 {
683 /*
684 * FIXME: When the DPNI is connected to a DPMAC, we can get the
685 * 'apparent' speed from it.
686 */
687 sc->fixed_link = true;
688
689 ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
690 dpaa2_ni_fixed_media_status);
691 ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
692 ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
693 }
694
695 static int
696 dpaa2_ni_detach(device_t dev)
697 {
698 /* TBD */
699 return (0);
700 }
701
702 /**
703 * @brief Configure DPAA2 network interface object.
704 */
705 static int
706 dpaa2_ni_setup(device_t dev)
707 {
708 device_t pdev = device_get_parent(dev);
709 device_t child = dev;
710 struct dpaa2_ni_softc *sc = device_get_softc(dev);
711 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
712 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
713 struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
714 struct dpaa2_cmd cmd;
715 uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
716 uint16_t rc_token, ni_token, mac_token;
717 struct dpaa2_mac_attr attr;
718 enum dpaa2_mac_link_type link_type;
719 uint32_t link;
720 int error;
721
722 DPAA2_CMD_INIT(&cmd);
723
724 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
725 if (error) {
726 device_printf(dev, "%s: failed to open resource container: "
727 "id=%d, error=%d\n", __func__, rcinfo->id, error);
728 goto err_exit;
729 }
730 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
731 if (error) {
732 device_printf(dev, "%s: failed to open network interface: "
733 "id=%d, error=%d\n", __func__, dinfo->id, error);
734 goto close_rc;
735 }
736
737 /* Check if we can work with this DPNI object. */
738 error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
739 &sc->api_minor);
740 if (error) {
741 device_printf(dev, "%s: failed to get DPNI API version\n",
742 __func__);
743 goto close_ni;
744 }
745 if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
746 device_printf(dev, "%s: DPNI API version %u.%u not supported, "
747 "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
748 DPNI_VER_MAJOR, DPNI_VER_MINOR);
749 error = ENODEV;
750 goto close_ni;
751 }
752
753 /* Reset the DPNI object. */
754 error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
755 if (error) {
756 device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
757 __func__, dinfo->id);
758 goto close_ni;
759 }
760
761 /* Obtain attributes of the DPNI object. */
762 error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
763 if (error) {
764 device_printf(dev, "%s: failed to obtain DPNI attributes: "
765 "id=%d\n", __func__, dinfo->id);
766 goto close_ni;
767 }
768 if (bootverbose) {
769 		device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d "
770 "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
771 sc->attr.num.channels, sc->attr.wriop_ver);
772 device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
773 "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
774 sc->attr.num.cgs);
775 device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
776 "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
777 sc->attr.entries.qos, sc->attr.entries.fs);
778 device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
779 sc->attr.key_size.qos, sc->attr.key_size.fs);
780 }
781
782 /* Configure buffer layouts of the DPNI queues. */
783 error = dpaa2_ni_set_buf_layout(dev);
784 if (error) {
785 device_printf(dev, "%s: failed to configure buffer layout\n",
786 __func__);
787 goto close_ni;
788 }
789
790 /* Configure DMA resources. */
791 error = dpaa2_ni_setup_dma(sc);
792 if (error) {
793 device_printf(dev, "%s: failed to setup DMA\n", __func__);
794 goto close_ni;
795 }
796
797 /* Setup link between DPNI and an object it's connected to. */
798 ep1_desc.obj_id = dinfo->id;
799 ep1_desc.if_id = 0; /* DPNI has the only endpoint */
800 ep1_desc.type = dinfo->dtype;
801
802 error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
803 &ep1_desc, &ep2_desc, &link);
804 if (error) {
805 device_printf(dev, "%s: failed to obtain an object DPNI is "
806 "connected to: error=%d\n", __func__, error);
807 } else {
808 device_printf(dev, "connected to %s (id=%d)\n",
809 dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);
810
811 error = dpaa2_ni_set_mac_addr(dev);
812 if (error) {
813 device_printf(dev, "%s: failed to set MAC address: "
814 "error=%d\n", __func__, error);
815 }
816
817 if (ep2_desc.type == DPAA2_DEV_MAC) {
818 /*
819 * This is the simplest case when DPNI is connected to
820 * DPMAC directly.
821 */
822 sc->mac.dpmac_id = ep2_desc.obj_id;
823
824 link_type = DPAA2_MAC_LINK_TYPE_NONE;
825
826 /*
827 * Need to determine if DPMAC type is PHY (attached to
828 * conventional MII PHY) or FIXED (usually SFP/SerDes,
829 * link state managed by MC firmware).
830 */
831 error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
832 DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
833 &mac_token);
834 /*
835 * Under VFIO, the DPMAC might be sitting in another
836 * container (DPRC) we don't have access to.
837 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
838 * the case.
839 */
840 if (error) {
841 device_printf(dev, "%s: failed to open "
842 "connected DPMAC: %d (assuming in other DPRC)\n", __func__,
843 sc->mac.dpmac_id);
844 link_type = DPAA2_MAC_LINK_TYPE_FIXED;
845 } else {
846 error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
847 &cmd, &attr);
848 if (error) {
849 device_printf(dev, "%s: failed to get "
850 "DPMAC attributes: id=%d, "
851 "error=%d\n", __func__, dinfo->id,
852 error);
853 } else {
854 link_type = attr.link_type;
855 }
856 }
857 DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
858
859 if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
860 device_printf(dev, "connected DPMAC is in FIXED "
861 "mode\n");
862 dpaa2_ni_setup_fixed_link(sc);
863 } else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
864 device_printf(dev, "connected DPMAC is in PHY "
865 "mode\n");
866 error = DPAA2_MC_GET_PHY_DEV(dev,
867 &sc->mac.phy_dev, sc->mac.dpmac_id);
868 if (error == 0) {
869 error = MEMAC_MDIO_SET_NI_DEV(
870 sc->mac.phy_dev, dev);
871 if (error != 0) {
872 device_printf(dev, "%s: failed "
873 "to set dpni dev on memac "
874 "mdio dev %s: error=%d\n",
875 __func__,
876 device_get_nameunit(
877 sc->mac.phy_dev), error);
878 }
879 }
880 if (error == 0) {
881 error = MEMAC_MDIO_GET_PHY_LOC(
882 sc->mac.phy_dev, &sc->mac.phy_loc);
883 if (error == ENODEV) {
884 error = 0;
885 }
886 if (error != 0) {
887 device_printf(dev, "%s: failed "
888 "to get phy location from "
889 "memac mdio dev %s: error=%d\n",
890 __func__, device_get_nameunit(
891 sc->mac.phy_dev), error);
892 }
893 }
894 if (error == 0) {
895 error = mii_attach(sc->mac.phy_dev,
896 &sc->miibus, sc->ifp,
897 dpaa2_ni_media_change,
898 dpaa2_ni_media_status,
899 BMSR_DEFCAPMASK, sc->mac.phy_loc,
900 MII_OFFSET_ANY, 0);
901 if (error != 0) {
902 device_printf(dev, "%s: failed "
903 "to attach to miibus: "
904 "error=%d\n",
905 __func__, error);
906 }
907 }
908 if (error == 0) {
909 sc->mii = device_get_softc(sc->miibus);
910 }
911 } else {
912 device_printf(dev, "%s: DPMAC link type is not "
913 "supported\n", __func__);
914 }
915 } else if (ep2_desc.type == DPAA2_DEV_NI ||
916 ep2_desc.type == DPAA2_DEV_MUX ||
917 ep2_desc.type == DPAA2_DEV_SW) {
918 dpaa2_ni_setup_fixed_link(sc);
919 }
920 }
921
922 /* Select mode to enqueue frames. */
923 /* ... TBD ... */
924
925 /*
926 * Update link configuration to enable Rx/Tx pause frames support.
927 *
928 * NOTE: MC may generate an interrupt to the DPMAC and request changes
929 * in link configuration. It might be necessary to attach miibus
930 * and PHY before this point.
931 */
932 error = dpaa2_ni_set_pause_frame(dev);
933 if (error) {
934 device_printf(dev, "%s: failed to configure Rx/Tx pause "
935 "frames\n", __func__);
936 goto close_ni;
937 }
938
939 /* Configure ingress traffic classification. */
940 error = dpaa2_ni_set_qos_table(dev);
941 if (error) {
942 device_printf(dev, "%s: failed to configure QoS table: "
943 "error=%d\n", __func__, error);
944 goto close_ni;
945 }
946
947 /* Add broadcast physical address to the MAC filtering table. */
948 memset(eth_bca, 0xff, ETHER_ADDR_LEN);
949 error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
950 ni_token), eth_bca);
951 if (error) {
952 device_printf(dev, "%s: failed to add broadcast physical "
953 "address to the MAC filtering table\n", __func__);
954 goto close_ni;
955 }
956
957 /* Set the maximum allowed length for received frames. */
958 error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
959 if (error) {
960 device_printf(dev, "%s: failed to set maximum length for "
961 "received frames\n", __func__);
962 goto close_ni;
963 }
964
965 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
966 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
967 return (0);
968
969 close_ni:
970 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
971 close_rc:
972 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
973 err_exit:
974 return (error);
975 }
976
977 /**
978 * @brief Configure QBMan channels and register data availability notifications.
979 */
980 static int
981 dpaa2_ni_setup_channels(device_t dev)
982 {
983 device_t iodev, condev, bpdev;
984 struct dpaa2_ni_softc *sc = device_get_softc(dev);
985 uint32_t i, num_chan;
986 int error;
987
988 /* Calculate number of the channels based on the allocated resources */
989 for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) {
990 if (!sc->res[DPAA2_NI_IO_RID(i)]) {
991 break;
992 }
993 }
994 num_chan = i;
995 for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) {
996 if (!sc->res[DPAA2_NI_CON_RID(i)]) {
997 break;
998 }
999 }
1000 num_chan = i < num_chan ? i : num_chan;
1001 sc->chan_n = num_chan > DPAA2_MAX_CHANNELS
1002 ? DPAA2_MAX_CHANNELS : num_chan;
1003 sc->chan_n = sc->chan_n > sc->attr.num.queues
1004 ? sc->attr.num.queues : sc->chan_n;
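	/* chan_n is now min(#DPIOs, #DPCONs, DPAA2_MAX_CHANNELS, #DPNI queues). */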
1005
1006 KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
1007 "chan_n=%d", __func__, sc->chan_n));
1008
1009 device_printf(dev, "channels=%d\n", sc->chan_n);
1010
1011 for (i = 0; i < sc->chan_n; i++) {
1012 iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
1013 condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
1014 /* Only one buffer pool available at the moment */
1015 bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
1016
1017 error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
1018 &sc->channels[i], i, dpaa2_ni_cleanup_task);
1019 if (error != 0) {
1020 device_printf(dev, "%s: dpaa2_chan_setup() failed: "
1021 "error=%d, chan_id=%d\n", __func__, error, i);
1022 return (error);
1023 }
1024 }
1025
1026 /* There is exactly one Rx error queue per network interface */
1027 error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
1028 if (error != 0) {
1029 device_printf(dev, "%s: failed to prepare RxError queue: "
1030 "error=%d\n", __func__, error);
1031 return (error);
1032 }
1033
1034 return (0);
1035 }
1036
1037 /**
1038 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
1039 */
1040 static int
1041 dpaa2_ni_bind(device_t dev)
1042 {
1043 device_t pdev = device_get_parent(dev);
1044 device_t child = dev;
1045 device_t bp_dev;
1046 struct dpaa2_ni_softc *sc = device_get_softc(dev);
1047 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1048 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1049 struct dpaa2_devinfo *bp_info;
1050 struct dpaa2_cmd cmd;
1051 struct dpaa2_ni_pools_cfg pools_cfg;
1052 struct dpaa2_ni_err_cfg err_cfg;
1053 struct dpaa2_channel *chan;
1054 uint16_t rc_token, ni_token;
1055 int error;
1056
1057 DPAA2_CMD_INIT(&cmd);
1058
1059 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1060 if (error) {
1061 device_printf(dev, "%s: failed to open resource container: "
1062 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1063 goto err_exit;
1064 }
1065 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1066 if (error) {
1067 device_printf(dev, "%s: failed to open network interface: "
1068 "id=%d, error=%d\n", __func__, dinfo->id, error);
1069 goto close_rc;
1070 }
1071
1072 /* Select buffer pool (only one available at the moment). */
1073 bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
1074 bp_info = device_get_ivars(bp_dev);
1075
1076 /* Configure buffers pool. */
1077 pools_cfg.pools_num = 1;
1078 pools_cfg.pools[0].bp_obj_id = bp_info->id;
1079 pools_cfg.pools[0].backup_flag = 0;
1080 pools_cfg.pools[0].buf_sz = sc->buf_sz;
1081 error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
1082 if (error) {
1083 device_printf(dev, "%s: failed to set buffer pools\n", __func__);
1084 goto close_ni;
1085 }
1086
1087 /* Setup ingress traffic distribution. */
1088 error = dpaa2_ni_setup_rx_dist(dev);
1089 if (error && error != EOPNOTSUPP) {
1090 device_printf(dev, "%s: failed to setup ingress traffic "
1091 "distribution\n", __func__);
1092 goto close_ni;
1093 }
1094 if (bootverbose && error == EOPNOTSUPP) {
1095 device_printf(dev, "Ingress traffic distribution not "
1096 "supported\n");
1097 }
1098
1099 /* Configure handling of error frames. */
1100 err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
1101 err_cfg.set_err_fas = false;
1102 err_cfg.action = DPAA2_NI_ERR_DISCARD;
1103 error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
1104 if (error) {
1105 device_printf(dev, "%s: failed to set errors behavior\n",
1106 __func__);
1107 goto close_ni;
1108 }
1109
1110 /* Configure channel queues to generate CDANs. */
1111 for (uint32_t i = 0; i < sc->chan_n; i++) {
1112 chan = sc->channels[i];
1113
1114 /* Setup Rx flows. */
1115 for (uint32_t j = 0; j < chan->rxq_n; j++) {
1116 error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
1117 if (error) {
1118 device_printf(dev, "%s: failed to setup Rx "
1119 "flow: error=%d\n", __func__, error);
1120 goto close_ni;
1121 }
1122 }
1123
1124 /* Setup Tx flow. */
1125 error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
1126 if (error) {
1127 device_printf(dev, "%s: failed to setup Tx "
1128 "flow: error=%d\n", __func__, error);
1129 goto close_ni;
1130 }
1131 }
1132
1133 /* Configure RxError queue to generate CDAN. */
1134 error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
1135 if (error) {
1136 device_printf(dev, "%s: failed to setup RxError flow: "
1137 "error=%d\n", __func__, error);
1138 goto close_ni;
1139 }
1140
1141 /*
1142 * Get the Queuing Destination ID (QDID) that should be used for frame
1143 * enqueue operations.
1144 */
1145 error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
1146 &sc->tx_qdid);
1147 if (error) {
1148 device_printf(dev, "%s: failed to get Tx queuing destination "
1149 "ID\n", __func__);
1150 goto close_ni;
1151 }
1152
1153 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1154 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1155 return (0);
1156
1157 close_ni:
1158 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1159 close_rc:
1160 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1161 err_exit:
1162 return (error);
1163 }
1164
1165 /**
1166 * @brief Setup ingress traffic distribution.
1167 *
1168 * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
1169 * option hasn't been set for the DPNI and the number of DPNI queues is > 1.
1170 */
1171 static int
1172 dpaa2_ni_setup_rx_dist(device_t dev)
1173 {
1174 /*
1175 * Have the interface implicitly distribute traffic based on the default
1176 * hash key.
1177 */
1178 return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
1179 }
1180
1181 static int
1182 dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
1183 {
1184 device_t pdev = device_get_parent(dev);
1185 device_t child = dev;
1186 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1187 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1188 struct dpaa2_devinfo *con_info;
1189 struct dpaa2_cmd cmd;
1190 struct dpaa2_ni_queue_cfg queue_cfg = {0};
1191 uint16_t rc_token, ni_token;
1192 int error;
1193
1194 DPAA2_CMD_INIT(&cmd);
1195
1196 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1197 if (error) {
1198 device_printf(dev, "%s: failed to open resource container: "
1199 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1200 goto err_exit;
1201 }
1202 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1203 if (error) {
1204 device_printf(dev, "%s: failed to open network interface: "
1205 "id=%d, error=%d\n", __func__, dinfo->id, error);
1206 goto close_rc;
1207 }
1208
1209 /* Obtain DPCON associated with the FQ's channel. */
1210 con_info = device_get_ivars(fq->chan->con_dev);
1211
1212 queue_cfg.type = DPAA2_NI_QUEUE_RX;
1213 queue_cfg.tc = fq->tc;
1214 queue_cfg.idx = fq->flowid;
1215 error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1216 if (error) {
1217 device_printf(dev, "%s: failed to obtain Rx queue "
1218 "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1219 queue_cfg.idx);
1220 goto close_ni;
1221 }
1222
1223 fq->fqid = queue_cfg.fqid;
1224
1225 queue_cfg.dest_id = con_info->id;
1226 queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1227 queue_cfg.priority = 1;
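	/*
	 * Stash the FQ pointer in the queue's user context so it can be
	 * recovered from dequeue responses (see dpaa2_ni_consume_frames()).
	 */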
1228 queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1229 queue_cfg.options =
1230 DPAA2_NI_QUEUE_OPT_USER_CTX |
1231 DPAA2_NI_QUEUE_OPT_DEST;
1232 error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1233 if (error) {
1234 device_printf(dev, "%s: failed to update Rx queue "
1235 "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1236 queue_cfg.idx);
1237 goto close_ni;
1238 }
1239
1240 if (bootverbose) {
1241 device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
1242 "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
1243 		    fq->fqid, (uintmax_t)fq);
1244 }
1245
1246 (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1247 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1248 return (0);
1249
1250 close_ni:
1251 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1252 close_rc:
1253 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1254 err_exit:
1255 return (error);
1256 }
1257
1258 static int
1259 dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
1260 {
1261 device_t pdev = device_get_parent(dev);
1262 device_t child = dev;
1263 struct dpaa2_ni_softc *sc = device_get_softc(dev);
1264 struct dpaa2_channel *ch = fq->chan;
1265 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1266 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1267 struct dpaa2_devinfo *con_info;
1268 struct dpaa2_ni_queue_cfg queue_cfg = {0};
1269 struct dpaa2_ni_tx_ring *tx;
1270 struct dpaa2_buf *buf;
1271 struct dpaa2_cmd cmd;
1272 uint32_t tx_rings_n = 0;
1273 uint16_t rc_token, ni_token;
1274 int error;
1275
1276 DPAA2_CMD_INIT(&cmd);
1277
1278 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1279 if (error) {
1280 device_printf(dev, "%s: failed to open resource container: "
1281 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1282 goto err_exit;
1283 }
1284 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1285 if (error) {
1286 device_printf(dev, "%s: failed to open network interface: "
1287 "id=%d, error=%d\n", __func__, dinfo->id, error);
1288 goto close_rc;
1289 }
1290
1291 /* Obtain DPCON associated with the FQ's channel. */
1292 con_info = device_get_ivars(fq->chan->con_dev);
1293
1294 KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
1295 ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
1296 sc->attr.num.tx_tcs));
1297 KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
1298 ("%s: too many Tx buffers (%d): max=%d\n", __func__,
1299 DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));
1300
1301 /* Setup Tx rings. */
1302 for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
1303 queue_cfg.type = DPAA2_NI_QUEUE_TX;
1304 queue_cfg.tc = i;
1305 queue_cfg.idx = fq->flowid;
1306 queue_cfg.chan_id = fq->chan->id;
1307
1308 error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1309 if (error) {
1310 device_printf(dev, "%s: failed to obtain Tx queue "
1311 "configuration: tc=%d, flowid=%d\n", __func__,
1312 queue_cfg.tc, queue_cfg.idx);
1313 goto close_ni;
1314 }
1315
1316 tx = &fq->tx_rings[i];
1317 tx->fq = fq;
1318 tx->fqid = queue_cfg.fqid;
1319 tx->txid = tx_rings_n;
1320
1321 if (bootverbose) {
1322 device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
1323 "fqid=%d\n", fq->flowid, i, fq->chan->id,
1324 queue_cfg.fqid);
1325 }
1326
1327 mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);
1328
1329 /* Allocate Tx ring buffer. */
1330 tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
1331 &tx->lock);
1332 if (tx->br == NULL) {
1333 device_printf(dev, "%s: failed to setup Tx ring buffer"
1334 " (2) fqid=%d\n", __func__, tx->fqid);
1335 goto close_ni;
1336 }
1337
1338 /* Configure Tx buffers */
1339 for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
1340 buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
1341 M_WAITOK);
1342 if (buf == NULL) {
1343 device_printf(dev, "%s: malloc() failed (buf)\n",
1344 __func__);
1345 return (ENOMEM);
1346 }
1347 /* Keep DMA tag and Tx ring linked to the buffer */
1348 DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);
1349
1350 buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
1351 M_WAITOK);
1352 if (buf->sgt == NULL) {
1353 device_printf(dev, "%s: malloc() failed (sgt)\n",
1354 __func__);
1355 return (ENOMEM);
1356 }
1357 /* Link SGT to DMA tag and back to its Tx buffer */
1358 DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);
1359
1360 error = dpaa2_buf_seed_txb(dev, buf);
1361
1362 /* Add Tx buffer to the ring */
1363 buf_ring_enqueue(tx->br, buf);
1364 }
1365
1366 tx_rings_n++;
1367 }
1368
1369 /* All Tx queues which belong to the same flowid have the same qdbin. */
1370 fq->tx_qdbin = queue_cfg.qdbin;
1371
1372 queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
1373 queue_cfg.tc = 0; /* ignored for TxConf queue */
1374 queue_cfg.idx = fq->flowid;
1375 error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1376 if (error) {
1377 device_printf(dev, "%s: failed to obtain TxConf queue "
1378 "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1379 queue_cfg.idx);
1380 goto close_ni;
1381 }
1382
1383 fq->fqid = queue_cfg.fqid;
1384
1385 queue_cfg.dest_id = con_info->id;
1386 queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1387 queue_cfg.priority = 0;
1388 queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1389 queue_cfg.options =
1390 DPAA2_NI_QUEUE_OPT_USER_CTX |
1391 DPAA2_NI_QUEUE_OPT_DEST;
1392 error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1393 if (error) {
1394 device_printf(dev, "%s: failed to update TxConf queue "
1395 "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1396 queue_cfg.idx);
1397 goto close_ni;
1398 }
1399
1400 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1401 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1402 return (0);
1403
1404 close_ni:
1405 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1406 close_rc:
1407 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1408 err_exit:
1409 return (error);
1410 }
1411
1412 static int
1413 dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
1414 {
1415 device_t pdev = device_get_parent(dev);
1416 device_t child = dev;
1417 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1418 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1419 struct dpaa2_devinfo *con_info;
1420 struct dpaa2_ni_queue_cfg queue_cfg = {0};
1421 struct dpaa2_cmd cmd;
1422 uint16_t rc_token, ni_token;
1423 int error;
1424
1425 DPAA2_CMD_INIT(&cmd);
1426
1427 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1428 if (error) {
1429 device_printf(dev, "%s: failed to open resource container: "
1430 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1431 goto err_exit;
1432 }
1433 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1434 if (error) {
1435 device_printf(dev, "%s: failed to open network interface: "
1436 "id=%d, error=%d\n", __func__, dinfo->id, error);
1437 goto close_rc;
1438 }
1439
1440 /* Obtain DPCON associated with the FQ's channel. */
1441 con_info = device_get_ivars(fq->chan->con_dev);
1442
1443 queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
1444 queue_cfg.tc = fq->tc; /* ignored */
1445 queue_cfg.idx = fq->flowid; /* ignored */
1446 error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1447 if (error) {
1448 device_printf(dev, "%s: failed to obtain RxErr queue "
1449 "configuration\n", __func__);
1450 goto close_ni;
1451 }
1452
1453 fq->fqid = queue_cfg.fqid;
1454
1455 queue_cfg.dest_id = con_info->id;
1456 queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1457 queue_cfg.priority = 1;
1458 queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1459 queue_cfg.options =
1460 DPAA2_NI_QUEUE_OPT_USER_CTX |
1461 DPAA2_NI_QUEUE_OPT_DEST;
1462 error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1463 if (error) {
1464 device_printf(dev, "%s: failed to update RxErr queue "
1465 "configuration\n", __func__);
1466 goto close_ni;
1467 }
1468
1469 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1470 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1471 return (0);
1472
1473 close_ni:
1474 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1475 close_rc:
1476 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1477 err_exit:
1478 return (error);
1479 }
1480
1481 /**
1482 * @brief Configure DPNI object to generate interrupts.
1483 */
1484 static int
1485 dpaa2_ni_setup_irqs(device_t dev)
1486 {
1487 device_t pdev = device_get_parent(dev);
1488 device_t child = dev;
1489 struct dpaa2_ni_softc *sc = device_get_softc(dev);
1490 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1491 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1492 struct dpaa2_cmd cmd;
1493 uint16_t rc_token, ni_token;
1494 int error;
1495
1496 DPAA2_CMD_INIT(&cmd);
1497
1498 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1499 if (error) {
1500 device_printf(dev, "%s: failed to open resource container: "
1501 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1502 goto err_exit;
1503 }
1504 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1505 if (error) {
1506 device_printf(dev, "%s: failed to open network interface: "
1507 "id=%d, error=%d\n", __func__, dinfo->id, error);
1508 goto close_rc;
1509 }
1510
1511 /* Configure IRQs. */
1512 error = dpaa2_ni_setup_msi(sc);
1513 if (error) {
1514 device_printf(dev, "%s: failed to allocate MSI\n", __func__);
1515 goto close_ni;
1516 }
1517 if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1518 &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
1519 device_printf(dev, "%s: failed to allocate IRQ resource\n",
1520 __func__);
1521 goto close_ni;
1522 }
1523 if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1524 NULL, dpaa2_ni_intr, sc, &sc->intr)) {
1525 device_printf(dev, "%s: failed to setup IRQ resource\n",
1526 __func__);
1527 goto close_ni;
1528 }
1529
1530 error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
1531 DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
1532 if (error) {
1533 device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
1534 __func__);
1535 goto close_ni;
1536 }
1537
1538 error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
1539 true);
1540 if (error) {
1541 device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
1542 goto close_ni;
1543 }
1544
1545 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1546 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1547 return (0);
1548
1549 close_ni:
1550 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1551 close_rc:
1552 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1553 err_exit:
1554 return (error);
1555 }
1556
1557 /**
1558 * @brief Allocate MSI interrupts for DPNI.
1559 */
1560 static int
1561 dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
1562 {
1563 int val;
1564
1565 val = pci_msi_count(sc->dev);
1566 if (val < DPAA2_NI_MSI_COUNT)
1567 device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
1568 		    DPAA2_NI_MSI_COUNT);
1569 val = MIN(val, DPAA2_NI_MSI_COUNT);
1570
1571 if (pci_alloc_msi(sc->dev, &val) != 0)
1572 return (EINVAL);
1573
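	/* MSI vectors are exposed as SYS_RES_IRQ resources with rids starting at 1. */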
1574 for (int i = 0; i < val; i++)
1575 sc->irq_rid[i] = i + 1;
1576
1577 return (0);
1578 }
1579
1580 /**
1581 * @brief Update DPNI according to the updated interface capabilities.
1582 */
1583 static int
1584 dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
1585 {
1586 const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM;
1587 const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM;
1588 device_t pdev = device_get_parent(sc->dev);
1589 device_t dev = sc->dev;
1590 device_t child = dev;
1591 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1592 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1593 struct dpaa2_cmd cmd;
1594 uint16_t rc_token, ni_token;
1595 int error;
1596
1597 DPAA2_CMD_INIT(&cmd);
1598
1599 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1600 if (error) {
1601 device_printf(dev, "%s: failed to open resource container: "
1602 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1603 goto err_exit;
1604 }
1605 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1606 if (error) {
1607 device_printf(dev, "%s: failed to open network interface: "
1608 "id=%d, error=%d\n", __func__, dinfo->id, error);
1609 goto close_rc;
1610 }
1611
1612 /* Setup checksums validation. */
1613 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1614 DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
1615 if (error) {
1616 device_printf(dev, "%s: failed to %s L3 checksum validation\n",
1617 __func__, en_rxcsum ? "enable" : "disable");
1618 goto close_ni;
1619 }
1620 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1621 DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
1622 if (error) {
1623 device_printf(dev, "%s: failed to %s L4 checksum validation\n",
1624 __func__, en_rxcsum ? "enable" : "disable");
1625 goto close_ni;
1626 }
1627
1628 /* Setup checksums generation. */
1629 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1630 DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
1631 if (error) {
1632 device_printf(dev, "%s: failed to %s L3 checksum generation\n",
1633 __func__, en_txcsum ? "enable" : "disable");
1634 goto close_ni;
1635 }
1636 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1637 DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
1638 if (error) {
1639 device_printf(dev, "%s: failed to %s L4 checksum generation\n",
1640 __func__, en_txcsum ? "enable" : "disable");
1641 goto close_ni;
1642 }
1643
1644 (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1645 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1646 return (0);
1647
1648 close_ni:
1649 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1650 close_rc:
1651 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1652 err_exit:
1653 return (error);
1654 }
1655
1656 /**
1657 * @brief Update DPNI according to the updated interface flags.
1658 */
1659 static int
1660 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1661 {
1662 const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
1663 const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
1664 device_t pdev = device_get_parent(sc->dev);
1665 device_t dev = sc->dev;
1666 device_t child = dev;
1667 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1668 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1669 struct dpaa2_cmd cmd;
1670 uint16_t rc_token, ni_token;
1671 int error;
1672
1673 DPAA2_CMD_INIT(&cmd);
1674
1675 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1676 if (error) {
1677 device_printf(dev, "%s: failed to open resource container: "
1678 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1679 goto err_exit;
1680 }
1681 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1682 if (error) {
1683 device_printf(dev, "%s: failed to open network interface: "
1684 "id=%d, error=%d\n", __func__, dinfo->id, error);
1685 goto close_rc;
1686 }
1687
1688 error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
1689 en_promisc ? true : en_allmulti);
1690 if (error) {
1691 device_printf(dev, "%s: failed to %s multicast promiscuous "
1692 "mode\n", __func__, en_allmulti ? "enable" : "disable");
1693 goto close_ni;
1694 }
1695
1696 error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
1697 if (error) {
1698 device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1699 __func__, en_promisc ? "enable" : "disable");
1700 goto close_ni;
1701 }
1702
1703 (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1704 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1705 return (0);
1706
1707 close_ni:
1708 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1709 close_rc:
1710 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1711 err_exit:
1712 return (error);
1713 }
1714
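/**
 * @brief Create sysctl(9) nodes for the DPNI statistics.
 *
 * Hardware counters, driver-level Rx/Tx counters, buffer pool occupancy and
 * per-channel Tx counters are attached under the device's sysctl tree, i.e.
 * dev.<driver>.<unit>.stats, dev.<driver>.<unit>.buf_num and
 * dev.<driver>.<unit>.channels.<n>.* (node names follow the usual
 * device_get_sysctl_tree() convention).
 */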
1715 static int
1716 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1717 {
1718 struct sysctl_ctx_list *ctx;
1719 struct sysctl_oid *node, *node2;
1720 struct sysctl_oid_list *parent, *parent2;
1721 char cbuf[128];
1722 int i;
1723
1724 ctx = device_get_sysctl_ctx(sc->dev);
1725 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1726
1727 /* Add DPNI statistics. */
1728 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1729 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1730 parent = SYSCTL_CHILDREN(node);
1731 for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1732 SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1733 CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1734 "IU", dpni_stat_sysctls[i].desc);
1735 }
1736 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1737 CTLFLAG_RD, &sc->rx_anomaly_frames,
1738 "Rx frames in the buffers outside of the buffer pools");
1739 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1740 CTLFLAG_RD, &sc->rx_single_buf_frames,
1741 "Rx frames in single buffers");
1742 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1743 CTLFLAG_RD, &sc->rx_sg_buf_frames,
1744 "Rx frames in scatter/gather list");
1745 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1746 CTLFLAG_RD, &sc->rx_enq_rej_frames,
1747 "Enqueue rejected by QMan");
1748 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1749 CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1750 "QMan IEOI error");
1751 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1752 CTLFLAG_RD, &sc->tx_single_buf_frames,
1753 "Tx single buffer frames");
1754 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1755 CTLFLAG_RD, &sc->tx_sg_frames,
1756 "Tx S/G frames");
1757
1758 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1759 CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1760 "IU", "number of Rx buffers in the buffer pool");
1761 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1762 CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1763 "IU", "number of free Rx buffers in the buffer pool");
1764
1765 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1766
1767 /* Add channels statistics. */
1768 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1769 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1770 parent = SYSCTL_CHILDREN(node);
1771 for (int i = 0; i < sc->chan_n; i++) {
1772 snprintf(cbuf, sizeof(cbuf), "%d", i);
1773
1774 node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
1775 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
1776 parent2 = SYSCTL_CHILDREN(node2);
1777
1778 SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
1779 CTLFLAG_RD, &sc->channels[i]->tx_frames,
1780 "Tx frames counter");
1781 SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
1782 CTLFLAG_RD, &sc->channels[i]->tx_dropped,
1783 "Tx dropped counter");
1784 }
1785
1786 return (0);
1787 }
1788
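/**
 * @brief Create DMA tags for the buffers shared with the hardware: the Rx
 * distribution key and the QoS key configuration.
 */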
1789 static int
1790 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
1791 {
1792 device_t dev = sc->dev;
1793 int error;
1794
1795 KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
1796 ("unexpected buffer alignment: %d\n", sc->buf_align));
1797
1798 /* DMA tag for Rx distribution key. */
1799 error = bus_dma_tag_create(
1800 bus_get_dma_tag(dev),
1801 PAGE_SIZE, 0, /* alignment, boundary */
1802 BUS_SPACE_MAXADDR, /* low restricted addr */
1803 BUS_SPACE_MAXADDR, /* high restricted addr */
1804 NULL, NULL, /* filter, filterarg */
1805 DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
1806 DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
1807 NULL, NULL, /* lockfunc, lockarg */
1808 &sc->rxd_dmat);
1809 if (error) {
1810 device_printf(dev, "%s: failed to create DMA tag for Rx "
1811 "distribution key\n", __func__);
1812 return (error);
1813 }
1814
1815 error = bus_dma_tag_create(
1816 bus_get_dma_tag(dev),
1817 PAGE_SIZE, 0, /* alignment, boundary */
1818 BUS_SPACE_MAXADDR, /* low restricted addr */
1819 BUS_SPACE_MAXADDR, /* high restricted addr */
1820 NULL, NULL, /* filter, filterarg */
1821 ETH_QOS_KCFG_BUF_SIZE, 1, /* maxsize, nsegments */
1822 ETH_QOS_KCFG_BUF_SIZE, 0, /* maxsegsize, flags */
1823 NULL, NULL, /* lockfunc, lockarg */
1824 &sc->qos_dmat);
1825 if (error) {
1826 device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
1827 __func__);
1828 return (error);
1829 }
1830
1831 return (0);
1832 }
1833
1834 /**
1835 * @brief Configure buffer layouts of the different DPNI queues.
1836 */
1837 static int
1838 dpaa2_ni_set_buf_layout(device_t dev)
1839 {
1840 device_t pdev = device_get_parent(dev);
1841 device_t child = dev;
1842 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1843 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1844 struct dpaa2_ni_softc *sc = device_get_softc(dev);
1845 struct dpaa2_ni_buf_layout buf_layout = {0};
1846 struct dpaa2_cmd cmd;
1847 uint16_t rc_token, ni_token;
1848 int error;
1849
1850 DPAA2_CMD_INIT(&cmd);
1851
1852 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1853 if (error) {
1854 device_printf(dev, "%s: failed to open resource container: "
1855 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1856 goto err_exit;
1857 }
1858 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1859 if (error) {
1860 device_printf(sc->dev, "%s: failed to open network interface: "
1861 "id=%d, error=%d\n", __func__, dinfo->id, error);
1862 goto close_rc;
1863 }
1864
1865 /*
1866 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
1867 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
1868 * on the WRIOP version.
1869 */
1870 sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
1871 sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
1872 ? BUF_ALIGN_V1 : BUF_ALIGN;
1873
1874 /*
1875 * Round the Rx buffer size down so that the size seen by WRIOP stays
1876 * a multiple of the alignment selected above.
1877 */
1878 sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);
1879
1880 if (bootverbose) {
1881 device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
1882 sc->buf_sz, sc->buf_align);
1883 }
1884
1885 /*
1886 * Frame Descriptor Tx buffer layout
1887 *
1888 * ADDR -> |---------------------|
1889 * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1890 * |---------------------|
1891 * | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
1892 * |---------------------|
1893 * | DATA HEADROOM |
1894 * ADDR + OFFSET -> |---------------------|
1895 * | |
1896 * | |
1897 * | FRAME DATA |
1898 * | |
1899 * | |
1900 * |---------------------|
1901 * | DATA TAILROOM |
1902 * |---------------------|
1903 *
1904 * NOTE: It's for a single buffer frame only.
1905 */
1906 buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
1907 buf_layout.pd_size = BUF_SWA_SIZE;
1908 buf_layout.pass_timestamp = true;
1909 buf_layout.pass_frame_status = true;
1910 buf_layout.options =
1911 BUF_LOPT_PRIV_DATA_SZ |
1912 BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
1913 BUF_LOPT_FRAME_STATUS;
1914 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1915 if (error) {
1916 device_printf(dev, "%s: failed to set Tx buffer layout\n",
1917 __func__);
1918 goto close_ni;
1919 }
1920
1921 /* Tx-confirmation buffer layout */
1922 buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
1923 buf_layout.options =
1924 BUF_LOPT_TIMESTAMP |
1925 BUF_LOPT_FRAME_STATUS;
1926 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1927 if (error) {
1928 device_printf(dev, "%s: failed to set TxConf buffer layout\n",
1929 __func__);
1930 goto close_ni;
1931 }
1932
1933 /*
1934 * Driver should reserve the amount of space indicated by this command
1935 * as headroom in all Tx frames.
1936 */
1937 error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
1938 if (error) {
1939 device_printf(dev, "%s: failed to obtain Tx data offset\n",
1940 __func__);
1941 goto close_ni;
1942 }
1943
1944 if (bootverbose) {
1945 device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
1946 }
1947 if ((sc->tx_data_off % 64) != 0) {
1948 device_printf(dev, "Tx data offset (%d) is not a multiple "
1949 "of 64 bytes\n", sc->tx_data_off);
1950 }
1951
1952 /*
1953 * Frame Descriptor Rx buffer layout
1954 *
1955 * ADDR -> |---------------------|
1956 * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1957 * |---------------------|
1958 * | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
1959 * |---------------------|
1960 * | DATA HEADROOM | OFFSET-BUF_RX_HWA_SIZE
1961 * ADDR + OFFSET -> |---------------------|
1962 * | |
1963 * | |
1964 * | FRAME DATA |
1965 * | |
1966 * | |
1967 * |---------------------|
1968 * | DATA TAILROOM | 0 bytes
1969 * |---------------------|
1970 *
1971 * NOTE: It's for a single buffer frame only.
1972 */
1973 buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
1974 buf_layout.pd_size = BUF_SWA_SIZE;
1975 buf_layout.fd_align = sc->buf_align;
1976 buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
1977 buf_layout.tail_size = 0;
1978 buf_layout.pass_frame_status = true;
1979 buf_layout.pass_parser_result = true;
1980 buf_layout.pass_timestamp = true;
1981 buf_layout.options =
1982 BUF_LOPT_PRIV_DATA_SZ |
1983 BUF_LOPT_DATA_ALIGN |
1984 BUF_LOPT_DATA_HEAD_ROOM |
1985 BUF_LOPT_DATA_TAIL_ROOM |
1986 BUF_LOPT_FRAME_STATUS |
1987 BUF_LOPT_PARSER_RESULT |
1988 BUF_LOPT_TIMESTAMP;
1989 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1990 if (error) {
1991 device_printf(dev, "%s: failed to set Rx buffer layout\n",
1992 __func__);
1993 goto close_ni;
1994 }
1995
1996 error = 0;
1997 close_ni:
1998 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1999 close_rc:
2000 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2001 err_exit:
2002 return (error);
2003 }
2004
2005 /**
2006 * @brief Enable Rx/Tx pause frames.
2007 *
2008 * NOTE: Rx pause means the DPNI stops sending when it receives a pause
2009 * frame; Tx pause means the DPNI itself generates pause frames.
2010 */
2011 static int
2012 dpaa2_ni_set_pause_frame(device_t dev)
2013 {
2014 device_t pdev = device_get_parent(dev);
2015 device_t child = dev;
2016 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2017 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2018 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2019 struct dpaa2_ni_link_cfg link_cfg = {0};
2020 struct dpaa2_cmd cmd;
2021 uint16_t rc_token, ni_token;
2022 int error;
2023
2024 DPAA2_CMD_INIT(&cmd);
2025
2026 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2027 if (error) {
2028 device_printf(dev, "%s: failed to open resource container: "
2029 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2030 goto err_exit;
2031 }
2032 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2033 if (error) {
2034 device_printf(sc->dev, "%s: failed to open network interface: "
2035 "id=%d, error=%d\n", __func__, dinfo->id, error);
2036 goto close_rc;
2037 }
2038
2039 error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2040 if (error) {
2041 device_printf(dev, "%s: failed to obtain link configuration: "
2042 "error=%d\n", __func__, error);
2043 goto close_ni;
2044 }
2045
2046 /* Enable both Rx and Tx pause frames by default. */
2047 link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2048 link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2049
2050 error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2051 if (error) {
2052 device_printf(dev, "%s: failed to set link configuration: "
2053 "error=%d\n", __func__, error);
2054 goto close_ni;
2055 }
2056
2057 sc->link_options = link_cfg.options;
2058 error = 0;
2059 close_ni:
2060 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2061 close_rc:
2062 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2063 err_exit:
2064 return (error);
2065 }
2066
2067 /**
2068 * @brief Configure QoS table to determine the traffic class for the received
2069 * frame.
2070 */
2071 static int
2072 dpaa2_ni_set_qos_table(device_t dev)
2073 {
2074 device_t pdev = device_get_parent(dev);
2075 device_t child = dev;
2076 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2077 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2078 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2079 struct dpaa2_ni_qos_table tbl;
2080 struct dpaa2_buf *buf = &sc->qos_kcfg;
2081 struct dpaa2_cmd cmd;
2082 uint16_t rc_token, ni_token;
2083 int error;
2084
2085 if (sc->attr.num.rx_tcs == 1 ||
2086 !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2087 if (bootverbose) {
2088 device_printf(dev, "Ingress traffic classification is "
2089 "not supported\n");
2090 }
2091 return (0);
2092 }
2093
2094 /*
2095 * Allocate a buffer visible to the device to hold the QoS table key
2096 * configuration.
2097 */
2098
2099 if (__predict_true(buf->dmat == NULL)) {
2100 buf->dmat = sc->qos_dmat;
2101 }
2102
2103 error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
2104 BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
2105 if (error) {
2106 device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2107 "configuration\n", __func__);
2108 goto err_exit;
2109 }
2110
2111 error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
2112 ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
2113 BUS_DMA_NOWAIT);
2114 if (error) {
2115 device_printf(dev, "%s: failed to map QoS key configuration "
2116 "buffer into bus space\n", __func__);
2117 goto err_exit;
2118 }
2119
2120 DPAA2_CMD_INIT(&cmd);
2121
2122 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2123 if (error) {
2124 device_printf(dev, "%s: failed to open resource container: "
2125 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2126 goto err_exit;
2127 }
2128 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2129 if (error) {
2130 device_printf(sc->dev, "%s: failed to open network interface: "
2131 "id=%d, error=%d\n", __func__, dinfo->id, error);
2132 goto close_rc;
2133 }
2134
2135 tbl.default_tc = 0;
2136 tbl.discard_on_miss = false;
2137 tbl.keep_entries = false;
2138 tbl.kcfg_busaddr = buf->paddr;
2139 error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2140 if (error) {
2141 device_printf(dev, "%s: failed to set QoS table\n", __func__);
2142 goto close_ni;
2143 }
2144
2145 error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2146 if (error) {
2147 device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2148 goto close_ni;
2149 }
2150
2151 error = 0;
2152 close_ni:
2153 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2154 close_rc:
2155 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2156 err_exit:
2157 return (error);
2158 }
2159
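/**
 * @brief Choose and program the DPNI primary MAC address.
 *
 * The MAC address of the physical port is preferred (if the DPNI is
 * connected to a DPMAC); otherwise the address already configured in the
 * DPNI is kept, or a random one is generated if both are zero.
 */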
2160 static int
2161 dpaa2_ni_set_mac_addr(device_t dev)
2162 {
2163 device_t pdev = device_get_parent(dev);
2164 device_t child = dev;
2165 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2166 if_t ifp = sc->ifp;
2167 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2168 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2169 struct dpaa2_cmd cmd;
2170 struct ether_addr rnd_mac_addr;
2171 uint16_t rc_token, ni_token;
2172 uint8_t mac_addr[ETHER_ADDR_LEN];
2173 uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2174 int error;
2175
2176 DPAA2_CMD_INIT(&cmd);
2177
2178 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2179 if (error) {
2180 device_printf(dev, "%s: failed to open resource container: "
2181 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2182 goto err_exit;
2183 }
2184 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2185 if (error) {
2186 device_printf(sc->dev, "%s: failed to open network interface: "
2187 "id=%d, error=%d\n", __func__, dinfo->id, error);
2188 goto close_rc;
2189 }
2190
2191 /*
2192 * Get the MAC address associated with the physical port, if the DPNI is
2193 * connected to a DPMAC directly associated with one of the physical
2194 * ports.
2195 */
2196 error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2197 if (error) {
2198 device_printf(dev, "%s: failed to obtain the MAC address "
2199 "associated with the physical port\n", __func__);
2200 goto close_ni;
2201 }
2202
2203 /* Get primary MAC address from the DPNI attributes. */
2204 error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2205 if (error) {
2206 device_printf(dev, "%s: failed to obtain primary MAC address\n",
2207 __func__);
2208 goto close_ni;
2209 }
2210
2211 if (!ETHER_IS_ZERO(mac_addr)) {
2212 /* Set MAC address of the physical port as DPNI's primary one. */
2213 error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2214 mac_addr);
2215 if (error) {
2216 device_printf(dev, "%s: failed to set primary MAC "
2217 "address\n", __func__);
2218 goto close_ni;
2219 }
2220 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2221 sc->mac.addr[i] = mac_addr[i];
2222 }
2223 } else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2224 /* Generate random MAC address as DPNI's primary one. */
2225 ether_gen_addr(ifp, &rnd_mac_addr);
2226 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2227 mac_addr[i] = rnd_mac_addr.octet[i];
2228 }
2229
2230 error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2231 mac_addr);
2232 if (error) {
2233 device_printf(dev, "%s: failed to set random primary "
2234 "MAC address\n", __func__);
2235 goto close_ni;
2236 }
2237 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2238 sc->mac.addr[i] = mac_addr[i];
2239 }
2240 } else {
2241 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2242 sc->mac.addr[i] = dpni_mac_addr[i];
2243 }
2244 }
2245
2246 error = 0;
2247 close_ni:
2248 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2249 close_rc:
2250 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2251 err_exit:
2252 return (error);
2253 }
2254
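/**
 * @brief miibus(4) status change callback.
 *
 * Propagates the PHY link state to the associated DPMAC, but only once the
 * interface is running and the state has actually changed.
 */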
2255 static void
2256 dpaa2_ni_miibus_statchg(device_t dev)
2257 {
2258 device_t pdev = device_get_parent(dev);
2259 device_t child = dev;
2260 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2261 struct dpaa2_mac_link_state mac_link = { 0 };
2262 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2263 struct dpaa2_cmd cmd;
2264 uint16_t rc_token, mac_token;
2265 int error, link_state;
2266
2267 if (sc->fixed_link || sc->mii == NULL) {
2268 return;
2269 }
2270 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) {
2271 /*
2272 * We can be called before dpaa2_ni_init() has set everything up.
2273 * Adjusting the link state at that point would update the internal
2274 * sc->link_state without triggering the DPMAC update later, so the
2275 * DPMAC would never learn about the change. Bail out until the
2276 * interface is up and running.
2277 */
2278 return;
2279 }
2280
2281 /*
2282 * Note: ifp link state will only be changed AFTER we are called so we
2283 * cannot rely on ifp->if_linkstate here.
2284 */
2285 if (sc->mii->mii_media_status & IFM_AVALID) {
2286 if (sc->mii->mii_media_status & IFM_ACTIVE) {
2287 link_state = LINK_STATE_UP;
2288 } else {
2289 link_state = LINK_STATE_DOWN;
2290 }
2291 } else {
2292 link_state = LINK_STATE_UNKNOWN;
2293 }
2294
2295 if (link_state != sc->link_state) {
2296 sc->link_state = link_state;
2297
2298 DPAA2_CMD_INIT(&cmd);
2299
2300 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2301 &rc_token);
2302 if (error) {
2303 device_printf(dev, "%s: failed to open resource "
2304 "container: id=%d, error=%d\n", __func__, rcinfo->id,
2305 error);
2306 goto err_exit;
2307 }
2308 error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
2309 &mac_token);
2310 if (error) {
2311 device_printf(sc->dev, "%s: failed to open DPMAC: "
2312 "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2313 error);
2314 goto close_rc;
2315 }
2316
2317 if (link_state == LINK_STATE_UP ||
2318 link_state == LINK_STATE_DOWN) {
2319 /* Update DPMAC link state. */
2320 mac_link.supported = sc->mii->mii_media.ifm_media;
2321 mac_link.advert = sc->mii->mii_media.ifm_media;
2322 mac_link.rate = 1000; /* TODO: Where to get from? ifmedia_baudrate? */
2323 mac_link.options =
2324 DPAA2_MAC_LINK_OPT_AUTONEG |
2325 DPAA2_MAC_LINK_OPT_PAUSE;
2326 mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
2327 mac_link.state_valid = true;
2328
2329 /* Inform DPMAC about link state. */
2330 error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
2331 &mac_link);
2332 if (error) {
2333 device_printf(sc->dev, "%s: failed to set DPMAC "
2334 "link state: id=%d, error=%d\n", __func__,
2335 sc->mac.dpmac_id, error);
2336 }
2337 }
2338 (void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
2339 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2340 rc_token));
2341 }
2342
2343 return;
2344
2345 close_rc:
2346 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2347 err_exit:
2348 return;
2349 }
2350
2351 /**
2352 * @brief Callback function to process media change request.
2353 */
2354 static int
2355 dpaa2_ni_media_change_locked(struct dpaa2_ni_softc *sc)
2356 {
2357
2358 DPNI_LOCK_ASSERT(sc);
2359 if (sc->mii) {
2360 mii_mediachg(sc->mii);
2361 sc->media_status = sc->mii->mii_media.ifm_media;
2362 } else if (sc->fixed_link) {
2363 if_printf(sc->ifp, "%s: can't change media in fixed mode\n",
2364 __func__);
2365 }
2366
2367 return (0);
2368 }
2369
2370 static int
2371 dpaa2_ni_media_change(if_t ifp)
2372 {
2373 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2374 int error;
2375
2376 DPNI_LOCK(sc);
2377 error = dpaa2_ni_media_change_locked(sc);
2378 DPNI_UNLOCK(sc);
2379 return (error);
2380 }
2381
2382 /**
2383 * @brief Callback function to process media status request.
2384 */
2385 static void
2386 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2387 {
2388 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2389
2390 DPNI_LOCK(sc);
2391 if (sc->mii) {
2392 mii_pollstat(sc->mii);
2393 ifmr->ifm_active = sc->mii->mii_media_active;
2394 ifmr->ifm_status = sc->mii->mii_media_status;
2395 }
2396 DPNI_UNLOCK(sc);
2397 }
2398
2399 /**
2400 * @brief Callout function to check and update media status.
2401 */
2402 static void
2403 dpaa2_ni_media_tick(void *arg)
2404 {
2405 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2406
2407 /* Check for media type change */
2408 if (sc->mii) {
2409 mii_tick(sc->mii);
2410 if (sc->media_status != sc->mii->mii_media.ifm_media) {
2411 printf("%s: media type changed (ifm_media=%x)\n",
2412 __func__, sc->mii->mii_media.ifm_media);
2413 dpaa2_ni_media_change(sc->ifp);
2414 }
2415 }
2416
2417 /* Schedule another timeout one second from now */
2418 callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2419 }
2420
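/**
 * @brief Initialize the interface: enable the DPNI, mark the interface as
 * running and (re)start the media callout.
 */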
2421 static void
2422 dpaa2_ni_init(void *arg)
2423 {
2424 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2425 if_t ifp = sc->ifp;
2426 device_t pdev = device_get_parent(sc->dev);
2427 device_t dev = sc->dev;
2428 device_t child = dev;
2429 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2430 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2431 struct dpaa2_cmd cmd;
2432 uint16_t rc_token, ni_token;
2433 int error;
2434
2435 DPNI_LOCK(sc);
2436 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2437 DPNI_UNLOCK(sc);
2438 return;
2439 }
2440 DPNI_UNLOCK(sc);
2441
2442 DPAA2_CMD_INIT(&cmd);
2443
2444 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2445 if (error) {
2446 device_printf(dev, "%s: failed to open resource container: "
2447 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2448 goto err_exit;
2449 }
2450 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2451 if (error) {
2452 device_printf(dev, "%s: failed to open network interface: "
2453 "id=%d, error=%d\n", __func__, dinfo->id, error);
2454 goto close_rc;
2455 }
2456
2457 error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
2458 if (error) {
2459 device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2460 __func__, error);
2461 }
2462
2463 DPNI_LOCK(sc);
2464 /* Announce we are up and running and can queue packets. */
2465 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2466
2467 if (sc->mii) {
2468 /*
2469 * mii_mediachg() will trigger a call into
2470 * dpaa2_ni_miibus_statchg() to setup link state.
2471 */
2472 dpaa2_ni_media_change_locked(sc);
2473 }
2474 callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2475
2476 DPNI_UNLOCK(sc);
2477
2478 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2479 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2480 return;
2481
2482 close_rc:
2483 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2484 err_exit:
2485 return;
2486 }
2487
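/**
 * @brief if_transmit(9) callback.
 *
 * The mbuf flowid is matched against the Rx frame queue IDs to keep a flow
 * on the same channel (channel 0 is used as a fallback); the mbuf is then
 * enqueued on that channel's transmit ring and the cleanup task is scheduled
 * to push it to the hardware.
 */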
2488 static int
2489 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2490 {
2491 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2492 struct dpaa2_channel *ch;
2493 uint32_t fqid;
2494 bool found = false;
2495 int chidx = 0, error;
2496
2497 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
2498 return (0);
2499 }
2500
2501 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2502 fqid = m->m_pkthdr.flowid;
2503 for (int i = 0; i < sc->chan_n; i++) {
2504 ch = sc->channels[i];
2505 for (int j = 0; j < ch->rxq_n; j++) {
2506 if (fqid == ch->rx_queues[j].fqid) {
2507 chidx = ch->flowid;
2508 found = true;
2509 break;
2510 }
2511 }
2512 if (found) {
2513 break;
2514 }
2515 }
2516 }
2517
2518 ch = sc->channels[chidx];
2519 error = buf_ring_enqueue(ch->xmit_br, m);
2520 if (__predict_false(error != 0)) {
2521 m_freem(m);
2522 } else {
2523 taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
2524 }
2525
2526 return (error);
2527 }
2528
2529 static void
2530 dpaa2_ni_qflush(if_t ifp)
2531 {
2532 /* TODO: Find a way to drain Tx queues in QBMan. */
2533 if_qflush(ifp);
2534 }
2535
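/**
 * @brief Handle interface ioctl requests: MTU, capabilities (checksum
 * offload), flags (promiscuous/allmulti), multicast filters and media.
 *
 * For SIOCSIFMTU the MTU is programmed as a maximum frame length, e.g. an
 * MTU of 1500 results in 1500 + ETHER_HDR_LEN (14) + ETHER_VLAN_ENCAP_LEN
 * (4) = 1518 bytes.
 */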
2536 static int
2537 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
2538 {
2539 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2540 struct ifreq *ifr = (struct ifreq *) data;
2541 device_t pdev = device_get_parent(sc->dev);
2542 device_t dev = sc->dev;
2543 device_t child = dev;
2544 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2545 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2546 struct dpaa2_cmd cmd;
2547 uint32_t changed = 0;
2548 uint16_t rc_token, ni_token;
2549 int mtu, error, rc = 0;
2550
2551 DPAA2_CMD_INIT(&cmd);
2552
2553 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2554 if (error) {
2555 device_printf(dev, "%s: failed to open resource container: "
2556 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2557 goto err_exit;
2558 }
2559 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2560 if (error) {
2561 device_printf(dev, "%s: failed to open network interface: "
2562 "id=%d, error=%d\n", __func__, dinfo->id, error);
2563 goto close_rc;
2564 }
2565
2566 switch (c) {
2567 case SIOCSIFMTU:
2568 DPNI_LOCK(sc);
2569 mtu = ifr->ifr_mtu;
2570 if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2571 DPNI_UNLOCK(sc);
2572 error = EINVAL;
2573 goto close_ni;
2574 }
2575 if_setmtu(ifp, mtu);
2576 DPNI_UNLOCK(sc);
2577
2578 /* Update maximum frame length. */
2579 error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd,
2580 mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2581 if (error) {
2582 device_printf(dev, "%s: failed to update maximum frame "
2583 "length: error=%d\n", __func__, error);
2584 goto close_ni;
2585 }
2586 break;
2587 case SIOCSIFCAP:
2588 changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2589 if (changed & IFCAP_HWCSUM) {
2590 if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) {
2591 if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
2592 } else {
2593 if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
2594 }
2595 }
2596 rc = dpaa2_ni_setup_if_caps(sc);
2597 if (rc) {
2598 printf("%s: failed to update iface capabilities: "
2599 "error=%d\n", __func__, rc);
2600 rc = ENXIO;
2601 }
2602 break;
2603 case SIOCSIFFLAGS:
2604 DPNI_LOCK(sc);
2605 if (if_getflags(ifp) & IFF_UP) {
2606 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2607 changed = if_getflags(ifp) ^ sc->if_flags;
2608 if (changed & IFF_PROMISC ||
2609 changed & IFF_ALLMULTI) {
2610 rc = dpaa2_ni_setup_if_flags(sc);
2611 }
2612 } else {
2613 DPNI_UNLOCK(sc);
2614 dpaa2_ni_init(sc);
2615 DPNI_LOCK(sc);
2616 }
2617 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2618 /* FIXME: Disable DPNI. See dpaa2_ni_init(). */
2619 }
2620
2621 sc->if_flags = if_getflags(ifp);
2622 DPNI_UNLOCK(sc);
2623 break;
2624 case SIOCADDMULTI:
2625 case SIOCDELMULTI:
2626 DPNI_LOCK(sc);
2627 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2628 DPNI_UNLOCK(sc);
2629 rc = dpaa2_ni_update_mac_filters(ifp);
2630 if (rc) {
2631 device_printf(dev, "%s: failed to update MAC "
2632 "filters: error=%d\n", __func__, rc);
2633 }
2634 DPNI_LOCK(sc);
2635 }
2636 DPNI_UNLOCK(sc);
2637 break;
2638 case SIOCGIFMEDIA:
2639 case SIOCSIFMEDIA:
2640 if (sc->mii)
2641 rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
2642 else if(sc->fixed_link) {
2643 rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
2644 }
2645 break;
2646 default:
2647 rc = ether_ioctl(ifp, c, data);
2648 break;
2649 }
2650
2651 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2652 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2653 return (rc);
2654
2655 close_ni:
2656 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2657 close_rc:
2658 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2659 err_exit:
2660 return (error);
2661 }
2662
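/**
 * @brief Re-program the DPNI multicast MAC filters from the interface's
 * current link-level multicast address list.
 */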
2663 static int
2664 dpaa2_ni_update_mac_filters(if_t ifp)
2665 {
2666 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2667 struct dpaa2_ni_mcaddr_ctx ctx;
2668 device_t pdev = device_get_parent(sc->dev);
2669 device_t dev = sc->dev;
2670 device_t child = dev;
2671 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2672 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2673 struct dpaa2_cmd cmd;
2674 uint16_t rc_token, ni_token;
2675 int error;
2676
2677 DPAA2_CMD_INIT(&cmd);
2678
2679 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2680 if (error) {
2681 device_printf(dev, "%s: failed to open resource container: "
2682 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2683 goto err_exit;
2684 }
2685 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2686 if (error) {
2687 device_printf(dev, "%s: failed to open network interface: "
2688 "id=%d, error=%d\n", __func__, dinfo->id, error);
2689 goto close_rc;
2690 }
2691
2692 /* Remove all multicast MAC filters. */
2693 error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
2694 if (error) {
2695 device_printf(dev, "%s: failed to clear multicast MAC filters: "
2696 "error=%d\n", __func__, error);
2697 goto close_ni;
2698 }
2699
2700 ctx.ifp = ifp;
2701 ctx.error = 0;
2702 ctx.nent = 0;
2703
2704 if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2705
2706 error = ctx.error;
2707 close_ni:
2708 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2709 close_rc:
2710 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2711 err_exit:
2712 return (error);
2713 }
2714
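/**
 * @brief if_foreach_llmaddr() callback: add a single multicast address to
 * the DPNI MAC filter table, falling back to multicast promiscuous mode if
 * the table is full.
 */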
2715 static u_int
2716 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2717 {
2718 struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2719 struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
2720 device_t pdev = device_get_parent(sc->dev);
2721 device_t dev = sc->dev;
2722 device_t child = dev;
2723 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2724 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2725 struct dpaa2_cmd cmd;
2726 uint16_t rc_token, ni_token;
2727 int error;
2728
2729 if (ctx->error != 0) {
2730 return (0);
2731 }
2732
2733 if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
2734 DPAA2_CMD_INIT(&cmd);
2735
2736 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2737 &rc_token);
2738 if (error) {
2739 device_printf(dev, "%s: failed to open resource "
2740 "container: id=%d, error=%d\n", __func__, rcinfo->id,
2741 error);
2742 return (0);
2743 }
2744 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
2745 &ni_token);
2746 if (error) {
2747 device_printf(dev, "%s: failed to open network interface: "
2748 "id=%d, error=%d\n", __func__, dinfo->id, error);
2749 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2750 rc_token));
2751 return (0);
2752 }
2753
2754 ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
2755 LLADDR(sdl));
2756
2757 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2758 ni_token));
2759 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2760 rc_token));
2761
2762 if (ctx->error != 0) {
2763 device_printf(dev, "%s: can't add more than %d MAC "
2764 "addresses, switching to the multicast promiscuous "
2765 "mode\n", __func__, ctx->nent);
2766
2767 /* Enable multicast promiscuous mode. */
2768 DPNI_LOCK(sc);
2769 if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
2770 sc->if_flags |= IFF_ALLMULTI;
2771 ctx->error = dpaa2_ni_setup_if_flags(sc);
2772 DPNI_UNLOCK(sc);
2773
2774 return (0);
2775 }
2776 ctx->nent++;
2777 }
2778
2779 return (1);
2780 }
2781
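/**
 * @brief DPNI interrupt handler: read (and thereby acknowledge) the IRQ
 * status of the network interface.
 */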
2782 static void
2783 dpaa2_ni_intr(void *arg)
2784 {
2785 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2786 device_t pdev = device_get_parent(sc->dev);
2787 device_t dev = sc->dev;
2788 device_t child = dev;
2789 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2790 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2791 struct dpaa2_cmd cmd;
2792 uint32_t status = ~0u; /* clear all IRQ status bits */
2793 uint16_t rc_token, ni_token;
2794 int error;
2795
2796 DPAA2_CMD_INIT(&cmd);
2797
2798 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2799 if (error) {
2800 device_printf(dev, "%s: failed to open resource container: "
2801 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2802 goto err_exit;
2803 }
2804 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2805 if (error) {
2806 device_printf(dev, "%s: failed to open network interface: "
2807 "id=%d, error=%d\n", __func__, dinfo->id, error);
2808 goto close_rc;
2809 }
2810
2811 error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
2812 &status);
2813 if (error) {
2814 device_printf(sc->dev, "%s: failed to obtain IRQ status: "
2815 "error=%d\n", __func__, error);
2816 }
2817
2818 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2819 close_rc:
2820 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2821 err_exit:
2822 return;
2823 }
2824
2825 /**
2826 * @brief Execute channel's Rx/Tx routines.
2827 *
2828 * NOTE: Should not be re-entrant for the same channel. It is achieved by
2829 * enqueuing the cleanup routine on a single-threaded taskqueue.
2830 */
2831 static void
2832 dpaa2_ni_cleanup_task(void *arg, int count)
2833 {
2834 struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
2835 struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2836 int error, rxc, txc;
2837
2838 for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) {
2839 rxc = dpaa2_ni_rx_cleanup(ch);
2840 txc = dpaa2_ni_tx_cleanup(ch);
2841
2842 if (__predict_false((if_getdrvflags(sc->ifp) &
2843 IFF_DRV_RUNNING) == 0)) {
2844 return;
2845 }
2846
2847 if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) {
2848 break;
2849 }
2850 }
2851
2852 /* Re-arm channel to generate CDAN */
2853 error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
2854 if (error != 0) {
2855 panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
2856 __func__, ch->id, error);
2857 }
2858 }
2859
2860 /**
2861 * @brief Poll frames from a specific channel when CDAN is received.
2862 */
2863 static int
2864 dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
2865 {
2866 struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
2867 struct dpaa2_swp *swp = iosc->swp;
2868 struct dpaa2_ni_fq *fq;
2869 struct dpaa2_buf *buf = &ch->store;
2870 int budget = DPAA2_RX_BUDGET;
2871 int error, consumed = 0;
2872
2873 do {
2874 error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
2875 if (error) {
2876 device_printf(ch->ni_dev, "%s: failed to pull frames: "
2877 "chan_id=%d, error=%d\n", __func__, ch->id, error);
2878 break;
2879 }
2880 error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
2881 if (error == ENOENT || error == EALREADY) {
2882 break;
2883 }
2884 if (error == ETIMEDOUT) {
2885 device_printf(ch->ni_dev, "%s: timeout to consume "
2886 "frames: chan_id=%d\n", __func__, ch->id);
2887 }
2888 } while (--budget);
2889
2890 return (DPAA2_RX_BUDGET - budget);
2891 }
2892
2893 static int
2894 dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
2895 {
2896 struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2897 struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0];
2898 struct mbuf *m = NULL;
2899 int budget = DPAA2_TX_BUDGET;
2900
2901 do {
2902 mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
2903 mtx_lock(&ch->xmit_mtx);
2904 m = buf_ring_dequeue_sc(ch->xmit_br);
2905 mtx_unlock(&ch->xmit_mtx);
2906
2907 if (__predict_false(m == NULL)) {
2908 /* TODO: Do not give up easily */
2909 break;
2910 } else {
2911 dpaa2_ni_tx(sc, ch, tx, m);
2912 }
2913 } while (--budget);
2914
2915 return (DPAA2_TX_BUDGET - budget);
2916 }
2917
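/**
 * @brief Transmit a single mbuf.
 *
 * A Tx buffer is taken from the ring, the mbuf is mapped for DMA (collapsed
 * first if it has too many fragments), a frame descriptor is built and
 * enqueued to QBMan. The buffer is returned to the ring by the Tx
 * confirmation path.
 */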
2918 static void
2919 dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
2920 struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
2921 {
2922 device_t dev = sc->dev;
2923 struct dpaa2_ni_fq *fq = tx->fq;
2924 struct dpaa2_buf *buf, *sgt;
2925 struct dpaa2_fd fd;
2926 struct mbuf *md;
2927 bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
2928 int rc, nsegs;
2929 int error;
2930
2931 mtx_assert(&tx->lock, MA_NOTOWNED);
2932 mtx_lock(&tx->lock);
2933 buf = buf_ring_dequeue_sc(tx->br);
2934 mtx_unlock(&tx->lock);
2935 if (__predict_false(buf == NULL)) {
2936 /* TODO: Do not give up easily */
2937 m_freem(m);
2938 return;
2939 } else {
2940 DPAA2_BUF_ASSERT_TXREADY(buf);
2941 buf->m = m;
2942 sgt = buf->sgt;
2943 }
2944
2945 #if defined(INVARIANTS)
2946 struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
2947 KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
2948 KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
2949 #endif /* INVARIANTS */
2950
2951 error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
2952 BUS_DMA_NOWAIT);
2953 if (__predict_false(error != 0)) {
2954 /* Too many fragments, trying to defragment... */
2955 md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
2956 if (md == NULL) {
2957 device_printf(dev, "%s: m_collapse() failed\n", __func__);
2958 fq->chan->tx_dropped++;
2959 goto err;
2960 }
2961
2962 buf->m = m = md;
2963 error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
2964 &nsegs, BUS_DMA_NOWAIT);
2965 if (__predict_false(error != 0)) {
2966 device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
2967 "failed: error=%d\n", __func__, error);
2968 fq->chan->tx_dropped++;
2969 goto err;
2970 }
2971 }
2972
2973 error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
2974 if (__predict_false(error != 0)) {
2975 device_printf(dev, "%s: failed to build frame descriptor: "
2976 "error=%d\n", __func__, error);
2977 fq->chan->tx_dropped++;
2978 goto err_unload;
2979 }
2980
2981 /* TODO: Enqueue several frames in a single command */
2982 for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
2983 /* TODO: Return error codes instead of # of frames */
2984 rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
2985 if (rc == 1) {
2986 break;
2987 }
2988 }
2989
2990 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
2991 bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);
2992
2993 if (rc != 1) {
2994 fq->chan->tx_dropped++;
2995 goto err_unload;
2996 } else {
2997 fq->chan->tx_frames++;
2998 }
2999 return;
3000
3001 err_unload:
3002 bus_dmamap_unload(buf->dmat, buf->dmap);
3003 if (sgt->paddr != 0) {
3004 bus_dmamap_unload(sgt->dmat, sgt->dmap);
3005 }
3006 err:
3007 m_freem(buf->m);
3008 buf_ring_enqueue(tx->br, buf);
3009 }
3010
3011 static int
3012 dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
3013 uint32_t *consumed)
3014 {
3015 struct dpaa2_ni_fq *fq = NULL;
3016 struct dpaa2_dq *dq;
3017 struct dpaa2_fd *fd;
3018 struct dpaa2_ni_rx_ctx ctx = {
3019 .head = NULL,
3020 .tail = NULL,
3021 .cnt = 0,
3022 .last = false
3023 };
3024 int rc, frames = 0;
3025
3026 do {
3027 rc = dpaa2_chan_next_frame(chan, &dq);
3028 if (rc == EINPROGRESS) {
3029 if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3030 fd = &dq->fdr.fd;
3031 fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3032
3033 switch (fq->type) {
3034 case DPAA2_NI_QUEUE_RX:
3035 (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3036 break;
3037 case DPAA2_NI_QUEUE_RX_ERR:
3038 (void)dpaa2_ni_rx_err(chan, fq, fd);
3039 break;
3040 case DPAA2_NI_QUEUE_TX_CONF:
3041 (void)dpaa2_ni_tx_conf(chan, fq, fd);
3042 break;
3043 default:
3044 panic("%s: unknown queue type (1)",
3045 __func__);
3046 }
3047 frames++;
3048 }
3049 } else if (rc == EALREADY || rc == ENOENT) {
3050 if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3051 fd = &dq->fdr.fd;
3052 fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3053
3054 switch (fq->type) {
3055 case DPAA2_NI_QUEUE_RX:
3056 /*
3057 * Last VDQ response (mbuf) in a chain
3058 * obtained from the Rx queue.
3059 */
3060 ctx.last = true;
3061 (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3062 break;
3063 case DPAA2_NI_QUEUE_RX_ERR:
3064 (void)dpaa2_ni_rx_err(chan, fq, fd);
3065 break;
3066 case DPAA2_NI_QUEUE_TX_CONF:
3067 (void)dpaa2_ni_tx_conf(chan, fq, fd);
3068 break;
3069 default:
3070 panic("%s: unknown queue type (2)",
3071 __func__);
3072 }
3073 frames++;
3074 }
3075 break;
3076 } else {
3077 panic("%s: should not reach here: rc=%d", __func__, rc);
3078 }
3079 } while (true);
3080
3081 KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
3082 "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));
3083
3084 /*
3085 * VDQ operation pulls frames from a single queue into the store.
3086 * Return the frame queue and the number of consumed frames as output.
3087 */
3088 if (src != NULL) {
3089 *src = fq;
3090 }
3091 if (consumed != NULL) {
3092 *consumed = frames;
3093 }
3094
3095 return (rc);
3096 }
3097
3098 /**
3099 * @brief Receive frames.
3100 */
3101 static int
3102 dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
3103 struct dpaa2_ni_rx_ctx *ctx)
3104 {
3105 bus_addr_t paddr = (bus_addr_t)fd->addr;
3106 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3107 struct dpaa2_buf *buf = fa->buf;
3108 struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3109 struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3110 struct dpaa2_bp_softc *bpsc;
3111 struct mbuf *m;
3112 device_t bpdev;
3113 bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
3114 void *buf_data;
3115 int buf_len, error, released_n = 0;
3116
3117 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3118 /*
3119 * NOTE: Current channel might not be the same as the "buffer" channel
3120 * and it's fine. It must not be NULL though.
3121 */
3122 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3123
3124 if (__predict_false(paddr != buf->paddr)) {
3125 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3126 __func__, paddr, buf->paddr);
3127 }
3128
3129 switch (dpaa2_ni_fd_err(fd)) {
3130 case 1: /* Enqueue rejected by QMan */
3131 sc->rx_enq_rej_frames++;
3132 break;
3133 case 2: /* QMan IEOI error */
3134 sc->rx_ieoi_err_frames++;
3135 break;
3136 default:
3137 break;
3138 }
3139 switch (dpaa2_ni_fd_format(fd)) {
3140 case DPAA2_FD_SINGLE:
3141 sc->rx_single_buf_frames++;
3142 break;
3143 case DPAA2_FD_SG:
3144 sc->rx_sg_buf_frames++;
3145 break;
3146 default:
3147 break;
3148 }
3149
3150 mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3151 mtx_lock(&bch->dma_mtx);
3152
3153 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
3154 bus_dmamap_unload(buf->dmat, buf->dmap);
3155 m = buf->m;
3156 buf_len = dpaa2_ni_fd_data_len(fd);
3157 buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
3158 /* Prepare buffer to be re-cycled */
3159 buf->m = NULL;
3160 buf->paddr = 0;
3161 buf->vaddr = NULL;
3162 buf->seg.ds_addr = 0;
3163 buf->seg.ds_len = 0;
3164 buf->nseg = 0;
3165
3166 mtx_unlock(&bch->dma_mtx);
3167
3168 m->m_flags |= M_PKTHDR;
3169 m->m_data = buf_data;
3170 m->m_len = buf_len;
3171 m->m_pkthdr.len = buf_len;
3172 m->m_pkthdr.rcvif = sc->ifp;
3173 m->m_pkthdr.flowid = fq->fqid;
3174 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3175
3176 if (ctx->head == NULL) {
3177 KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
3178 ctx->head = m;
3179 ctx->tail = m;
3180 } else {
3181 KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
3182 ctx->tail->m_nextpkt = m;
3183 ctx->tail = m;
3184 }
3185 ctx->cnt++;
3186
3187 if (ctx->last) {
3188 ctx->tail->m_nextpkt = NULL;
3189 if_input(sc->ifp, ctx->head);
3190 }
3191
3192 /* Keep the buffer to be recycled */
3193 ch->recycled[ch->recycled_n++] = buf;
3194
3195 /* Re-seed and release recycled buffers back to the pool */
3196 if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
3197 /* Release new buffers to the pool if needed */
3198 taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);
3199
3200 for (int i = 0; i < ch->recycled_n; i++) {
3201 buf = ch->recycled[i];
3202 bch = (struct dpaa2_channel *)buf->opt;
3203
3204 mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3205 mtx_lock(&bch->dma_mtx);
3206 error = dpaa2_buf_seed_rxb(sc->dev, buf,
3207 DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
3208 mtx_unlock(&bch->dma_mtx);
3209
3210 if (__predict_false(error != 0)) {
3211 /* TODO: What else to do with the buffer? */
3212 panic("%s: failed to recycle buffer: error=%d",
3213 __func__, error);
3214 }
3215
3216 /* Prepare buffer to be released in a single command */
3217 released[released_n++] = buf->paddr;
3218 }
3219
3220 /* There's only one buffer pool for now */
3221 bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3222 bpsc = device_get_softc(bpdev);
3223
3224 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
3225 released, released_n);
3226 if (__predict_false(error != 0)) {
3227 device_printf(sc->dev, "%s: failed to release buffers "
3228 "to the pool: error=%d\n", __func__, error);
3229 return (error);
3230 }
3231 ch->recycled_n = 0;
3232 }
3233
3234 return (0);
3235 }
3236
3237 /**
3238 * @brief Receive Rx error frames.
3239 */
3240 static int
3241 dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3242 struct dpaa2_fd *fd)
3243 {
3244 bus_addr_t paddr = (bus_addr_t)fd->addr;
3245 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3246 struct dpaa2_buf *buf = fa->buf;
3247 struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3248 struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3249 device_t bpdev;
3250 struct dpaa2_bp_softc *bpsc;
3251 int error;
3252
3253 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3254 /*
3255 * NOTE: Current channel might not be the same as the "buffer" channel
3256 * and it's fine. It must not be NULL though.
3257 */
3258 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3259
3260 if (__predict_false(paddr != buf->paddr)) {
3261 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3262 __func__, paddr, buf->paddr);
3263 }
3264
3265 /* There's only one buffer pool for now */
3266 bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3267 bpsc = device_get_softc(bpdev);
3268
3269 /* Release buffer to QBMan buffer pool */
3270 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1);
3271 if (error != 0) {
3272 device_printf(sc->dev, "%s: failed to release frame buffer to "
3273 "the pool: error=%d\n", __func__, error);
3274 return (error);
3275 }
3276
3277 return (0);
3278 }
3279
3280 /**
3281 * @brief Receive Tx confirmation frames.
3282 */
3283 static int
3284 dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3285 struct dpaa2_fd *fd)
3286 {
3287 bus_addr_t paddr = (bus_addr_t)fd->addr;
3288 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3289 struct dpaa2_buf *buf = fa->buf;
3290 struct dpaa2_buf *sgt = buf->sgt;
3291 struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
3292 struct dpaa2_channel *bch = tx->fq->chan;
3293
3294 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3295 KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
3296 KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
3297 /*
3298 * NOTE: Current channel might not be the same as the "buffer" channel
3299 * and it's fine. It must not be NULL though.
3300 */
3301 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3302
3303 if (paddr != buf->paddr) {
3304 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3305 __func__, paddr, buf->paddr);
3306 }
3307
3308 mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3309 mtx_lock(&bch->dma_mtx);
3310
3311 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE);
3312 bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE);
3313 bus_dmamap_unload(buf->dmat, buf->dmap);
3314 bus_dmamap_unload(sgt->dmat, sgt->dmap);
3315 m_freem(buf->m);
3316 buf->m = NULL;
3317 buf->paddr = 0;
3318 buf->vaddr = NULL;
3319 sgt->paddr = 0;
3320
3321 mtx_unlock(&bch->dma_mtx);
3322
3323 /* Return Tx buffer back to the ring */
3324 buf_ring_enqueue(tx->br, buf);
3325
3326 return (0);
3327 }
3328
3329 /**
3330 * @brief Compare versions of the DPAA2 network interface API.
3331 */
3332 static int
3333 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3334 uint16_t minor)
3335 {
3336 if (sc->api_major == major) {
3337 return sc->api_minor - minor;
3338 }
3339 return sc->api_major - major;
3340 }
3341
3342 /**
3343 * @brief Build a DPAA2 frame descriptor.
3344 */
3345 static int
3346 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3347 struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
3348 {
3349 struct dpaa2_buf *sgt = buf->sgt;
3350 struct dpaa2_sg_entry *sge;
3351 struct dpaa2_fa *fa;
3352 int i, error;
3353
3354 KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
3355 KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
3356 KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
3357 KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));
3358
3359 memset(fd, 0, sizeof(*fd));
3360
3361 /* Populate and map S/G table */
3362 if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
3363 sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off;
3364 for (i = 0; i < nsegs; i++) {
3365 sge[i].addr = (uint64_t)segs[i].ds_addr;
3366 sge[i].len = (uint32_t)segs[i].ds_len;
3367 sge[i].offset_fmt = 0u;
3368 }
3369 sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3370
3371 KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
3372 sgt->paddr));
3373
3374 error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
3375 DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
3376 BUS_DMA_NOWAIT);
3377 if (__predict_false(error != 0)) {
3378 device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
3379 "error=%d\n", __func__, error);
3380 return (error);
3381 }
3382
3383 buf->paddr = sgt->paddr;
3384 buf->vaddr = sgt->vaddr;
3385 sc->tx_sg_frames++; /* for sysctl(9) */
3386 } else {
3387 return (EINVAL);
3388 }
3389
3390 fa = (struct dpaa2_fa *)sgt->vaddr;
3391 fa->magic = DPAA2_MAGIC;
3392 fa->buf = buf;
3393
3394 fd->addr = buf->paddr;
3395 fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
3396 fd->bpid_ivp_bmt = 0;
3397 fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
3398 fd->ctrl = 0x00800000u;
3399
3400 return (0);
3401 }
3402
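/*
 * Frame descriptor accessors: extract the error bits, data length, format,
 * short-length flag and data offset from the FD control words.
 */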
3403 static int
3404 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3405 {
3406 return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3407 }
3408
3409 static uint32_t
3410 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3411 {
3412 if (dpaa2_ni_fd_short_len(fd)) {
3413 return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3414 }
3415 return (fd->data_length);
3416 }
3417
3418 static int
3419 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3420 {
3421 return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3422 DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3423 }
3424
3425 static bool
3426 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3427 {
3428 return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3429 & DPAA2_NI_FD_SL_MASK) == 1);
3430 }
3431
3432 static int
3433 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3434 {
3435 return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3436 }
3437
3438 /**
3439 * @brief Collect statistics of the network interface.
3440 */
3441 static int
3442 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
3443 {
3444 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3445 struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
3446 device_t pdev = device_get_parent(sc->dev);
3447 device_t dev = sc->dev;
3448 device_t child = dev;
3449 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3450 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3451 struct dpaa2_cmd cmd;
3452 uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
3453 uint64_t result = 0;
3454 uint16_t rc_token, ni_token;
3455 int error;
3456
3457 DPAA2_CMD_INIT(&cmd);
3458
3459 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3460 if (error) {
3461 device_printf(dev, "%s: failed to open resource container: "
3462 "id=%d, error=%d\n", __func__, rcinfo->id, error);
3463 goto exit;
3464 }
3465 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3466 if (error) {
3467 device_printf(dev, "%s: failed to open network interface: "
3468 "id=%d, error=%d\n", __func__, dinfo->id, error);
3469 goto close_rc;
3470 }
3471
3472 error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
3473 if (!error) {
3474 result = cnt[stat->cnt];
3475 }
3476
3477 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3478 close_rc:
3479 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3480 exit:
3481 return (sysctl_handle_64(oidp, &result, 0, req));
3482 }
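
/*
 * NOTE: this handler is expected to be registered with one OID per entry of
 * dpni_stat_sysctls, so that oidp->oid_number indexes that table.  A sketch of
 * such a registration (context and node names are illustrative only):
 *
 *	SYSCTL_ADD_PROC(ctx, children, i, dpni_stat_sysctls[i].name,
 *	    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats, "QU",
 *	    dpni_stat_sysctls[i].desc);
 */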

static int
dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);

	return (sysctl_handle_32(oidp, &buf_num, 0, req));
}

static int
dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);

	return (sysctl_handle_32(oidp, &buf_free, 0, req));
}

static int
dpaa2_ni_set_hash(device_t dev, uint64_t flags)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint64_t key = 0;
	int i;

	if (!(sc->attr.num.queues > 1)) {
		return (EOPNOTSUPP);
	}

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].rxnfc_field & flags) {
			key |= dist_fields[i].id;
		}
	}

	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
}
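
/*
 * Example (sketch, assuming the RXH_* flag macros defined earlier in this
 * driver): hash Rx traffic over the available queues on the IP addresses and
 * L4 ports.
 *
 *	error = dpaa2_ni_set_hash(dev, RXH_IP_SRC | RXH_IP_DST |
 *	    RXH_L4_B_0_1 | RXH_L4_B_2_3);
 */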

/**
 * @brief Set the Rx distribution (hash or flow classification) key.
 *
 * "flags" is a combination of RXH_ bits.
 */
static int
dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpkg_profile_cfg cls_cfg;
	struct dpkg_extract *key;
	struct dpaa2_buf *buf = &sc->rxd_kcfg;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int i, error = 0;

	if (__predict_true(buf->dmat == NULL)) {
		buf->dmat = sc->rxd_dmat;
	}

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	/* Configure extracts according to the given flags. */
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		key = &cls_cfg.extracts[cls_cfg.num_extracts];

		if (!(flags & dist_fields[i].id)) {
			continue;
		}

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			device_printf(dev, "%s: failed to add key extraction "
			    "rule\n", __func__);
			return (E2BIG);
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare key configuration: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	/* Prepare for setting the Rx dist. */
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
	    DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	if (type == DPAA2_NI_DIST_MODE_HASH) {
		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			goto err_exit;
		}
		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
		    &ni_token);
		if (error) {
			device_printf(dev, "%s: failed to open network "
			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
			    error);
			goto close_rc;
		}

		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
		if (error != 0) {
			device_printf(dev, "%s: failed to set distribution mode "
			    "and size for the traffic class\n", __func__);
		}

		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    ni_token));
close_rc:
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}

err_exit:
	return (error);
}

/**
 * @brief Prepare extract parameters.
 *
 * cfg: Key generation profile to convert.
 * key_cfg_buf: 256 bytes of zeroed memory, filled in here before being mapped
 *              for DMA.
 */
static int
dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
{
	struct dpni_ext_set_rx_tc_dist *dpni_ext;
	struct dpni_dist_extract *extr;
	int i, j;

	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
		return (EINVAL);

	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
	dpni_ext->num_extracts = cfg->num_extracts;

	for (i = 0; i < cfg->num_extracts; i++) {
		extr = &dpni_ext->extracts[i];

		switch (cfg->extracts[i].type) {
		case DPKG_EXTRACT_FROM_HDR:
			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
			extr->efh_type =
			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
			extr->size = cfg->extracts[i].extract.from_hdr.size;
			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
			extr->field = cfg->extracts[i].extract.from_hdr.field;
			extr->hdr_index =
			    cfg->extracts[i].extract.from_hdr.hdr_index;
			break;
		case DPKG_EXTRACT_FROM_DATA:
			extr->size = cfg->extracts[i].extract.from_data.size;
			extr->offset =
			    cfg->extracts[i].extract.from_data.offset;
			break;
		case DPKG_EXTRACT_FROM_PARSE:
			extr->size = cfg->extracts[i].extract.from_parse.size;
			extr->offset =
			    cfg->extracts[i].extract.from_parse.offset;
			break;
		default:
			return (EINVAL);
		}

		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
		extr->extract_type = cfg->extracts[i].type & 0x0Fu;

		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
			extr->masks[j].offset =
			    cfg->extracts[i].masks[j].offset;
		}
	}

	return (0);
}

static device_method_t dpaa2_ni_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dpaa2_ni_probe),
	DEVMETHOD(device_attach,	dpaa2_ni_attach),
	DEVMETHOD(device_detach,	dpaa2_ni_detach),

	/* mii via memac_mdio */
	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),

	DEVMETHOD_END
};

static driver_t dpaa2_ni_driver = {
	"dpaa2_ni",
	dpaa2_ni_methods,
	sizeof(struct dpaa2_ni_softc),
};

DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);

MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
#ifdef DEV_ACPI
MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
#endif
#ifdef FDT
MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
#endif