xref: /freebsd/sys/dev/dpaa2/dpaa2_io.c (revision d0b2dbfa)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * QBMan command interface and the DPAA2 I/O (DPIO) driver.
 *
 * The DPIO object allows configuration of the QBMan software portal with
 * optional notification capabilities.
 *
 * Software portals are used by the driver to communicate with the QBMan. The
 * DPIO object’s main purpose is to enable the driver to perform I/O – enqueue
 * and dequeue operations, as well as buffer release and acquire operations –
 * using QBMan.
 */

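/*
 * Illustrative sketch (not part of this driver): one way a sibling DPAA2
 * driver could use the software portal interface described above, e.g. to
 * seed a QBMan buffer pool and read back its state.  The DPAA2_SWP_* macros
 * are assumed to be the kobj wrappers generated for the dpaa2_swp_if methods
 * implemented at the bottom of this file; "bpid" and the buffer addresses
 * are placeholders supplied by the caller.
 */
#if 0
static int
example_seed_buffer_pool(device_t iodev, uint16_t bpid, bus_addr_t *paddrs,
    uint32_t n)
{
	struct dpaa2_bp_conf conf;
	int error;

	/* Hand 'n' buffer physical addresses over to the QBMan buffer pool. */
	error = DPAA2_SWP_RELEASE_BUFS(iodev, bpid, paddrs, n);
	if (error != 0)
		return (error);

	/* Query the pool state to confirm the release took effect. */
	return (DPAA2_SWP_QUERY_BP(iodev, bpid, &conf));
}
#endif
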
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/_cpuset.h>
#include <sys/cpuset.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>

#include <vm/vm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>

#include "pcib_if.h"
#include "pci_if.h"

#include "dpaa2_mc.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_io.h"
#include "dpaa2_ni.h"

#define DPIO_IRQ_INDEX		0 /* index of the only DPIO IRQ */
#define DPIO_POLL_MAX		32

/*
 * Memory:
 *	0: cache-enabled part of the QBMan software portal.
 *	1: cache-inhibited part of the QBMan software portal.
 *	2: control registers of the QBMan software portal?
 *
 * Note that MSI should be allocated separately using the pseudo-PCI interface.
 */
struct resource_spec dpaa2_io_spec[] = {
	/*
	 * System Memory resources.
	 */
#define MEM_RES_NUM	(3u)
#define MEM_RID_OFF	(0u)
#define MEM_RID(rid)	((rid) + MEM_RID_OFF)
	{ SYS_RES_MEMORY, MEM_RID(0),   RF_ACTIVE | RF_UNMAPPED },
	{ SYS_RES_MEMORY, MEM_RID(1),   RF_ACTIVE | RF_UNMAPPED },
	{ SYS_RES_MEMORY, MEM_RID(2),   RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL },
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPIO.
	 */
#define MCP_RES_NUM	(1u)
#define MCP_RID_OFF	(MEM_RID_OFF + MEM_RES_NUM)
#define MCP_RID(rid)	((rid) + MCP_RID_OFF)
	/* --- */
	{ DPAA2_DEV_MCP,  MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/* --- */
	RESOURCE_SPEC_END
};
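
/*
 * Illustrative note (not from the original file): with the offsets above, the
 * softc resource array ends up with res[0..2] holding the portal memory
 * regions and res[3] holding the DPMCP, i.e. MCP_RID(0) == MEM_RES_NUM.  The
 * assertions below merely restate that arithmetic.
 */
#if 0
_Static_assert(MEM_RID(2) == 2, "last portal memory region expected at rid 2");
_Static_assert(MCP_RID(0) == MEM_RES_NUM, "DPMCP rid expected right after memory rids");
#endif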

/* Configuration routines. */
static int dpaa2_io_setup_irqs(device_t dev);
static int dpaa2_io_release_irqs(device_t dev);
static int dpaa2_io_setup_msi(struct dpaa2_io_softc *sc);
static int dpaa2_io_release_msi(struct dpaa2_io_softc *sc);

/* Interrupt handlers */
static void dpaa2_io_intr(void *arg);

static int
dpaa2_io_probe(device_t dev)
{
	/* The DPIO device is added by the parent resource container itself. */
	device_set_desc(dev, "DPAA2 I/O");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_io_detach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_io_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, io_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	/* Tear down interrupt handler and release IRQ resources. */
	dpaa2_io_release_irqs(dev);

	/* Free software portal helper object. */
	dpaa2_swp_free_portal(sc->swp);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open DPRC: error=%d\n",
		    __func__, error);
		goto err_exit;
	}
	error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token);
	if (error) {
		device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n",
		    __func__, dinfo->id, error);
		goto close_rc;
	}

	error = DPAA2_CMD_IO_DISABLE(dev, child, &cmd);
	if (error && bootverbose) {
		device_printf(dev, "%s: failed to disable DPIO: id=%d, "
		    "error=%d\n", __func__, dinfo->id, error);
	}

	(void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));

	/* Unmap memory resources of the portal. */
	for (int i = 0; i < MEM_RES_NUM; i++) {
		if (sc->res[MEM_RID(i)] == NULL) {
			continue;
		}
		error = bus_unmap_resource(sc->dev, SYS_RES_MEMORY,
		    sc->res[MEM_RID(i)], &sc->map[MEM_RID(i)]);
		if (error && bootverbose) {
			device_printf(dev, "%s: failed to unmap memory "
			    "resource: rid=%d, error=%d\n", __func__, MEM_RID(i),
			    error);
		}
	}

	/* Release allocated resources. */
	bus_release_resources(dev, dpaa2_io_spec, sc->res);

	return (0);

close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_io_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_io_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	struct resource_map_request req;
	struct {
		vm_memattr_t memattr;
		char *label;
	} map_args[MEM_RES_NUM] = {
		{ VM_MEMATTR_WRITE_BACK, "cache-enabled part" },
		{ VM_MEMATTR_DEVICE, "cache-inhibited part" },
		{ VM_MEMATTR_DEVICE, "control registers" }
	};
	uint16_t rc_token, io_token;
	int error;

	sc->dev = dev;
	sc->swp = NULL;
	sc->intr = NULL;
	sc->irq_resource = NULL;

	/* Allocate resources. */
	error = bus_alloc_resources(sc->dev, dpaa2_io_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		return (ENXIO);
	}

	/* Set up the allocated MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	/* Map memory resources of the portal. */
	for (int i = 0; i < MEM_RES_NUM; i++) {
		if (sc->res[MEM_RID(i)] == NULL) {
			continue;
		}

		resource_init_map_request(&req);
		req.memattr = map_args[i].memattr;
		error = bus_map_resource(sc->dev, SYS_RES_MEMORY,
		    sc->res[MEM_RID(i)], &req, &sc->map[MEM_RID(i)]);
		if (error) {
			device_printf(dev, "%s: failed to map %s: error=%d\n",
			    __func__, map_args[i].label, error);
			goto err_exit;
		}
	}

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open DPRC: error=%d\n",
		    __func__, error);
		goto err_exit;
	}
	error = DPAA2_CMD_IO_OPEN(dev, child, &cmd, dinfo->id, &io_token);
	if (error) {
		device_printf(dev, "%s: failed to open DPIO: id=%d, error=%d\n",
		    __func__, dinfo->id, error);
		goto close_rc;
	}
	error = DPAA2_CMD_IO_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPIO: id=%d, error=%d\n",
		    __func__, dinfo->id, error);
		goto close_io;
	}
	error = DPAA2_CMD_IO_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to get DPIO attributes: id=%d, "
		    "error=%d\n", __func__, dinfo->id, error);
		goto close_io;
	}
	error = DPAA2_CMD_IO_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable DPIO: id=%d, "
		    "error=%d\n", __func__, dinfo->id, error);
		goto close_io;
	}

	/* Prepare the descriptor of the QBMan software portal. */
	sc->swp_desc.dpio_dev = dev;
	sc->swp_desc.swp_version = sc->attr.swp_version;
	sc->swp_desc.swp_clk = sc->attr.swp_clk;
	sc->swp_desc.swp_id = sc->attr.swp_id;
	sc->swp_desc.has_notif = sc->attr.priors_num ? true : false;
	sc->swp_desc.has_8prio = sc->attr.priors_num == 8u ? true : false;

	sc->swp_desc.cena_res = sc->res[0];
	sc->swp_desc.cena_map = &sc->map[0];
	sc->swp_desc.cinh_res = sc->res[1];
	sc->swp_desc.cinh_map = &sc->map[1];

	/*
	 * Compute how many nanoseconds 256 QBMan clock cycles take. This is
	 * needed because the interrupt timeout period register has to be
	 * specified in QBMan clock cycles, in increments of 256.
	 */
	sc->swp_desc.swp_cycles_ratio = 256000 /
	    (sc->swp_desc.swp_clk / 1000000);
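	/*
	 * Worked example (assuming a 500 MHz QBMan clock; the value is not
	 * taken from this file): 256000 / (500000000 / 1000000) = 512, i.e.
	 * 256 QBMan cycles take 512 ns, so a holdoff expressed in ns divided
	 * by this ratio gives the timeout in 256-cycle increments.
	 */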

	/* Initialize QBMan software portal. */
	error = dpaa2_swp_init_portal(&sc->swp, &sc->swp_desc, DPAA2_SWP_DEF);
	if (error) {
		device_printf(dev, "%s: failed to initialize dpaa2_swp: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	error = dpaa2_io_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto err_exit;
	}

	if (bootverbose) {
		device_printf(dev, "dpio_id=%d, swp_id=%d, chan_mode=%s, "
		    "notif_priors=%d, swp_version=0x%x\n",
		    sc->attr.id, sc->attr.swp_id,
		    sc->attr.chan_mode == DPAA2_IO_LOCAL_CHANNEL
		    ? "local_channel" : "no_channel", sc->attr.priors_num,
		    sc->attr.swp_version);
	}

	(void)DPAA2_CMD_IO_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_io:
	(void)DPAA2_CMD_IO_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, io_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	dpaa2_io_detach(dev);
	return (ENXIO);
}

/**
 * @brief Enqueue multiple frames to a frame queue using one FQID.
 */
static int
dpaa2_io_enq_multiple_fq(device_t iodev, uint32_t fqid,
    struct dpaa2_fd *fd, int frames_n)
{
	struct dpaa2_io_softc *sc = device_get_softc(iodev);
	struct dpaa2_swp *swp = sc->swp;
	struct dpaa2_eq_desc ed;
	uint32_t flags = 0;

	memset(&ed, 0, sizeof(ed));

	/* Set up the enqueue descriptor. */
	dpaa2_swp_set_ed_norp(&ed, false);
	dpaa2_swp_set_ed_fq(&ed, fqid);

	return (dpaa2_swp_enq_mult(swp, &ed, fd, &flags, frames_n));
}
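
/*
 * Illustrative sketch (not part of this driver): a consumer such as the
 * network driver could push a batch of already-prepared frame descriptors to
 * a Tx frame queue through the method above.  The DPAA2_SWP_ENQ_MULTIPLE_FQ
 * wrapper name is assumed to be generated from dpaa2_swp_if; "txq_fqid" and
 * the descriptors are placeholders, and the meaning of the return value is
 * whatever dpaa2_swp_enq_mult() reports.
 */
#if 0
static int
example_tx_batch(device_t iodev, uint32_t txq_fqid, struct dpaa2_fd *fds,
    int nframes)
{
	/* The frame descriptors are assumed to be filled in by the caller. */
	return (DPAA2_SWP_ENQ_MULTIPLE_FQ(iodev, txq_fqid, fds, nframes));
}
#endif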

/**
 * @brief Configure the channel data availability notification (CDAN)
 * in a particular WQ channel paired with the DPIO.
 */
static int
dpaa2_io_conf_wq_channel(device_t iodev, struct dpaa2_io_notif_ctx *ctx)
{
	struct dpaa2_io_softc *sc = device_get_softc(iodev);

	/* Enable generation of CDAN notifications. */
	if (ctx->cdan_en) {
		return (dpaa2_swp_conf_wq_channel(sc->swp, ctx->fq_chan_id,
		    DPAA2_WQCHAN_WE_EN | DPAA2_WQCHAN_WE_CTX, ctx->cdan_en,
		    ctx->qman_ctx));
	}

	return (0);
}
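
/*
 * Illustrative sketch (not part of this driver): how a consumer might fill in
 * a notification context before passing it to the method above.  Field usage
 * is inferred from dpaa2_io_intr() below, which casts the QMan context of a
 * CDAN back to a struct dpaa2_io_notif_ctx pointer and calls its poll()
 * callback; example_poll(), "chan_id" and "channel" are placeholders, and the
 * exact field types live in dpaa2_io.h.
 */
#if 0
static void
example_poll(void *channel)
{
	/* Drain frames for this channel, e.g. by scheduling a taskqueue. */
}

static void
example_setup_cdan(device_t iodev, struct dpaa2_io_notif_ctx *ctx,
    uint16_t chan_id, void *channel)
{
	ctx->fq_chan_id = chan_id;	/* WQ channel paired with the DPIO */
	ctx->cdan_en = true;		/* ask QBMan to generate CDANs */
	ctx->poll = example_poll;	/* invoked from dpaa2_io_intr() */
	ctx->channel = channel;		/* argument handed to poll() */
	/* Let each CDAN carry a pointer back to this context. */
	ctx->qman_ctx = (uint64_t)(uintptr_t)ctx;

	(void)DPAA2_SWP_CONF_WQ_CHANNEL(iodev, ctx);
}
#endif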

/**
 * @brief Query current configuration/state of the buffer pool.
 */
static int
dpaa2_io_query_bp(device_t iodev, uint16_t bpid, struct dpaa2_bp_conf *conf)
{
	struct dpaa2_io_softc *sc = device_get_softc(iodev);

	return (dpaa2_swp_query_bp(sc->swp, bpid, conf));
}

/**
 * @brief Release one or more buffer pointers to the QBMan buffer pool.
 */
static int
dpaa2_io_release_bufs(device_t iodev, uint16_t bpid, bus_addr_t *buf,
    uint32_t buf_num)
{
	struct dpaa2_io_softc *sc = device_get_softc(iodev);

	return (dpaa2_swp_release_bufs(sc->swp, bpid, buf, buf_num));
}

/**
 * @brief Configure the DPIO object to generate interrupts.
 */
static int
dpaa2_io_setup_irqs(device_t dev)
{
	struct dpaa2_io_softc *sc = device_get_softc(dev);
	int error;

	/*
	 * Set up interrupts generated by the software portal.
	 */
	dpaa2_swp_set_intr_trigger(sc->swp, DPAA2_SWP_INTR_DQRI);
	dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu);

	/* Configure IRQs. */
	error = dpaa2_io_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI: error=%d\n",
		    __func__, error);
		return (error);
	}
	if ((sc->irq_resource = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->irq_resource, INTR_TYPE_NET | INTR_MPSAFE |
	    INTR_ENTROPY, NULL, dpaa2_io_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		return (ENXIO);
	}

	/* Wrap the DPIO ID around the number of CPUs. */
	bus_bind_intr(dev, sc->irq_resource, sc->attr.id % mp_ncpus);

	/*
	 * Set up and enable the Static Dequeue Command to receive CDANs from
	 * channel 0.
	 */
	if (sc->swp_desc.has_notif)
		dpaa2_swp_set_push_dequeue(sc->swp, 0, true);

	return (0);
}

static int
dpaa2_io_release_irqs(device_t dev)
{
	struct dpaa2_io_softc *sc = device_get_softc(dev);

	/* Disable receiving CDANs from channel 0. */
	if (sc->swp_desc.has_notif)
		dpaa2_swp_set_push_dequeue(sc->swp, 0, false);

	/* Release IRQ resources. */
	if (sc->intr != NULL)
		bus_teardown_intr(dev, sc->irq_resource, &sc->intr);
	if (sc->irq_resource != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid[0],
		    sc->irq_resource);

	(void)dpaa2_io_release_msi(device_get_softc(dev));

	/* Configure software portal to stop generating interrupts. */
	dpaa2_swp_set_intr_trigger(sc->swp, 0);
	dpaa2_swp_clear_intr_status(sc->swp, 0xFFFFFFFFu);

	return (0);
}

/**
 * @brief Allocate MSI interrupts for this DPAA2 I/O object.
 */
static int
dpaa2_io_setup_msi(struct dpaa2_io_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_IO_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_IO_MSI_COUNT);
	val = MIN(val, DPAA2_IO_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}

static int
dpaa2_io_release_msi(struct dpaa2_io_softc *sc)
{
	int error;

	error = pci_release_msi(sc->dev);
	if (error) {
		device_printf(sc->dev, "%s: failed to release MSI: error=%d\n",
		    __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief DPAA2 I/O interrupt handler.
 */
static void
dpaa2_io_intr(void *arg)
{
	struct dpaa2_io_softc *sc = (struct dpaa2_io_softc *) arg;
	struct dpaa2_io_notif_ctx *ctx[DPIO_POLL_MAX];
	struct dpaa2_dq dq;
	uint32_t idx, status;
	uint16_t flags;
	int rc, cdan_n = 0;

	status = dpaa2_swp_read_intr_status(sc->swp);
	if (status == 0) {
		return;
	}

	DPAA2_SWP_LOCK(sc->swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(sc->swp);
		return;
	}

	for (int i = 0; i < DPIO_POLL_MAX; i++) {
		rc = dpaa2_swp_dqrr_next_locked(sc->swp, &dq, &idx);
		if (rc) {
			break;
		}

		if ((dq.common.verb & DPAA2_DQRR_RESULT_MASK) ==
		    DPAA2_DQRR_RESULT_CDAN) {
			ctx[cdan_n++] = (struct dpaa2_io_notif_ctx *) dq.scn.ctx;
		} else {
			/* TODO: Report unknown DQRR entry. */
		}
		dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_DCAP, idx);
	}
	DPAA2_SWP_UNLOCK(sc->swp);

	for (int i = 0; i < cdan_n; i++) {
		ctx[i]->poll(ctx[i]->channel);
	}

	/* Re-enable software portal interrupts. */
	dpaa2_swp_clear_intr_status(sc->swp, status);
	dpaa2_swp_write_reg(sc->swp, DPAA2_SWP_CINH_IIR, 0);
}

static device_method_t dpaa2_io_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dpaa2_io_probe),
	DEVMETHOD(device_attach,	dpaa2_io_attach),
	DEVMETHOD(device_detach,	dpaa2_io_detach),

	/* QBMan software portal interface */
	DEVMETHOD(dpaa2_swp_enq_multiple_fq,	dpaa2_io_enq_multiple_fq),
	DEVMETHOD(dpaa2_swp_conf_wq_channel,	dpaa2_io_conf_wq_channel),
	DEVMETHOD(dpaa2_swp_query_bp,		dpaa2_io_query_bp),
	DEVMETHOD(dpaa2_swp_release_bufs,	dpaa2_io_release_bufs),

	DEVMETHOD_END
};

static driver_t dpaa2_io_driver = {
	"dpaa2_io",
	dpaa2_io_methods,
	sizeof(struct dpaa2_io_softc),
};

DRIVER_MODULE(dpaa2_io, dpaa2_rc, dpaa2_io_driver, 0, 0);