xref: /freebsd/sys/dev/xdma/xdma_sg.c (revision 685dc743)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

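/*
 * Argument block for the busdma load callback: the callback stores the
 * resulting segment list, segment count and error status here.
 */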
struct seg_load_request {
	struct bus_dma_segment *seg;
	uint32_t nsegs;
	uint32_t error;
};

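/*
 * Free the reserved per-request buffers: unmap and release the KVA and
 * return the backing physical memory to the channel's vmem arena.
 */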
static void
xchan_bufs_free_reserved(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	vm_size_t size;
	int i;

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = xr->buf.size;
		if (xr->buf.vaddr) {
			pmap_kremove_device(xr->buf.vaddr, size);
			kva_free(xr->buf.vaddr, size);
			xr->buf.vaddr = 0;
		}
		if (xr->buf.paddr) {
			vmem_free(xchan->vmem, xr->buf.paddr, size);
			xr->buf.paddr = 0;
		}
		xr->buf.size = 0;
	}
}

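/*
 * Reserve one buffer per request slot from the channel's vmem arena and
 * map it into kernel virtual address space.
 */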
static int
xchan_bufs_alloc_reserved(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	vmem_addr_t addr;
	vm_size_t size;
	int i;

	xdma = xchan->xdma;

	if (xchan->vmem == NULL)
		return (ENOBUFS);

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		size = round_page(xchan->maxsegsize);
		if (vmem_alloc(xchan->vmem, size,
		    M_BESTFIT | M_NOWAIT, &addr)) {
			device_printf(xdma->dev,
			    "%s: Can't allocate memory\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}

		xr->buf.size = size;
		xr->buf.paddr = addr;
		xr->buf.vaddr = kva_alloc(size);
		if (xr->buf.vaddr == 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate KVA\n", __func__);
			xchan_bufs_free_reserved(xchan);
			return (ENOMEM);
		}
		pmap_kenter_device(xr->buf.vaddr, size, addr);
	}

	return (0);
}

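/*
 * Create a bus_dma tag matching the channel's constraints and allocate
 * one DMA map per request slot.
 */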
static int
xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int err;
	int i;

	xdma = xchan->xdma;

	/* Create bus_dma tag */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
	    xchan->alignment,		/* alignment */
	    xchan->boundary,		/* boundary */
	    xchan->lowaddr,		/* lowaddr */
	    xchan->highaddr,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
	    xchan->maxnsegs,		/* nsegments */
	    xchan->maxsegsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag_bufs);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
		    &xr->buf.map);
		if (err != 0) {
			device_printf(xdma->dev,
			    "%s: Can't create buf DMA map.\n", __func__);

			/* Cleanup. */
			bus_dma_tag_destroy(xchan->dma_tag_bufs);

			return (-1);
		}
	}

	return (0);
}

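/* Allocate per-request buffers using either busdma or reserved memory. */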
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	if (xdma == NULL) {
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		ret = xchan_bufs_alloc_busdma(xchan);
	else
		ret = xchan_bufs_alloc_reserved(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_BUFS_ALLOCATED;

	return (0);
}

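/* Release the per-request buffers allocated by xchan_bufs_alloc(). */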
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	int i;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
		return (-1);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			b = &xr->buf;
			bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
		}
		bus_dma_tag_destroy(xchan->dma_tag_bufs);
	} else
		xchan_bufs_free_reserved(xchan);

	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

	return (0);
}

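/* Release all scatter-gather resources held by the channel. */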
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

	xchan_bufs_free(xchan);
	xchan_sglist_free(xchan);
	xchan_bank_free(xchan);
}

/*
 * Prepare xchan for a scatter-gather transfer.
 * xr_num - size of the xdma request queue,
 * maxsegsize - maximum size, in bytes, of a single scatter-gather list element.
 */
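/*
 * Illustrative only: a hypothetical consumer could configure a channel
 * for mbuf-sized transfers roughly as follows (all values are example
 * choices, not requirements of this API):
 *
 *	if (xdma_prep_sg(xchan, 32, MCLBYTES, 8, 16, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR) != 0)
 *		return (ENXIO);
 *
 * Requests are then enqueued and submitted through the xdma queue
 * routines (e.g. xdma_queue_submit()).
 */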
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	if (xchan->flags & XCHAN_CONFIGURED) {
		device_printf(xdma->dev,
		    "%s: Channel is already configured.\n", __func__);
		return (-1);
	}

	xchan->xr_num = xr_num;
	xchan->maxsegsize = maxsegsize;
	xchan->maxnsegs = maxnsegs;
	xchan->alignment = alignment;
	xchan->boundary = boundary;
	xchan->lowaddr = lowaddr;
	xchan->highaddr = highaddr;

	if (xchan->maxnsegs > XDMA_MAX_SEG) {
		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
		    __func__);
		return (-1);
	}

	xchan_bank_init(xchan);

	/* Allocate sglist. */
	ret = xchan_sglist_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate sglist.\n", __func__);
		return (-1);
	}

	/* Allocate buffers if required. */
	if (xchan->caps & (XCHAN_CAP_BUSDMA | XCHAN_CAP_BOUNCE)) {
		ret = xchan_bufs_alloc(xchan);
		if (ret != 0) {
			device_printf(xdma->dev,
			    "%s: Can't allocate bufs.\n", __func__);

			/* Cleanup */
			xchan_sglist_free(xchan);
			xchan_bank_free(xchan);

			return (-1);
		}
	}

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare SG transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}

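/*
 * Called by the DMA engine driver when a segment of the current request
 * has completed.  Once all segments are done the buffer is synced and
 * unloaded (or the bounce/IOMMU mapping is torn down) and the request is
 * moved from the processing queue to the out queue.
 */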
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	bus_addr_t addr;

	xr = TAILQ_FIRST(&xchan->processing);
	if (xr == NULL)
		panic("request not found\n");

	b = &xr->buf;

	atomic_subtract_int(&b->nsegs_left, 1);

	if (b->nsegs_left == 0) {
		if (xchan->caps & XCHAN_CAP_BUSDMA) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTWRITE);
			else
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
		} else if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->req_type == XR_TYPE_MBUF &&
			    xr->direction == XDMA_DEV_TO_MEM)
				m_copyback(xr->m, 0, st->transferred,
				    (void *)xr->buf.vaddr);
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				addr = xr->src_addr;
			else
				addr = xr->dst_addr;
			xdma_iommu_remove_entry(xchan, addr);
		}
		xr->status.error = st->error;
		xr->status.transferred = st->transferred;

		QUEUE_PROC_LOCK(xchan);
		TAILQ_REMOVE(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);

		QUEUE_OUT_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
		QUEUE_OUT_UNLOCK(xchan);
	}
}

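/*
 * bus_dmamap_load() callback: copy the segment list and count into the
 * caller's seg_load_request, or record the error.
 */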
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct seg_load_request *slr;
	struct bus_dma_segment *seg;
	int i;

	slr = arg;
	seg = slr->seg;

	if (error != 0) {
		slr->error = error;
		return;
	}

	slr->nsegs = nsegs;

	for (i = 0; i < nsegs; i++) {
		seg[i].ds_addr = segs[i].ds_addr;
		seg[i].ds_len = segs[i].ds_len;
	}
}

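/*
 * Load a request (mbuf, bio or virtual buffer) using busdma and fill in
 * the segment array.  Returns the number of segments, or 0 on failure.
 */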
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct seg_load_request slr;
	uint32_t nsegs;
	void *addr;
	int error;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
		    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
		break;
	case XR_TYPE_BIO:
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
		    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	case XR_TYPE_VIRT:
		switch (xr->direction) {
		case XDMA_MEM_TO_DEV:
			addr = (void *)xr->src_addr;
			break;
		case XDMA_DEV_TO_MEM:
			addr = (void *)xr->dst_addr;
			break;
		default:
			device_printf(xdma->dma_dev,
			    "%s: Direction is not supported\n", __func__);
			return (0);
		}
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
		    addr, (xr->block_len * xr->block_num),
		    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	default:
		break;
	}

	if (error != 0) {
		if (error == ENOMEM) {
			/*
			 * Out of memory. Try again later.
			 * TODO: count errors.
			 */
		} else
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed with err %d\n",
			    __func__, error);
		return (0);
	}

	if (xr->direction == XDMA_MEM_TO_DEV)
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREREAD);

	return (nsegs);
}

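/*
 * Load a request without busdma: either bounce-copy the data, install an
 * IOMMU entry, or use the mbuf's address directly.  Only unsegmented
 * (single-segment) requests are supported here.
 */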
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	struct mbuf *m;
	uint32_t nsegs;
	vm_offset_t va, addr;
	bus_addr_t pa;
	vm_prot_t prot;

	m = xr->m;

	KASSERT(xchan->caps & (XCHAN_CAP_NOSEG | XCHAN_CAP_BOUNCE),
	    ("Handling segmented data is not implemented here."));

	nsegs = 1;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		if (xchan->caps & XCHAN_CAP_BOUNCE) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				m_copydata(m, 0, m->m_pkthdr.len,
				    (void *)xr->buf.vaddr);
			seg[0].ds_addr = (bus_addr_t)xr->buf.paddr;
		} else if (xchan->caps & XCHAN_CAP_IOMMU) {
			addr = mtod(m, bus_addr_t);
			pa = vtophys(addr);

			if (xr->direction == XDMA_MEM_TO_DEV)
				prot = VM_PROT_READ;
			else
				prot = VM_PROT_WRITE;

			xdma_iommu_add_entry(xchan, &va,
			    pa, m->m_pkthdr.len, prot);

			/*
			 * Save VA so we can unload data later
			 * after completion of this transfer.
			 */
			if (xr->direction == XDMA_MEM_TO_DEV)
				xr->src_addr = va;
			else
				xr->dst_addr = va;
			seg[0].ds_addr = va;
		} else
			seg[0].ds_addr = mtod(m, bus_addr_t);
		seg[0].ds_len = m->m_pkthdr.len;
		break;
	case XR_TYPE_BIO:
	case XR_TYPE_VIRT:
	default:
		panic("implement me\n");
	}

	return (nsegs);
}

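/*
 * Build the segment list for a request.  Returns the number of segments,
 * or 0 if the caller should retry later.
 */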
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
	int nsegs;

	nsegs = 0;

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
	else
		nsegs = _xdma_load_data(xchan, xr, seg);
	if (nsegs == 0)
		return (0); /* Try again later. */

	xr->buf.nsegs = nsegs;
	xr->buf.nsegs_left = nsegs;

	return (nsegs);
}

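/*
 * Move requests from the input queue to the processing queue while the
 * DMA engine has capacity, filling the sglist.  Returns the number of
 * sglist entries produced.
 */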
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
	struct bus_dma_segment seg[XDMA_MAX_SEG];
	struct xdma_request *xr;
	struct xdma_request *xr_tmp;
	xdma_controller_t *xdma;
	uint32_t capacity;
	uint32_t n;
	uint32_t c;
	int nsegs;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;

	n = 0;
	c = 0;

	ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't get DMA controller capacity.\n", __func__);
		return (-1);
	}

	TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
		switch (xr->req_type) {
		case XR_TYPE_MBUF:
			if ((xchan->caps & XCHAN_CAP_NOSEG) ||
			    (c > xchan->maxnsegs))
				c = xdma_mbuf_defrag(xchan, xr);
			break;
		case XR_TYPE_BIO:
		case XR_TYPE_VIRT:
		default:
			c = 1;
		}

		if (capacity <= (c + n)) {
			/*
			 * No space yet available for the entire
			 * request in the DMA engine.
			 */
			break;
		}

		if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
			/* Sglist is full. */
			break;
		}

		nsegs = xdma_load_data(xchan, xr, seg);
		if (nsegs == 0)
			break;

		xdma_sglist_add(&sg[n], seg, nsegs, xr);
		n += nsegs;

		QUEUE_IN_LOCK(xchan);
		TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
		QUEUE_IN_UNLOCK(xchan);

		QUEUE_PROC_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);
	}

	return (n);
}

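/*
 * Build an sglist from the channel's input queue and hand it to the DMA
 * engine driver.
 */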
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
	struct xdma_sglist *sg;
	xdma_controller_t *xdma;
	uint32_t sg_n;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_ASSERT_LOCKED(xchan);

	sg = xchan->sg;

	if ((xchan->caps & (XCHAN_CAP_BOUNCE | XCHAN_CAP_BUSDMA)) &&
	    (xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit a transfer: no bufs\n",
		    __func__);
		return (-1);
	}

	sg_n = xdma_process(xchan, sg);
	if (sg_n == 0)
		return (0); /* Nothing to submit */

	/* Now submit sglist to DMA engine driver. */
	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit an sglist.\n", __func__);
		return (-1);
	}

	return (0);
}