/*-
 * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sx.h>

#include <machine/bus.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

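/*
 * Argument for the bus_dmamap_load*() callback: the caller-provided
 * segment array to fill, the number of segments produced, and any
 * error reported by busdma.
 */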
struct seg_load_request {
	struct bus_dma_segment *seg;
	uint32_t nsegs;
	uint32_t error;
};

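/*
 * Non-busdma path: allocate one contiguous bounce buffer per request
 * slot.  Data is staged through these buffers (see _xdma_load_data())
 * rather than mapped in place.
 */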
static int
_xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int i;

	xdma = xchan->xdma;

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		xr->buf.cbuf = contigmalloc(xchan->maxsegsize,
		    M_XDMA, M_NOWAIT, 0, ~0, PAGE_SIZE, 0);
		if (xr->buf.cbuf == NULL) {
			device_printf(xdma->dev,
			    "%s: Can't allocate contiguous kernel"
			    " physical memory\n", __func__);
			/* Release the buffers allocated so far. */
			while (--i >= 0)
				contigfree(xchan->xr_mem[i].buf.cbuf,
				    xchan->maxsegsize, M_XDMA);
			return (-1);
		}
	}

	return (0);
}

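/*
 * busdma path: create a single DMA tag describing the channel
 * constraints, plus one DMA map per request slot so that each request
 * can be loaded, synced and unloaded individually.
 */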
static int
_xchan_bufs_alloc_busdma(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	struct xdma_request *xr;
	int err;
	int i;

	xdma = xchan->xdma;

	/* Create bus_dma tag */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),	/* Parent tag. */
	    xchan->alignment,		/* alignment */
	    xchan->boundary,		/* boundary */
	    xchan->lowaddr,		/* lowaddr */
	    xchan->highaddr,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    xchan->maxsegsize * xchan->maxnsegs, /* maxsize */
	    xchan->maxnsegs,		/* nsegments */
	    xchan->maxsegsize,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag_bufs);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	for (i = 0; i < xchan->xr_num; i++) {
		xr = &xchan->xr_mem[i];
		err = bus_dmamap_create(xchan->dma_tag_bufs, 0,
		    &xr->buf.map);
		if (err != 0) {
			device_printf(xdma->dev,
			    "%s: Can't create buf DMA map.\n", __func__);

			/* Cleanup: destroy the maps created so far. */
			while (--i >= 0)
				bus_dmamap_destroy(xchan->dma_tag_bufs,
				    xchan->xr_mem[i].buf.map);
			bus_dma_tag_destroy(xchan->dma_tag_bufs);

			return (-1);
		}
	}

	return (0);
}

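/*
 * Allocate the per-request buffers for a channel, using busdma maps
 * when the channel supports them and contiguous bounce buffers
 * otherwise.
 */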
static int
xchan_bufs_alloc(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	if (xdma == NULL) {
		/* Can't use device_printf() here: there is no device. */
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		ret = _xchan_bufs_alloc_busdma(xchan);
	else
		ret = _xchan_bufs_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_BUFS_ALLOCATED;

	return (0);
}

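/* Release the per-request buffers allocated by xchan_bufs_alloc(). */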
static int
xchan_bufs_free(xdma_channel_t *xchan)
{
	struct xdma_request *xr;
	struct xchan_buf *b;
	int i;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0)
		return (-1);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			b = &xr->buf;
			bus_dmamap_destroy(xchan->dma_tag_bufs, b->map);
		}
		bus_dma_tag_destroy(xchan->dma_tag_bufs);
	} else {
		for (i = 0; i < xchan->xr_num; i++) {
			xr = &xchan->xr_mem[i];
			contigfree(xr->buf.cbuf, xchan->maxsegsize, M_XDMA);
		}
	}

	xchan->flags &= ~XCHAN_BUFS_ALLOCATED;

	return (0);
}

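/* Release all scatter-gather resources of a channel. */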
void
xdma_channel_free_sg(xdma_channel_t *xchan)
{

	xchan_bufs_free(xchan);
	xchan_sglist_free(xchan);
	xchan_bank_free(xchan);
}

/*
 * Prepare xchan for a scatter-gather transfer.
 * xr_num - size of the xdma request queue,
 * maxsegsize - maximum size in bytes of a scatter-gather list element,
 * maxnsegs - maximum number of elements in a request's scatter-gather list,
 * alignment, boundary, lowaddr, highaddr - DMA buffer restrictions,
 *     with the same meaning as for bus_dma_tag_create(9).
 */
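/*
 * Example (all values are hypothetical, for illustration only): a
 * driver moving mbuf-sized packets could configure 64 in-flight
 * requests of up to MCLBYTES each, split across at most 8 segments,
 * 16-byte aligned and addressable anywhere in the 32-bit space:
 *
 *	err = xdma_prep_sg(xchan, 64, MCLBYTES, 8, 16, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
 */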
int
xdma_prep_sg(xdma_channel_t *xchan, uint32_t xr_num,
    bus_size_t maxsegsize, bus_size_t maxnsegs,
    bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	KASSERT(xdma != NULL, ("xdma is NULL"));

	if (xchan->flags & XCHAN_CONFIGURED) {
		device_printf(xdma->dev,
		    "%s: Channel is already configured.\n", __func__);
		return (-1);
	}

	xchan->xr_num = xr_num;
	xchan->maxsegsize = maxsegsize;
	xchan->maxnsegs = maxnsegs;
	xchan->alignment = alignment;
	xchan->boundary = boundary;
	xchan->lowaddr = lowaddr;
	xchan->highaddr = highaddr;

	if (xchan->maxnsegs > XDMA_MAX_SEG) {
		device_printf(xdma->dev, "%s: maxnsegs is too big\n",
		    __func__);
		return (-1);
	}

	xchan_bank_init(xchan);

	/* Allocate sglist. */
	ret = xchan_sglist_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate sglist.\n", __func__);
		return (-1);
	}

	/* Allocate bufs. */
	ret = xchan_bufs_alloc(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate bufs.\n", __func__);

		/* Cleanup */
		xchan_sglist_free(xchan);
		xchan_bank_free(xchan);

		return (-1);
	}

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_SG);

	XCHAN_LOCK(xchan);
	ret = XDMA_CHANNEL_PREP_SG(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare SG transfer.\n", __func__);
		XCHAN_UNLOCK(xchan);

		return (-1);
	}
	XCHAN_UNLOCK(xchan);

	return (0);
}

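/*
 * Called by the DMA engine driver for each completed segment.  Once
 * the last segment of a request is done, sync and unload its map and
 * move the request from the processing queue to queue_out.
 */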
void
xchan_seg_done(xdma_channel_t *xchan,
    struct xdma_transfer_status *st)
{
	struct xdma_request *xr;
	xdma_controller_t *xdma;
	struct xchan_buf *b;

	xdma = xchan->xdma;

	xr = TAILQ_FIRST(&xchan->processing);
	if (xr == NULL)
		panic("%s: request not found", __func__);

	b = &xr->buf;

	/*
	 * Use the atomic's return value so that only the thread that
	 * completes the last segment finalizes the request.
	 */
	if (atomic_fetchadd_int(&b->nsegs_left, -1) == 1) {
		if (xchan->caps & XCHAN_CAP_BUSDMA) {
			if (xr->direction == XDMA_MEM_TO_DEV)
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTWRITE);
			else
				bus_dmamap_sync(xchan->dma_tag_bufs, b->map,
				    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(xchan->dma_tag_bufs, b->map);
		}
		xr->status.error = st->error;
		xr->status.transferred = st->transferred;

		QUEUE_PROC_LOCK(xchan);
		TAILQ_REMOVE(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);

		QUEUE_OUT_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->queue_out, xr, xr_next);
		QUEUE_OUT_UNLOCK(xchan);
	}
}

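/*
 * bus_dmamap_load*() callback: copy the segments produced by busdma
 * into the seg_load_request so the caller can use them after the
 * (synchronous, BUS_DMA_NOWAIT) load returns.
 */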
static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct seg_load_request *slr;
	struct bus_dma_segment *seg;
	int i;

	slr = arg;
	seg = slr->seg;

	if (error != 0) {
		slr->error = error;
		return;
	}

	slr->nsegs = nsegs;

	for (i = 0; i < nsegs; i++) {
		seg[i].ds_addr = segs[i].ds_addr;
		seg[i].ds_len = segs[i].ds_len;
	}
}

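/*
 * Load a request into the DMA map of its buffer (mbuf, bio or virtual
 * address, depending on the request type) and pre-sync the map.
 * Returns the number of physical segments, or 0 if the load failed
 * and should be retried later.
 */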
static int
_xdma_load_data_busdma(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct seg_load_request slr;
	int nsegs;
	void *addr;
	int error;

	xdma = xchan->xdma;

	error = 0;
	nsegs = 0;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		error = bus_dmamap_load_mbuf_sg(xchan->dma_tag_bufs,
		    xr->buf.map, xr->m, seg, &nsegs, BUS_DMA_NOWAIT);
		break;
	case XR_TYPE_BIO:
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load_bio(xchan->dma_tag_bufs,
		    xr->buf.map, xr->bp, xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	case XR_TYPE_VIRT:
		switch (xr->direction) {
		case XDMA_MEM_TO_DEV:
			addr = (void *)xr->src_addr;
			break;
		case XDMA_DEV_TO_MEM:
			addr = (void *)xr->dst_addr;
			break;
		default:
			device_printf(xdma->dma_dev,
			    "%s: Direction is not supported\n", __func__);
			return (0);
		}
		slr.nsegs = 0;
		slr.error = 0;
		slr.seg = seg;
		error = bus_dmamap_load(xchan->dma_tag_bufs, xr->buf.map,
		    addr, (xr->block_len * xr->block_num),
		    xdma_dmamap_cb, &slr, BUS_DMA_NOWAIT);
		if (slr.error != 0) {
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed, err %d\n",
			    __func__, slr.error);
			return (0);
		}
		nsegs = slr.nsegs;
		break;
	default:
		break;
	}

	if (error != 0) {
		if (error == ENOMEM) {
			/*
			 * Out of memory. Try again later.
			 * TODO: count errors.
			 */
		} else
			device_printf(xdma->dma_dev,
			    "%s: bus_dmamap_load failed with err %d\n",
			    __func__, error);
		return (0);
	}

	if (xr->direction == XDMA_MEM_TO_DEV)
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(xchan->dma_tag_bufs, xr->buf.map,
		    BUS_DMASYNC_PREREAD);

	return (nsegs);
}

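/*
 * Non-busdma variant of the above.  For memory-to-device transfers
 * the mbuf data is copied into the contiguous bounce buffer;
 * device-to-memory transfers target the mbuf data directly.  Either
 * way the transfer is described as a single segment.
 */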
static int
_xdma_load_data(xdma_channel_t *xchan, struct xdma_request *xr,
    struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	struct mbuf *m;
	uint32_t nsegs;

	xdma = xchan->xdma;

	m = xr->m;

	nsegs = 1;

	switch (xr->req_type) {
	case XR_TYPE_MBUF:
		if (xr->direction == XDMA_MEM_TO_DEV) {
			m_copydata(m, 0, m->m_pkthdr.len, xr->buf.cbuf);
			seg[0].ds_addr = (bus_addr_t)xr->buf.cbuf;
			seg[0].ds_len = m->m_pkthdr.len;
		} else {
			seg[0].ds_addr = mtod(m, bus_addr_t);
			seg[0].ds_len = m->m_pkthdr.len;
		}
		break;
	case XR_TYPE_BIO:
	case XR_TYPE_VIRT:
	default:
		panic("%s: implement me", __func__);
	}

	return (nsegs);
}

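/*
 * Load a request's data and record the segment count so that
 * xchan_seg_done() can tell when the whole request has completed.
 */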
static int
xdma_load_data(xdma_channel_t *xchan,
    struct xdma_request *xr, struct bus_dma_segment *seg)
{
	xdma_controller_t *xdma;
	int nsegs;

	xdma = xchan->xdma;

	nsegs = 0;

	if (xchan->caps & XCHAN_CAP_BUSDMA)
		nsegs = _xdma_load_data_busdma(xchan, xr, seg);
	else
		nsegs = _xdma_load_data(xchan, xr, seg);
	if (nsegs == 0)
		return (0); /* Try again later. */

	xr->buf.nsegs = nsegs;
	xr->buf.nsegs_left = nsegs;

	return (nsegs);
}

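/*
 * Move requests from queue_in into the sglist, as long as both the
 * DMA engine capacity and the sglist have room, and put them on the
 * processing queue.  Returns the number of sglist entries filled.
 */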
static int
xdma_process(xdma_channel_t *xchan,
    struct xdma_sglist *sg)
{
	struct bus_dma_segment seg[XDMA_MAX_SEG];
	struct xdma_request *xr;
	struct xdma_request *xr_tmp;
	xdma_controller_t *xdma;
	uint32_t capacity;
	uint32_t n;
	uint32_t c;
	int nsegs;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;

	n = 0;

	ret = XDMA_CHANNEL_CAPACITY(xdma->dma_dev, xchan, &capacity);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't get DMA controller capacity.\n", __func__);
		return (-1);
	}

	TAILQ_FOREACH_SAFE(xr, &xchan->queue_in, xr_next, xr_tmp) {
		switch (xr->req_type) {
		case XR_TYPE_MBUF:
			c = xdma_mbuf_defrag(xchan, xr);
			break;
		case XR_TYPE_BIO:
		case XR_TYPE_VIRT:
		default:
			c = 1;
		}

		if (capacity <= (c + n)) {
			/*
			 * No space yet available for the entire
			 * request in the DMA engine.
			 */
			break;
		}

		if ((c + n + xchan->maxnsegs) >= XDMA_SGLIST_MAXLEN) {
			/* Sglist is full. */
			break;
		}

		nsegs = xdma_load_data(xchan, xr, seg);
		if (nsegs == 0)
			break;

		xdma_sglist_add(&sg[n], seg, nsegs, xr);
		n += nsegs;

		QUEUE_IN_LOCK(xchan);
		TAILQ_REMOVE(&xchan->queue_in, xr, xr_next);
		QUEUE_IN_UNLOCK(xchan);

		QUEUE_PROC_LOCK(xchan);
		TAILQ_INSERT_TAIL(&xchan->processing, xr, xr_next);
		QUEUE_PROC_UNLOCK(xchan);
	}

	return (n);
}

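/*
 * Build an sglist from the pending requests and hand it to the DMA
 * engine driver.  Called with the channel lock held, after requests
 * have been enqueued on the channel.  A sketch of a caller (the
 * enqueue helper shown, xdma_enqueue_mbuf() from xdma.h, and its
 * queueing arguments are illustrative):
 *
 *	xdma_enqueue_mbuf(xchan, &m, 0, 1, 1, XDMA_MEM_TO_DEV);
 *	XCHAN_LOCK(xchan);
 *	xdma_queue_submit_sg(xchan);
 *	XCHAN_UNLOCK(xchan);
 */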
int
xdma_queue_submit_sg(xdma_channel_t *xchan)
{
	struct xdma_sglist *sg;
	xdma_controller_t *xdma;
	int sg_n;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XCHAN_ASSERT_LOCKED(xchan);

	sg = xchan->sg;

	if ((xchan->flags & XCHAN_BUFS_ALLOCATED) == 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit a transfer: no bufs\n",
		    __func__);
		return (-1);
	}

	sg_n = xdma_process(xchan, sg);
	if (sg_n < 0)
		return (-1);
	if (sg_n == 0)
		return (0); /* Nothing to submit. */

	/* Now submit sglist to DMA engine driver. */
	ret = XDMA_CHANNEL_SUBMIT_SG(xdma->dma_dev, xchan, sg, sg_n);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't submit an sglist.\n", __func__);
		return (-1);
	}

	return (0);
}
595