/*-
 * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/kobj.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/bus_dma.h>

#include <machine/bus.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");

/*
 * Multiple xDMA controllers may work with a single DMA device,
 * so we use a global lock for physical channel management.
 */
static struct mtx xdma_mtx;
#define	XDMA_LOCK()		mtx_lock(&xdma_mtx)
#define	XDMA_UNLOCK()		mtx_unlock(&xdma_mtx)
#define	XDMA_ASSERT_LOCKED()	mtx_assert(&xdma_mtx, MA_OWNED)

/*
 * Per-channel locks.  Parenthesize the macro argument so the macros
 * expand safely for any channel expression.
 */
#define	XCHAN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_lock)
#define	XCHAN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_lock)
#define	XCHAN_ASSERT_LOCKED(xchan)	mtx_assert(&(xchan)->mtx_lock, MA_OWNED)

/*
 * Allocate a virtual xDMA channel.
 */
xdma_channel_t *
xdma_channel_alloc(xdma_controller_t *xdma)
{
	xdma_channel_t *xchan;
	int ret;

	/* malloc(9) with M_WAITOK cannot fail, so no NULL check is needed. */
	xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
	xchan->xdma = xdma;

	XDMA_LOCK();

	/* Request a real channel from the hardware driver. */
	ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't request hardware channel.\n", __func__);
		XDMA_UNLOCK();
		free(xchan, M_XDMA);

		return (NULL);
	}

	TAILQ_INIT(&xchan->ie_handlers);
	mtx_init(&xchan->mtx_lock, "xDMA", NULL, MTX_DEF);

	TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);

	XDMA_UNLOCK();

	return (xchan);
}
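
/*
 * Example: how a consumer driver might obtain and release a channel.
 * This is a minimal sketch, not code from this file: "sc", its fields
 * and the "tx" name are hypothetical, and the controller handle is
 * assumed to come from xdma_ofw_get() (defined below, FDT only).
 *
 *	sc->xdma = xdma_ofw_get(sc->dev, "tx");
 *	if (sc->xdma == NULL)
 *		return (ENXIO);
 *	sc->xchan = xdma_channel_alloc(sc->xdma);
 *	if (sc->xchan == NULL)
 *		return (ENXIO);
 *	...
 *	xdma_channel_free(sc->xchan);
 *	xdma_put(sc->xdma);
 */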

int
xdma_channel_free(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int err;

	xdma = xchan->xdma;

	XDMA_LOCK();

	/* Free the real DMA channel. */
	err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't free real hw channel.\n", __func__);
		XDMA_UNLOCK();
		return (-1);
	}

	xdma_teardown_all_intr(xchan);

	/* Deallocate descriptors, if any. */
	xdma_desc_free(xchan);

	mtx_destroy(&xchan->mtx_lock);

	TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);

	free(xchan, M_XDMA);

	XDMA_UNLOCK();

	return (0);
}

int
xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *), void *arg,
    void **ihandler)
{
	struct xdma_intr_handler *ih;
	xdma_controller_t *xdma;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	/* Sanity check. */
	if (cb == NULL) {
		device_printf(xdma->dev,
		    "%s: Can't setup interrupt handler.\n",
		    __func__);

		return (-1);
	}

	/* malloc(9) with M_WAITOK cannot fail, so no NULL check is needed. */
	ih = malloc(sizeof(struct xdma_intr_handler),
	    M_XDMA, M_WAITOK | M_ZERO);
	ih->cb = cb;
	ih->cb_user = arg;

	TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);

	if (ihandler != NULL)
		*ihandler = ih;

	return (0);
}
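
/*
 * Example: installing a completion handler.  A minimal sketch; the
 * handler name and softc layout are hypothetical.  The callback is
 * invoked when the controller driver calls xdma_callback() below.
 *
 *	static int
 *	my_dma_done(void *arg)
 *	{
 *		struct my_softc *sc;
 *
 *		sc = arg;
 *		wakeup(sc);
 *
 *		return (0);
 *	}
 *
 *	...
 *	xdma_setup_intr(sc->xchan, my_dma_done, sc, &sc->ih);
 */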

int
xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
{
	xdma_controller_t *xdma;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	/* Sanity check. */
	if (ih == NULL) {
		device_printf(xdma->dev,
		    "%s: Can't teardown interrupt.\n", __func__);
		return (-1);
	}

	TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
	free(ih, M_XDMA);

	return (0);
}

int
xdma_teardown_all_intr(xdma_channel_t *xchan)
{
	struct xdma_intr_handler *ih_tmp;
	struct xdma_intr_handler *ih;
	xdma_controller_t *xdma;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
		TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
		free(ih, M_XDMA);
	}

	return (0);
}

static void
xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	xdma_channel_t *xchan;
	int i;

	xchan = (xdma_channel_t *)arg;
	KASSERT(xchan != NULL, ("xchan is NULL"));

	if (err) {
		xchan->map_err = 1;
		return;
	}

	for (i = 0; i < nseg; i++) {
		xchan->descs_phys[i].ds_addr = segs[i].ds_addr;
		xchan->descs_phys[i].ds_len = segs[i].ds_len;
	}
}

static int
xdma_desc_alloc_bus_dma(xdma_channel_t *xchan, uint32_t desc_size,
    uint32_t align)
{
	xdma_controller_t *xdma;
	bus_size_t all_desc_sz;
	xdma_config_t *conf;
	int nsegments;
	int err;

	xdma = xchan->xdma;
	conf = &xchan->conf;

	nsegments = conf->block_num;
	all_desc_sz = (nsegments * desc_size);

	err = bus_dma_tag_create(
	    bus_get_dma_tag(xdma->dev),
	    align, desc_size,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    all_desc_sz, nsegments,	/* maxsize, nsegments */
	    desc_size, 0,		/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &xchan->dma_tag);
	if (err) {
		device_printf(xdma->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	err = bus_dmamem_alloc(xchan->dma_tag, (void **)&xchan->descs,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &xchan->dma_map);
	if (err) {
		device_printf(xdma->dev,
		    "%s: Can't allocate memory for descriptors.\n", __func__);
		bus_dma_tag_destroy(xchan->dma_tag);
		return (-1);
	}

	xchan->descs_phys = malloc(nsegments * sizeof(xdma_descriptor_t),
	    M_XDMA, (M_WAITOK | M_ZERO));

	xchan->map_err = 0;
	err = bus_dmamap_load(xchan->dma_tag, xchan->dma_map, xchan->descs,
	    all_desc_sz, xdma_dmamap_cb, xchan, BUS_DMA_WAITOK);
	if (err == 0 && xchan->map_err != 0) {
		/* The load callback reported a mapping error. */
		bus_dmamap_unload(xchan->dma_tag, xchan->dma_map);
		err = -1;
	}
	if (err) {
		device_printf(xdma->dev,
		    "%s: Can't load DMA map.\n", __func__);
		free(xchan->descs_phys, M_XDMA);
		bus_dmamem_free(xchan->dma_tag, xchan->descs, xchan->dma_map);
		bus_dma_tag_destroy(xchan->dma_tag);
		return (-1);
	}

	return (0);
}

/*
 * This function is called by the DMA controller driver.
 */
int
xdma_desc_alloc(xdma_channel_t *xchan, uint32_t desc_size, uint32_t align)
{
	xdma_controller_t *xdma;
	int ret;

	XCHAN_ASSERT_LOCKED(xchan);

	xdma = xchan->xdma;
	if (xdma == NULL) {
		/* Can't use device_printf(): there is no controller here. */
		printf("%s: Channel was not allocated properly.\n", __func__);
		return (-1);
	}

	if (xchan->flags & XCHAN_DESC_ALLOCATED) {
		device_printf(xdma->dev,
		    "%s: Descriptors already allocated.\n", __func__);
		return (-1);
	}

	if ((xchan->flags & XCHAN_CONFIGURED) == 0) {
		device_printf(xdma->dev,
		    "%s: Channel has no configuration.\n", __func__);
		return (-1);
	}

	XCHAN_UNLOCK(xchan);
	ret = xdma_desc_alloc_bus_dma(xchan, desc_size, align);
	XCHAN_LOCK(xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't allocate memory for descriptors.\n",
		    __func__);
		return (-1);
	}

	xchan->flags |= XCHAN_DESC_ALLOCATED;

	/* We are going to write to the descriptors. */
	bus_dmamap_sync(xchan->dma_tag, xchan->dma_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xdma_desc_free(xdma_channel_t *xchan)
{

	if ((xchan->flags & XCHAN_DESC_ALLOCATED) == 0) {
		/* No descriptors allocated. */
		return (-1);
	}

	bus_dmamap_unload(xchan->dma_tag, xchan->dma_map);
	bus_dmamem_free(xchan->dma_tag, xchan->descs, xchan->dma_map);
	bus_dma_tag_destroy(xchan->dma_tag);
	free(xchan->descs_phys, M_XDMA);

	xchan->flags &= ~(XCHAN_DESC_ALLOCATED);

	return (0);
}

int
xdma_prep_memcpy(xdma_channel_t *xchan, uintptr_t src_addr,
    uintptr_t dst_addr, size_t len)
{
	xdma_controller_t *xdma;
	xdma_config_t *conf;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	conf = &xchan->conf;
	conf->direction = XDMA_MEM_TO_MEM;
	conf->src_addr = src_addr;
	conf->dst_addr = dst_addr;
	conf->block_len = len;
	conf->block_num = 1;

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_MEMCPY);

	XCHAN_LOCK(xchan);

	/* Deallocate old descriptors, if any. */
	xdma_desc_free(xchan);

	ret = XDMA_CHANNEL_PREP_MEMCPY(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare memcpy transfer.\n", __func__);
		/* Drop the channel lock, not the global one. */
		XCHAN_UNLOCK(xchan);

		return (-1);
	}

	if (xchan->flags & XCHAN_DESC_ALLOCATED) {
		/* The driver created xDMA descriptors. */
		bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
		    BUS_DMASYNC_POSTWRITE);
	}

	XCHAN_UNLOCK(xchan);

	return (0);
}
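
/*
 * Example: a one-shot memory-to-memory copy.  A minimal sketch assuming
 * "src" and "dst" are addresses the controller can access and that a
 * completion handler was installed with xdma_setup_intr().
 *
 *	if (xdma_prep_memcpy(sc->xchan, src, dst, len) != 0)
 *		return (ENXIO);
 *	if (xdma_begin(sc->xchan) != 0)
 *		return (ENXIO);
 */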

int
xdma_prep_cyclic(xdma_channel_t *xchan, enum xdma_direction dir,
    uintptr_t src_addr, uintptr_t dst_addr, int block_len,
    int block_num, int src_width, int dst_width)
{
	xdma_controller_t *xdma;
	xdma_config_t *conf;
	int ret;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	conf = &xchan->conf;
	conf->direction = dir;
	conf->src_addr = src_addr;
	conf->dst_addr = dst_addr;
	conf->block_len = block_len;
	conf->block_num = block_num;
	conf->src_width = src_width;
	conf->dst_width = dst_width;

	xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_CYCLIC);

	XCHAN_LOCK(xchan);

	/* Deallocate old descriptors, if any. */
	xdma_desc_free(xchan);

	ret = XDMA_CHANNEL_PREP_CYCLIC(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't prepare cyclic transfer.\n", __func__);
		/* Drop the channel lock, not the global one. */
		XCHAN_UNLOCK(xchan);
		return (-1);
	}

	if (xchan->flags & XCHAN_DESC_ALLOCATED) {
		/* The driver has created xDMA descriptors. */
		bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
		    BUS_DMASYNC_POSTWRITE);
	}

	XCHAN_UNLOCK(xchan);

	return (0);
}
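
/*
 * Example: a cyclic (ring) transfer, as used by audio-style peripherals.
 * A minimal sketch; the buffer and FIFO addresses, the 4 blocks of 4096
 * bytes and the 4-byte bus widths are hypothetical values.
 *
 *	ret = xdma_prep_cyclic(sc->xchan, XDMA_MEM_TO_DEV,
 *	    sc->buf_phys, sc->fifo_phys, 4096, 4, 4, 4);
 *	if (ret == 0)
 *		xdma_begin(sc->xchan);
 */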

int
xdma_begin(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_BEGIN);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't begin the channel operation.\n", __func__);
		return (-1);
	}

	return (0);
}

int
xdma_terminate(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_TERMINATE);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't terminate the channel operation.\n", __func__);
		return (-1);
	}

	return (0);
}

int
xdma_pause(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int ret;

	xdma = xchan->xdma;

	ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_PAUSE);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't pause the channel operation.\n", __func__);
		return (-1);
	}

	return (0);
}

int
xdma_callback(xdma_channel_t *xchan)
{
	struct xdma_intr_handler *ih_tmp;
	struct xdma_intr_handler *ih;

	TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
		if (ih->cb != NULL)
			ih->cb(ih->cb_user);
	}

	return (0);
}
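
/*
 * Example: how a controller driver might dispatch completion from its
 * own interrupt handler.  A minimal sketch; the softc layout and the
 * hardware acknowledge step are hypothetical.
 *
 *	static void
 *	my_controller_intr(void *arg)
 *	{
 *		struct my_controller_softc *sc;
 *
 *		sc = arg;
 *		... acknowledge the interrupt in hardware ...
 *		xdma_callback(sc->chan.xchan);
 *	}
 */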

void
xdma_assert_locked(void)
{

	XDMA_ASSERT_LOCKED();
}

#ifdef FDT
/*
 * Notify the DMA driver that we have machine-dependent data in the FDT.
 */
static int
xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
{
	int ret;

	ret = XDMA_OFW_MD_DATA(xdma->dma_dev, cells, ncells,
	    (void **)&xdma->data);

	return (ret);
}

/*
 * Allocate an xDMA controller.
 */
xdma_controller_t *
xdma_ofw_get(device_t dev, const char *prop)
{
	phandle_t node, parent;
	xdma_controller_t *xdma;
	device_t dma_dev;
	pcell_t *cells;
	int ncells;
	int error;
	int ndmas;
	int idx;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on a non-OFW-based device.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_get_length(node,
	    "dmas", "#dma-cells", &ndmas);
	if (error) {
		device_printf(dev,
		    "%s can't get dmas list.\n", __func__);
		return (NULL);
	}

	if (ndmas == 0) {
		device_printf(dev,
		    "%s dmas list is empty.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
	if (error != 0) {
		device_printf(dev,
		    "%s can't find string index.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
	    idx, &parent, &ncells, &cells);
	if (error != 0) {
		device_printf(dev,
		    "%s can't get dma device xref.\n", __func__);
		return (NULL);
	}

	dma_dev = OF_device_from_xref(parent);
	if (dma_dev == NULL) {
		device_printf(dev,
		    "%s can't get dma device.\n", __func__);
		free(cells, M_OFWPROP);
		return (NULL);
	}

	/* malloc(9) with M_WAITOK cannot fail, so no NULL check is needed. */
	xdma = malloc(sizeof(struct xdma_controller), M_XDMA,
	    M_WAITOK | M_ZERO);
	xdma->dev = dev;
	xdma->dma_dev = dma_dev;

	TAILQ_INIT(&xdma->channels);

	xdma_ofw_md_data(xdma, cells, ncells);
	free(cells, M_OFWPROP);

	return (xdma);
}
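
/*
 * Example: the device tree glue consumed by xdma_ofw_get().  A
 * hypothetical DTS fragment; the cell count and specifier values depend
 * on the particular controller binding.
 *
 *	dma: dma-controller@1f000000 {
 *		#dma-cells = <1>;
 *	};
 *
 *	serial@1c000000 {
 *		dmas = <&dma 0>, <&dma 1>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * A consumer then resolves its controller with, e.g.:
 *
 *	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
 */
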
#endif

/*
 * Free an xDMA controller object.
 */
int
xdma_put(xdma_controller_t *xdma)
{

	XDMA_LOCK();

	/* Ensure no channels are allocated. */
	if (!TAILQ_EMPTY(&xdma->channels)) {
		device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
		XDMA_UNLOCK();
		return (-1);
	}

	free(xdma->data, M_DEVBUF);
	free(xdma, M_XDMA);

	XDMA_UNLOCK();

	return (0);
}

static void
xdma_init(void)
{

	mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
}

SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);