xref: /freebsd/sys/dev/xdma/xdma.c (revision 42249ef2)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
5  *
6  * This software was developed by SRI International and the University of
7  * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
8  * ("CTSRD"), as part of the DARPA CRASH research programme.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_platform.h"
36 #include <sys/param.h>
37 #include <sys/conf.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/queue.h>
41 #include <sys/kobj.h>
42 #include <sys/malloc.h>
43 #include <sys/limits.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/sysctl.h>
47 #include <sys/systm.h>
48 
49 #include <machine/bus.h>
50 
51 #ifdef FDT
52 #include <dev/fdt/fdt_common.h>
53 #include <dev/ofw/ofw_bus.h>
54 #include <dev/ofw/ofw_bus_subr.h>
55 #endif
56 
57 #include <dev/xdma/xdma.h>
58 
59 #include <xdma_if.h>
60 
61 /*
62  * Multiple xDMA controllers may work with single DMA device,
63  * so we have global lock for physical channel management.
64  */
65 static struct mtx xdma_mtx;
66 
67 #define	XDMA_LOCK()			mtx_lock(&xdma_mtx)
68 #define	XDMA_UNLOCK()			mtx_unlock(&xdma_mtx)
69 #define	XDMA_ASSERT_LOCKED()		mtx_assert(&xdma_mtx, MA_OWNED)
70 
71 #define	FDT_REG_CELLS	4
72 
73 #ifdef FDT
74 static int
75 xdma_get_iommu_fdt(xdma_controller_t *xdma, xdma_channel_t *xchan)
76 {
77 	struct xdma_iommu *xio;
78 	phandle_t node;
79 	pcell_t prop;
80 	size_t len;
81 
82 	node = ofw_bus_get_node(xdma->dma_dev);
83 	if (OF_getproplen(node, "xdma,iommu") <= 0)
84 		return (0);
85 
86 	len = OF_getencprop(node, "xdma,iommu", &prop, sizeof(prop));
87 	if (len != sizeof(prop)) {
88 		device_printf(xdma->dev,
89 		    "%s: Can't get iommu device node\n", __func__);
90 		return (0);
91 	}
92 
93 	xio = &xchan->xio;
94 	xio->dev = OF_device_from_xref(prop);
95 	if (xio->dev == NULL) {
96 		device_printf(xdma->dev,
97 		    "%s: Can't get iommu device\n", __func__);
98 		return (0);
99 	}
100 
101 	/* Found */
102 	return (1);
103 }
104 #endif
105 
106 /*
107  * Allocate virtual xDMA channel.
108  */
109 xdma_channel_t *
110 xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
111 {
112 	xdma_channel_t *xchan;
113 	int ret;
114 
115 	xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
116 	xchan->xdma = xdma;
117 
118 #ifdef FDT
119 	/* Check if this DMA controller supports IOMMU. */
120 	if (xdma_get_iommu_fdt(xdma, xchan))
121 		caps |= XCHAN_CAP_IOMMU | XCHAN_CAP_NOSEG;
122 #endif
123 
124 	xchan->caps = caps;
125 
126 	XDMA_LOCK();
127 
128 	/* Request a real channel from hardware driver. */
129 	ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
130 	if (ret != 0) {
131 		device_printf(xdma->dev,
132 		    "%s: Can't request hardware channel.\n", __func__);
133 		XDMA_UNLOCK();
134 		free(xchan, M_XDMA);
135 
136 		return (NULL);
137 	}
138 
139 	TAILQ_INIT(&xchan->ie_handlers);
140 
141 	mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
142 	mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
143 	mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
144 	mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
145 	mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);
146 
147 	TAILQ_INIT(&xchan->bank);
148 	TAILQ_INIT(&xchan->queue_in);
149 	TAILQ_INIT(&xchan->queue_out);
150 	TAILQ_INIT(&xchan->processing);
151 
152 	if (xchan->caps & XCHAN_CAP_IOMMU)
153 		xdma_iommu_init(&xchan->xio);
154 
155 	TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
156 
157 	XDMA_UNLOCK();
158 
159 	return (xchan);
160 }
161 
162 int
163 xdma_channel_free(xdma_channel_t *xchan)
164 {
165 	xdma_controller_t *xdma;
166 	int err;
167 
168 	xdma = xchan->xdma;
169 	KASSERT(xdma != NULL, ("xdma is NULL"));
170 
171 	XDMA_LOCK();
172 
173 	/* Free the real DMA channel. */
174 	err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
175 	if (err != 0) {
176 		device_printf(xdma->dev,
177 		    "%s: Can't free real hw channel.\n", __func__);
178 		XDMA_UNLOCK();
179 		return (-1);
180 	}
181 
182 	if (xchan->flags & XCHAN_TYPE_SG)
183 		xdma_channel_free_sg(xchan);
184 
185 	if (xchan->caps & XCHAN_CAP_IOMMU)
186 		xdma_iommu_release(&xchan->xio);
187 
188 	xdma_teardown_all_intr(xchan);
189 
190 	mtx_destroy(&xchan->mtx_lock);
191 	mtx_destroy(&xchan->mtx_qin_lock);
192 	mtx_destroy(&xchan->mtx_qout_lock);
193 	mtx_destroy(&xchan->mtx_bank_lock);
194 	mtx_destroy(&xchan->mtx_proc_lock);
195 
196 	TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
197 
198 	free(xchan, M_XDMA);
199 
200 	XDMA_UNLOCK();
201 
202 	return (0);
203 }
204 
205 int
206 xdma_setup_intr(xdma_channel_t *xchan,
207     int (*cb)(void *, xdma_transfer_status_t *),
208     void *arg, void **ihandler)
209 {
210 	struct xdma_intr_handler *ih;
211 	xdma_controller_t *xdma;
212 
213 	xdma = xchan->xdma;
214 	KASSERT(xdma != NULL, ("xdma is NULL"));
215 
216 	/* Sanity check. */
217 	if (cb == NULL) {
218 		device_printf(xdma->dev,
219 		    "%s: Can't setup interrupt handler.\n",
220 		    __func__);
221 
222 		return (-1);
223 	}
224 
225 	ih = malloc(sizeof(struct xdma_intr_handler),
226 	    M_XDMA, M_WAITOK | M_ZERO);
227 	ih->cb = cb;
228 	ih->cb_user = arg;
229 
230 	XCHAN_LOCK(xchan);
231 	TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
232 	XCHAN_UNLOCK(xchan);
233 
234 	if (ihandler != NULL)
235 		*ihandler = ih;
236 
237 	return (0);
238 }
239 
/*
 * Remove and free a single interrupt handler previously registered
 * with xdma_setup_intr().  Returns 0 on success, -1 if ih is NULL.
 */
int
xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
{
	xdma_controller_t *xdma;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	/* Sanity check. */
	if (ih == NULL) {
		device_printf(xdma->dev,
		    "%s: Can't teardown interrupt.\n", __func__);
		return (-1);
	}

	/*
	 * NOTE(review): the handler list is mutated here without taking
	 * XCHAN_LOCK(), while xdma_setup_intr() inserts under that lock —
	 * confirm callers serialize teardown against setup and callbacks.
	 */
	TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
	free(ih, M_XDMA);

	return (0);
}
260 
261 int
262 xdma_teardown_all_intr(xdma_channel_t *xchan)
263 {
264 	struct xdma_intr_handler *ih_tmp;
265 	struct xdma_intr_handler *ih;
266 	xdma_controller_t *xdma;
267 
268 	xdma = xchan->xdma;
269 	KASSERT(xdma != NULL, ("xdma is NULL"));
270 
271 	TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
272 		TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
273 		free(ih, M_XDMA);
274 	}
275 
276 	return (0);
277 }
278 
279 int
280 xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
281 {
282 	xdma_controller_t *xdma;
283 	int ret;
284 
285 	xdma = xchan->xdma;
286 
287 	KASSERT(xdma != NULL, ("xdma is NULL"));
288 
289 	XCHAN_LOCK(xchan);
290 	ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
291 	if (ret != 0) {
292 		device_printf(xdma->dev,
293 		    "%s: Can't request a transfer.\n", __func__);
294 		XCHAN_UNLOCK(xchan);
295 
296 		return (-1);
297 	}
298 	XCHAN_UNLOCK(xchan);
299 
300 	return (0);
301 }
302 
303 int
304 xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
305 {
306 	xdma_controller_t *xdma;
307 	int ret;
308 
309 	xdma = xchan->xdma;
310 	KASSERT(xdma != NULL, ("xdma is NULL"));
311 
312 	ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
313 	if (ret != 0) {
314 		device_printf(xdma->dev,
315 		    "%s: Can't process command.\n", __func__);
316 		return (-1);
317 	}
318 
319 	return (0);
320 }
321 
322 void
323 xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
324 {
325 	struct xdma_intr_handler *ih_tmp;
326 	struct xdma_intr_handler *ih;
327 	xdma_controller_t *xdma;
328 
329 	xdma = xchan->xdma;
330 	KASSERT(xdma != NULL, ("xdma is NULL"));
331 
332 	TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp)
333 		if (ih->cb != NULL)
334 			ih->cb(ih->cb_user, status);
335 
336 	if (xchan->flags & XCHAN_TYPE_SG)
337 		xdma_queue_submit(xchan);
338 }
339 
340 #ifdef FDT
341 /*
342  * Notify the DMA driver we have machine-dependent data in FDT.
343  */
344 static int
345 xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
346 {
347 	uint32_t ret;
348 
349 	ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
350 	    cells, ncells, (void **)&xdma->data);
351 
352 	return (ret);
353 }
354 
/*
 * Parse the "reg" property of an FDT memory node and add each
 * (address, size) region it describes to the given vmem arena.
 * Returns 0 on success or an errno-style value on failure.
 */
int
xdma_handle_mem_node(vmem_t *vmem, phandle_t memory)
{
	pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
	pcell_t *regp;
	int addr_cells, size_cells;
	int i, reg_len, ret, tuple_size, tuples;
	u_long mem_start, mem_size;

	/* Cell counts come from the node's parent, per FDT convention. */
	if ((ret = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
	    &size_cells)) != 0)
		return (ret);

	/* Addresses wider than two cells (64 bits) are not supported. */
	if (addr_cells > 2)
		return (ERANGE);

	tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
	reg_len = OF_getproplen(memory, "reg");
	/* Reject missing/empty properties and ones larger than our buffer. */
	if (reg_len <= 0 || reg_len > sizeof(reg))
		return (ERANGE);

	if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
		return (ENXIO);

	/* Walk each (address, size) tuple and register it with the arena. */
	tuples = reg_len / tuple_size;
	regp = (pcell_t *)&reg;
	for (i = 0; i < tuples; i++) {
		ret = fdt_data_to_res(regp, addr_cells, size_cells,
		    &mem_start, &mem_size);
		if (ret != 0)
			return (ret);

		vmem_add(vmem, mem_start, mem_size, 0);
		regp += addr_cells + size_cells;
	}

	return (0);
}
393 
394 vmem_t *
395 xdma_get_memory(device_t dev)
396 {
397 	phandle_t mem_node, node;
398 	pcell_t mem_handle;
399 	vmem_t *vmem;
400 
401 	node = ofw_bus_get_node(dev);
402 	if (node <= 0) {
403 		device_printf(dev,
404 		    "%s called on not ofw based device.\n", __func__);
405 		return (NULL);
406 	}
407 
408 	if (!OF_hasprop(node, "memory-region"))
409 		return (NULL);
410 
411 	if (OF_getencprop(node, "memory-region", (void *)&mem_handle,
412 	    sizeof(mem_handle)) <= 0)
413 		return (NULL);
414 
415 	vmem = vmem_create("xDMA vmem", 0, 0, PAGE_SIZE,
416 	    PAGE_SIZE, M_BESTFIT | M_WAITOK);
417 	if (vmem == NULL)
418 		return (NULL);
419 
420 	mem_node = OF_node_from_xref(mem_handle);
421 	if (xdma_handle_mem_node(vmem, mem_node) != 0) {
422 		vmem_destroy(vmem);
423 		return (NULL);
424 	}
425 
426 	return (vmem);
427 }
428 
/*
 * Release a vmem arena previously obtained from xdma_get_memory().
 */
void
xdma_put_memory(vmem_t *vmem)
{

	vmem_destroy(vmem);
}
435 
/*
 * Associate a vmem arena with a channel for its buffer allocations.
 */
void
xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem)
{

	xchan->vmem = vmem;
}
442 
443 /*
444  * Allocate xdma controller.
445  */
446 xdma_controller_t *
447 xdma_ofw_get(device_t dev, const char *prop)
448 {
449 	phandle_t node, parent;
450 	xdma_controller_t *xdma;
451 	device_t dma_dev;
452 	pcell_t *cells;
453 	int ncells;
454 	int error;
455 	int ndmas;
456 	int idx;
457 
458 	node = ofw_bus_get_node(dev);
459 	if (node <= 0)
460 		device_printf(dev,
461 		    "%s called on not ofw based device.\n", __func__);
462 
463 	error = ofw_bus_parse_xref_list_get_length(node,
464 	    "dmas", "#dma-cells", &ndmas);
465 	if (error) {
466 		device_printf(dev,
467 		    "%s can't get dmas list.\n", __func__);
468 		return (NULL);
469 	}
470 
471 	if (ndmas == 0) {
472 		device_printf(dev,
473 		    "%s dmas list is empty.\n", __func__);
474 		return (NULL);
475 	}
476 
477 	error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
478 	if (error != 0) {
479 		device_printf(dev,
480 		    "%s can't find string index.\n", __func__);
481 		return (NULL);
482 	}
483 
484 	error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
485 	    idx, &parent, &ncells, &cells);
486 	if (error != 0) {
487 		device_printf(dev,
488 		    "%s can't get dma device xref.\n", __func__);
489 		return (NULL);
490 	}
491 
492 	dma_dev = OF_device_from_xref(parent);
493 	if (dma_dev == NULL) {
494 		device_printf(dev,
495 		    "%s can't get dma device.\n", __func__);
496 		return (NULL);
497 	}
498 
499 	xdma = malloc(sizeof(struct xdma_controller),
500 	    M_XDMA, M_WAITOK | M_ZERO);
501 	xdma->dev = dev;
502 	xdma->dma_dev = dma_dev;
503 
504 	TAILQ_INIT(&xdma->channels);
505 
506 	xdma_ofw_md_data(xdma, cells, ncells);
507 	free(cells, M_OFWPROP);
508 
509 	return (xdma);
510 }
511 #endif
512 
513 /*
514  * Free xDMA controller object.
515  */
516 int
517 xdma_put(xdma_controller_t *xdma)
518 {
519 
520 	XDMA_LOCK();
521 
522 	/* Ensure no channels allocated. */
523 	if (!TAILQ_EMPTY(&xdma->channels)) {
524 		device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
525 		return (-1);
526 	}
527 
528 	free(xdma->data, M_DEVBUF);
529 	free(xdma, M_XDMA);
530 
531 	XDMA_UNLOCK();
532 
533 	return (0);
534 }
535 
/*
 * One-time framework initialization: set up the global mutex that
 * serializes physical channel management (run via SYSINIT below).
 */
static void
xdma_init(void)
{

	mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
}
542 
543 SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
544