xref: /freebsd/sys/dev/proto/proto_busdma.c (revision d0b2dbfa)
/*-
 * Copyright (c) 2015, 2019 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <machine/bus_dma.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sx.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <dev/proto/proto.h>
#include <dev/proto/proto_dev.h>
#include <dev/proto/proto_busdma.h>

MALLOC_DEFINE(M_PROTO_BUSDMA, "proto_busdma", "DMA management data");

#define	BNDRY_MIN(a, b)		\
	(((a) == 0) ? (b) : (((b) == 0) ? (a) : MIN((a), (b))))

struct proto_callback_bundle {
	struct proto_busdma *busdma;
	struct proto_md *md;
	struct proto_ioc_busdma *ioc;
};

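/*
 * Create a new DMA tag, either as a root tag or derived from the given
 * parent tag.  When deriving, the constraints are combined with the
 * parent's and the effective values are written back into the request.
 */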
static int
proto_busdma_tag_create(struct proto_busdma *busdma, struct proto_tag *parent,
    struct proto_ioc_busdma *ioc)
{
	struct proto_tag *tag;

	/* Make sure that when a boundary is specified, it's a power of 2 */
	if (ioc->u.tag.bndry != 0 &&
	    (ioc->u.tag.bndry & (ioc->u.tag.bndry - 1)) != 0)
		return (EINVAL);

	/*
	 * If nsegs is 1, ignore maxsegsz: with a single segment, maxsegsz
	 * is effectively the same as maxsz.  To keep things simple, clamp
	 * maxsegsz to maxsz in all cases.
	 */
	if (ioc->u.tag.maxsegsz > ioc->u.tag.maxsz || ioc->u.tag.nsegs == 1)
		ioc->u.tag.maxsegsz = ioc->u.tag.maxsz;

	tag = malloc(sizeof(*tag), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
	if (parent != NULL) {
		tag->parent = parent;
		LIST_INSERT_HEAD(&parent->children, tag, peers);
		tag->align = MAX(ioc->u.tag.align, parent->align);
		tag->bndry = BNDRY_MIN(ioc->u.tag.bndry, parent->bndry);
		tag->maxaddr = MIN(ioc->u.tag.maxaddr, parent->maxaddr);
		tag->maxsz = MIN(ioc->u.tag.maxsz, parent->maxsz);
		tag->maxsegsz = MIN(ioc->u.tag.maxsegsz, parent->maxsegsz);
		tag->nsegs = MIN(ioc->u.tag.nsegs, parent->nsegs);
		tag->datarate = MIN(ioc->u.tag.datarate, parent->datarate);
		/* Write constraints back */
		ioc->u.tag.align = tag->align;
		ioc->u.tag.bndry = tag->bndry;
		ioc->u.tag.maxaddr = tag->maxaddr;
		ioc->u.tag.maxsz = tag->maxsz;
		ioc->u.tag.maxsegsz = tag->maxsegsz;
		ioc->u.tag.nsegs = tag->nsegs;
		ioc->u.tag.datarate = tag->datarate;
	} else {
		tag->align = ioc->u.tag.align;
		tag->bndry = ioc->u.tag.bndry;
		tag->maxaddr = ioc->u.tag.maxaddr;
		tag->maxsz = ioc->u.tag.maxsz;
		tag->maxsegsz = ioc->u.tag.maxsegsz;
		tag->nsegs = ioc->u.tag.nsegs;
		tag->datarate = ioc->u.tag.datarate;
	}
	LIST_INSERT_HEAD(&busdma->tags, tag, tags);
	ioc->result = (uintptr_t)(void *)tag;
	return (0);
}

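/*
 * Destroy a tag.  Fails with EBUSY while memory descriptors or child tags
 * still reference it.
 */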
static int
proto_busdma_tag_destroy(struct proto_busdma *busdma, struct proto_tag *tag)
{

	if (!LIST_EMPTY(&tag->mds))
		return (EBUSY);
	if (!LIST_EMPTY(&tag->children))
		return (EBUSY);

	if (tag->parent != NULL) {
		LIST_REMOVE(tag, peers);
		tag->parent = NULL;
	}
	LIST_REMOVE(tag, tags);
	free(tag, M_PROTO_BUSDMA);
	return (0);
}

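/*
 * Translate a user-supplied key into a tag pointer by walking the list of
 * known tags, so stale or forged handles are rejected.
 */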
static struct proto_tag *
proto_busdma_tag_lookup(struct proto_busdma *busdma, u_long key)
{
	struct proto_tag *tag;

	LIST_FOREACH(tag, &busdma->tags, tags) {
		if ((void *)tag == (void *)key)
			return (tag);
	}
	return (NULL);
}

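/*
 * Common teardown for a memory descriptor: unload the map if it is loaded,
 * free the memory or destroy the map depending on whether the memory was
 * kernel-allocated, and destroy the per-descriptor busdma tag.
 */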
static int
proto_busdma_md_destroy_internal(struct proto_busdma *busdma,
    struct proto_md *md)
{

	LIST_REMOVE(md, mds);
	LIST_REMOVE(md, peers);
	if (md->physaddr)
		bus_dmamap_unload(md->bd_tag, md->bd_map);
	if (md->virtaddr != NULL)
		bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
	else
		bus_dmamap_destroy(md->bd_tag, md->bd_map);
	bus_dma_tag_destroy(md->bd_tag);
	free(md, M_PROTO_BUSDMA);
	return (0);
}

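/*
 * Load callback for kernel-allocated memory: record the number of bus
 * segments and the bus address of the first segment in the request.
 */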
static void
proto_busdma_mem_alloc_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct proto_callback_bundle *pcb = arg;

	pcb->ioc->u.md.bus_nsegs = nseg;
	pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
}

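/*
 * Allocate DMA-able kernel memory that satisfies the tag's constraints,
 * load it, and report the kernel virtual, physical and bus addresses back
 * to the caller.
 */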
static int
proto_busdma_mem_alloc(struct proto_busdma *busdma, struct proto_tag *tag,
    struct proto_ioc_busdma *ioc)
{
	struct proto_callback_bundle pcb;
	struct proto_md *md;
	int error;

	md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
	md->tag = tag;

	error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
	    tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
	    tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
	if (error) {
		free(md, M_PROTO_BUSDMA);
		return (error);
	}
	error = bus_dmamem_alloc(md->bd_tag, &md->virtaddr, 0, &md->bd_map);
	if (error) {
		bus_dma_tag_destroy(md->bd_tag);
		free(md, M_PROTO_BUSDMA);
		return (error);
	}
	md->physaddr = pmap_kextract((uintptr_t)(md->virtaddr));
	pcb.busdma = busdma;
	pcb.md = md;
	pcb.ioc = ioc;
	error = bus_dmamap_load(md->bd_tag, md->bd_map, md->virtaddr,
	    tag->maxsz, proto_busdma_mem_alloc_callback, &pcb, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
		bus_dma_tag_destroy(md->bd_tag);
		free(md, M_PROTO_BUSDMA);
		return (error);
	}
	LIST_INSERT_HEAD(&tag->mds, md, peers);
	LIST_INSERT_HEAD(&busdma->mds, md, mds);
	ioc->u.md.virt_addr = (uintptr_t)md->virtaddr;
	ioc->u.md.virt_size = tag->maxsz;
	ioc->u.md.phys_nsegs = 1;
	ioc->u.md.phys_addr = md->physaddr;
	ioc->result = (uintptr_t)(void *)md;
	return (0);
}

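/*
 * Free kernel-allocated DMA memory.  Only valid for descriptors created by
 * proto_busdma_mem_alloc(); otherwise return ENXIO.
 */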
static int
proto_busdma_mem_free(struct proto_busdma *busdma, struct proto_md *md)
{

	if (md->virtaddr == NULL)
		return (ENXIO);
	return (proto_busdma_md_destroy_internal(busdma, md));
}

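/*
 * Create an unloaded memory descriptor (tag and map only) that user space
 * can later load with its own buffer via PROTO_IOC_BUSDMA_MD_LOAD.
 */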
static int
proto_busdma_md_create(struct proto_busdma *busdma, struct proto_tag *tag,
    struct proto_ioc_busdma *ioc)
{
	struct proto_md *md;
	int error;

	md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
	md->tag = tag;

	error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
	    tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
	    tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
	if (error) {
		free(md, M_PROTO_BUSDMA);
		return (error);
	}
	error = bus_dmamap_create(md->bd_tag, 0, &md->bd_map);
	if (error) {
		bus_dma_tag_destroy(md->bd_tag);
		free(md, M_PROTO_BUSDMA);
		return (error);
	}

	LIST_INSERT_HEAD(&tag->mds, md, peers);
	LIST_INSERT_HEAD(&busdma->mds, md, mds);
	ioc->result = (uintptr_t)(void *)md;
	return (0);
}

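/*
 * Destroy a memory descriptor created by proto_busdma_md_create().
 * Descriptors that own kernel-allocated memory must be freed with
 * proto_busdma_mem_free() instead.
 */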
static int
proto_busdma_md_destroy(struct proto_busdma *busdma, struct proto_md *md)
{

	if (md->virtaddr != NULL)
		return (ENXIO);
	return (proto_busdma_md_destroy_internal(busdma, md));
}

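/*
 * Load callback for user buffers: record the resulting bus segment count
 * and the bus address of the first segment.
 */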
static void
proto_busdma_md_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t sz, int error)
{
	struct proto_callback_bundle *pcb = arg;

	pcb->ioc->u.md.bus_nsegs = nseg;
	pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
}

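/*
 * Load a user space buffer into a previously created memory descriptor.
 * The buffer is described as a single-entry uio and mapped with
 * bus_dmamap_load_uio(); only the first physical segment is reported.
 */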
static int
proto_busdma_md_load(struct proto_busdma *busdma, struct proto_md *md,
    struct proto_ioc_busdma *ioc, struct thread *td)
{
	struct proto_callback_bundle pcb;
	struct iovec iov;
	struct uio uio;
	pmap_t pmap;
	int error;

	iov.iov_base = (void *)(uintptr_t)ioc->u.md.virt_addr;
	iov.iov_len = ioc->u.md.virt_size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = iov.iov_len;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	pcb.busdma = busdma;
	pcb.md = md;
	pcb.ioc = ioc;
	error = bus_dmamap_load_uio(md->bd_tag, md->bd_map, &uio,
	    proto_busdma_md_load_callback, &pcb, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* XXX determine *all* physical memory segments */
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	md->physaddr = pmap_extract(pmap, ioc->u.md.virt_addr);
	ioc->u.md.phys_nsegs = 1;	/* XXX */
	ioc->u.md.phys_addr = md->physaddr;
	return (0);
}

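/*
 * Unload a memory descriptor.  ENXIO if nothing is currently loaded.
 */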
static int
proto_busdma_md_unload(struct proto_busdma *busdma, struct proto_md *md)
{

	if (!md->physaddr)
		return (ENXIO);
	bus_dmamap_unload(md->bd_tag, md->bd_map);
	md->physaddr = 0;
	return (0);
}

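/*
 * Perform a DMA sync on a loaded descriptor.  Operations other than the
 * four PRE/POST READ/WRITE flags are rejected.
 */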
static int
proto_busdma_sync(struct proto_busdma *busdma, struct proto_md *md,
    struct proto_ioc_busdma *ioc)
{
	u_int ops;

	ops = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE;
	if (ioc->u.sync.op & ~ops)
		return (EINVAL);
	if (!md->physaddr)
		return (ENXIO);
	bus_dmamap_sync(md->bd_tag, md->bd_map, ioc->u.sync.op);
	return (0);
}

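/*
 * Translate a user-supplied key into a memory descriptor pointer, again by
 * walking the list of known descriptors.
 */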
static struct proto_md *
proto_busdma_md_lookup(struct proto_busdma *busdma, u_long key)
{
	struct proto_md *md;

	LIST_FOREACH(md, &busdma->mds, mds) {
		if ((void *)md == (void *)key)
			return (md);
	}
	return (NULL);
}

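/*
 * Allocate and initialize the per-device busdma state.
 */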
struct proto_busdma *
proto_busdma_attach(struct proto_softc *sc)
{
	struct proto_busdma *busdma;

	busdma = malloc(sizeof(*busdma), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
	sx_init(&busdma->sxlck, "proto-busdma");
	return (busdma);
}

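/*
 * Tear down the per-device busdma state, releasing any tags and memory
 * descriptors that are still outstanding.
 */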
int
proto_busdma_detach(struct proto_softc *sc, struct proto_busdma *busdma)
{

	proto_busdma_cleanup(sc, busdma);
	sx_destroy(&busdma->sxlck);
	free(busdma, M_PROTO_BUSDMA);
	return (0);
}

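/*
 * Release all memory descriptors and tags still registered with this
 * busdma instance.
 */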
int
proto_busdma_cleanup(struct proto_softc *sc, struct proto_busdma *busdma)
{
	struct proto_md *md, *md1;
	struct proto_tag *tag, *tag1;

	sx_xlock(&busdma->sxlck);
	LIST_FOREACH_SAFE(md, &busdma->mds, mds, md1)
		proto_busdma_md_destroy_internal(busdma, md);
	LIST_FOREACH_SAFE(tag, &busdma->tags, tags, tag1)
		proto_busdma_tag_destroy(busdma, tag);
	sx_xunlock(&busdma->sxlck);
	return (0);
}

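/*
 * Dispatch a busdma ioctl request.  All operations run under the exclusive
 * sx lock, and handles passed in by user space are validated by lookup
 * before they are dereferenced.
 */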
int
proto_busdma_ioctl(struct proto_softc *sc, struct proto_busdma *busdma,
    struct proto_ioc_busdma *ioc, struct thread *td)
{
	struct proto_tag *tag;
	struct proto_md *md;
	int error;

	sx_xlock(&busdma->sxlck);

	error = 0;
	switch (ioc->request) {
	case PROTO_IOC_BUSDMA_TAG_CREATE:
		busdma->bd_roottag = bus_get_dma_tag(sc->sc_dev);
		error = proto_busdma_tag_create(busdma, NULL, ioc);
		break;
	case PROTO_IOC_BUSDMA_TAG_DERIVE:
		tag = proto_busdma_tag_lookup(busdma, ioc->key);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_tag_create(busdma, tag, ioc);
		break;
	case PROTO_IOC_BUSDMA_TAG_DESTROY:
		tag = proto_busdma_tag_lookup(busdma, ioc->key);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_tag_destroy(busdma, tag);
		break;
	case PROTO_IOC_BUSDMA_MEM_ALLOC:
		tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_mem_alloc(busdma, tag, ioc);
		break;
	case PROTO_IOC_BUSDMA_MEM_FREE:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_mem_free(busdma, md);
		break;
	case PROTO_IOC_BUSDMA_MD_CREATE:
		tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_create(busdma, tag, ioc);
		break;
	case PROTO_IOC_BUSDMA_MD_DESTROY:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_destroy(busdma, md);
		break;
	case PROTO_IOC_BUSDMA_MD_LOAD:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_load(busdma, md, ioc, td);
		break;
	case PROTO_IOC_BUSDMA_MD_UNLOAD:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_unload(busdma, md);
		break;
	case PROTO_IOC_BUSDMA_SYNC:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_sync(busdma, md, ioc);
		break;
	default:
		error = EINVAL;
		break;
	}

	sx_xunlock(&busdma->sxlck);

	return (error);
}

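/*
 * Check whether a physical address may be mapped by user space: it must
 * fall within one of the registered memory descriptors (compared on page
 * boundaries).
 */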
int
proto_busdma_mmap_allowed(struct proto_busdma *busdma, vm_paddr_t physaddr)
{
	struct proto_md *md;
	int result;

	sx_xlock(&busdma->sxlck);

	result = 0;
	LIST_FOREACH(md, &busdma->mds, mds) {
		if (physaddr >= trunc_page(md->physaddr) &&
		    physaddr <= trunc_page(md->physaddr + md->tag->maxsz)) {
			result = 1;
			break;
		}
	}

	sx_xunlock(&busdma->sxlck);

	return (result);
}