/*	$NetBSD: bus_dma.c,v 1.95 2016/06/18 16:51:44 skrll Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE

#include "opt_arm_bus_space.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.95 2016/06/18 16:51:44 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#include <arm/cpufunc.h>

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
#include <dev/mm.h>
#endif

#ifdef BUSDMA_COUNTERS
static struct evcnt bus_dma_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
static struct evcnt bus_dma_bounced_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
static struct evcnt bus_dma_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
static struct evcnt bus_dma_bounced_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
static struct evcnt bus_dma_coherent_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "coherent loads");
static struct evcnt bus_dma_read_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
static struct evcnt bus_dma_write_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
static struct evcnt bus_dma_bounced_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
static struct evcnt bus_dma_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
static struct evcnt bus_dma_bounced_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
static struct evcnt bus_dma_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");
static struct evcnt bus_dma_sync_prereadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prereadwrite");
static struct evcnt bus_dma_sync_preread_begin =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread begin");
static struct evcnt bus_dma_sync_preread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread");
static struct evcnt bus_dma_sync_preread_tail =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync preread tail");
static struct evcnt bus_dma_sync_prewrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync prewrite");
static struct evcnt bus_dma_sync_postread =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postread");
static struct evcnt bus_dma_sync_postreadwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postreadwrite");
static struct evcnt bus_dma_sync_postwrite =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "sync postwrite");

EVCNT_ATTACH_STATIC(bus_dma_creates);
EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
EVCNT_ATTACH_STATIC(bus_dma_loads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
EVCNT_ATTACH_STATIC(bus_dma_coherent_loads);
EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
EVCNT_ATTACH_STATIC(bus_dma_unloads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
EVCNT_ATTACH_STATIC(bus_dma_destroys);
EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);
EVCNT_ATTACH_STATIC(bus_dma_sync_prereadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_begin);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread);
EVCNT_ATTACH_STATIC(bus_dma_sync_preread_tail);
EVCNT_ATTACH_STATIC(bus_dma_sync_prewrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postread);
EVCNT_ATTACH_STATIC(bus_dma_sync_postreadwrite);
EVCNT_ATTACH_STATIC(bus_dma_sync_postwrite);

#define	STAT_INCR(x)	(bus_dma_ ## x.ev_count++)
#else
#define	STAT_INCR(x)	/*(bus_dma_ ## x.ev_count++)*/
#endif
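
/*
 * STAT_INCR(name) bumps the "busdma" event counter bus_dma_<name>
 * declared above when the kernel is built with "options BUSDMA_COUNTERS";
 * otherwise it expands to nothing.  For example, STAT_INCR(loads)
 * increments bus_dma_loads.ev_count, visible via vmstat -e.
 */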

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct vmspace *, int);
static struct arm32_dma_range *
	_bus_dma_paddr_inrange(struct arm32_dma_range *, int, paddr_t);

/*
 * Check to see if the specified physical address is in an allowed DMA range.
 */
static inline struct arm32_dma_range *
_bus_dma_paddr_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    curaddr < (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
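
/*
 * Illustrative only (this table is hypothetical, not from any port):
 * a platform whose devices see RAM through a bus alias might describe
 * it as
 *
 *	static struct arm32_dma_range ranges[] = {
 *		{ .dr_sysbase = 0x80000000, .dr_busbase = 0x20000000,
 *		  .dr_len = 0x10000000 },
 *	};
 *
 * so _bus_dma_paddr_inrange(ranges, 1, 0x80001000) returns &ranges[0],
 * and _bus_dma_busaddr_to_paddr() below performs the inverse translation.
 */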

/*
 * Translate a bus address within one of the DMA ranges back to the
 * corresponding physical (system) address.
 */
static inline paddr_t
_bus_dma_busaddr_to_paddr(bus_dma_tag_t t, bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	u_int i;

	if (t->_nranges == 0)
		return curaddr;

	for (i = 0, dr = t->_ranges; i < t->_nranges; i++, dr++) {
		if (dr->dr_busbase <= curaddr
		    && curaddr < dr->dr_busbase + dr->dr_len)
			return curaddr - dr->dr_busbase + dr->dr_sysbase;
	}
	panic("%s: curaddr %#lx not in range", __func__, curaddr);
}

/*
 * Common function to load the specified physical address into the
 * DMA map, coalescing segments and boundary checking as necessary.
 */
static int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    bus_addr_t paddr, bus_size_t size, bool coherent)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	int nseg = map->dm_nsegs;
	bus_addr_t lastaddr;
	bus_addr_t bmask = ~(map->_dm_boundary - 1);
	bus_addr_t curaddr;
	bus_size_t sgsize;
	uint32_t _ds_flags = coherent ? _BUS_DMAMAP_COHERENT : 0;

	if (nseg > 0)
		lastaddr = segs[nseg-1].ds_addr + segs[nseg-1].ds_len;
	else
		lastaddr = 0xdead;

 again:
	sgsize = size;

	/* Make sure we're in an allowed DMA range. */
	if (t->_ranges != NULL) {
		/* XXX cache last result? */
		const struct arm32_dma_range * const dr =
		    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, paddr);
		if (dr == NULL)
			return (EINVAL);

		/*
		 * If this region is coherent, mark the segment as coherent.
		 */
		_ds_flags |= dr->dr_flags & _BUS_DMAMAP_COHERENT;

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (paddr - dr->dr_sysbase) + dr->dr_busbase;
#if 0
		printf("%p: %#lx: range %#lx/%#lx/%#lx/%#x: %#x <-- %#lx\n",
		    t, paddr, dr->dr_sysbase, dr->dr_busbase,
		    dr->dr_len, dr->dr_flags, _ds_flags, curaddr);
#endif
	} else
		curaddr = paddr;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary > 0) {
		bus_addr_t baddr;	/* next boundary address */

		baddr = (curaddr + map->_dm_boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}
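
	/*
	 * Worked example: with _dm_boundary = 0x10000 and curaddr =
	 * 0x2fff0, bmask = ~0xffff, so baddr = 0x30000 and sgsize is
	 * clipped to 0x10 bytes to keep the segment from crossing the
	 * boundary; the remainder is handled by the "goto again" loop
	 * below.
	 */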

	/*
	 * Insert chunk into a segment, coalescing with the
	 * previous segment if possible.
	 */
	if (nseg > 0 && curaddr == lastaddr &&
	    segs[nseg-1].ds_len + sgsize <= map->dm_maxsegsz &&
	    ((segs[nseg-1]._ds_flags ^ _ds_flags) & _BUS_DMAMAP_COHERENT) == 0 &&
	    (map->_dm_boundary == 0 ||
	     (segs[nseg-1].ds_addr & bmask) == (curaddr & bmask))) {
		/* coalesce */
		segs[nseg-1].ds_len += sgsize;
	} else if (nseg >= map->_dm_segcnt) {
		return (EFBIG);
	} else {
		/* new segment */
		segs[nseg].ds_addr = curaddr;
		segs[nseg].ds_len = sgsize;
		segs[nseg]._ds_flags = _ds_flags;
		nseg++;
	}

	lastaddr = curaddr + sgsize;

	paddr += sgsize;
	size -= sgsize;
	if (size > 0)
		goto again;

	map->_dm_flags &= (_ds_flags & _BUS_DMAMAP_COHERENT) |
	    ~_BUS_DMAMAP_COHERENT;
	map->dm_nsegs = nseg;
	return (0);
}

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
	    bus_size_t size, int flags);
static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
	    int direction);

static int
_bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, int buftype, int flags)
{
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	struct vmspace * const vm = vmspace_kernel();
	int error;

	KASSERT(cookie != NULL);
	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
	    buflen, vm, flags);
	if (error)
		return (error);

	STAT_INCR(bounced_loads);
	map->dm_mapsize = buflen;
	map->_dm_vmspace = vm;
	map->_dm_buftype = buftype;

	/* ...so _bus_dmamap_sync() knows we're bouncing */
	map->_dm_flags |= _BUS_DMAMAP_IS_BOUNCING;
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return 0;
}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	const int zallocflags = (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	if ((mapstore = kmem_intr_zalloc(mapsize, zallocflags)) == NULL)
		return (ENOMEM);

	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = vmspace_kernel();
	map->_dm_cookie = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie *cookie;
	int cookieflags;
	void *cookiestore;
	int error;

	cookieflags = 0;

	if (t->_may_bounce != NULL) {
		error = (*t->_may_bounce)(t, map, flags, &cookieflags);
		if (error != 0)
			goto out;
	}

	if (t->_ranges != NULL)
		cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;

	if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
		STAT_INCR(creates);
		return 0;
	}

	const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
	    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = kmem_intr_zalloc(cookiesize, zallocflags)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct arm32_bus_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;
	STAT_INCR(bounced_creates);

	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
 out:
	if (error) {
		_bus_dmamap_destroy(t, map);
		return (error);
	}
#else
	STAT_INCR(creates);
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
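
/*
 * Typical driver usage (a sketch; sc->sc_dmat and the sizes are
 * illustrative, not taken from this file):
 *
 *	bus_dmamap_t map;
 *	int error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS,
 *	    0, BUS_DMA_WAITOK, &map);
 *
 * The bus_dmamap_create(9) call reaches this function through the
 * tag's map-creation hook.
 */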

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie != NULL) {
		const size_t cookiesize = sizeof(struct arm32_bus_dma_cookie) +
		    (sizeof(bus_dma_segment_t) * map->_dm_segcnt);

		if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
			STAT_INCR(bounced_unloads);
		map->dm_nsegs = 0;
		if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
			_bus_dma_free_bouncebuf(t, map);
		STAT_INCR(bounced_destroys);
		kmem_intr_free(cookie, cookiesize);
	} else
#endif
	STAT_INCR(destroys);

	if (map->dm_nsegs > 0)
		STAT_INCR(unloads);

	const size_t mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	kmem_intr_free(map, mapsize);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct vmspace *vm;
	int error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
		struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->_dm_vmspace = vm;
		map->_dm_origbuf = buf;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_LINEAR;
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
		return 0;
	}
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return (error);
}
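
/*
 * Note for readers: a successful load must still be bracketed by
 * bus_dmamap_sync() calls -- the PRE ops before the device starts and
 * the matching POST ops after it finishes -- since loading by itself
 * performs no cache maintenance (see _bus_dmamap_sync() below).
 */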

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	int error;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	if (map->dm_nsegs > 0) {
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
		struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				map->_dm_flags &= ~_BUS_DMAMAP_IS_BOUNCING;
			}
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	KASSERT(m0->m_flags & M_PKTHDR);

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/* _bus_dmamap_load_paddr() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		int offset;
		int remainbytes;
		const struct vm_page * const *pgs;
		paddr_t paddr;
		int size;

		if (m->m_len == 0)
			continue;
		/*
		 * Don't allow reads in read-only mbufs.
		 */
		if (M_ROMAP(m) && (flags & BUS_DMA_READ)) {
			error = EFAULT;
			break;
		}
		switch (m->m_flags & (M_EXT|M_CLUSTER|M_EXT_PAGES)) {
		case M_EXT|M_CLUSTER:
			/* XXX KDASSERT */
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			paddr = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		case M_EXT|M_EXT_PAGES:
			KASSERT(m->m_ext.ext_buf <= m->m_data);
			KASSERT(m->m_data <=
			    m->m_ext.ext_buf + m->m_ext.ext_size);

			offset = (vaddr_t)m->m_data -
			    trunc_page((vaddr_t)m->m_ext.ext_buf);
			remainbytes = m->m_len;

			/* skip uninteresting pages */
			pgs = (const struct vm_page * const *)
			    m->m_ext.ext_pgs + (offset >> PAGE_SHIFT);

			offset &= PAGE_MASK;	/* offset in the first page */

			/* load each page */
			while (remainbytes > 0) {
				const struct vm_page *pg;

				size = MIN(remainbytes, PAGE_SIZE - offset);

				pg = *pgs++;
				KASSERT(pg);
				paddr = VM_PAGE_TO_PHYS(pg) + offset;

				error = _bus_dmamap_load_paddr(t, map,
				    paddr, size, false);
				if (error)
					break;
				offset = 0;
				remainbytes -= size;
			}
			break;

		case 0:
			paddr = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			size = m->m_len;
			error = _bus_dmamap_load_paddr(t, map, paddr, size,
			    false);
			break;

		default:
			error = _bus_dmamap_load_buffer(t, map, m->m_data,
			    m->m_len, vmspace_kernel(), flags);
		}
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->_dm_origbuf = m0;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_MBUF;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
		return 0;
	}
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
		    _BUS_DMA_BUFTYPE_MBUF, flags);
	}
#endif
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	int i, error;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERTMSG(map->dm_maxsegsz <= map->_dm_maxmaxsegsz,
	    "dm_maxsegsz %lu _dm_maxmaxsegsz %lu",
	    map->dm_maxsegsz, map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags);

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->_dm_origbuf = uio;
		map->_dm_buftype = _BUS_DMA_BUFTYPE_UIO;
		map->_dm_vmspace = uio->uio_vmspace;
		if (map->_dm_flags & _BUS_DMAMAP_COHERENT) {
			STAT_INCR(coherent_loads);
		} else {
			STAT_INCR(loads);
		}
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size0, int flags)
{

	bus_size_t size;
	int i, error = 0;

	/*
	 * Make sure that on error conditions we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (size0 > map->_dm_size)
		return EINVAL;

	for (i = 0, size = size0; i < nsegs && size > 0; i++) {
		bus_dma_segment_t *ds = &segs[i];
		bus_size_t sgsize;

		sgsize = MIN(ds->ds_len, size);
		if (sgsize == 0)
			continue;
		error = _bus_dmamap_load_paddr(t, map, ds->ds_addr,
		    sgsize, false);
		if (error != 0)
			break;
		size -= sgsize;
	}

	if (error != 0) {
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		return error;
	}

	/* XXX TBD bounce */

	map->dm_mapsize = size0;
	return 0;
}
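
/*
 * The "XXX TBD bounce" above means raw loads never fall back to the
 * bounce buffer.  In practice the segments handed to this function
 * come from _bus_dmamem_alloc(), which allocates from the allowed DMA
 * ranges, so _bus_dmamap_load_paddr() should not reject them.
 */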

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = _BUS_DMA_BUFTYPE_INVALID;
	map->_dm_vmspace = NULL;
}

static void
_bus_dmamap_sync_segment(vaddr_t va, paddr_t pa, vsize_t len, int ops,
    bool readonly_p)
{
	KASSERTMSG((va & PAGE_MASK) == (pa & PAGE_MASK),
	    "va %#lx pa %#lx", va, pa);
#if 0
	printf("sync_segment: va=%#lx pa=%#lx len=%#lx ops=%#x ro=%d\n",
	    va, pa, len, ops, readonly_p);
#endif

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
#ifdef ARM_MMU_EXTENDED
		(void)readonly_p;
#else
		if (!readonly_p) {
#endif
			STAT_INCR(sync_prereadwrite);
			cpu_dcache_wbinv_range(va, len);
			cpu_sdcache_wbinv_range(va, pa, len);
			break;
#ifndef ARM_MMU_EXTENDED
		}
		/* FALLTHROUGH */
#endif

	case BUS_DMASYNC_PREREAD: {
		const size_t line_size = arm_dcache_align;
		const size_t line_mask = arm_dcache_align_mask;
		vsize_t misalignment = va & line_mask;
		if (misalignment) {
			va -= misalignment;
			pa -= misalignment;
			len += misalignment;
			STAT_INCR(sync_preread_begin);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
			if (len <= line_size)
				break;
			va += line_size;
			pa += line_size;
			len -= line_size;
		}
		misalignment = len & line_mask;
		len -= misalignment;
		if (len > 0) {
			STAT_INCR(sync_preread);
			cpu_dcache_inv_range(va, len);
			cpu_sdcache_inv_range(va, pa, len);
		}
		if (misalignment) {
			va += len;
			pa += len;
			STAT_INCR(sync_preread_tail);
			cpu_dcache_wbinv_range(va, line_size);
			cpu_sdcache_wbinv_range(va, pa, line_size);
		}
		break;
	}

	case BUS_DMASYNC_PREWRITE:
		STAT_INCR(sync_prewrite);
		cpu_dcache_wb_range(va, len);
		cpu_sdcache_wb_range(va, pa, len);
		break;

#ifdef CPU_CORTEX
	/*
	 * Cortex CPUs can do speculative loads so we need to clean the cache
	 * after a DMA read to deal with any speculatively loaded cache lines.
	 * Since these can't be dirty, we can just invalidate them and don't
	 * have to worry about having to write back their contents.
	 */
	case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
		STAT_INCR(sync_postreadwrite);
		arm_dmb();
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
	case BUS_DMASYNC_POSTREAD:
		STAT_INCR(sync_postread);
		arm_dmb();
		cpu_dcache_inv_range(va, len);
		cpu_sdcache_inv_range(va, pa, len);
		break;
#endif
	}
}
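
/*
 * Summary of the cache maintenance performed above:
 *
 *	PREREAD|PREWRITE	write-back + invalidate
 *	PREREAD			invalidate, with write-back + invalidate
 *				of any partial cache line at either end
 *	PREWRITE		write-back only
 *	POSTREAD[|POSTWRITE]	barrier, then invalidate again (Cortex
 *				only) to discard speculatively loaded lines
 */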

static inline void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	vaddr_t va = (vaddr_t) map->_dm_origbuf;
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING) {
		struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
		va = (vaddr_t) cookie->id_bouncebuf;
	}
#endif

	while (len > 0) {
		while (offset >= ds->ds_len) {
			offset -= ds->ds_len;
			va += ds->ds_len;
			ds++;
		}

		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + offset);
		size_t seglen = min(len, ds->ds_len - offset);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va + offset, pa, seglen, ops,
			    false);

		offset += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct mbuf *m = map->_dm_origbuf;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}
		/* Find the current mbuf. */
		while (voff >= m->m_len) {
			voff -= m->m_len;
			m = m->m_next;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = min(len, min(m->m_len - voff,
		    ds->ds_len - ds_off));
		vaddr_t va = mtod(m, vaddr_t) + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		/*
		 * We can save a lot of work here if we know the mapping
		 * is read-only at the MMU and we aren't using the armv6+
		 * MMU:
		 *
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  If
		 * this ever becomes non-true (e.g. Physically Indexed
		 * cache), this will have to be revisited.
		 */

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0) {
			/*
			 * If we are doing preread (DMAing into the mbuf),
			 * this mbuf better not be readonly,
			 */
			KASSERT(!(ops & BUS_DMASYNC_PREREAD) || !M_ROMAP(m));
			_bus_dmamap_sync_segment(va, pa, seglen, ops,
			    M_ROMAP(m));
		}
		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

static inline void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_dma_segment_t *ds = map->dm_segs;
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov = uio->uio_iov;
	bus_size_t voff = offset;
	bus_size_t ds_off = offset;

	while (len > 0) {
		/* Find the current dma segment */
		while (ds_off >= ds->ds_len) {
			ds_off -= ds->ds_len;
			ds++;
		}

		/* Find the current iovec. */
		while (voff >= iov->iov_len) {
			voff -= iov->iov_len;
			iov++;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		vsize_t seglen = min(len, min(iov->iov_len - voff,
		    ds->ds_len - ds_off));
		vaddr_t va = (vaddr_t) iov->iov_base + voff;
		paddr_t pa = _bus_dma_busaddr_to_paddr(t, ds->ds_addr + ds_off);

		if ((ds->_ds_flags & _BUS_DMAMAP_COHERENT) == 0)
			_bus_dmamap_sync_segment(va, pa, seglen, ops, false);

		voff += seglen;
		ds_off += seglen;
		len -= seglen;
	}
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	KASSERTMSG(offset < map->dm_mapsize,
	    "offset %lu mapsize %lu",
	    offset, map->dm_mapsize);
	KASSERTMSG(len > 0 && offset + len <= map->dm_mapsize,
	    "len %lu offset %lu mapsize %lu",
	    len, offset, map->dm_mapsize);

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Re-invalidate the D-cache in case speculative
	 *	memory accesses caused cachelines to become valid with now
	 *	invalid data.
	 *
	 *	POSTWRITE -- Nothing.
	 */
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	const bool bouncing = (map->_dm_flags & _BUS_DMAMAP_IS_BOUNCING);
#else
	const bool bouncing = false;
#endif

	const int pre_ops = ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#ifdef CPU_CORTEX
	const int post_ops = ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
#else
	const int post_ops = 0;
#endif
	if (!bouncing && pre_ops == 0 && post_ops == BUS_DMASYNC_POSTWRITE) {
		STAT_INCR(sync_postwrite);
		return;
	}
	KASSERTMSG(bouncing || pre_ops != 0 || (post_ops & BUS_DMASYNC_POSTREAD),
	    "pre_ops %#x post_ops %#x", pre_ops, post_ops);
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing && (ops & BUS_DMASYNC_PREWRITE)) {
		struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
		STAT_INCR(write_bounces);
		char * const dataptr = (char *)cookie->id_bouncebuf + offset;
		/*
		 * Copy the caller's buffer to the bounce buffer.
		 */
		switch (map->_dm_buftype) {
		case _BUS_DMA_BUFTYPE_LINEAR:
			memcpy(dataptr, cookie->id_origlinearbuf + offset, len);
			break;
		case _BUS_DMA_BUFTYPE_MBUF:
			m_copydata(cookie->id_origmbuf, offset, len, dataptr);
			break;
		case _BUS_DMA_BUFTYPE_UIO:
			_bus_dma_uiomove(dataptr, cookie->id_origuio, len,
			    UIO_WRITE);
			break;
#ifdef DIAGNOSTIC
		case _BUS_DMA_BUFTYPE_RAW:
			panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_RAW");
			break;

		case _BUS_DMA_BUFTYPE_INVALID:
			panic("_bus_dmamap_sync(pre): _BUS_DMA_BUFTYPE_INVALID");
			break;

		default:
			panic("_bus_dmamap_sync(pre): map %p: unknown buffer type %d\n",
			    map, map->_dm_buftype);
			break;
#endif /* DIAGNOSTIC */
		}
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

	/* Skip cache frobbing if mapping was COHERENT. */
	if (!bouncing && (map->_dm_flags & _BUS_DMAMAP_COHERENT)) {
		/* Drain the write buffer. */
		if (pre_ops & BUS_DMASYNC_PREWRITE)
			cpu_drain_writebuf();
		return;
	}

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing && ((map->_dm_flags & _BUS_DMAMAP_COHERENT)
	    || pre_ops == 0)) {
		goto bounce_it;
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

#ifndef ARM_MMU_EXTENDED
	/*
	 * If the mapping belongs to a non-kernel vmspace, and the
	 * vmspace has not been active since the last time a full
	 * cache flush was performed, we don't need to do anything.
	 */
	if (__predict_false(!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
	    vm_map_pmap(&map->_dm_vmspace->vm_map)->pm_cstate.cs_cache_d == 0))
		return;
#endif

	int buftype = map->_dm_buftype;
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	if (bouncing) {
		buftype = _BUS_DMA_BUFTYPE_LINEAR;
	}
#endif

	switch (buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync: map %p: unknown buffer type %d\n",
		    map, map->_dm_buftype);
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
  bounce_it:
	if (!bouncing || (ops & BUS_DMASYNC_POSTREAD) == 0)
		return;

	struct arm32_bus_dma_cookie * const cookie = map->_dm_cookie;
	char * const dataptr = (char *)cookie->id_bouncebuf + offset;
	STAT_INCR(read_bounces);
	/*
	 * Copy the bounce buffer to the caller's buffer.
	 */
	switch (map->_dm_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		memcpy(cookie->id_origlinearbuf + offset, dataptr, len);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		m_copyback(cookie->id_origmbuf, offset, len, dataptr);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dma_uiomove(dataptr, cookie->id_origuio, len, UIO_READ);
		break;
#ifdef DIAGNOSTIC
	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync(post): _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync(post): map %p: unknown buffer type %d\n",
		    map, map->_dm_buftype);
		break;
#endif
	}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
}
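
/*
 * Example of the expected call pattern for a device read (DMA into
 * memory):
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... start the transfer, wait for the completion interrupt ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * Both calls land here: the PREREAD invalidates the buffer from the
 * cache, and the POSTREAD re-invalidates it on Cortex and, when
 * bouncing, copies the bounce buffer back into the caller's buffer.
 */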

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0
			    || (dr->dr_flags & _BUS_DMAMAP_NOALLOC))
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return (error);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	paddr_t pa;
	int curseg;
	const uvm_flag_t kmflags = UVM_KMF_VAONLY
	    | ((flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0);
	vsize_t align = 0;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

#ifdef PMAP_MAP_POOLPAGE
	/*
	 * If all of memory is mapped, and we are mapping a single physically
	 * contiguous area then this area is already mapped.  Let's see if we
	 * can avoid having a separate mapping for it.
	 */
	if (nsegs == 1) {
		/*
		 * If this is a non-COHERENT mapping, then the existing kernel
		 * mapping is already compatible with it.
		 */
		bool direct_mapable = (flags & BUS_DMA_COHERENT) == 0;
		pa = segs[0].ds_addr;

		/*
		 * This is a COHERENT mapping which, unless this address is in
		 * a COHERENT dma range, will not be compatible.
		 */
		if (t->_ranges != NULL) {
			const struct arm32_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				direct_mapable = true;
			}
		}

#ifdef PMAP_NEED_ALLOC_POOLPAGE
		/*
		 * The page can only be direct mapped if it was allocated
		 * out of the arm poolpage vm freelist.
		 */
		int lcv = vm_physseg_find(atop(pa), NULL);
		KASSERT(lcv != -1);
		if (direct_mapable) {
			direct_mapable =
			    (arm_poolpage_vmfreelist == VM_PHYSMEM_PTR(lcv)->free_list);
		}
#endif

		if (direct_mapable) {
			*kvap = (void *)PMAP_MAP_POOLPAGE(pa);
#ifdef DEBUG_DMA
			printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
			return 0;
		}
	}
#endif

	size = round_page(size);
	if (__predict_true(size > L2_L_SIZE)) {
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (size >= L1_SS_SIZE)
			align = L1_SS_SIZE;
		else
#endif
		if (size >= L1_S_SIZE)
			align = L1_S_SIZE;
		else
			align = L2_L_SIZE;
	}

	va = uvm_km_alloc(kernel_map, size, align, kmflags);
	if (__predict_false(va == 0 && align > 0)) {
		align = 0;
		va = uvm_km_alloc(kernel_map, size, 0, kmflags);
	}

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (pa = segs[curseg].ds_addr;
		    pa < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    pa += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			bool uncached = (flags & BUS_DMA_COHERENT);
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", pa, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");

			const struct arm32_dma_range * const dr =
			    _bus_dma_paddr_inrange(t->_ranges, t->_nranges, pa);
			/*
			 * If this dma region is coherent then there is
			 * no need for an uncached mapping.
			 */
			if (dr != NULL
			    && (dr->dr_flags & _BUS_DMAMAP_COHERENT)) {
				uncached = false;
			}

			pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | (uncached ? PMAP_NOCACHE : 0));
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
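
/*
 * Note for callers: BUS_DMA_COHERENT here requests an uncached
 * (PMAP_NOCACHE) kernel mapping unless the memory lies in a DMA range
 * already flagged _BUS_DMAMAP_COHERENT, in which case the normal
 * cached mapping is compatible and is used instead.
 */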

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%zx\n", t, kva, size);
#endif	/* DEBUG_DMA */
	KASSERTMSG(((uintptr_t)kva & PAGE_MASK) == 0,
	    "kva %p (%#"PRIxPTR")", kva, ((uintptr_t)kva & PAGE_MASK));

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	/*
	 * Check to see if this used direct mapped memory.  Get its physical
	 * address and try to map it.  If the resultant va matches the kva,
	 * then it was, and so we can just return since we have nothing to
	 * free up.
	 */
	paddr_t pa;
	vaddr_t va;
	(void)pmap_extract(pmap_kernel(), (vaddr_t)kva, &pa);
	if (mm_md_direct_mapped_phys(pa, &va) && va == (vaddr_t)kva)
		return;
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	paddr_t map_flags;
	int i;

	for (i = 0; i < nsegs; i++) {
		KASSERTMSG((off & PAGE_MASK) == 0,
		    "off %#qx (%#x)", off, (int)off & PAGE_MASK);
		KASSERTMSG((segs[i].ds_addr & PAGE_MASK) == 0,
		    "ds_addr %#lx (%#x)", segs[i].ds_addr,
		    (int)segs[i].ds_addr & PAGE_MASK);
		KASSERTMSG((segs[i].ds_len & PAGE_MASK) == 0,
		    "ds_len %#lx (%#x)", segs[i].ds_len,
		    (int)segs[i].ds_len & PAGE_MASK);
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		map_flags = 0;
		if (flags & BUS_DMA_PREFETCHABLE)
			map_flags |= ARM32_MMAP_WRITECOMBINE;

		return (arm_btop((u_long)segs[i].ds_addr + off) | map_flags);

	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  The segment state is
 * kept in the map itself, so successive calls append to the segments
 * already loaded (for multiple-buffer loads).
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int error;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d)\n",
	    buf, buflen, flags);
#endif	/* DEBUG_DMA */

	pmap = vm_map_pmap(&vm->vm_map);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Doesn't support checking for coherent mappings
		 * XXX in user address space.
		 */
		bool coherent;
		if (__predict_true(pmap == pmap_kernel())) {
			pd_entry_t *pde;
			pt_entry_t *ptep;
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				paddr_t s_frame = L1_S_FRAME;
				paddr_t s_offset = L1_S_OFFSET;
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
				if (__predict_false(pmap_pde_supersection(pde))) {
					s_frame = L1_SS_FRAME;
					s_offset = L1_SS_OFFSET;
				}
#endif
				curaddr = (*pde & s_frame) | (vaddr & s_offset);
				coherent = (*pde & L1_S_CACHE_MASK) == 0;
			} else {
				pt_entry_t pte = *ptep;
				KDASSERTMSG((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    "va=%#"PRIxVADDR" pde=%#x ptep=%p pte=%#x",
				    vaddr, *pde, ptep, pte);
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					coherent = (pte & L2_L_CACHE_MASK) == 0;
				} else {
					curaddr = (pte & ~PAGE_MASK) |
					    (vaddr & PAGE_MASK);
					coherent = (pte & L2_S_CACHE_MASK) == 0;
				}
			}
		} else {
			(void) pmap_extract(pmap, vaddr, &curaddr);
			coherent = false;
		}
		KASSERTMSG((vaddr & PAGE_MASK) == (curaddr & PAGE_MASK),
		    "va %#lx curaddr %#lx", vaddr, curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		error = _bus_dmamap_load_paddr(t, map, curaddr, sgsize,
		    coherent);
		if (error)
			return (error);

		vaddr += sgsize;
		buflen -= sgsize;
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	KASSERTMSG(boundary == 0 || (boundary & (boundary-1)) == 0,
	    "invalid boundary %#lx", boundary);

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * We accept boundaries < size, splitting in multiple segments
	 * if needed.  uvm_pglistalloc does not, so compute an appropriate
	 * boundary: next power of 2 >= size
	 */
	bus_size_t uboundary = boundary;
	if (uboundary <= PAGE_SIZE) {
		uboundary = 0;
	} else {
		while (uboundary < size) {
			uboundary <<= 1;
		}
	}
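
	/*
	 * e.g. a request of size 0x4000 with boundary 0x2000: uboundary
	 * doubles to 0x4000 so uvm_pglistalloc() accepts it, and the
	 * (lastaddr & boundary) == (curaddr & boundary) test below still
	 * splits the result on the caller's 0x2000 boundary.
	 */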

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, uboundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		KASSERTMSG(low <= curaddr && curaddr < high,
		    "uvm_pglistalloc returned non-sensical address %#lx "
		    "(low=%#lx, high=%#lx)\n", curaddr, low, high);
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == lastaddr + PAGE_SIZE
		    && (lastaddr & boundary) == (curaddr & boundary))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			if (curseg >= nsegs) {
				uvm_pglistfree(&mlist);
				return EFBIG;
			}
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Check if a memory region intersects with a DMA range, and return the
 * page-rounded intersection if it does.
 */
int
arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
    paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
{
	struct arm32_dma_range *dr;
	int i;

	if (ranges == NULL)
		return (0);

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (dr->dr_sysbase <= pa &&
		    pa < (dr->dr_sysbase + dr->dr_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    dr->dr_sysbase + dr->dr_len) - pa);
			return (1);
		}
		if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(dr->dr_sysbase);
			*sizep = round_page(min((pa + size) - dr->dr_sysbase,
			    dr->dr_len));
			return (1);
		}
	}

	/* No intersection found. */
	return (0);
}

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
static int
_bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	KASSERT(cookie != NULL);

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error == 0) {
		error = _bus_dmamem_map(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
		    (void **)&cookie->id_bouncebuf, flags);
		if (error) {
			_bus_dmamem_free(t, cookie->id_bouncesegs,
			    cookie->id_nbouncesegs);
			cookie->id_bouncebuflen = 0;
			cookie->id_nbouncesegs = 0;
		} else {
			cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
		}
	} else {
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	}

	return (error);
}

static void
_bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct arm32_bus_dma_cookie *cookie = map->_dm_cookie;

	KASSERT(cookie != NULL);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
}

/*
 * This function does the same as uiomove, but takes an explicit
 * direction, and does not update the uio structure.
 */
static int
_bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
{
	struct iovec *iov;
	int error;
	struct vmspace *vm;
	char *cp;
	size_t resid, cnt;
	int i;

	iov = uio->uio_iov;
	vm = uio->uio_vmspace;
	cp = buf;
	resid = n;

	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
		iov = &uio->uio_iov[i];
		if (iov->iov_len == 0)
			continue;
		cnt = MIN(resid, iov->iov_len);

		if (!VMSPACE_IS_KERNEL_P(vm) &&
		    (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		if (direction == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
		}
		if (error)
			return (error);
		cp += cnt;
		resid -= cnt;
	}
	return (0);
}
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */

int
_bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
    bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
{

#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	struct arm32_dma_range *dr;
	bool subset = false;
	size_t nranges = 0;
	size_t i;
	for (i = 0, dr = tag->_ranges; i < tag->_nranges; i++, dr++) {
		if (dr->dr_sysbase <= min_addr
		    && max_addr <= dr->dr_sysbase + dr->dr_len - 1) {
			subset = true;
		}
		if (min_addr <= dr->dr_sysbase + dr->dr_len
		    && max_addr >= dr->dr_sysbase) {
			nranges++;
		}
	}
	if (subset) {
		*newtag = tag;
		/* if the tag must be freed, add a reference */
		if (tag->_tag_needs_free)
			(tag->_tag_needs_free)++;
		return 0;
	}
	if (nranges == 0) {
		nranges = 1;
	}

	const size_t tagsize = sizeof(*tag) + nranges * sizeof(*dr);
	if ((*newtag = kmem_intr_zalloc(tagsize,
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	dr = (void *)(*newtag + 1);
	**newtag = *tag;
	(*newtag)->_tag_needs_free = 1;
	(*newtag)->_ranges = dr;
	(*newtag)->_nranges = nranges;

	if (tag->_ranges == NULL) {
		dr->dr_sysbase = min_addr;
		dr->dr_busbase = min_addr;
		dr->dr_len = max_addr + 1 - min_addr;
	} else {
		for (i = 0; i < tag->_nranges; i++) {
			/* Skip source ranges outside the subregion. */
			if (min_addr > tag->_ranges[i].dr_sysbase
			    + tag->_ranges[i].dr_len
			    || max_addr < tag->_ranges[i].dr_sysbase)
				continue;
			dr[0] = tag->_ranges[i];
			if (dr->dr_sysbase < min_addr) {
				psize_t diff = min_addr - dr->dr_sysbase;
				dr->dr_busbase += diff;
				dr->dr_len -= diff;
				dr->dr_sysbase += diff;
			}
			if (max_addr != 0xffffffff
			    && max_addr + 1 < dr->dr_sysbase + dr->dr_len) {
				dr->dr_len = max_addr + 1 - dr->dr_sysbase;
			}
			dr++;
		}
	}

	return 0;
#else
	return EOPNOTSUPP;
#endif /* _ARM32_NEED_BUS_DMA_BOUNCE */
}
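
/*
 * Example: a controller that can only address the low 64MB might
 * restrict its parent tag with (names illustrative)
 *
 *	bus_dma_tag_t subtag;
 *	int error = bus_dmatag_subregion(t, 0, 0x03ffffff, &subtag,
 *	    BUS_DMA_WAITOK);
 *
 * and must pair a successful call with bus_dmatag_destroy(subtag),
 * which either drops a reference or frees the copied tag below.
 */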

void
_bus_dmatag_destroy(bus_dma_tag_t tag)
{
#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
	switch (tag->_tag_needs_free) {
	case 0:
		break;				/* not allocated with kmem */
	case 1: {
		const size_t tagsize = sizeof(*tag)
		    + tag->_nranges * sizeof(*tag->_ranges);
		kmem_intr_free(tag, tagsize);	/* last reference to tag */
		break;
	}
	default:
		(tag->_tag_needs_free)--;	/* one less reference */
	}
#endif
}