/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.94 2008/08/15 20:51:31 kmacy Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/thread2.h>
#include <sys/bus_dma.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/spinlock2.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
/* XXX needed to access pmap to convert per-proc virtual to physical */
#include <sys/proc.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>

#define MAX_BPAGES	1024

struct bounce_zone;
struct bus_dmamap;

struct bus_dma_tag {
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		map_count;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

/*
 * bus_dma_tag private flags
 */
#define BUS_DMA_BOUNCE_ALIGN	BUS_DMA_BUS2
#define BUS_DMA_BOUNCE_LOWADDR	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_BOUNCE_LOWADDR | BUS_DMA_BOUNCE_ALIGN)

#define BUS_DMAMEM_KMALLOC(dmat) \
	((dmat)->maxsize <= PAGE_SIZE && \
	 (dmat)->alignment <= PAGE_SIZE && \
	 (dmat)->lowaddr >= ptoa(Maxmem))
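
/*
 * Editorial note (not in the original source): the macro above says a plain
 * kmalloc() suffices when the allocation fits within one page, needs at most
 * page alignment, and the tag's exclusion window starts above all physical
 * memory (lowaddr >= ptoa(Maxmem)), i.e. there is no address restriction.
 * For example, a tag with maxsize 2048 and alignment 8 qualifies, while an
 * ISA-style tag with lowaddr at 16MB does not and must use contigmalloc().
 */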

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
	struct spinlock	spin;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		reserve_failed;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
};

#define BZ_LOCK(bz)	spin_lock(&(bz)->spin)
#define BZ_UNLOCK(bz)	spin_unlock(&(bz)->spin)

static struct lwkt_token bounce_zone_tok =
	LWKT_TOKEN_INITIALIZER(bounce_zone_token);
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list =
	STAILQ_HEAD_INITIALIZER(bounce_zone_list);

int busdma_swi_pending;
static int total_bounce_pages;
static int max_bounce_pages = MAX_BPAGES;
static int bounce_alignment = 1; /* XXX temporary */

TUNABLE_INT("hw.busdma.max_bpages", &max_bounce_pages);
TUNABLE_INT("hw.busdma.bounce_alignment", &bounce_alignment);

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist =
	STAILQ_HEAD_INITIALIZER(bounce_map_callbacklist);

static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_zone(bus_dma_tag_t);
static int alloc_bounce_pages(bus_dma_tag_t, u_int, int);
static int reserve_bounce_pages(bus_dma_tag_t, bus_dmamap_t, int);
static void return_bounce_pages(bus_dma_tag_t, bus_dmamap_t);
static bus_addr_t add_bounce_page(bus_dma_tag_t, bus_dmamap_t,
    vm_offset_t, bus_size_t);
static void free_bounce_page(bus_dma_tag_t, struct bounce_page *);

static bus_dmamap_t get_map_waiting(bus_dma_tag_t);
static void add_map_callback(bus_dmamap_t);

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bounce_pages,
    0, "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, max_bpages, CTLFLAG_RD, &max_bounce_pages,
    0, "Max bounce pages per bounce zone");
SYSCTL_INT(_hw_busdma, OID_AUTO, bounce_alignment, CTLFLAG_RD,
    &bounce_alignment, 0, "Obey alignment constraint");

/*
 * Returns true if the address falls within the tag's exclusion window, or
 * fails to meet its alignment requirements.
 */
static __inline int
addr_needs_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	if ((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
	    (bounce_alignment && (paddr & (dmat->alignment - 1)) != 0))
		return (1);

	return (0);
}
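
/*
 * Worked example (editorial note, not in the original): with lowaddr ==
 * 0xffffff (an ISA-style 16MB limit) and highaddr == BUS_SPACE_MAXADDR, a
 * physical address of 0x1000000 lies inside the exclusion window and must
 * bounce; with alignment == 4096 and bounce_alignment enabled, an address
 * of 0x1002 fails the mask test (0x1002 & 0xfff != 0) and also bounces.
 */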

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/*
	 * Sanity checks
	 */

	if (alignment == 0)
		alignment = 1;
	if (alignment & (alignment - 1))
		panic("alignment must be power of 2");

	if (boundary != 0) {
		if (boundary & (boundary - 1))
			panic("boundary must be power of 2");
		if (boundary < maxsegsz) {
			kprintf("boundary < maxsegsz:\n");
			print_backtrace(-1);
			maxsegsz = boundary;
		}
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = kmalloc(sizeof(*newtag), M_DEVBUF, M_INTWAIT);

	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->map_count = 0;
	newtag->segments = NULL;
	newtag->bounce_zone = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		}

#ifdef notyet
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
#endif
	}

	if (newtag->lowaddr < ptoa(Maxmem))
		newtag->flags |= BUS_DMA_BOUNCE_LOWADDR;
	if (bounce_alignment && newtag->alignment > 1 &&
	    !(newtag->flags & BUS_DMA_ALIGNED))
		newtag->flags |= BUS_DMA_BOUNCE_ALIGN;

	if ((newtag->flags & BUS_DMA_COULD_BOUNCE) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		error = alloc_bounce_zone(newtag);
		if (error)
			goto back;
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(maxsize)) -
					bz->total_bpages;
				pages = MAX(pages, 1);
			}

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages, flags) < pages)
				error = ENOMEM;

			/* Performed initial allocation */
			newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
		}
	}
back:
	if (error)
		kfree(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	return error;
}
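
/*
 * Usage sketch (editorial example; the softc field is hypothetical): a
 * driver for a device limited to 32-bit DMA addresses might create its
 * tag as follows.
 *
 *	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    MAXPHYS, 1, MAXPHYS, BUS_DMA_ALLOCNOW, &sc->parent_dtag);
 *
 * On machines with memory above 4GB this sets BUS_DMA_BOUNCE_LOWADDR, and
 * BUS_DMA_ALLOCNOW pre-populates the bounce pool so that later loads with
 * BUS_DMA_NOWAIT are less likely to fail with ENOMEM.
 */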

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		if (dmat->segments != NULL)
			kfree(dmat->segments, M_DEVBUF);
		kfree(dmat, M_DEVBUF);
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments && dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		struct bounce_zone *bz;
		int maxpages;

		/* Must bounce */

		if (dmat->bounce_zone == NULL) {
			error = alloc_bounce_zone(dmat);
			if (error)
				return error;
		}
		bz = dmat->bounce_zone;

		*mapp = kmalloc(sizeof(**mapp), M_DEVBUF, M_INTWAIT | M_ZERO);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->flags & BUS_DMA_BOUNCE_ALIGN) {
			maxpages = max_bounce_pages;
		} else {
			maxpages = MIN(max_bounce_pages,
				       Maxmem - atop(dmat->lowaddr));
		}
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			if (flags & BUS_DMA_ONEBPAGE) {
				pages = 1;
			} else {
				pages = atop(round_page(dmat->maxsize));
				pages = MIN(maxpages - bz->total_bpages, pages);
				pages = MAX(pages, 1);
			}
			if (alloc_bounce_pages(dmat, pages, flags) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (!error)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (!error)
		dmat->map_count++;
	return error;
}
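
/*
 * Usage sketch (editorial example; names are hypothetical): a driver
 * typically creates one map per descriptor at attach time.
 *
 *	for (i = 0; i < FOO_NRXDESC; i++) {
 *		error = bus_dmamap_create(sc->rx_dtag, BUS_DMA_WAITOK,
 *		    &sc->rx_desc[i].map);
 *		if (error)
 *			break;
 *	}
 */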

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		kfree(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

static __inline bus_size_t
check_kmalloc(bus_dma_tag_t dmat, const void *vaddr0, int verify)
{
	bus_size_t maxsize = 0;
	uintptr_t vaddr = (uintptr_t)vaddr0;

	if ((vaddr ^ (vaddr + dmat->maxsize - 1)) & ~PAGE_MASK) {
		kprintf("boundary check failed\n");
		if (verify)
			print_backtrace(-1); /* XXX panic */
		maxsize = dmat->maxsize;
	}
	if (vaddr & (dmat->alignment - 1)) {
		kprintf("alignment check failed\n");
		if (verify)
			print_backtrace(-1); /* XXX panic */
		if (dmat->maxsize < dmat->alignment)
			maxsize = dmat->alignment;
		else
			maxsize = dmat->maxsize;
	}
	return maxsize;
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 *
 * mapp is degenerate.  By definition this allocation should not require
 * bounce buffers, so do not allocate a dma map.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		KKASSERT(dmat->nsegments < 16384);
		dmat->segments = kmalloc(sizeof(bus_dma_segment_t) *
					 dmat->nsegments, M_DEVBUF, M_INTWAIT);
	}

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (BUS_DMAMEM_KMALLOC(dmat)) {
		bus_size_t maxsize;

		*vaddr = kmalloc(dmat->maxsize, M_DEVBUF, mflags);

		/*
		 * XXX
		 * Check whether the allocation
		 * - crossed a page boundary
		 * - was not aligned
		 * Retry with power-of-2 alignment in the above cases.
		 */
		maxsize = check_kmalloc(dmat, *vaddr, 0);
		if (maxsize) {
			size_t size;

			kfree(*vaddr, M_DEVBUF);
			/* XXX check for overflow? */
			for (size = 1; size <= maxsize; size <<= 1)
				;
			*vaddr = kmalloc(size, M_DEVBUF, mflags);
			check_kmalloc(dmat, *vaddr, 1);
		}
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
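
/*
 * Usage sketch (editorial example; field names are hypothetical): a zeroed
 * descriptor ring allocated against a tag created earlier.
 *
 *	error = bus_dmamem_alloc(sc->ring_dtag, (void **)&sc->ring,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->ring_map);
 *	...
 *	bus_dmamem_free(sc->ring_dtag, sc->ring, sc->ring_map);
 *
 * Since bus_dmamem_alloc() always sets *mapp to NULL here, the map passed
 * back to bus_dmamem_free() must also be NULL.
 */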

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same kfree/contigfree choice that the
 * allocation made.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed");
	if (BUS_DMAMEM_KMALLOC(dmat))
		kfree(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

static __inline vm_paddr_t
_bus_dma_extract(pmap_t pmap, vm_offset_t vaddr)
{
	if (pmap)
		return pmap_extract(pmap, vaddr, NULL);
	else
		return pmap_kextract(vaddr);
}

/*
 * Utility function to load a linear buffer.  lastpaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the segment following the starting one on entrance, and the ending
 * segment on exit.  first indicates if this is the first invocation
 * of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			bus_dma_segment_t *segments,
			int nsegments,
			pmap_t pmap,
			int flags,
			vm_paddr_t *lastpaddrp,
			int *segp,
			int first)
{
	vm_offset_t vaddr;
	vm_paddr_t paddr, nextpaddr;
	bus_dma_segment_t *sg;
	bus_addr_t bmask;
	int seg, error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

#ifdef INVARIANTS
	if (dmat->flags & BUS_DMA_ALIGNED)
		KKASSERT(((uintptr_t)buf & (dmat->alignment - 1)) == 0);
#endif

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = _bus_dma_extract(pmap, vaddr);
			if (addr_needs_bounce(dmat, paddr))
				map->pagesneeded++;
			vaddr += (PAGE_SIZE - (vaddr & PAGE_MASK));
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		struct bounce_zone *bz;

		bz = dmat->bounce_zone;
		BZ_LOCK(bz);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				BZ_UNLOCK(bz);
				error = ENOMEM;
				goto free_bounce;
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;

				STAILQ_INSERT_TAIL(
				    &dmat->bounce_zone->bounce_map_waitinglist,
				    map, links);
				BZ_UNLOCK(bz);

				return (EINPROGRESS);
			}
		}
		BZ_UNLOCK(bz);
	}

	KKASSERT(*segp >= 1 && *segp <= nsegments);
	seg = *segp;
	sg = &segments[seg - 1];

	vaddr = (vm_offset_t)buf;
	nextpaddr = *lastpaddrp;
	bmask = ~(dmat->boundary - 1);	/* note: will be 0 if boundary is 0 */

	/* force at least one segment */
	do {
		bus_size_t size;

		/*
		 * Per-page main loop
		 */
		paddr = _bus_dma_extract(pmap, vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;
		if (map->pagesneeded != 0 && addr_needs_bounce(dmat, paddr)) {
			/*
			 * note: this paddr has the same in-page offset
			 * as vaddr and thus the paddr above, so the
			 * size does not have to be recalculated
			 */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		/*
		 * Fill in the bus_dma_segment
		 */
		if (first) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
			first = 0;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			sg++;
			seg++;
			if (seg > nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		nextpaddr = paddr + size;

		/*
		 * Handle maxsegsz and boundary issues with a nested loop
		 */
		for (;;) {
			bus_size_t tmpsize;

			/*
			 * Limit to the boundary and maximum segment size
			 */
			if (((nextpaddr - 1) ^ sg->ds_addr) & bmask) {
				tmpsize = dmat->boundary -
					  (sg->ds_addr & ~bmask);
				if (tmpsize > dmat->maxsegsz)
					tmpsize = dmat->maxsegsz;
				KKASSERT(tmpsize < sg->ds_len);
			} else if (sg->ds_len > dmat->maxsegsz) {
				tmpsize = dmat->maxsegsz;
			} else {
				break;
			}

			/*
			 * Futz, split the data into a new segment.
			 */
			if (seg >= nsegments)
				goto fail;
			sg[1].ds_len = sg[0].ds_len - tmpsize;
			sg[1].ds_addr = sg[0].ds_addr + tmpsize;
			sg[0].ds_len = tmpsize;
			sg++;
			seg++;
		}

		/*
		 * Adjust for loop
		 */
		buflen -= size;
		vaddr += size;
	} while (buflen > 0);
fail:
	if (buflen != 0)
		error = EFBIG;

	*segp = seg;
	*lastpaddrp = nextpaddr;

free_bounce:
	if (error && (dmat->flags & BUS_DMA_COULD_BOUNCE) &&
	    map != &nobounce_dmamap) {
		_bus_dmamap_unload(dmat, map);
		return_bounce_pages(dmat, map);
	}
	return error;
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_paddr_t lastaddr = 0;
	int error, nsegs = 1;

	if (map != NULL) {
		/*
		 * XXX
		 * Follow old semantics.  Once all of the callers are fixed,
		 * we should get rid of these internal flag "adjustments".
		 */
		flags &= ~BUS_DMA_NOWAIT;
		flags |= BUS_DMA_WAITOK;

		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen,
			dmat->segments, dmat->nsegments,
			NULL, flags, &lastaddr, &nsegs, 1);
	if (error == EINPROGRESS)
		return error;

	callback(callback_arg, dmat->segments, nsegs, error);
	return 0;
}
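
/*
 * Usage sketch (editorial example; the callback and softc fields are
 * hypothetical): the device-visible address is delivered through the
 * callback, which may run later if the load is deferred (EINPROGRESS).
 *
 *	static void
 *	foo_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->dtag, sc->dmap, sc->buf, sc->buflen,
 *	    foo_dma_map_addr, &sc->busaddr, 0);
 */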

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	error = bus_dmamap_load_mbuf_segment(dmat, map, m0,
			dmat->segments, dmat->nsegments, &nsegs, flags);
	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, dmat->segments, 0, 0, error);
	} else {
		callback(callback_arg, dmat->segments, nsegs,
			 m0->m_pkthdr.len, error);
	}
	return error;
}

int
bus_dmamap_load_mbuf_segment(bus_dma_tag_t dmat, bus_dmamap_t map,
			     struct mbuf *m0,
			     bus_dma_segment_t *segs, int maxsegs,
			     int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	KASSERT(maxsegs >= 1, ("invalid maxsegs %d", maxsegs));
	KASSERT(maxsegs <= dmat->nsegments,
		("%d too many segments, dmat only supports %d segments",
		 maxsegs, dmat->nsegments));
	KASSERT(flags & BUS_DMA_NOWAIT,
		("only BUS_DMA_NOWAIT is supported"));

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_paddr_t lastaddr = 0;
		struct mbuf *m;

		*nsegs = 1;
		error = 0;
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map,
					m->m_data, m->m_len,
					segs, maxsegs,
					NULL, flags, &lastaddr,
					nsegs, first);
			if (error == ENOMEM && !first) {
				/*
				 * Out of bounce pages due to too many
				 * fragments in the mbuf chain; return
				 * EFBIG instead.
				 */
				error = EFBIG;
			}
			first = 0;
		}
#ifdef INVARIANTS
		if (!error)
			KKASSERT(*nsegs <= maxsegs && *nsegs >= 1);
#endif
	} else {
		*nsegs = 0;
		error = EINVAL;
	}
	KKASSERT(error != EINPROGRESS);
	return error;
}
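
/*
 * Usage sketch (editorial example; FOO_MAXSCATTER and the defrag step are
 * hypothetical): a typical transmit path.
 *
 *	error = bus_dmamap_load_mbuf_segment(sc->tx_dtag, txd->map, m,
 *	    segs, FOO_MAXSCATTER, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		... defragment the mbuf chain and retry once ...
 *	}
 */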

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_paddr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	/*
	 * XXX
	 * Follow old semantics.  Once all of the callers are fixed,
	 * we should get rid of these internal flag "adjustments".
	 */
	flags &= ~BUS_DMA_WAITOK;
	flags |= BUS_DMA_NOWAIT;

	resid = (bus_size_t)uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		struct thread *td;

		td = uio->uio_td;
		KASSERT(td != NULL && td->td_proc != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	} else {
		pmap = NULL;
	}

	error = 0;
	nsegs = 1;
	first = 1;
	lastaddr = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat, map, addr, minlen,
				dmat->segments, dmat->nsegments,
				pmap, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		callback(callback_arg, dmat->segments, 0, 0, error);
	} else {
		callback(callback_arg, dmat->segments, nsegs,
			 (bus_size_t)uio->uio_resid, error);
	}
	return error;
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
			break;

		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
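
/*
 * Usage sketch (editorial note): drivers bracket DMA with sync calls so
 * that bounced data is copied at the right time, e.g.
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	... tell the device to read the buffer ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
 *
 * PREWRITE copies client data into the bounce pages before the device
 * reads them; POSTREAD copies device-written data back out afterwards.
 */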

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz, *new_bz;

	KASSERT(dmat->bounce_zone == NULL,
		("bounce zone was already assigned"));

	new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO);

	lwkt_gettoken(&bounce_zone_tok);

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if (dmat->alignment <= bz->alignment &&
		    dmat->lowaddr >= bz->lowaddr) {
			lwkt_reltoken(&bounce_zone_tok);

			dmat->bounce_zone = bz;
			kfree(new_bz, M_DEVBUF);
			return 0;
		}
	}
	bz = new_bz;

	spin_init(&bz->spin, "allocbouncezone");
	STAILQ_INIT(&bz->bounce_page_list);
	STAILQ_INIT(&bz->bounce_map_waitinglist);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = round_page(dmat->alignment);
	ksnprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);

	lwkt_reltoken(&bounce_zone_tok);

	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_ctx);
	bz->sysctl_tree = SYSCTL_ADD_NODE(&bz->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree == NULL) {
		sysctl_ctx_free(&bz->sysctl_ctx);
		return 0;	/* XXX error code? */
	}

	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "reserve_failed", CTLFLAG_RD, &bz->reserve_failed, 0,
	    "Total bounce page reservations that failed");
	SYSCTL_ADD_STRING(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(&bz->sysctl_ctx,
	    SYSCTL_CHILDREN(bz->sysctl_tree), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return 0;
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages, int flags)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int count = 0, mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = kmalloc(sizeof(*bpage), M_DEVBUF, M_INTWAIT | M_ZERO);

		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 mflags, 0ul,
							 bz->lowaddr,
							 bz->alignment, 0);
		if (bpage->vaddr == 0) {
			kfree(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);

		BZ_LOCK(bz);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bounce_pages++;
		bz->total_bpages++;
		bz->free_bpages++;
		BZ_UNLOCK(bz);

		count++;
		numpages--;
	}
	return count;
}

/* Assume caller holds bounce zone spinlock */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int pages;

	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (!commit && map->pagesneeded > (map->pagesreserved + pages)) {
		bz->reserve_failed++;
		return (map->pagesneeded - (map->pagesreserved + pages));
	}

	bz->free_bpages -= pages;

	bz->reserved_bpages += pages;
	KKASSERT(bz->reserved_bpages <= bz->total_bpages);

	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return pages;
}

static void
return_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	int reserved = map->pagesreserved;
	bus_dmamap_t wait_map;

	map->pagesreserved = 0;
	map->pagesneeded = 0;

	if (reserved == 0)
		return;

	BZ_LOCK(bz);

	bz->free_bpages += reserved;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->reserved_bpages >= reserved);
	bz->reserved_bpages -= reserved;

	wait_map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);
	if (wait_map != NULL)
		add_map_callback(wait_map);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	struct bounce_page *bpage;

	KASSERT(map->pagesneeded > 0, ("map doesn't need any pages"));
	map->pagesneeded--;

	KASSERT(map->pagesreserved > 0, ("map doesn't reserve any pages"));
	map->pagesreserved--;

	BZ_LOCK(bz);

	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	KASSERT(bpage != NULL, ("free page list is empty"));
	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);

	KKASSERT(bz->reserved_bpages > 0);
	bz->reserved_bpages--;

	bz->active_bpages++;
	KKASSERT(bz->active_bpages <= bz->total_bpages);

	BZ_UNLOCK(bz);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&map->bpages, bpage, links);
	return bpage->busaddr;
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	BZ_LOCK(bz);

	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);

	bz->free_bpages++;
	KKASSERT(bz->free_bpages <= bz->total_bpages);

	KKASSERT(bz->active_bpages > 0);
	bz->active_bpages--;

	map = get_map_waiting(dmat);

	BZ_UNLOCK(bz);

	if (map != NULL)
		add_map_callback(map);
}

/* Assume caller holds bounce zone spinlock */
static bus_dmamap_t
get_map_waiting(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz = dmat->bounce_zone;
	bus_dmamap_t map;

	map = STAILQ_FIRST(&bz->bounce_map_waitinglist);
	if (map != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bz->bounce_map_waitinglist, links);
			bz->total_deferred++;
		} else {
			map = NULL;
		}
	}
	return map;
}

static void
add_map_callback(bus_dmamap_t map)
{
#ifdef notyet
	/* XXX callbacklist is not MPSAFE */
	crit_enter();
	get_mplock();
	STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links);
	busdma_swi_pending = 1;
	setsoftvm();
	rel_mplock();
	crit_exit();
#else
	panic("%s uncoded", __func__);
#endif
}

#ifdef notyet
void
busdma_swi(void)
{
	bus_dmamap_t map;

	crit_enter();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		crit_exit();
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		crit_enter();
	}
	crit_exit();
}
#endif