/*	$OpenBSD: bus_dma.c,v 1.21 2019/12/20 13:34:41 visa Exp $ */

/*
 * Copyright (c) 2003-2004 Opsycon AB  (www.opsycon.se / www.opsycon.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <mips64/cache.h>
#include <machine/cpu.h>
#include <machine/autoconf.h>

#include <machine/bus.h>

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct machine_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct machine_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
		return (ENOMEM);

	map = (struct machine_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);

	*dmamp = map;
	return (0);
}
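
/*
 * Example (a hypothetical caller sketch; "sc" and its sc_dmat tag are
 * assumptions, not names from this file): creating a map that can
 * describe up to MAXPHYS bytes in at most 16 segments, with no
 * boundary restriction:
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 16, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	if (error != 0)
 *		return (error);
 */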

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	size_t mapsize;

	mapsize = sizeof(struct machine_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	free(map, M_DEVBUF, mapsize);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = (*t->_dmamap_load_buffer)(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = buflen;
	}

	return (error);
}
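
/*
 * Example (a hypothetical caller sketch; sc, map, buf and len are
 * assumptions): loading a kernel buffer (p == NULL selects the kernel
 * pmap) and flushing it before a memory-to-device transfer:
 *
 *	error = bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, len,
 *		    BUS_DMASYNC_PREWRITE);
 */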

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, int flags)
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = (*t->_dmamap_load_buffer)(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = m0->m_pkthdr.len;
	}

	return (error);
}
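
/*
 * Example (a hypothetical transmit-path sketch; sc, map and m are
 * assumptions): mapping an outgoing packet, retrying once through
 * m_defrag(9) when the chain needs more segments than the map has:
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
 *	if (error == EFBIG && m_defrag(m, M_DONTWAIT) == 0)
 *		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
 *		    BUS_DMA_NOWAIT);
 */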

/*
 * Like _dmamap_load(), but for uios.
 */
int
_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = (*t->_dmamap_load_buffer)(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_nsegs = seg + 1;
		map->dm_mapsize = uio->uio_resid;
	}

	return (error);
}

/*
 * Like _dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	if (nsegs > map->_dm_segcnt || size > map->_dm_size)
		return (EINVAL);

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary) {
		bus_addr_t bmask = ~(map->_dm_boundary - 1);
		int i;

		if (t->_dma_mask != 0)
			bmask &= t->_dma_mask;
		for (i = 0; i < nsegs; i++) {
			if (segs[i].ds_len > map->_dm_maxsegsz)
				return (EINVAL);
			if ((segs[i].ds_addr & bmask) !=
			    ((segs[i].ds_addr + segs[i].ds_len - 1) & bmask))
				return (EINVAL);
		}
	}

	bcopy(segs, map->dm_segs, nsegs * sizeof(*segs));
	map->dm_nsegs = nsegs;
	map->dm_mapsize = size;
	return (0);
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size, int op)
{
	int nsegs;
	int curseg;
	int cacheop;
	struct cpu_info *ci = curcpu();

	nsegs = map->dm_nsegs;
	curseg = 0;

	while (size && nsegs) {
		vaddr_t vaddr;
		paddr_t paddr;
		bus_size_t ssize;

		ssize = map->dm_segs[curseg].ds_len;
		paddr = map->dm_segs[curseg]._ds_paddr;
		vaddr = map->dm_segs[curseg]._ds_vaddr;

		if (addr != 0) {
			if (addr >= ssize) {
				addr -= ssize;
				ssize = 0;
			} else {
				vaddr += addr;
				paddr += addr;
				ssize -= addr;
				addr = 0;
			}
		}
		if (ssize > size)
			ssize = size;

		if (IS_XKPHYS(vaddr) && XKPHYS_TO_CCA(vaddr) == CCA_NC) {
			size -= ssize;
			ssize = 0;
		}

		if (ssize != 0) {
			/*
			 * If only PREWRITE is requested, write back.
			 * If both PREWRITE and PREREAD are requested,
			 * write back and invalidate (if noncoherent)
			 * *all* cache levels.  Otherwise, just
			 * invalidate (if noncoherent).
			 */
			if (op & BUS_DMASYNC_PREWRITE) {
				if (op & BUS_DMASYNC_PREREAD)
					cacheop = CACHE_SYNC_X;
				else
					cacheop = CACHE_SYNC_W;
			} else {
				if (op & BUS_DMASYNC_PREREAD)
					cacheop = CACHE_SYNC_R;
				else if (op & BUS_DMASYNC_POSTREAD)
					cacheop = CACHE_SYNC_R;
				else
					cacheop = -1;
			}

			if (cacheop >= 0)
				Mips_IOSyncDCache(ci, vaddr, ssize, cacheop);
			size -= ssize;
		}
		curseg++;
		nsegs--;
	}

#ifdef DIAGNOSTIC
	if (size != 0) {
		panic("_dmamap_sync: ran off map!");
	}
#endif
}
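
/*
 * Example (a hypothetical caller sketch; sc and map are assumptions):
 * the usual sync pairing around a device-to-memory (read) transfer,
 * which on a noncoherent CPU invalidates the cached view of the buffer:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREREAD);
 *	... start the transfer and wait for it to complete ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_POSTREAD);
 */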

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	return _dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)-1);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	vm_page_t m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE((*t->_device_to_pa)(addr));
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;
	const struct kmem_dyn_mode *kd;

#ifdef CPU_LOONGSON3
	/*
	 * Loongson 3 caches are coherent.
	 */
	if (loongson_ver >= 0x3a)
		flags &= ~BUS_DMA_COHERENT;
#endif

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached.  If the page has no other mappings,
			 * this amounts to a no-op.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PGF_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
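
/*
 * Example (a hypothetical driver sketch; sc, the one-page size and the
 * BUS_DMA_COHERENT choice are assumptions): the usual allocation chain
 * for a descriptor ring, pairing bus_dmamem_alloc() and bus_dmamem_map()
 * with bus_dmamap_create() and bus_dmamap_load_raw():
 *
 *	bus_dma_segment_t seg;
 *	bus_dmamap_t map;
 *	caddr_t kva;
 *	int rsegs;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
 *	    1, &rsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
 *		goto fail;
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rsegs, PAGE_SIZE, &kva,
 *	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0)
 *		goto fail;
 *	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
 *	    BUS_DMA_NOWAIT, &map) != 0)
 *		goto fail;
 *	if (bus_dmamap_load_raw(sc->sc_dmat, map, &seg, rsegs, PAGE_SIZE,
 *	    BUS_DMA_NOWAIT) != 0)
 *		goto fail;
 */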

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
	if (IS_XKPHYS((vaddr_t)kva))
		return;

	km_free(kva, round_page(size), &kv_any, &kp_none);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return ((*t->_device_to_pa)(segs[i].ds_addr) + off);
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates whether this is the first invocation of this function.
 */
int
_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t lastaddr, baddr, bmask;
	paddr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);
	if (t->_dma_mask != 0)
		bmask &= t->_dma_mask;

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap_extract(pmap, vaddr, &curaddr) == 0)
			panic("_dmamap_load_buffer: pmap_extract(%p, %lx) "
			    "failed!", pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = ((bus_addr_t)curaddr + map->_dm_boundary) &
			    bmask;
			if (sgsize > (baddr - (bus_addr_t)curaddr))
				sgsize = (baddr - (bus_addr_t)curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr =
			    (*t->_pa_to_device)(curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_paddr = curaddr;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if ((bus_addr_t)curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			     (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     ((bus_addr_t)curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    (*t->_pa_to_device)(curaddr);
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_paddr = curaddr;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = (bus_addr_t)curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr =
	    (*t->_pa_to_device)(VM_PAGE_TO_PHYS(m));
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned nonsensical"
			    " address 0x%lx\n", curaddr);
			panic("_dmamem_alloc_range");
		}
#endif
		curaddr = (*t->_pa_to_device)(curaddr);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
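
/*
 * Example (an illustration only; the 256MB limit is an assumption, not
 * a constraint taken from this file): a bus-specific allocator whose
 * device can only address the low 256MB of physical memory could wrap
 * this function as:
 *
 *	return _dmamem_alloc_range(t, size, alignment, boundary,
 *	    segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)0x0fffffff);
 */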