/*	$NetBSD: bus.c,v 1.18 2001/12/19 14:53:26 minoura Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * bus_space(9) and bus_dma(9) implementation for NetBSD/x68k.
 * These are default implementations; some buses may use their own.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <m68k/cacheops.h>
#include <machine/bus.h>

#if defined(M68040) || defined(M68060)
static inline void dmasync_flush(bus_addr_t, bus_size_t);
static inline void dmasync_inval(bus_addr_t, bus_size_t);
#endif

int
x68k_bus_space_alloc(t, rstart, rend, size, alignment, boundary, flags,
    bpap, bshp)
	bus_space_tag_t t;
	bus_addr_t rstart, rend;
	bus_size_t size, alignment, boundary;
	int flags;
	bus_addr_t *bpap;
	bus_space_handle_t *bshp;
{
	return (EINVAL);
}

void
x68k_bus_space_free(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{
	panic("bus_space_free: shouldn't be here");
}


extern paddr_t avail_end;

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
x68k_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct x68k_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct x68k_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct x68k_bus_dmamap *)mapstore;
	map->x68k_dm_size = size;
	map->x68k_dm_segcnt = nsegments;
	map->x68k_dm_maxsegsz = maxsegsz;
	map->x68k_dm_boundary = boundary;
	map->x68k_dm_bounce_thresh = t->_bounce_thresh;
	map->x68k_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
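
/*
 * Usage sketch (illustrative only; "t", "buf" and the 64KB limits are
 * hypothetical): a driver reaches the functions above through the
 * bus_dma(9) front-end, which dispatches via the tag's function
 * pointers.  A minimal create/load sequence might look like this.
 */
#if 0
static int
example_dmamap_setup(bus_dma_tag_t t, void *buf, bus_size_t len,
    bus_dmamap_t *mapp)
{
	int error;

	/* One segment of up to 64KB, no boundary restriction. */
	error = bus_dmamap_create(t, 0x10000, 1, 0x10000, 0,
	    BUS_DMA_NOWAIT, mapp);
	if (error)
		return (error);
	/* Kernel buffer, hence the NULL proc pointer. */
	error = bus_dmamap_load(t, *mapp, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error)
		bus_dmamap_destroy(t, *mapp);
	return (error);
}
#endif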

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
x68k_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
x68k_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->x68k_dm_size)
		return (EINVAL);

	seg = 0;
	error = x68k_bus_dmamap_load_buffer(map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like x68k_bus_dmamap_load(), but for mbufs.
 */
int
x68k_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("x68k_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->x68k_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = x68k_bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
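
/*
 * Usage sketch (illustrative only; the function and its arguments are
 * hypothetical): a network driver would load an outgoing packet chain
 * and flush the cache before starting the transfer.
 */
#if 0
static int
example_tx_load(bus_dma_tag_t t, bus_dmamap_t txmap, struct mbuf *m0)
{
	int error;

	error = bus_dmamap_load_mbuf(t, txmap, m0, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	bus_dmamap_sync(t, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}
#endif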

/*
 * Like x68k_bus_dmamap_load(), but for uios.
 */
int
x68k_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
#if 0
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = x68k_bus_dmamap_load_buffer(map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
#else
	panic("x68k_bus_dmamap_load_uio: not implemented");
#endif
}

/*
 * Like x68k_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
x68k_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("x68k_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
x68k_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

#if defined(M68040) || defined(M68060)
/*
 * Push (flush) the data cache over [addr, addr+len); used before a
 * memory-to-device transfer so the device sees up-to-date memory.
 */
static inline void
dmasync_flush(bus_addr_t addr, bus_size_t len)
{
	bus_addr_t end = addr+len;

	if (len <= 1024) {
		/* Small buffer: push it line by line (16-byte lines). */
		addr = addr & ~0xF;

		do {
			DCFL(addr);
			addr += 16;
		} while (addr < end);
	} else {
		/* Large buffer: push whole pages at a time. */
		addr = m68k_trunc_page(addr);

		do {
			DCFP(addr);
			addr += NBPG;
		} while (addr < end);
	}
}

/*
 * Discard cached copies of [addr, addr+len); used after a
 * device-to-memory transfer so the CPU does not read stale lines.
 */
static inline void
dmasync_inval(bus_addr_t addr, bus_size_t len)
{
	bus_addr_t end = addr+len;

	if (len <= 1024) {
		/* Small buffer: operate line by line. */
		addr = addr & ~0xF;

		do {
			DCFL(addr);
			ICPL(addr);
			addr += 16;
		} while (addr < end);
	} else {
		/* Large buffer: operate page by page. */
		addr = m68k_trunc_page(addr);

		do {
			/*
			 * Purge whole pages; the line op DCPL here
			 * would only touch one line of each page.
			 */
			DCPP(addr);
			ICPP(addr);
			addr += NBPG;
		} while (addr < end);
	}
}
#endif

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
x68k_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
#if defined(M68040) || defined(M68060)
	bus_dma_segment_t *ds = map->dm_segs;
	bus_addr_t seg;
	int i;

	/*
	 * Only a pre-write flush or a post-read invalidation requires
	 * cache operations here; these are the two ops handled below.
	 */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) == 0)
		return;
#if defined(M68020) || defined(M68030)
	if (mmutype != MMU_68040) {
		if ((ops & BUS_DMASYNC_POSTREAD) == 0)
			return;	/* no copyback cache */
		ICIA();		/* no per-page/per-line control */
		DCIA();
		return;
	}
#endif
	if (offset >= map->dm_mapsize)
		return;	/* driver bug; warn it? */
	if (offset+len > map->dm_mapsize)
		len = map->dm_mapsize - offset; /* driver bug; warn it? */

	i = 0;
	while (ds[i].ds_len <= offset)
		offset -= ds[i++].ds_len;
	while (len > 0) {
		seg = ds[i].ds_len - offset;
		if (seg > len)
			seg = len;
		if (mmutype == MMU_68040 && (ops & BUS_DMASYNC_PREWRITE))
			dmasync_flush(ds[i].ds_addr+offset, seg);
		if (ops & BUS_DMASYNC_POSTREAD)
			dmasync_inval(ds[i].ds_addr+offset, seg);
		offset = 0;
		len -= seg;
		i++;
	}
#else  /* no 040/060 */
	if ((ops & BUS_DMASYNC_POSTREAD)) {
		ICIA();		/* no per-page/per-line control */
		DCIA();
	}
#endif
}
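
/*
 * Usage sketch (illustrative only; "t" and "map" are hypothetical):
 * drivers bracket each transfer with the matching sync operations.
 * In this implementation PREWRITE flushes the cache before a
 * memory-to-device transfer and POSTREAD discards stale lines after
 * a device-to-memory transfer; the other two ops are no-ops here but
 * belong in portable code.
 */
#if 0
static void
example_sync_bracket(bus_dma_tag_t t, bus_dmamap_t map)
{
	/* memory -> device */
	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	/* ... start the device and wait for completion ... */
	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	/* device -> memory */
	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_PREREAD);
	/* ... start the device and wait for completion ... */
	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
}
#endif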

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
x68k_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	return (x68k_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
x68k_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
x68k_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("x68k_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
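
/*
 * Usage sketch (illustrative only; "t" is hypothetical): a driver
 * needing a DMA-safe control block allocates raw pages and then maps
 * them into kernel virtual space.
 */
#if 0
static int
example_dmamem_setup(bus_dma_tag_t t, caddr_t *kvap)
{
	bus_dma_segment_t seg;
	int rseg, error;

	/* One page, page-aligned, no boundary restriction. */
	error = bus_dmamem_alloc(t, NBPG, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT);
	if (error)
		return (error);
	error = bus_dmamem_map(t, &seg, rseg, NBPG, kvap, BUS_DMA_NOWAIT);
	if (error)
		bus_dmamem_free(t, &seg, rseg);
	return (error);
}
#endif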

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
x68k_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{
#ifdef DIAGNOSTIC
	if (m68k_page_offset(kva))
		panic("x68k_bus_dmamem_unmap");
#endif

	size = round_page(size);

	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
x68k_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (m68k_page_offset(off))
			panic("x68k_bus_dmamem_mmap: offset unaligned");
		if (m68k_page_offset(segs[i].ds_addr))
			panic("x68k_bus_dmamem_mmap: segment unaligned");
		if (m68k_page_offset(segs[i].ds_len))
			panic("x68k_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (m68k_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}


/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
x68k_bus_dmamap_load_buffer(map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask  = ~(map->x68k_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, vaddr, &curaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->x68k_dm_bounce_thresh != 0 &&
		    curaddr >= map->x68k_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - m68k_page_offset(vaddr);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->x68k_dm_boundary > 0) {
			baddr = (curaddr + map->x68k_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->x68k_dm_maxsegsz &&
			    (map->x68k_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->x68k_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
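
/*
 * Worked example of the boundary arithmetic above (hypothetical
 * numbers): with x68k_dm_boundary = 0x20000 (128KB), bmask is
 * ~0x1FFFF.  For curaddr = 0x1f000, baddr = (0x1f000 + 0x20000) &
 * ~0x1FFFF = 0x20000, so sgsize is clamped to baddr - curaddr =
 * 0x1000 and the next chunk starts a new segment at the boundary.
 */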

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
x68k_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	paddr_t low;
	paddr_t high;
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("x68k_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
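
/*
 * Worked example of the coalescing above (hypothetical numbers): if
 * uvm_pglistalloc() returns pages at 0x100000, 0x101000, 0x102000 and
 * 0x200000 with a 4KB PAGE_SIZE, the first three coalesce into one
 * segment { ds_addr = 0x100000, ds_len = 0x3000 } and the fourth
 * starts a second segment, so *rsegs becomes 2.
 */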