/*	$NetBSD: bus.c,v 1.32 2002/06/02 14:44:36 drochner Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <m68k/cacheops.h>
#define	_ATARI_BUS_DMA_PRIVATE
#include <machine/bus.h>

int  bus_dmamem_alloc_range __P((bus_dma_tag_t tag, bus_size_t size,
		bus_size_t alignment, bus_size_t boundary,
		bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
		paddr_t low, paddr_t high));
static int  _bus_dmamap_load_buffer __P((bus_dma_tag_t tag, bus_dmamap_t,
		void *, bus_size_t, struct proc *, int, paddr_t *,
		int *, int));
static int  bus_mem_add_mapping __P((bus_space_tag_t t, bus_addr_t bpa,
		bus_size_t size, int flags, bus_space_handle_t *bsph));

extern struct extent *iomem_ex;
extern int iomem_malloc_safe;

extern paddr_t avail_end;

/*
 * We need these for the early memory allocator. The idea is this:
 * allocate VA-space through ptextra (atari_init.c:startc()). When
 * the VA & size of this space are known, call bootm_init().
 * Until the VM-system is up, bus_mem_add_mapping() allocates its virtual
 * addresses from this extent-map.
 *
 * This allows the console code to use the bus_space interface at a
 * very early stage of the system configuration.
 */
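
/*
 * Rough sketch of the intended sequence (assumed from the comment
 * above; see atari_init.c:startc() for the authoritative code):
 *
 *	startc():
 *		reserve VA space and PTEs via ptextra;
 *		bootm_init(va, ptep, size);
 *
 *	early console attach:
 *		bus_space_map() -> bus_mem_add_mapping()
 *		    -> bootm_alloc()		(kernel_map still NULL)
 *
 *	once UVM is up:
 *		bus_mem_add_mapping() -> uvm_km_valloc() + pmap_enter()
 */
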
static pt_entry_t	*bootm_ptep;
static long		bootm_ex_storage[EXTENT_FIXED_STORAGE_SIZE(32) /
								sizeof(long)];
static struct extent	*bootm_ex;

void bootm_init(vaddr_t, pt_entry_t *, u_long);
static vaddr_t	bootm_alloc(paddr_t pa, u_long size, int flags);
static int	bootm_free(vaddr_t va, u_long size);

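/*
 * bootm_init:
 *	Initialize the boot-time extent map.  It manages the pre-allocated
 *	VA range [va, va + size); `ptep' points to the page table entries
 *	that back this range.
 */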
void
bootm_init(va, ptep, size)
vaddr_t		va;
pt_entry_t	*ptep;
u_long		size;
{
	bootm_ex = extent_create("bootmem", va, va + size, M_DEVBUF,
	    (caddr_t)bootm_ex_storage, sizeof(bootm_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);
	bootm_ptep = ptep;
}

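/*
 * bootm_alloc:
 *	Carve `size' bytes out of the boot-time extent map and enter
 *	mappings for the physical region starting at `pa'.  The mapping is
 *	made cache-inhibited unless BUS_SPACE_MAP_CACHEABLE is set in
 *	`flags'.  Returns the virtual address, or 0 on failure.
 */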
vaddr_t
bootm_alloc(pa, size, flags)
paddr_t	pa;
u_long	size;
int	flags;
{
	pt_entry_t	*pg, *epg;
	pt_entry_t	pg_proto;
	vaddr_t		va, rva;

	if (extent_alloc(bootm_ex, size, NBPG, 0, EX_NOWAIT, &rva)) {
		printf("bootm_alloc fails! Not enough fixed extents?\n");
		printf("Requested extent: pa=%lx, size=%lx\n",
						(u_long)pa, size);
		return 0;
	}

	pg  = &bootm_ptep[btoc(rva - bootm_ex->ex_start)];
	epg = &pg[btoc(size)];
	va  = rva;
	pg_proto = pa | PG_RW | PG_V;
	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
		pg_proto |= PG_CI;
	while (pg < epg) {
		*pg++     = pg_proto;
		pg_proto += NBPG;
#if defined(M68040) || defined(M68060)
		if (mmutype == MMU_68040) {
			/* Push any cached data for this physical page. */
			DCFP(pa);
			pa += NBPG;
		}
#endif
		/* Invalidate any stale TLB entry for this page. */
		TBIS(va);
		va += NBPG;
	}
	return rva;
}

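/*
 * bootm_free:
 *	Release a boot-time mapping.  Returns 0 if `va' does not belong
 *	to the boot-time extent, 1 after freeing it.
 */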
int
bootm_free(va, size)
vaddr_t	va;
u_long	size;
{
	if ((va < bootm_ex->ex_start) || ((va + size) > bootm_ex->ex_end))
		return 0; /* Not for us! */
	extent_free(bootm_ex, va, size, EX_NOWAIT);
	return 1;
}

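/*
 * Map a region of bus space: reserve it in the I/O memory extent map,
 * then enter a (possibly cache-inhibited) kernel mapping for it.
 */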
int
bus_space_map(t, bpa, size, flags, mhp)
bus_space_tag_t		t;
bus_addr_t		bpa;
bus_size_t		size;
int			flags;
bus_space_handle_t	*mhp;
{
	int	error;

	/*
	 * Before we go any further, let's make sure that this
	 * region is available.
	 */
	error = extent_alloc_region(iomem_ex, bpa + t->base, size,
			EX_NOWAIT | (iomem_malloc_safe ? EX_MALLOCOK : 0));

	if (error)
		return (error);

	error = bus_mem_add_mapping(t, bpa, size, flags, mhp);
	if (error) {
		if (extent_free(iomem_ex, bpa + t->base, size, EX_NOWAIT |
				(iomem_malloc_safe ? EX_MALLOCOK : 0))) {
		    printf("bus_space_map: pa 0x%lx, size 0x%lx\n", bpa, size);
		    printf("bus_space_map: can't free region\n");
		}
	}
	return (error);
}

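/*
 * Allocate and map a region of bus space from the range [rstart, rend].
 */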
int
bus_space_alloc(t, rstart, rend, size, alignment, boundary, flags, bpap, bshp)
	bus_space_tag_t t;
	bus_addr_t rstart, rend;
	bus_size_t size, alignment, boundary;
	int flags;
	bus_addr_t *bpap;
	bus_space_handle_t *bshp;
{
	u_long bpa;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Sanity check the allocation against the extent's boundaries.
	 * XXX: Since we manage the whole of memory in a single map,
	 *      this is nonsense for now! Bracket it with DIAGNOSTIC....
	 */
	if ((rstart + t->base) < iomem_ex->ex_start
				|| (rend + t->base) > iomem_ex->ex_end)
		panic("bus_space_alloc: bad region start/end");
#endif /* DIAGNOSTIC */

	/*
	 * Do the requested allocation.
	 */
	error = extent_alloc_subregion(iomem_ex, rstart + t->base,
	    rend + t->base, size, alignment, boundary,
	    EX_FAST | EX_NOWAIT | (iomem_malloc_safe ? EX_MALLOCOK : 0),
	    &bpa);

	if (error)
		return (error);

	/*
	 * Map the bus physical address to a kernel virtual address.
	 */
	error = bus_mem_add_mapping(t, bpa, size, flags, bshp);
	if (error) {
		if (extent_free(iomem_ex, bpa, size, EX_NOWAIT |
		    (iomem_malloc_safe ? EX_MALLOCOK : 0))) {
			printf("bus_space_alloc: pa 0x%lx, size 0x%lx\n",
			    bpa, size);
			printf("bus_space_alloc: can't free region\n");
		}
	}

	*bpap = bpa;

	return (error);
}

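/*
 * Enter kernel mappings for `size' bytes of bus space at `bpa'.
 * Falls back on the boot-time allocator while the VM system is not
 * yet operational.
 */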
static int
bus_mem_add_mapping(t, bpa, size, flags, bshp)
bus_space_tag_t		t;
bus_addr_t		bpa;
bus_size_t		size;
int			flags;
bus_space_handle_t	*bshp;
{
	vaddr_t	va;
	paddr_t	pa, endpa;

	pa    = m68k_trunc_page(bpa + t->base);
	endpa = m68k_round_page((bpa + t->base + size) - 1);

#ifdef DIAGNOSTIC
	if (endpa <= pa)
		panic("bus_mem_add_mapping: overflow");
#endif

	if (kernel_map == NULL) {
		/*
		 * The VM system is not yet operational; allocate from
		 * a special pool.
		 */
		va = bootm_alloc(pa, endpa - pa, flags);
		if (va == 0)
			return (ENOMEM);
		*bshp = (caddr_t)(va + (bpa & PGOFSET));
		return (0);
	}

	va = uvm_km_valloc(kernel_map, endpa - pa);
	if (va == 0)
		return (ENOMEM);

	*bshp = (caddr_t)(va + (bpa & PGOFSET));

	for (; pa < endpa; pa += NBPG, va += NBPG) {
		u_int	*ptep, npte;

		pmap_enter(pmap_kernel(), (vaddr_t)va, pa,
				VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

		ptep = kvtopte(va);
		npte = *ptep & ~PG_CMASK;

		if (!(flags & BUS_SPACE_MAP_CACHEABLE))
			npte |= PG_CI;
		else if (mmutype == MMU_68040)
			npte |= PG_CCB;

		*ptep = npte;
	}
	pmap_update(pmap_kernel());
	TBIAS();
	return (0);
}

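/*
 * Undo a bus_space_map(): release the kernel virtual mapping and
 * return the region to the I/O memory extent map.
 */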
void
bus_space_unmap(t, bsh, size)
bus_space_tag_t		t;
bus_space_handle_t	bsh;
bus_size_t		size;
{
	vaddr_t	va, endva;
	paddr_t bpa;

	va = m68k_trunc_page(bsh);
	endva = m68k_round_page((bsh + size) - 1);
#ifdef DIAGNOSTIC
	if (endva < va)
		panic("bus_space_unmap: overflow");
#endif

	(void) pmap_extract(pmap_kernel(), va, &bpa);
	bpa += ((u_long)bsh & PGOFSET);

	/*
	 * Free the kernel virtual mapping.
	 */
	if (!bootm_free(va, endva - va))
		uvm_km_free(kernel_map, va, endva - va);

	/*
	 * Mark as free in the extent map.
	 */
	if (extent_free(iomem_ex, bpa, size,
			EX_NOWAIT | (iomem_malloc_safe ? EX_MALLOCOK : 0))) {
		printf("bus_space_unmap: pa 0x%lx, size 0x%lx\n", bpa, size);
		printf("bus_space_unmap: can't free region\n");
	}
}

/*
 * Get a new handle for a subregion of an already-mapped area of bus space.
 */
int
bus_space_subregion(t, memh, off, sz, mhp)
bus_space_tag_t		t;
bus_space_handle_t	memh;
bus_size_t		off, sz;
bus_space_handle_t	*mhp;
{
	*mhp = memh + off;
	return 0;
}

paddr_t
bus_space_mmap(t, addr, off, prot, flags)
	bus_space_tag_t t;
	bus_addr_t addr;
	off_t off;
	int prot;
	int flags;
{

	/*
	 * "addr" is the base address of the device we're mapping.
	 * "off" is the offset into that device.
	 *
	 * Note we are called for each "page" in the device that
	 * the upper layers want to map.
	 */
	return (m68k_btop(addr + off));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct atari_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct atari_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct atari_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}

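/*
 * Sketch of typical driver usage of the map lifecycle (illustrative
 * only; error handling omitted, sizes and flags are placeholders):
 *
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(t, MAXBSIZE, 1, MAXBSIZE, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, len, p, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... run the DMA transfer ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */
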
/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, off, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t off;
	bus_size_t len;
	int ops;
{
#if defined(M68040) || defined(M68060)
	int	i, pa_off, inc, seglen;
	u_long	pa, end_pa;

	pa_off = t->_displacement;

	/* Flush granularity */
	inc = (len > 1024) ? NBPG : 16;

	for (i = 0; i < map->dm_nsegs && len > 0; i++) {
		if (map->dm_segs[i].ds_len <= off) {
			/* Segment irrelevant - before requested offset */
			off -= map->dm_segs[i].ds_len;
			continue;
		}
		seglen = map->dm_segs[i].ds_len - off;
		if (seglen > len)
			seglen = len;
		len -= seglen;
		pa = map->dm_segs[i].ds_addr + off - pa_off;
		end_pa = pa + seglen;

		if (inc == 16) {
			pa &= ~15;
			while (pa < end_pa) {
				DCFL(pa);
				pa += 16;
			}
		} else {
			pa &= ~PGOFSET;
			while (pa < end_pa) {
				DCFP(pa);
				pa += NBPG;
			}
		}
	}
#endif
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	return (bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr, offset;
	struct pglist mlist;
	int curseg;

	offset = t->_displacement;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr - offset);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr, offset;
	int curseg;

	offset = t->_displacement;

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr - offset,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);

	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i, offset;

	offset = t->_displacement;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (m68k_btop((caddr_t)segs[i].ds_addr - offset + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, offset, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	offset = t->_displacement;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		(void) pmap_extract(pmap, vaddr, &curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr + offset;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr + offset;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	paddr_t low;
	paddr_t high;
{
	paddr_t curaddr, lastaddr;
	bus_addr_t offset;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	offset = t->_displacement;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_addr = lastaddr + offset;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr + offset;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
965