/*	$NetBSD: bus.c,v 1.30 2009/12/14 00:46:09 matt Exp $	*/

/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.30 2009/12/14 00:46:09 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _NEWSMIPS_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/cpu.h>

#include <common/bus_dma/bus_dmamem_common.h>

#include <uvm/uvm_extern.h>

#include <mips/cache.h>

static int	_bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t,
				struct vmspace *, int, vaddr_t *, int *, int);

struct newsmips_bus_dma_tag newsmips_default_bus_dma_tag = {
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	NULL,
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

void
newsmips_bus_dma_init(void)
{

#ifdef MIPS1
	if (CPUISMIPS3 == 0)
		newsmips_default_bus_dma_tag._dmamap_sync =
		    _bus_dmamap_sync_r3k;
#endif
#ifdef MIPS3
	if (CPUISMIPS3)
		newsmips_default_bus_dma_tag._dmamap_sync =
		    _bus_dmamap_sync_r4k;
#endif
}

int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	int cacheable = flags & BUS_SPACE_MAP_CACHEABLE;

	if (cacheable)
		*bshp = MIPS_PHYS_TO_KSEG0(bpa);
	else
		*bshp = MIPS_PHYS_TO_KSEG1(bpa);

	return 0;
}
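
/*
 * Usage sketch (illustrative only): a hypothetical driver mapping a
 * device's register window and reading a status byte.  The base
 * address, window size, and register offset below are invented.
 *
 *	bus_space_handle_t ioh;
 *	uint8_t status;
 *
 *	if (bus_space_map(t, 0x1e900000, 0x1000, 0, &ioh) == 0) {
 *		status = bus_space_read_1(t, ioh, 0x08);
 *		bus_space_unmap(t, ioh, 0x1000);
 *	}
 *
 * Without BUS_SPACE_MAP_CACHEABLE in the flags, the handle is a KSEG1
 * address, so every access goes straight to the device, uncached.
 */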

int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags,
    bus_addr_t *bpap, bus_space_handle_t *bshp)
{

	panic("bus_space_alloc: not implemented");
}

void
bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{

	panic("bus_space_free: not implemented");
}

void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{

	return;
}

int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t bsh,
    bus_size_t offset, bus_size_t size, bus_space_handle_t *nbshp)
{

	*nbshp = bsh + offset;
	return 0;
}
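
/*
 * Usage sketch (illustrative only): carving a sub-window out of an
 * existing mapping, e.g. for one function of a multi-function device.
 * The offset and size are invented.
 *
 *	bus_space_handle_t subh;
 *
 *	(void)bus_space_subregion(t, ioh, 0x100, 0x40, &subh);
 *
 * Handles on this port are plain virtual addresses, so the subregion
 * is just the parent handle plus the offset; no new mapping is made
 * and the result never needs to be unmapped separately.
 */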

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct newsmips_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct newsmips_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)|M_ZERO)) == NULL)
		return ENOMEM;

	map = (struct newsmips_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_vmspace = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}
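
/*
 * Usage sketch (illustrative only): a hypothetical driver creating a
 * map that can describe a 64KB transfer in up to 16 segments with no
 * boundary constraint.  sc->sc_dmat stands for the bus_dma_tag_t the
 * driver was handed at attach time; all numbers are invented.
 *
 *	bus_dmamap_t dmamap;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, 65536, 16, 65536, 0,
 *	    BUS_DMA_NOWAIT, &dmamap);
 *	if (error)
 *		return error;
 */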

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}

extern	paddr_t kvtophys(vaddr_t);		/* XXX */

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct vmspace *vm, int flags, vaddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	paddr_t pa;
	size_t seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, &pa);
		else
			pa = kvtophys(vaddr);
		curaddr = pa;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX Better return value here? */

	return 0;
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= NEWSMIPS_DMAMAP_COHERENT;
	}
	return error;
}
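
/*
 * Usage sketch (illustrative only): the usual lifecycle for a
 * memory-to-device transfer of a kernel buffer.  sc->sc_dmat, dmamap,
 * buf, and len are assumed to exist; starting the device is elided.
 *
 *	error = bus_dmamap_load(sc->sc_dmat, dmamap, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	if (error)
 *		return error;
 *	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, len, BUS_DMASYNC_PREWRITE);
 *	(start the device on dmamap->dm_segs[0].ds_addr)
 *	...
 *	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, dmamap);
 *
 * Passing NULL for the proc means "kernel buffer".  Note that PRE and
 * POST operations must not be mixed in one sync call; the sync
 * routines below panic on that combination.
 */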

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
	}
	return error;
}
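
/*
 * Usage sketch (illustrative only): loading an outgoing packet in a
 * hypothetical network driver's transmit path.  Mbuf chains are always
 * kernel memory, so no proc argument exists in this interface.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
 *	    BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		(too many segments: coalesce the chain or drop the packet)
 *	}
 *	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, m0->m_pkthdr.len,
 *	    BUS_DMASYNC_PREWRITE);
 */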

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = uio->uio_vmspace;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~NEWSMIPS_DMAMAP_COHERENT;
	map->_dm_vmspace = NULL;
}

#ifdef MIPS1
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R3000 version.
 */
void
_bus_dmamap_sync_r3k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_r3k: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_r3k: bad offset %lu (map size is %lu)",
		      offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_r3k: bad length");
#endif

	/*
	 * The R3000 cache is write-through.  Therefore, we only need
	 * to drain the write buffer on PREWRITE.  The cache is not
	 * coherent, however, so we need to invalidate the data cache
	 * on PREREAD (should we do it POSTREAD instead?).
	 *
	 * POSTWRITE (and POSTREAD, currently) are noops.
	 */

	if (ops & BUS_DMASYNC_PREWRITE) {
		/*
		 * Flush the write buffer.
		 */
		wbflush();
	}

	/*
	 * If we're not doing PREREAD, nothing more to do.
	 */
	if ((ops & BUS_DMASYNC_PREREAD) == 0)
		return;

	/*
	 * No cache invalidation is necessary if the DMA map covers
	 * COHERENT DMA-safe memory (which is mapped un-cached).
	 */
	if (map->_dm_flags & NEWSMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If we are going to hit something as large or larger
	 * than the entire data cache, just nail the whole thing.
	 *
	 * NOTE: Even though this is `wbinv_all', since the cache is
	 * write-through, it just invalidates it.
	 */
	if (len >= mips_pdcache_size) {
		mips_dcache_wbinv_all();
		return;
	}

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i].ds_addr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync_r3k: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		mips_dcache_inv_range(
		    MIPS_PHYS_TO_KSEG0(addr + offset), minlen);
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
#endif /* MIPS1 */

#ifdef MIPS3
/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R4000 version.
 */
void
_bus_dmamap_sync_r4k(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_r4k: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad offset %lu (map size is %lu)",
		      offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_r4k: bad length");
#endif

	/*
	 * The R4000 cache is virtually-indexed, write-back.  This means
	 * we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & NEWSMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If the mapping belongs to the kernel, or if it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
	    map->_dm_vmspace == curproc->p_vmspace))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(addr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(addr + offset, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
#if 1
			mips_dcache_wbinv_range(addr + offset, minlen);
#else
			mips_dcache_inv_range(addr + offset, minlen);
#endif
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(addr + offset, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
#endif /* MIPS3 */

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;

	return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
					       segs, nsegs, rsegs, flags,
					       avail_start /*low*/,
					       avail_end - PAGE_SIZE /*high*/));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	_bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

	/*
	 * If we're only mapping 1 segment, and the address is lower than
	 * 256MB, use KSEG0 or KSEG1, to avoid TLB thrashing.
	 */
	if (nsegs == 1 && segs[0].ds_addr + segs[0].ds_len <= 0x10000000) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return 0;
	}

	/* XXX BUS_DMA_COHERENT */
	return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}
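
/*
 * Usage sketch (illustrative only): allocating one page of DMA-safe
 * memory, e.g. for a descriptor ring, and mapping it coherently.  The
 * names and sizes are invented.
 *
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *	void *kva;
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
 *		    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
 *
 * A single segment below 256MB with BUS_DMA_COHERENT lands in KSEG1,
 * i.e. an uncached mapping, so no bus_dmamap_sync() cache operations
 * are needed for CPU accesses through kva.
 */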

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (void *)MIPS_KSEG0_START &&
	    kva < (void *)MIPS_KSEG2_START)
		return;

	_bus_dmamem_unmap_common(t, kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	bus_addr_t rv;

	rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
	if (rv == (bus_addr_t)-1)
		return (-1);

	return (mips_btop((char *)rv));
}