/*	$NetBSD: bus.c,v 1.60 2010/07/06 20:50:35 cegger Exp $	*/

/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.60 2010/07/06 20:50:35 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/bswap.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#define _SGIMIPS_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/machtype.h>

#include <common/bus_dma/bus_dmamem_common.h>

#include <uvm/uvm_extern.h>

#include <mips/cpuregs.h>
#include <mips/locore.h>
#include <mips/cache.h>

#include <sgimips/mace/macereg.h>

#include "opt_sgimace.h"

static int	_bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t,
				struct vmspace *, int, vaddr_t *, int *, int);

struct sgimips_bus_dma_tag sgimips_default_bus_dma_tag = {
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	NULL,
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};

void
sgimips_bus_dma_init(void)
{
	switch (mach_type) {
	/* R2000/R3000 */
	case MACH_SGI_IP6 | MACH_SGI_IP10:
	case MACH_SGI_IP12:
		sgimips_default_bus_dma_tag._dmamap_sync =
		    _bus_dmamap_sync_mips1;
		break;

	/* >= R4000 */
	case MACH_SGI_IP20:
	case MACH_SGI_IP22:
	case MACH_SGI_IP30:
	case MACH_SGI_IP32:
		sgimips_default_bus_dma_tag._dmamap_sync =
		    _bus_dmamap_sync_mips3;
		break;

	default:
		panic("sgimips_bus_dma_init: unsupported mach type IP%d",
		    mach_type);
	}
}
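
/*
 * Note that the _dmamap_sync slot of the default tag above starts out
 * NULL and is filled in by sgimips_bus_dma_init() once the CPU family
 * is known.
 *
 * For illustration only -- a minimal sketch of the usual bus_dma(9)
 * call sequence a driver performs against this tag; "t", "buf" and
 * "len" are hypothetical:
 *
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(t, len, 1, len, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	(start the DMA transfer and wait for it to complete)
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */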

u_int8_t
bus_space_read_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{
	wbflush(); /* XXX ? */

	switch (t) {
	case SGIMIPS_BUS_SPACE_NORMAL:
		return *(volatile u_int8_t *)(vaddr_t)(h + o);
	case SGIMIPS_BUS_SPACE_IP6_DPCLOCK:
		return *(volatile u_int8_t *)(vaddr_t)(h + (o << 2));
	case SGIMIPS_BUS_SPACE_HPC:
		return *(volatile u_int8_t *)(vaddr_t)(h + (o << 2) + 3);
	case SGIMIPS_BUS_SPACE_MEM:
	case SGIMIPS_BUS_SPACE_IO:
		return *(volatile u_int8_t *)(vaddr_t)(h + (o | 3) - (o & 3));
	case SGIMIPS_BUS_SPACE_MACE:
		return *(volatile u_int8_t *)(vaddr_t)(h + (o << 8) + 7);
	default:
		panic("no bus tag");
	}
}
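
/*
 * A note on the offset arithmetic above, as inferred from the code
 * itself: each tag encodes a byte-lane swizzle for the big-endian CPU.
 *
 *	HPC:	registers sit on 32-bit strides, so (o << 2) selects
 *		the word and "+ 3" its least-significant byte lane.
 *	MEM/IO:	(o | 3) - (o & 3) is equivalent to (o ^ 3), i.e. it
 *		swaps bytes within each 32-bit word; offsets 0,1,2,3
 *		map to 3,2,1,0.  The 16-bit routines below use
 *		(o | 2) - (o & 3), which is (o ^ 2) for halfword-aligned
 *		offsets.
 *	MACE:	registers sit on 256-byte strides, so (o << 8) selects
 *		the register and "+ 7" the low byte of its first 64-bit
 *		word.
 */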

void
bus_space_write_1(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o, u_int8_t v)
{
	switch (t) {
	case SGIMIPS_BUS_SPACE_NORMAL:
		*(volatile u_int8_t *)(vaddr_t)(h + o) = v;
		break;
	case SGIMIPS_BUS_SPACE_IP6_DPCLOCK:
		*(volatile u_int8_t *)(vaddr_t)(h + (o << 2)) = v;
		break;
	case SGIMIPS_BUS_SPACE_HPC:
		*(volatile u_int8_t *)(vaddr_t)(h + (o << 2) + 3) = v;
		break;
	case SGIMIPS_BUS_SPACE_MEM:
	case SGIMIPS_BUS_SPACE_IO:
		*(volatile u_int8_t *)(vaddr_t)(h + (o | 3) - (o & 3)) = v;
		break;
	case SGIMIPS_BUS_SPACE_MACE:
		*(volatile u_int8_t *)(vaddr_t)(h + (o << 8) + 7) = v;
		break;
	default:
		panic("no bus tag");
	}

	wbflush();	/* XXX */
}

u_int16_t
bus_space_read_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o)
{
	wbflush(); /* XXX ? */

	switch (t) {
	case SGIMIPS_BUS_SPACE_NORMAL:
		return *(volatile u_int16_t *)(vaddr_t)(h + o);
	case SGIMIPS_BUS_SPACE_HPC:
		return *(volatile u_int16_t *)(vaddr_t)(h + (o << 2) + 1);
	case SGIMIPS_BUS_SPACE_MEM:
	case SGIMIPS_BUS_SPACE_IO:
		return *(volatile u_int16_t *)(vaddr_t)(h + (o | 2) - (o & 3));
	default:
		panic("no bus tag");
	}
}

void
bus_space_write_2(bus_space_tag_t t, bus_space_handle_t h, bus_size_t o, u_int16_t v)
{
	switch (t) {
	case SGIMIPS_BUS_SPACE_NORMAL:
		*(volatile u_int16_t *)(vaddr_t)(h + o) = v;
		break;
	case SGIMIPS_BUS_SPACE_HPC:
		*(volatile u_int16_t *)(vaddr_t)(h + (o << 2) + 1) = v;
		break;
	case SGIMIPS_BUS_SPACE_MEM:
	case SGIMIPS_BUS_SPACE_IO:
		*(volatile u_int16_t *)(vaddr_t)(h + (o | 2) - (o & 3)) = v;
		break;
	default:
		panic("no bus tag");
	}

	wbflush();	/* XXX */
}

u_int32_t
bus_space_read_4(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t o)
{
	u_int32_t reg;
#ifdef MACE_NEEDS_DELAYS
	int s;
#endif

	switch (tag) {
	case SGIMIPS_BUS_SPACE_MACE:
#ifdef MACE_NEEDS_DELAYS
		s = splhigh();
		delay(10);
#endif
		wbflush();
		reg = *(volatile u_int32_t *)(vaddr_t)(bsh + o);
#ifdef MACE_NEEDS_DELAYS
		delay(10);
		splx(s);
#endif
		break;
	default:
		wbflush();
		reg = *(volatile u_int32_t *)(vaddr_t)(bsh + o);
		break;
	}
	return reg;
}

void
bus_space_write_4(bus_space_tag_t tag, bus_space_handle_t bsh,
	bus_size_t o, u_int32_t v)
{
#ifdef MACE_NEEDS_DELAYS
	int s;
#endif

	switch (tag) {
	case SGIMIPS_BUS_SPACE_MACE:
#ifdef MACE_NEEDS_DELAYS
		s = splhigh();
		delay(10);
#endif
		*(volatile u_int32_t *)(vaddr_t)(bsh + o) = v;
		wbflush();
#ifdef MACE_NEEDS_DELAYS
		delay(10);
		splx(s);
#endif
		break;
	default:
		*(volatile u_int32_t *)(vaddr_t)(bsh + o) = v;
		wbflush(); /* XXX */
		break;
	}
}
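
/*
 * MACE_NEEDS_DELAYS (from opt_sgimace.h) brackets every MACE register
 * access with splhigh()/delay(10)/splx().  The reason is not recorded
 * here; presumably it paces back-to-back accesses that some MACE
 * revisions cannot absorb, so treat the 10us figure as empirical.
 */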

u_int16_t
bus_space_read_stream_2(bus_space_tag_t t, bus_space_handle_t h,
	bus_size_t o)
{
	u_int16_t v;

	wbflush(); /* XXX ? */

	switch (t) {
	case SGIMIPS_BUS_SPACE_NORMAL:
		return *(volatile u_int16_t *)(vaddr_t)(h + o);
	case SGIMIPS_BUS_SPACE_HPC:
		return *(volatile u_int16_t *)(vaddr_t)(h + (o << 2) + 1);
	case SGIMIPS_BUS_SPACE_MEM:
	case SGIMIPS_BUS_SPACE_IO:
		v = *(volatile u_int16_t *)(vaddr_t)(h + (o | 2) - (o & 3));
		return htole16(v);
	default:
		panic("no bus tag");
	}
}

u_int32_t
bus_space_read_stream_4(bus_space_tag_t t, bus_space_handle_t bsh,
	bus_size_t o)
{
	u_int32_t reg;
#ifdef MACE_NEEDS_DELAYS
	int s;
#endif

	switch (t) {
	case SGIMIPS_BUS_SPACE_MACE:
#ifdef MACE_NEEDS_DELAYS
		s = splhigh();
		delay(10);
#endif
		wbflush();
		reg = *(volatile u_int32_t *)(vaddr_t)(bsh + o);
#ifdef MACE_NEEDS_DELAYS
		delay(10);
		splx(s);
#endif
		break;
	case SGIMIPS_BUS_SPACE_MEM:
	case SGIMIPS_BUS_SPACE_IO:
		wbflush();
		reg = *(volatile u_int32_t *)(vaddr_t)(bsh + o);
		reg = htole32(reg);
		break;
	default:
		wbflush();
		reg = *(volatile u_int32_t *)(vaddr_t)(bsh + o);
		break;
	}
	return reg;
}

void
bus_space_write_stream_2(bus_space_tag_t t, bus_space_handle_t h,
	bus_size_t o, u_int16_t v)
{
	switch (t) {
	case SGIMIPS_BUS_SPACE_NORMAL:
		*(volatile u_int16_t *)(vaddr_t)(h + o) = v;
		break;
	case SGIMIPS_BUS_SPACE_HPC:
		*(volatile u_int16_t *)(vaddr_t)(h + (o << 2) + 1) = v;
		break;
	case SGIMIPS_BUS_SPACE_MEM:
	case SGIMIPS_BUS_SPACE_IO:
		v = le16toh(v);
		*(volatile u_int16_t *)(vaddr_t)(h + (o | 2) - (o & 3)) = v;
		break;
	default:
		panic("no bus tag");
	}

	wbflush();	/* XXX */
}

void
bus_space_write_stream_4(bus_space_tag_t tag, bus_space_handle_t bsh,
	bus_size_t o, u_int32_t v)
{
#ifdef MACE_NEEDS_DELAYS
	int s;
#endif

	switch (tag) {
	case SGIMIPS_BUS_SPACE_MACE:
#ifdef MACE_NEEDS_DELAYS
		s = splhigh();
		delay(10);
#endif
		*(volatile u_int32_t *)(vaddr_t)(bsh + o) = v;
		wbflush();
#ifdef MACE_NEEDS_DELAYS
		delay(10);
		splx(s);
#endif
		break;
	case SGIMIPS_BUS_SPACE_IO:
	case SGIMIPS_BUS_SPACE_MEM:
		v = le32toh(v);
		*(volatile u_int32_t *)(vaddr_t)(bsh + o) = v;
		wbflush(); /* XXX */
		break;
	default:
		*(volatile u_int32_t *)(vaddr_t)(bsh + o) = v;
		wbflush(); /* XXX */
		break;
	}
}
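
/*
 * The _stream variants above transfer data in bus (byte-stream) order
 * rather than host order.  The MEM and IO spaces are little-endian
 * while the CPU is big-endian, so those cases compensate with
 * htole16()/le16toh() and htole32()/le32toh(); the remaining spaces
 * need no conversion and behave like the plain routines.
 */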

#if defined(MIPS3)
u_int64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t o)
{
	u_int64_t reg;
#ifdef MACE_NEEDS_DELAYS
	int s;
#endif

	/* see if we're properly aligned */
	KASSERT((o & 7) == 0);

	switch (tag) {
	case SGIMIPS_BUS_SPACE_MACE:
#ifdef MACE_NEEDS_DELAYS
		s = splhigh();
		delay(10);
#endif
		reg = mips3_ld((volatile uint64_t *)(vaddr_t)(bsh + o));
#ifdef MACE_NEEDS_DELAYS
		delay(10);
		splx(s);
#endif
		break;
	default:
		reg = mips3_ld((volatile uint64_t *)(vaddr_t)(bsh + o));
		break;
	}
	return reg;
}

void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t o, u_int64_t v)
{
#ifdef MACE_NEEDS_DELAYS
	int s;
#endif

	/* see if we're properly aligned */
	KASSERT((o & 7) == 0);

	switch (tag) {
	case SGIMIPS_BUS_SPACE_MACE:
#ifdef MACE_NEEDS_DELAYS
		s = splhigh();
		delay(10);
#endif
		mips3_sd((volatile uint64_t *)(vaddr_t)(bsh + o), v);
#ifdef MACE_NEEDS_DELAYS
		delay(10);
		splx(s);
#endif
		break;
	default:
		mips3_sd((volatile uint64_t *)(vaddr_t)(bsh + o), v);
		break;
	}
}
#endif /* MIPS3 */

int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size,
	      int flags, bus_space_handle_t *bshp)
{
	int cacheable = flags & BUS_SPACE_MAP_CACHEABLE;

	if (cacheable)
		*bshp = MIPS_PHYS_TO_KSEG0(bpa);
	else
		*bshp = MIPS_PHYS_TO_KSEG1(bpa);

	/* XXX O2 */
	if (bpa > 0x80000000 && bpa < 0x82000000)
		*bshp = MIPS_PHYS_TO_KSEG1(MACE_PCI_LOW_MEMORY +
		    (bpa & 0xfffffff));
	if (bpa < 0x00010000)
		*bshp = MIPS_PHYS_TO_KSEG1(MACE_PCI_LOW_IO + bpa);

	return 0;
}
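
/*
 * Worked example of the O2 special cases above, as the code reads:
 * a PCI memory address such as 0x80100000 falls inside the
 * 0x80000000..0x82000000 window and is rewritten to
 * KSEG1 + MACE_PCI_LOW_MEMORY + 0x0100000, while a PCI I/O address
 * below 0x00010000 (say 0x2000) becomes
 * KSEG1 + MACE_PCI_LOW_IO + 0x2000.
 */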

int
bus_space_alloc(bus_space_tag_t t, bus_addr_t rstart, bus_addr_t rend,
		bus_size_t size, bus_size_t alignment, bus_size_t boundary,
		int flags, bus_addr_t *bpap, bus_space_handle_t *bshp)
{
	panic("bus_space_alloc: not implemented");
}

void
bus_space_free(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	panic("bus_space_free: not implemented");
}

void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	return;
}

int
bus_space_subregion(bus_space_tag_t t, bus_space_handle_t bsh,
		    bus_size_t offset, bus_size_t size,
		    bus_space_handle_t *nbshp)
{

	*nbshp = bsh + offset;
	return 0;
}

void *
bus_space_vaddr(bus_space_tag_t t, bus_space_handle_t bsh)
{
	switch (t) {
	case SGIMIPS_BUS_SPACE_NORMAL:
		return ((void *)bsh);

	case SGIMIPS_BUS_SPACE_HPC:
		panic("bus_space_vaddr not supported on HPC space!");

	case SGIMIPS_BUS_SPACE_MEM:
		return ((void *)bsh);

	case SGIMIPS_BUS_SPACE_IO:
		panic("bus_space_vaddr not supported on I/O space!");

	default:
		panic("no bus tag");
	}
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
		   bus_size_t maxsegsz, bus_size_t boundary, int flags,
		   bus_dmamap_t *dmamp)
{
	struct sgimips_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct sgimips_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return ENOMEM;

	memset(mapstore, 0, mapsize);
	map = (struct sgimips_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_vmspace = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	free(map, M_DMAMAP);
}

extern	paddr_t kvtophys(vaddr_t);		/* XXX */

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
			struct vmspace *vm, int flags, vaddr_t *lastaddrp,
			int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t lastaddr, baddr, bmask;
	paddr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, &curaddr);
		else
			curaddr = kvtophys(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX Better return value here? */

	return 0;
}
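
/*
 * Boundary arithmetic above, by example: _dm_boundary must be a power
 * of two for bmask = ~(boundary - 1) to work.  With a 64KB boundary
 * and curaddr = 0x1234f000, baddr = (0x1234f000 + 0x10000) & ~0xffff
 * = 0x12350000, so sgsize is clipped to 0x1000 and the segment ends
 * exactly at the boundary.
 */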

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
		 bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(map, buf, buflen,
	    vm, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (buf >= (void *)MIPS_KSEG1_START &&
		    buf < (void *)MIPS_KSEG2_START)
			map->_dm_flags |= SGIMIPS_DMAMAP_COHERENT;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
		      int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
		     int flags)
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = uio->uio_vmspace;
	}
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
		     int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~SGIMIPS_DMAMAP_COHERENT;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R3000 version.
 */
void
_bus_dmamap_sync_mips1(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
		       bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr;
	int i;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_mips1: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_mips1: bad offset %"PRIxPSIZE
		    " (map size is %"PRIxPSIZE")", offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_mips1: bad length");
#endif

	/*
	 * The R3000 cache is write-through.  Therefore, we only need
	 * to drain the write buffer on PREWRITE.  The cache is not
	 * coherent, however, so we need to invalidate the data cache
	 * on PREREAD (should we do it POSTREAD instead?).
	 *
	 * POSTWRITE (and POSTREAD, currently) are noops.
	 */

	if (ops & BUS_DMASYNC_PREWRITE) {
		/*
		 * Flush the write buffer.
		 */
		wbflush();
	}

	/*
	 * If we're not doing PREREAD, nothing more to do.
	 */
	if ((ops & BUS_DMASYNC_PREREAD) == 0)
		return;

	/*
	 * No cache invalidation is necessary if the DMA map covers
	 * COHERENT DMA-safe memory (which is mapped un-cached).
	 */
	if (map->_dm_flags & SGIMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If we are going to hit something as large or larger
	 * than the entire data cache, just nail the whole thing.
	 *
	 * NOTE: Even though this is `wbinv_all', since the cache is
	 * write-through, it just invalidates it.
	 */
	if (len >= mips_pdcache_size) {
		mips_dcache_wbinv_all();
		return;
	}

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i].ds_addr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync_mips1: flushing segment %d "
		    "(0x%lx..0x%lx) ...", i, addr + offset,
		    addr + offset + minlen - 1);
#endif
		mips_dcache_inv_range(
		    MIPS_PHYS_TO_KSEG0(addr + offset), minlen);
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This is the R4x00/R5k version.
 */
void
_bus_dmamap_sync_mips3(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
		       bus_size_t len, int ops)
{
	bus_size_t minlen;
	vaddr_t vaddr, start, end, preboundary, firstboundary, lastboundary;
	int i, useindex;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync_mips3: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync_mips3: bad offset %"PRIxPSIZE
		    " (map size is %"PRIxPSIZE")", offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync_mips3: bad length");
#endif

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	/*
	 * No cache flushes are necessary if we're only doing
	 * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
	 */
	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (ops == 0)
		return;

	/*
	 * If the mapping is of COHERENT DMA-safe memory, no cache
	 * flush is necessary.
	 */
	if (map->_dm_flags & SGIMIPS_DMAMAP_COHERENT)
		return;

	/*
	 * If the mapping belongs to the kernel, or it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
	    map->_dm_vmspace == curproc->p_vmspace))
		useindex = 0;
	else
		useindex = 1;

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		vaddr = map->dm_segs[i]._ds_vaddr;

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync_mips3: flushing segment %d "
		    "(0x%lx+%lx, 0x%lx+0x%lx) (olen = %ld)...", i,
		    vaddr, offset, vaddr, offset + minlen - 1, len);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
		if (__predict_false(useindex)) {
			mips_dcache_wbinv_range_index(vaddr + offset, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			offset = 0;
			len -= minlen;
			continue;
		}

		/*
		 * The code that follows is more correct than that in
		 * mips/bus_dma.c.
		 */
		start = vaddr + offset;
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(start, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			end = start + minlen;
			preboundary = start & ~mips_dcache_align_mask;
			firstboundary = (start + mips_dcache_align_mask)
			    & ~mips_dcache_align_mask;
			lastboundary = end & ~mips_dcache_align_mask;
			if (preboundary < start && preboundary < lastboundary)
				mips_dcache_wbinv_range(preboundary,
				    mips_dcache_align);
			if (firstboundary < lastboundary)
				mips_dcache_inv_range(firstboundary,
				    lastboundary - firstboundary);
			if (lastboundary < end)
				mips_dcache_wbinv_range(lastboundary,
				    mips_dcache_align);
			break;

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(start, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
		offset = 0;
		len -= minlen;
	}
}
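
/*
 * The PREREAD case above, by example: with 32-byte cache lines,
 * start = 0x1010 and minlen = 0x30 (end = 0x1040) give
 * preboundary = 0x1000, firstboundary = 0x1020 and
 * lastboundary = 0x1040.  The partially-covered leading line is
 * written back and invalidated so the unrelated bytes it holds are
 * not lost, the wholly-covered lines are simply invalidated, and
 * since the end happens to be line-aligned here, no trailing
 * write-back is needed.
 */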

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
		  bus_size_t boundary, bus_dma_segment_t *segs,
		  int nsegs, int *rsegs, int flags)
{
	extern paddr_t avail_start, avail_end;

	return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
					       segs, nsegs, rsegs, flags,
					       avail_start /*low*/,
					       avail_end - PAGE_SIZE /*high*/));
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	_bus_dmamem_free_common(t, segs, nsegs);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
		size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
	u_int pmapflags;

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.
	 */

	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	pmapflags = VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED;
	if (flags & BUS_DMA_COHERENT)
		pmapflags |= PMAP_NOCACHE;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    pmapflags);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (void *)MIPS_KSEG0_START &&
	    kva < (void *)MIPS_KSEG2_START)
		return;

	_bus_dmamem_unmap_common(t, kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
		 off_t off, int prot, int flags)
{
	bus_addr_t rv;

	rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
	if (rv == (bus_addr_t)-1)
		return (-1);

#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
	return (mips_btop(rv | PMAP_NOCACHE));
#else
	return (mips_btop(rv));
#endif
}

paddr_t
bus_space_mmap(bus_space_tag_t space, bus_addr_t addr, off_t off,
	int prot, int flags)
{

	if (flags & BUS_SPACE_MAP_CACHEABLE) {
		return mips_btop(MIPS_KSEG0_TO_PHYS(addr) + off);
	} else
#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
		return mips_btop((MIPS_KSEG1_TO_PHYS(addr) + off)
		    | PMAP_NOCACHE);
#else
		return mips_btop((MIPS_KSEG1_TO_PHYS(addr) + off));
#endif
}
1219