xref: /netbsd/sys/arch/arc/arc/bus_dma.c (revision 6550d01e)
1 /*	$NetBSD: bus_dma.c,v 1.31 2010/11/12 13:18:56 uebayasi Exp $	*/
2 /*	NetBSD: bus_dma.c,v 1.20 2000/01/10 03:24:36 simonb Exp 	*/
3 
4 /*-
5  * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10  * NASA Ames Research Center.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.31 2010/11/12 13:18:56 uebayasi Exp $");
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/mbuf.h>
40 #include <sys/device.h>
41 #include <sys/proc.h>
42 
43 #include <uvm/uvm.h>
44 
45 #include <mips/cache.h>
46 
47 #define _ARC_BUS_DMA_PRIVATE
48 #include <machine/bus.h>
49 
50 paddr_t	kvtophys(vaddr_t);	/* XXX */
51 
52 static int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t,
53 		    void *, bus_size_t, struct vmspace *, int, paddr_t *,
54 		    int *, int);
55 
56 extern paddr_t avail_start, avail_end;	/* from pmap.c */
57 
58 void
59 _bus_dma_tag_init(bus_dma_tag_t t)
60 {
61 
62 	t->dma_offset = 0;
63 
64 	t->_dmamap_create = _bus_dmamap_create;
65 	t->_dmamap_destroy = _bus_dmamap_destroy;
66 	t->_dmamap_load = _bus_dmamap_load;
67 	t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf;
68 	t->_dmamap_load_uio = _bus_dmamap_load_uio;
69 	t->_dmamap_load_raw = _bus_dmamap_load_raw;
70 	t->_dmamap_unload = _bus_dmamap_unload;
71 	t->_dmamap_sync = _bus_dmamap_sync;
72 	t->_dmamem_alloc = _bus_dmamem_alloc;
73 	t->_dmamem_free = _bus_dmamem_free;
74 	t->_dmamem_map = _bus_dmamem_map;
75 	t->_dmamem_unmap = _bus_dmamem_unmap;
76 	t->_dmamem_mmap = _bus_dmamem_mmap;
77 }
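
/*
 * A bus front-end normally calls _bus_dma_tag_init() on its tag and then
 * overrides dma_offset (and any method pointers it needs to).  A rough
 * sketch, using hypothetical names and assuming the tag structure declared
 * in <machine/bus.h>:
 *
 *	static struct arc_bus_dma_tag mybus_dma_tag;
 *
 *	_bus_dma_tag_init(&mybus_dma_tag);
 *	mybus_dma_tag.dma_offset = MYBUS_DMA_OFFSET;
 */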
78 
79 /*
80  * Common function for DMA map creation.  May be called by bus-specific
81  * DMA map creation functions.
82  */
83 int
84 _bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
85      bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
86 {
87 	struct arc_bus_dmamap *map;
88 	void *mapstore;
89 	size_t mapsize;
90 
91 	/*
92 	 * Allocate and initialize the DMA map.  The end of the map
93 	 * is a variable-sized array of segments, so we allocate enough
94 	 * room for them in one shot.
95 	 *
96 	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
97 	 * of ALLOCNOW notifies others that we've reserved these resources,
98 	 * and they are not to be freed.
99 	 *
100 	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
101 	 * the (nsegments - 1).
102 	 */
103 	mapsize = sizeof(struct arc_bus_dmamap) +
104 	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
105 	if ((mapstore = malloc(mapsize, M_DMAMAP,
106 	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK) | M_ZERO)) == NULL)
107 		return ENOMEM;
108 
109 	map = (struct arc_bus_dmamap *)mapstore;
110 	map->_dm_size = size;
111 	map->_dm_segcnt = nsegments;
112 	map->_dm_maxmaxsegsz = maxsegsz;
113 	map->_dm_boundary = boundary;
114 	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
115 	map->_dm_vmspace = NULL;
116 	map->dm_maxsegsz = maxsegsz;
117 	map->dm_mapsize = 0;		/* no valid mappings */
118 	map->dm_nsegs = 0;
119 
120 	*dmamp = map;
121 	return 0;
122 }
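
/*
 * Drivers reach this through bus_dmamap_create(9); a minimal sketch with
 * a hypothetical softc:
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap);
 */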
123 
124 /*
125  * Common function for DMA map destruction.  May be called by bus-specific
126  * DMA map destruction functions.
127  */
128 void
129 _bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
130 {
131 
132 	free(map, M_DMAMAP);
133 }
134 
135 /*
136  * Utility function to load a linear buffer.  lastaddrp holds state
137  * between invocations (for multiple-buffer loads).  segp contains
138  * the starting segment on entrance, and the ending segment on exit.
139  * first indicates if this is the first invocation of this function.
140  */
141 static int
142 _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
143     bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
144     int *segp, int first)
145 {
146 	bus_size_t sgsize;
147 	bus_addr_t baddr, bmask;
148 	paddr_t curaddr, lastaddr;
149 	vaddr_t vaddr = (vaddr_t)buf;
150 	int seg;
151 
152 	lastaddr = *lastaddrp;
153 	bmask = ~(map->_dm_boundary - 1);
154 
155 	for (seg = *segp; buflen > 0 ; ) {
156 		/*
157 		 * Get the physical address for this segment.
158 		 */
159 		if (!VMSPACE_IS_KERNEL_P(vm))
160 			(void)pmap_extract(vm_map_pmap(&vm->vm_map),
161 			    vaddr, &curaddr);
162 		else
163 			curaddr = kvtophys(vaddr);
164 
165 		/*
166 		 * Compute the segment size, and adjust counts.
167 		 */
168 		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
169 		if (buflen < sgsize)
170 			sgsize = buflen;
171 
172 		/*
173 		 * Make sure we don't cross any boundaries.
174 		 */
175 		if (map->_dm_boundary > 0) {
176 			baddr = (curaddr + map->_dm_boundary) & bmask;
177 			if (sgsize > (baddr - curaddr))
178 				sgsize = (baddr - curaddr);
179 		}
180 
181 		/*
182 		 * Insert chunk into a segment, coalescing with
183 		 * the previous segment if possible.
184 		 */
185 		if (first) {
186 			map->dm_segs[seg].ds_addr = curaddr + t->dma_offset;
187 			map->dm_segs[seg].ds_len = sgsize;
188 			map->dm_segs[seg]._ds_vaddr = vaddr;
189 			map->dm_segs[seg]._ds_paddr = curaddr;
190 			first = 0;
191 		} else {
192 			if (curaddr == lastaddr &&
193 			    (map->dm_segs[seg].ds_len + sgsize) <=
194 			     map->dm_maxsegsz &&
195 			    (map->_dm_boundary == 0 ||
196 			     (map->dm_segs[seg]._ds_paddr & bmask) ==
197 			     (curaddr & bmask)))
198 				map->dm_segs[seg].ds_len += sgsize;
199 			else {
200 				if (++seg >= map->_dm_segcnt)
201 					break;
202 				map->dm_segs[seg].ds_addr =
203 				    curaddr + t->dma_offset;
204 				map->dm_segs[seg].ds_len = sgsize;
205 				map->dm_segs[seg]._ds_vaddr = vaddr;
206 				map->dm_segs[seg]._ds_paddr = curaddr;
207 			}
208 		}
209 
210 		lastaddr = curaddr + sgsize;
211 		vaddr += sgsize;
212 		buflen -= sgsize;
213 	}
214 
215 	*segp = seg;
216 	*lastaddrp = lastaddr;
217 
218 	/*
219 	 * Did we fit?
220 	 */
221 	if (buflen != 0)
222 		return EFBIG;		/* XXX better return value here? */
223 
224 	return 0;
225 }
226 
227 /*
228  * Common function for loading a direct-mapped DMA map with a linear
229  * buffer.
230  */
231 int
232 _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
233     bus_size_t buflen, struct proc *p, int flags)
234 {
235 	paddr_t lastaddr;
236 	int seg, error;
237 	struct vmspace *vm;
238 
239 	/*
240 	 * Make sure that on error condition we return "no valid mappings".
241 	 */
242 	map->dm_mapsize = 0;
243 	map->dm_nsegs = 0;
244 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
245 
246 	if (buflen > map->_dm_size)
247 		return EINVAL;
248 
249 	if (p != NULL) {
250 		vm = p->p_vmspace;
251 	} else {
252 		vm = vmspace_kernel();
253 	}
254 
255 	seg = 0;
256 	error = _bus_dmamap_load_buffer(t, map, buf, buflen,
257 	    vm, flags, &lastaddr, &seg, 1);
258 	if (error == 0) {
259 		map->dm_mapsize = buflen;
260 		map->dm_nsegs = seg + 1;
261 		map->_dm_vmspace = vm;
262 
263 		/*
264 		 * For linear buffers, we support marking the mapping
265 		 * as COHERENT.
266 		 *
267 		 * XXX Check TLB entries for cache-inhibit bits?
268 		 */
269 		if (buf >= (void *)MIPS_KSEG1_START &&
270 		    buf < (void *)MIPS_KSEG2_START)
271 			map->_dm_flags |= ARC_DMAMAP_COHERENT;
272 	}
273 	return error;
274 }
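
/*
 * The usual bus_dma(9) transfer sequence built on this looks roughly like
 * the following (hypothetical driver code, device-write case):
 *
 *	bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the transfer ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 */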
275 
276 /*
277  * Like _bus_dmamap_load(), but for mbufs.
278  */
279 int
280 _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
281     int flags)
282 {
283 	paddr_t lastaddr;
284 	int seg, error, first;
285 	struct mbuf *m;
286 
287 	/*
288 	 * Make sure that on error condition we return "no valid mappings."
289 	 */
290 	map->dm_mapsize = 0;
291 	map->dm_nsegs = 0;
292 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
293 
294 #ifdef DIAGNOSTIC
295 	if ((m0->m_flags & M_PKTHDR) == 0)
296 		panic("_bus_dmamap_load_mbuf: no packet header");
297 #endif
298 
299 	if (m0->m_pkthdr.len > map->_dm_size)
300 		return EINVAL;
301 
302 	first = 1;
303 	seg = 0;
304 	error = 0;
305 	for (m = m0; m != NULL && error == 0; m = m->m_next) {
306 		if (m->m_len == 0)
307 			continue;
308 		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
309 		    vmspace_kernel(), flags, &lastaddr, &seg, first);
310 		first = 0;
311 	}
312 	if (error == 0) {
313 		map->dm_mapsize = m0->m_pkthdr.len;
314 		map->dm_nsegs = seg + 1;
315 		map->_dm_vmspace = vmspace_kernel();	/* always kernel */
316 	}
317 	return error;
318 }
319 
320 /*
321  * Like _bus_dmamap_load(), but for uios.
322  */
323 int
324 _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
325     int flags)
326 {
327 	paddr_t lastaddr;
328 	int seg, i, error, first;
329 	bus_size_t minlen, resid;
330 	struct iovec *iov;
331 	void *addr;
332 
333 	/*
334 	 * Make sure that on error condition we return "no valid mappings."
335 	 */
336 	map->dm_mapsize = 0;
337 	map->dm_nsegs = 0;
338 	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
339 
340 	resid = uio->uio_resid;
341 	iov = uio->uio_iov;
342 
343 	first = 1;
344 	seg = 0;
345 	error = 0;
346 	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
347 		/*
348 		 * Now at the first iovec to load.  Load each iovec
349 		 * until we have exhausted the residual count.
350 		 */
351 		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
352 		addr = (void *)iov[i].iov_base;
353 
354 		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
355 		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
356 		first = 0;
357 
358 		resid -= minlen;
359 	}
360 	if (error == 0) {
361 		map->dm_mapsize = uio->uio_resid;
362 		map->dm_nsegs = seg + 1;
363 		map->_dm_vmspace = uio->uio_vmspace;
364 	}
365 	return error;
366 }
367 
368 /*
369  * Like _bus_dmamap_load(), but for raw memory.
370  */
371 int
372 _bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
373     int nsegs, bus_size_t size, int flags)
374 {
375 
376 	panic("_bus_dmamap_load_raw: not implemented");
377 }
378 
379 /*
380  * Common function for unloading a DMA map.  May be called by
381  * chipset-specific DMA map unload functions.
382  */
383 void
384 _bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
385 {
386 
387 	/*
388 	 * No resources to free; just mark the mappings as
389 	 * invalid.
390 	 */
391 	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
392 	map->dm_mapsize = 0;
393 	map->dm_nsegs = 0;
394 	map->_dm_flags &= ~ARC_DMAMAP_COHERENT;
395 }
396 
397 /*
398  * Common function for DMA map synchronization.  May be called by
399  * chipset-specific DMA map synchronization functions.
400  *
401  * This version works with the virtually-indexed, write-back cache
402  * found in the MIPS-3 CPUs available in ARC machines.
403  */
404 void
405 _bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
406     bus_size_t len, int ops)
407 {
408 	bus_size_t minlen;
409 	bus_addr_t addr, start, end, preboundary, firstboundary, lastboundary;
410 	int i, useindex;
411 
412 	/*
413 	 * Mixing PRE and POST operations is not allowed.
414 	 */
415 	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
416 	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
417 		panic("_bus_dmamap_sync: mix PRE and POST");
418 
419 #ifdef DIAGNOSTIC
420 	if (offset >= map->dm_mapsize)
421 		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
422 		      offset, map->dm_mapsize);
423 	if (len == 0 || (offset + len) > map->dm_mapsize)
424 		panic("_bus_dmamap_sync: bad length");
425 #endif
426 
427 	/*
428 	 * Since we're dealing with a virtually-indexed, write-back
429 	 * cache, we need to do the following things:
430 	 *
431 	 *	PREREAD -- Invalidate D-cache.  Note we might have
432 	 *	to also write-back here if we have to use an Index
433 	 *	op, or if the buffer start/end is not cache-line aligned.
434 	 *
435 	 *	PREWRITE -- Write-back the D-cache.  If we have to use
436 	 *	an Index op, we also have to invalidate.  Note that if
437 	 *	we are doing PREREAD|PREWRITE, we can collapse everything
438 	 *	into a single op.
439 	 *
440 	 *	POSTREAD -- Nothing.
441 	 *
442 	 *	POSTWRITE -- Nothing.
443 	 */
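
	/*
	 * For example, with a (hypothetical) 32-byte cache line, a PREREAD
	 * of 0x58 bytes starting at 0x1010 write-back-invalidates the
	 * partial lines at 0x1000 and 0x1060 (they may hold dirty data
	 * belonging to adjacent buffers) and simply invalidates the full
	 * lines 0x1020-0x105f; the PREREAD case below computes exactly
	 * those three ranges.
	 */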
444 
445 	/*
446 	 * Flush the write buffer.
447 	 * XXX Is this always necessary?
448 	 */
449 	wbflush();
450 
451 	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
452 	if (ops == 0)
453 		return;
454 
455 	/*
456 	 * If the mapping is of COHERENT DMA-safe memory, no cache
457 	 * flush is necessary.
458 	 */
459 	if (map->_dm_flags & ARC_DMAMAP_COHERENT)
460 		return;
461 
462 	/*
463 	 * If the mapping belongs to the kernel, or it belongs
464 	 * to the currently-running process (XXX actually, vmspace),
465 	 * then we can use Hit ops.  Otherwise, Index ops.
466 	 *
467 	 * This should be true the vast majority of the time.
468 	 */
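	/*
	 * (Hit ops take a virtual address and only touch a line if that
	 * address actually hits in the cache, so the buffer must be mapped
	 * in the address space we are currently running in; Index ops
	 * select lines by cache index alone and thus work for any vmspace.
	 * This code always uses the write-back-and-invalidate flavour in
	 * the Index case, see below.)
	 */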
469 	if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
470 	    map->_dm_vmspace == curproc->p_vmspace))
471 		useindex = 0;
472 	else
473 		useindex = 1;
474 
475 	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
476 		/* Find the beginning segment. */
477 		if (offset >= map->dm_segs[i].ds_len) {
478 			offset -= map->dm_segs[i].ds_len;
479 			continue;
480 		}
481 
482 		/*
483 		 * Now at the first segment to sync; nail
484 		 * each segment until we have exhausted the
485 		 * length.
486 		 */
487 		minlen = len < map->dm_segs[i].ds_len - offset ?
488 		    len : map->dm_segs[i].ds_len - offset;
489 
490 		addr = map->dm_segs[i]._ds_vaddr;
491 
492 #ifdef BUS_DMA_DEBUG
493 		printf("bus_dmamap_sync: flushing segment %d "
494 		    "(0x%lx..0x%lx) ...", i, addr + offset,
495 		    addr + offset + minlen - 1);
496 #endif
497 
498 		/*
499 		 * If we are forced to use Index ops, it's always a
500 		 * Write-back,Invalidate, so just do one test.
501 		 */
502 		if (__predict_false(useindex)) {
503 			mips_dcache_wbinv_range_index(addr + offset, minlen);
504 #ifdef BUS_DMA_DEBUG
505 			printf("\n");
506 #endif
507 			offset = 0;
508 			len -= minlen;
509 			continue;
510 		}
511 
512 		start = addr + offset;
513 		switch (ops) {
514 		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
515 			mips_dcache_wbinv_range(start, minlen);
516 			break;
517 
518 		case BUS_DMASYNC_PREREAD:
519 			end = start + minlen;
520 			preboundary = start & ~mips_dcache_align_mask;
521 			firstboundary = (start + mips_dcache_align_mask)
522 			    & ~mips_dcache_align_mask;
523 			lastboundary = end & ~mips_dcache_align_mask;
524 			if (preboundary < start && preboundary < lastboundary)
525 				mips_dcache_wbinv_range(preboundary,
526 				    mips_dcache_align);
527 			if (firstboundary < lastboundary)
528 				mips_dcache_inv_range(firstboundary,
529 				    lastboundary - firstboundary);
530 			if (lastboundary < end)
531 				mips_dcache_wbinv_range(lastboundary,
532 				    mips_dcache_align);
533 			break;
534 
535 		case BUS_DMASYNC_PREWRITE:
536 			mips_dcache_wb_range(start, minlen);
537 			break;
538 		}
539 #ifdef BUS_DMA_DEBUG
540 		printf("\n");
541 #endif
542 		offset = 0;
543 		len -= minlen;
544 	}
545 }
546 
547 /*
548  * Common function for DMA-safe memory allocation.  May be called
549  * by bus-specific DMA memory allocation functions.
550  */
551 int
552 _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
553     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
554     int flags)
555 {
556 
557 	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
558 	    segs, nsegs, rsegs, flags, avail_start, trunc_page(avail_end));
559 }
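
/*
 * A typical consumer pairs this with _bus_dmamem_map() and a map load,
 * e.g. (hypothetical driver code):
 *
 *	bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_COHERENT);
 *	bus_dmamap_load(t, map, kva, size, NULL, BUS_DMA_NOWAIT);
 */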
560 
561 /*
562  * Allocate physical memory from the given physical address range.
563  * Called by DMA-safe memory allocation methods.
564  */
565 int
566 _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
567     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
568     int flags, paddr_t low, paddr_t high)
569 {
570 	paddr_t curaddr, lastaddr;
571 	struct vm_page *m;
572 	struct pglist mlist;
573 	int curseg, error;
574 
575 	/* Always round the size. */
576 	size = round_page(size);
577 
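	/*
	 * XXX The next assignment discards the caller-supplied upper bound
	 * XXX and always clamps the allocation range below avail_end.
	 */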
578 	high = avail_end - PAGE_SIZE;
579 
580 	/*
581 	 * Allocate pages from the VM system.
582 	 */
583 	error = uvm_pglistalloc(size, low, high, alignment, boundary,
584 	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
585 	if (error)
586 		return error;
587 
588 	/*
589 	 * Compute the location, size, and number of segments actually
590 	 * returned by the VM code.
591 	 */
592 	m = TAILQ_FIRST(&mlist);
593 	curseg = 0;
594 	lastaddr = segs[curseg]._ds_paddr = VM_PAGE_TO_PHYS(m);
595 	segs[curseg].ds_addr = segs[curseg]._ds_paddr + t->dma_offset;
596 	segs[curseg].ds_len = PAGE_SIZE;
597 	m = TAILQ_NEXT(m, pageq.queue);
598 
599 	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
600 		curaddr = VM_PAGE_TO_PHYS(m);
601 #ifdef DIAGNOSTIC
602 		if (curaddr < avail_start || curaddr >= high) {
603 			printf("uvm_pglistalloc returned non-sensical"
604 			    " address 0x%llx\n", (long long)curaddr);
605 			panic("_bus_dmamem_alloc_range");
606 		}
607 #endif
608 		if (curaddr == (lastaddr + PAGE_SIZE))
609 			segs[curseg].ds_len += PAGE_SIZE;
610 		else {
611 			curseg++;
612 			segs[curseg].ds_addr = curaddr + t->dma_offset;
613 			segs[curseg].ds_len = PAGE_SIZE;
614 			segs[curseg]._ds_paddr = curaddr;
615 		}
616 		lastaddr = curaddr;
617 	}
618 
619 	*rsegs = curseg + 1;
620 
621 	return 0;
622 }
623 
624 /*
625  * Common function for freeing DMA-safe memory.  May be called by
626  * bus-specific DMA memory free functions.
627  */
628 void
629 _bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
630 {
631 	struct vm_page *m;
632 	bus_addr_t addr;
633 	struct pglist mlist;
634 	int curseg;
635 
636 	/*
637 	 * Build a list of pages to free back to the VM system.
638 	 */
639 	TAILQ_INIT(&mlist);
640 	for (curseg = 0; curseg < nsegs; curseg++) {
641 		for (addr = segs[curseg]._ds_paddr;
642 		    addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len);
643 		    addr += PAGE_SIZE) {
644 			m = PHYS_TO_VM_PAGE(addr);
645 			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
646 		}
647 	}
648 
649 	uvm_pglistfree(&mlist);
650 }
651 
652 /*
653  * Common function for mapping DMA-safe memory.  May be called by
654  * bus-specific DMA memory map functions.
655  */
656 int
657 _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
658     size_t size, void **kvap, int flags)
659 {
660 	vaddr_t va;
661 	bus_addr_t addr;
662 	int curseg;
663 	const uvm_flag_t kmflags =
664 	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
665 
666 	/*
667 	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
668 	 * TLB thrashing.
669 	 */
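	/*
	 * (KSEG0 accesses go through the cache while KSEG1 is the uncached
	 * window, hence the BUS_DMA_COHERENT distinction below.)
	 */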
670 	if (nsegs == 1) {
671 		if (flags & BUS_DMA_COHERENT)
672 			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0]._ds_paddr);
673 		else
674 			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0]._ds_paddr);
675 		return 0;
676 	}
677 
678 	size = round_page(size);
679 
680 	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
681 
682 	if (va == 0)
683 		return ENOMEM;
684 
685 	*kvap = (void *)va;
686 
687 	for (curseg = 0; curseg < nsegs; curseg++) {
688 		segs[curseg]._ds_vaddr = va;
689 		for (addr = segs[curseg]._ds_paddr;
690 		    addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len);
691 		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
692 			if (size == 0)
693 				panic("_bus_dmamem_map: size botch");
694 			pmap_enter(pmap_kernel(), va, addr,
695 			    VM_PROT_READ | VM_PROT_WRITE,
696 			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
697 
698 			/* XXX Do something about COHERENT here. */
699 		}
700 	}
701 	pmap_update(pmap_kernel());
702 
703 	return 0;
704 }
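
/*
 * Note that the multi-segment (KSEG2) path in _bus_dmamem_map() above
 * enters ordinary cached mappings; BUS_DMA_COHERENT is not honoured there
 * (see the XXX), so such memory still relies on bus_dmamap_sync().
 */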
705 
706 /*
707  * Common function for unmapping DMA-safe memory.  May be called by
708  * bus-specific DMA memory unmapping functions.
709  */
710 void
711 _bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
712 {
713 
714 #ifdef DIAGNOSTIC
715 	if ((u_long)kva & PGOFSET)
716 		panic("_bus_dmamem_unmap");
717 #endif
718 
719 	/*
720 	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
721 	 * not in KSEG2).
722 	 */
723 	if (kva >= (void *)MIPS_KSEG0_START &&
724 	    kva < (void *)MIPS_KSEG2_START)
725 		return;
726 
727 	size = round_page(size);
728 	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
729 	pmap_update(pmap_kernel());
730 	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
731 }
732 
733 /*
734  * Common function for mmap(2)'ing DMA-safe memory.  May be called by
735  * bus-specific DMA mmap(2)'ing functions.
736  */
737 paddr_t
738 _bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
739     off_t off, int prot, int flags)
740 {
741 	int i;
742 
743 	for (i = 0; i < nsegs; i++) {
744 #ifdef DIAGNOSTIC
745 		if (off & PGOFSET)
746 			panic("_bus_dmamem_mmap: offset unaligned");
747 		if (segs[i]._ds_paddr & PGOFSET)
748 			panic("_bus_dmamem_mmap: segment unaligned");
749 		if (segs[i].ds_len & PGOFSET)
750 			panic("_bus_dmamem_mmap: segment size not multiple"
751 			    " of page size");
752 #endif
753 		if (off >= segs[i].ds_len) {
754 			off -= segs[i].ds_len;
755 			continue;
756 		}
757 
758 		return mips_btop(segs[i]._ds_paddr + off);
759 	}
760 
761 	/* Page not found. */
762 	return -1;
763 }
764