/* $OpenBSD: bus_dma.c,v 1.23 2008/06/26 05:42:08 ray Exp $ */
/* $NetBSD: bus_dma.c,v 1.40 2000/07/17 04:47:56 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct(bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    paddr_t *, int *, int);

extern paddr_t avail_start, avail_end;	/* from pmap.c */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
		return (ENOMEM);

	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;
	map->_dm_window = NULL;

	*dmamp = map;
	return (0);
}
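
/*
 * Illustrative sketch (not part of the original file): a driver normally
 * reaches the routine above through the MI bus_dmamap_create(9) entry
 * point.  "sc->sc_dmat" stands in for whatever bus_dma_tag_t the parent
 * bus handed to the hypothetical driver.
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
 *	    BUS_DMA_NOWAIT, &map) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, map);
 */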

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DEVBUF);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct(t, map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			pmap_extract(p->p_vmspace->vm_map.pmap, vaddr,
				&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back into SGMAPs.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		if (map->_dm_maxsegsz < sgsize)
			sgsize = map->_dm_maxsegsz;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
			    curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
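
/*
 * Added commentary (illustrative, with made-up numbers): the window check
 * above operates on raw physical addresses.  Suppose a chipset whose
 * direct-mapped window starts at bus address _wbase = 0x40000000 and
 * covers _wsize = 0x40000000 bytes of host memory.  A buffer page at
 * physical 0x00200000 then becomes bus address 0x40200000 after the
 * "curaddr |= t->_wbase" step, while a page at or above physical
 * 0x40000000 fails the _wsize test and the EINVAL return lets a chained
 * SGMAP window take over the load.
 */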

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}
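
/*
 * Illustrative sketch (hypothetical driver code, not part of this file)
 * of the caller-side sequence that ends up in the load, sync and unload
 * functions in this file:
 *
 *	if (bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT) != 0)
 *		return (EIO);
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
 *	... start the transfer using map->dm_segs[] ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 */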

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer_direct(t, map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}
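
/*
 * Illustrative sketch (hypothetical network driver, not part of this
 * file): an outbound packet held in an mbuf chain is loaded in one call;
 * each mbuf is pushed through _bus_dmamap_load_buffer_direct() above, so
 * the map is assumed to have been created with enough segments for the
 * worst-case chain.
 *
 *	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT) != 0) {
 *		m_freem(m0);
 *		return (ENOBUFS);
 *	}
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREWRITE);
 */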

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio_direct: "
			    "USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct(t, map,
		    addr, minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, op)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int op;
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}
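
/*
 * Added commentary (not in the original source): Alpha systems keep the
 * caches coherent with DMA, so the only ordering hazard the sync
 * operations must close here is the CPU store buffer; hence the single
 * alpha_mb() regardless of which BUS_DMASYNC_* op was requested.
 * Callers still issue the usual PRE/POST pairing, e.g. (hypothetical
 * descriptor ring):
 *
 *	ring[i].addr = map->dm_segs[0].ds_addr;
 *	ring[i].len = map->dm_segs[0].ds_len;
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_ring_map,
 *	    i * sizeof(ring[0]), sizeof(ring[0]), BUS_DMASYNC_PREWRITE);
 */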

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	paddr_t low;
	paddr_t high;
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
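
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the usual bus_dmamem_alloc()/bus_dmamem_map() pairing that lands in the
 * routines above and in _bus_dmamem_map() below.  Asking for a single
 * segment lets the K0SEG fast path in _bus_dmamem_map() be used.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	caddr_t kva;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, &kva,
 *	    BUS_DMA_NOWAIT) != 0) {
 *		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 *		return (ENOMEM);
 *	}
 */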

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
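
/*
 * Added commentary (not in the original source): ALPHA_PHYS_TO_K0SEG()
 * simply offsets a physical address into the Alpha's direct-mapped kernel
 * segment, so the single-segment fast path above consumes no kernel
 * virtual address space and no page table entries.  _bus_dmamem_unmap()
 * below recognizes such addresses with a range check and leaves them
 * alone.
 */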

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
	    kva <= (caddr_t)ALPHA_K0SEG_END)
		return;

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (atop(segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}