/*	$OpenBSD: bus_dma.c,v 1.8 2001/12/08 02:24:05 art Exp $	*/
/*	$NetBSD: bus_dma.c,v 1.40 2000/07/17 04:47:56 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer_direct_common __P((bus_dma_tag_t,
	    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
	    paddr_t *, int *, int));

extern paddr_t avail_start, avail_end;	/* from pmap.c */

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct alpha_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct alpha_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct alpha_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	if (t->_boundary != 0 && t->_boundary < boundary)
		map->_dm_boundary = t->_boundary;
	else
		map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
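
/*
 * Example (illustrative sketch, not part of the original source):
 * drivers normally reach this function through the tag's create hook
 * via the bus_dmamap_create() interface, e.g.:
 *
 *	bus_dmamap_t map;
 *	int error;
 *
 *	error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS,
 *	    0, BUS_DMA_NOWAIT, &map);
 *
 * where "sc->sc_dmat" is a hypothetical softc member holding the
 * bus_dma_tag_t handed down at attach time.
 */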

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DEVBUF);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct_common(t, map, buf, buflen, p, flags,
    lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			pmap_extract(p->p_vmspace->vm_map.pmap, vaddr,
				&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that so the caller can fall back to SGMAPs.
		 */
		if (t->_wsize != 0 && curaddr >= t->_wsize)
			return (EINVAL);

		curaddr |= t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;
		if (map->_dm_maxsegsz < sgsize)
			sgsize = map->_dm_maxsegsz;

		/*
		 * Make sure we don't cross any boundaries.  baddr is
		 * the first boundary address above curaddr (e.g. with
		 * a 64KB boundary, bmask is ~0xffff and curaddr
		 * 0x1fc00 gives baddr 0x20000), so clamping sgsize to
		 * (baddr - curaddr) keeps the segment below it.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
			    curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
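
/*
 * Illustrative sketch (not in the original source): callers thread
 * state through lastaddrp/segp across calls, with "first" set only
 * for the first buffer, e.g. for a two-buffer load:
 *
 *	paddr_t lastaddr;
 *	int seg = 0, error;
 *
 *	error = _bus_dmamap_load_buffer_direct_common(t, map, buf0,
 *	    len0, p, flags, &lastaddr, &seg, 1);
 *	if (error == 0)
 *		error = _bus_dmamap_load_buffer_direct_common(t, map,
 *		    buf1, len1, p, flags, &lastaddr, &seg, 0);
 *
 * The mbuf and uio loaders below follow exactly this pattern.
 */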

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions; the
 * chipset's "direct-mapped" window base is taken from the tag
 * (t->_wbase, OR'd in above).
 */
int
_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer_direct_common(t, map, buf, buflen,
	    p, flags, &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load(t->_next_window, map, buf, buflen,
		    p, flags);
	}
	return (error);
}
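
/*
 * Example (hypothetical sketch): a chipset front-end with a
 * direct-mapped window wires this function into its DMA tag at
 * initialization time, along the lines of:
 *
 *	t->_wbase = DIRECT_WINDOW_BASE;
 *	t->_wsize = DIRECT_WINDOW_SIZE;
 *	t->_next_window = sgmap_tag;
 *	t->_dmamap_load = _bus_dmamap_load_direct;
 *
 * DIRECT_WINDOW_BASE/SIZE and sgmap_tag are placeholders; the real
 * values and hook assignments live in the individual chipset drivers.
 */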

/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
	}
	return (error);
}
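
/*
 * Example (illustrative): a network driver typically reaches this
 * through the bus_dmamap_load_mbuf() interface before transmitting,
 * e.g.:
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_txmap, m0,
 *	    BUS_DMA_NOWAIT);
 *
 * "sc_dmat" and "sc_txmap" are hypothetical softc members.
 */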

/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio_direct: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer_direct_common(t, map,
		    addr, minlen, p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	} else if (t->_next_window != NULL) {
		/*
		 * Give the next window a chance.
		 */
		error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
	}
	return (error);
}
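
/*
 * Example (illustrative): raw character-device I/O paths reach this
 * through the bus_dmamap_load_uio() interface, e.g.:
 *
 *	error = bus_dmamap_load_uio(sc->sc_dmat, map, uio,
 *	    BUS_DMA_NOWAIT);
 */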

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, op)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int op;
{

	/*
	 * Flush the store buffer.
	 */
	alpha_mb();
}
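
/*
 * Illustrative note: Alpha DMA is cache-coherent, so ordering CPU
 * stores ahead of device visibility with a memory barrier is the
 * only synchronization needed here.  A typical pre-DMA call looks
 * like:
 *
 *	bus_dmamap_sync(t, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREWRITE);
 */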

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	paddr_t low;
	paddr_t high;
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

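	/* XXX Overrides the caller-supplied high address. */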
	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned nonsensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}
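
/*
 * Illustrative pairing (sketch): memory from _bus_dmamem_alloc() is
 * normally returned here only after any kernel mapping has been torn
 * down, e.g.:
 *
 *	bus_dmamem_unmap(t, kva, size);
 *	bus_dmamem_free(t, &seg, rseg);
 */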

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * If we're only mapping 1 segment, use K0SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
		return (0);
	}

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
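
/*
 * Example (illustrative): the usual allocate-then-map sequence a
 * driver performs through the interface macros:
 *
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *	caddr_t kva;
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
 *		    &kva, BUS_DMA_NOWAIT);
 *
 * With a single segment the mapping comes straight from K0SEG above,
 * so no kernel virtual address space is consumed.
 */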

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with K0SEG.
	 */
	if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
	    kva <= (caddr_t)ALPHA_K0SEG_END)
		return;

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (alpha_btop((caddr_t)segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}