/*	$NetBSD: bus_dma.c,v 1.4 2002/11/25 05:37:00 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#define _POWERPC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer (bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, paddr_t *, int *, int);

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct powerpc_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct powerpc_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct powerpc_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = t->_bounce_thresh;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
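
/*
 * Example (sketch, annotation only; not part of the original source):
 * a driver normally reaches this routine through the bus_dmamap_create()
 * hook in its tag, e.g. a single-segment map:
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map) != 0)
 *		return (ENOMEM);
 *
 * "sc_dmat" is an assumed softc member holding the bus_dma_tag_t.
 */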

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DMAMAP);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (p != NULL)
			(void) pmap_extract(p->p_vmspace->vm_map.pmap,
			    vaddr, (void *)&curaddr);
		else
			curaddr = vtophys(vaddr);

		/*
		 * If we're beyond the bounce threshold, notify
		 * the caller.
		 */
		if (map->_dm_bounce_thresh != 0 &&
		    curaddr >= map->_dm_bounce_thresh)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
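
		/*
		 * Worked example (annotation, not original code): with
		 * _dm_boundary = 0x1000 and curaddr = 0x1fc0, baddr
		 * rounds up to 0x2000, so sgsize is clamped to 0x40
		 * and the next chunk starts exactly on the boundary.
		 */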

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = PHYS_TO_PCI_MEM(curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
					PHYS_TO_PCI_MEM(curaddr);
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}
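
/*
 * Example (sketch, annotation only): callers chain several buffers
 * into one map by threading lastaddr/seg through repeated calls, as
 * the mbuf and uio loaders below do:
 *
 *	seg = 0;
 *	error = _bus_dmamap_load_buffer(t, map, buf0, len0, NULL, flags,
 *	    &lastaddr, &seg, 1);
 *	if (error == 0)
 *		error = _bus_dmamap_load_buffer(t, map, buf1, len1, NULL,
 *		    flags, &lastaddr, &seg, 0);
 */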

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
		&lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
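
/*
 * Example (sketch, annotation only): the typical driver sequence
 * built on this hook, assuming a map created as above:
 *
 *	if (bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *		... program the device with map->dm_segs[] ...
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(t, map);
 *	}
 */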

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
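
/*
 * Example (sketch, annotation only): a network driver's transmit path
 * commonly loads an outgoing packet chain this way:
 *
 *	if (bus_dmamap_load_mbuf(t, map, m0, BUS_DMA_NOWAIT) != 0)
 *		... defragment the chain or drop the packet ...
 */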

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
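
/*
 * Example (sketch, annotation only): a character-device read/write
 * routine driven by physio(9) can hand its struct uio directly to
 * bus_dmamap_load_uio(t, map, uio, BUS_DMA_NOWAIT) before starting
 * the transfer.
 */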

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	/* Nothing to do here. */
}
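
/*
 * Example (sketch, annotation only): although this implementation is
 * a no-op, portable drivers must still bracket DMA with sync calls so
 * they work on ports where it is not:
 *
 *	bus_dmamap_sync(t, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
 *	... DMA runs ...
 *	bus_dmamap_sync(t, map, 0, map->dm_mapsize,
 *	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 */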

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t avail_start = 0xffffffff, avail_end = 0;
	int bank;

	for (bank = 0; bank < vm_nphysseg; bank++) {
		if (avail_start > vm_physmem[bank].avail_start << PGSHIFT)
			avail_start = vm_physmem[bank].avail_start << PGSHIFT;
		if (avail_end < vm_physmem[bank].avail_end << PGSHIFT)
			avail_end = vm_physmem[bank].avail_end << PGSHIFT;
	}

	return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs,
	    nsegs, rsegs, flags, avail_start, avail_end - PAGE_SIZE);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = PCI_MEM_TO_PHYS(segs[curseg].ds_addr);
		    addr < (PCI_MEM_TO_PHYS(segs[curseg].ds_addr)
			+ segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = PCI_MEM_TO_PHYS(segs[curseg].ds_addr);
		    addr < (PCI_MEM_TO_PHYS(segs[curseg].ds_addr)
			+ segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}

	return (0);
}
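
/*
 * Example (sketch, annotation only): the usual alloc/map pairing as
 * seen from a driver, via the bus_dma(9) interface:
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	caddr_t kva;
 *
 *	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT) == 0 &&
 *	    bus_dmamem_map(t, &seg, rseg, size, &kva,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		... use kva; later bus_dmamem_unmap() and
 *		    bus_dmamem_free() undo the two steps ...
 *	}
 */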

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);

	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (PCI_MEM_TO_PHYS(segs[i].ds_addr) & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (PCI_MEM_TO_PHYS(segs[i].ds_addr) + off);
	}

	/* Page not found. */
	return (-1);
}
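
/*
 * Example (sketch, annotation only): a driver's mmap entry point
 * would typically just forward to this hook, e.g.
 *
 *	return (bus_dmamem_mmap(t, sc->sc_segs, sc->sc_nsegs, off,
 *	    prot, BUS_DMA_NOWAIT));
 *
 * where sc_segs/sc_nsegs are hypothetical softc fields recording the
 * segments returned by bus_dmamem_alloc().
 */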

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	paddr_t low;
	paddr_t high;
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_addr = PHYS_TO_PCI_MEM(lastaddr);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.tqe_next;

	for (; m != NULL; m = m->pageq.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned nonsensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = PHYS_TO_PCI_MEM(curaddr);
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
633