/*	$NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _POWERPC_BUS_DMA_PRIVATE
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.46 2012/02/01 09:54:03 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <uvm/uvm.h>

#ifdef PPC_BOOKE
#define EIEIO   __asm volatile("mbar\t0")
#define SYNC    __asm volatile("msync")
#else
#define EIEIO   __asm volatile("eieio")
#define SYNC    __asm volatile("sync")
#endif
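
/*
 * Note on the barrier choice above: "eieio" orders accesses to
 * memory-mapped I/O and "sync" waits for all prior storage accesses
 * to complete.  BookE cores do not implement those opcodes and use
 * "mbar" and "msync" for the equivalent orderings instead.
 */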

int _bus_dmamap_load_buffer (bus_dma_tag_t, bus_dmamap_t, void *,
    bus_size_t, struct vmspace *, int, paddr_t *, int *, int);
59
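
/*
 * Cache-line loop helpers.  Each walks the physical range [pa, pa+len)
 * one data-cache line at a time: dcbst cleans (stores dirty lines back
 * to memory), dcbi invalidates lines without writing them back, and
 * dcbf flushes (writes back, then invalidates).
 */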
static inline void
dcbst(paddr_t pa, long len, int dcache_line_size)
{
        paddr_t epa;
        for (epa = pa + len; pa < epa; pa += dcache_line_size)
                __asm volatile("dcbst 0,%0" :: "r"(pa));
}

static inline void
dcbi(paddr_t pa, long len, int dcache_line_size)
{
        paddr_t epa;
        for (epa = pa + len; pa < epa; pa += dcache_line_size)
                __asm volatile("dcbi 0,%0" :: "r"(pa));
}

static inline void
dcbf(paddr_t pa, long len, int dcache_line_size)
{
        paddr_t epa;
        for (epa = pa + len; pa < epa; pa += dcache_line_size)
                __asm volatile("dcbf 0,%0" :: "r"(pa));
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct powerpc_bus_dmamap *map;
        void *mapstore;
        size_t mapsize;

        /*
         * Allocate and initialize the DMA map.  The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        mapsize = sizeof(*map) + sizeof(bus_dma_segment_t [nsegments - 1]);
        if ((mapstore = kmem_intr_alloc(mapsize,
            (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
                return (ENOMEM);

        memset(mapstore, 0, mapsize);
        map = (struct powerpc_bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxmaxsegsz = maxsegsz;
        map->_dm_boundary = boundary;
        map->_dm_bounce_thresh = t->_bounce_thresh;
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
        map->dm_maxsegsz = maxsegsz;
        map->dm_mapsize = 0;            /* no valid mappings */
        map->dm_nsegs = 0;

        *dmamp = map;
        return (0);
}
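
/*
 * Example (an illustrative sketch, not part of this file): a driver
 * normally creates a map once at attach time and recycles it for every
 * transfer.  The softc member "sc_dmat" and the single-segment limits
 * here are assumptions.
 *
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_WAITOK, &map) != 0)
 *		return;		// could not reserve resources
 */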

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

        size_t mapsize = sizeof(*map)
            + sizeof(bus_dma_segment_t [map->_dm_segcnt - 1]);
        kmem_intr_free(map, mapsize);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vaddr_t vaddr = (vaddr_t)buf;
        int seg;

//      printf("%s(%p,%p,%p,%u,%p,%#x,%p,%p,%u)\n", __func__,
//          t, map, buf, buflen, vm, flags, lastaddrp, segp, first);

        lastaddr = *lastaddrp;
        bmask = ~(map->_dm_boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (!VMSPACE_IS_KERNEL_P(vm))
                        (void) pmap_extract(vm_map_pmap(&vm->vm_map),
                            vaddr, (void *)&curaddr);
                else
                        curaddr = vtophys(vaddr);

                /*
                 * If we're beyond the bounce threshold, notify
                 * the caller.
                 */
                if (map->_dm_bounce_thresh != 0 &&
                    curaddr >= map->_dm_bounce_thresh)
                        return (EINVAL);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;
                sgsize = min(sgsize, map->dm_maxsegsz);

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (map->_dm_boundary > 0) {
                        baddr = (curaddr + map->_dm_boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }
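
                /*
                 * Worked example (illustrative): with _dm_boundary =
                 * 0x10000 (64KB) and curaddr = 0x1f000, bmask is
                 * ~0xffff, so baddr = (0x1f000 + 0x10000) & ~0xffff =
                 * 0x20000 and sgsize is clipped to 0x1000 bytes,
                 * keeping the segment from crossing the 64KB line.
                 */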

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (first) {
                        map->dm_segs[seg].ds_addr = PHYS_TO_BUS_MEM(t, curaddr);
                        map->dm_segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (curaddr == lastaddr &&
                            (map->dm_segs[seg].ds_len + sgsize) <=
                             map->dm_maxsegsz &&
                            (map->_dm_boundary == 0 ||
                             (map->dm_segs[seg].ds_addr & bmask) ==
                             (PHYS_TO_BUS_MEM(t, curaddr) & bmask)))
                                map->dm_segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= map->_dm_segcnt)
                                        break;
                                map->dm_segs[seg].ds_addr =
                                    PHYS_TO_BUS_MEM(t, curaddr);
                                map->dm_segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0)
                return (EFBIG);         /* XXX better return value here? */

        return (0);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
        paddr_t lastaddr = 0;
        int seg, error;
        struct vmspace *vm;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

        if (buflen > map->_dm_size)
                return (EINVAL);

        if (p != NULL) {
                vm = p->p_vmspace;
        } else {
                vm = vmspace_kernel();
        }

        seg = 0;
        error = _bus_dmamap_load_buffer(t, map, buf, buflen, vm, flags,
            &lastaddr, &seg, 1);
        if (error == 0) {
                map->dm_mapsize = buflen;
                map->dm_nsegs = seg + 1;
        }
        return (error);
}
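
/*
 * Example (an illustrative sketch): loading a kernel buffer (p == NULL
 * selects the kernel vmspace), syncing around a memory-to-device
 * transfer, then unloading.  "buf" and "len" are assumed to come from
 * the caller.
 *
 *	if (bus_dmamap_load(t, map, buf, len, NULL, BUS_DMA_NOWAIT) == 0) {
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *		// ... start the DMA and wait for it to complete ...
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(t, map);
 *	}
 */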

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
        paddr_t lastaddr = 0;
        int seg, error, first;
        struct mbuf *m;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_bus_dmamap_load_mbuf: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        first = 1;
        seg = 0;
        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next, first = 0) {
                if (m->m_len == 0)
                        continue;
#ifdef POOL_VTOPHYS
                /* XXX Could be better about coalescing. */
                /* XXX Doesn't check boundaries. */
                switch (m->m_flags & (M_EXT|M_CLUSTER)) {
                case M_EXT|M_CLUSTER:
                        /* XXX KDASSERT */
                        KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
                        lastaddr = m->m_ext.ext_paddr +
                            (m->m_data - m->m_ext.ext_buf);
 have_addr:
                        if (first == 0 && ++seg >= map->_dm_segcnt) {
                                error = EFBIG;
                                continue;
                        }
                        map->dm_segs[seg].ds_addr =
                            PHYS_TO_BUS_MEM(t, lastaddr);
                        map->dm_segs[seg].ds_len = m->m_len;
                        lastaddr += m->m_len;
                        continue;

                case 0:
                        lastaddr = m->m_paddr + M_BUFOFFSET(m) +
                            (m->m_data - M_BUFADDR(m));
                        goto have_addr;

                default:
                        break;
                }
#endif
                error = _bus_dmamap_load_buffer(t, map, m->m_data,
                    m->m_len, vmspace_kernel(), flags, &lastaddr, &seg, first);
        }
        if (error == 0) {
                map->dm_mapsize = m0->m_pkthdr.len;
                map->dm_nsegs = seg + 1;
        }
        return (error);
}
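
/*
 * Example (an illustrative sketch): a network driver loads a packet
 * chain while filling its transmit ring; m0 must carry a packet header,
 * as the DIAGNOSTIC check above enforces.
 *
 *	error = bus_dmamap_load_mbuf(t, map, m0, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		// too many segments for the map: compact the chain
 *		// (e.g. with m_defrag()) or drop the packet
 *	}
 */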

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
        paddr_t lastaddr = 0;
        int seg, i, error, first;
        bus_size_t minlen, resid;
        struct iovec *iov;
        void *addr;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        first = 1;
        seg = 0;
        error = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                addr = (void *)iov[i].iov_base;

                error = _bus_dmamap_load_buffer(t, map, addr, minlen,
                    uio->uio_vmspace, flags, &lastaddr, &seg, first);
                first = 0;

                resid -= minlen;
        }
        if (error == 0) {
                map->dm_mapsize = uio->uio_resid;
                map->dm_nsegs = seg + 1;
        }
        return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

        panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

        /*
         * No resources to free; just mark the mappings as
         * invalid.
         */
        map->dm_maxsegsz = map->_dm_maxmaxsegsz;
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
        const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
        const bus_dma_segment_t *ds = map->dm_segs;

//      printf("%s(%p,%p,%#x,%u,%#x) from %p\n", __func__,
//          t, map, offset, len, ops, __builtin_return_address(0));

        if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
            (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
                panic("_bus_dmamap_sync: invalid ops %#x", ops);

#ifdef DIAGNOSTIC
        if (offset + len > map->dm_mapsize)
                panic("%s: ops %#x mapsize %u: bad offset (%u) and/or"
                    " length (%u)", __func__, ops, map->dm_mapsize,
                    offset, len);
#endif

        /*
         * Skip the leading amount.
         */
        while (offset >= ds->ds_len) {
                offset -= ds->ds_len;
                ds++;
        }
        EIEIO;
        for (; len > 0; ds++, offset = 0) {
                bus_size_t seglen = ds->ds_len - offset;
                bus_addr_t addr = BUS_MEM_TO_PHYS(t, ds->ds_addr) + offset;
                if (seglen > len)
                        seglen = len;
                len -= seglen;
                KASSERT(ds < &map->dm_segs[map->dm_nsegs]);
                /*
                 * Readjust things to start on a cacheline boundary.
                 */
                offset = (addr & (dcache_line_size-1));
                seglen += offset;
                addr -= offset;
                /*
                 * Now do the appropriate thing.
                 */
                switch (ops) {
                case BUS_DMASYNC_PREWRITE:
                        /*
                         * Make sure cache contents are in memory for the DMA.
                         */
                        dcbst(addr, seglen, dcache_line_size);
                        break;
                case BUS_DMASYNC_PREREAD:
                        /*
                         * If the region to be invalidated doesn't fall on a
                         * cacheline boundary, flush that cacheline so we
                         * preserve the leading content.
                         */
                        if (offset) {
                                dcbf(addr, 1, 1);
                                /*
                                 * If we are doing <= one cache line, stop now.
                                 */
                                if (seglen <= dcache_line_size)
                                        break;
                                /*
                                 * Advance one cache line since we've flushed
                                 * this one.
                                 */
                                addr += dcache_line_size;
                                seglen -= dcache_line_size;
                        }
                        /*
                         * If the byte after the region to be invalidated
                         * doesn't fall on a cacheline boundary, flush that
                         * cacheline so we preserve the trailing content.
                         */
                        if (seglen & (dcache_line_size-1)) {
                                dcbf(addr + seglen, 1, 1);
                                if (seglen <= dcache_line_size)
                                        break;
                                /*
                                 * Truncate the length to a multiple of the
                                 * dcache line size.  No reason to flush
                                 * the last entry again.
                                 */
                                seglen &= ~(dcache_line_size - 1);
                        }
                        SYNC;           /* is this needed? */
                        EIEIO;          /* is this needed? */
                        /* FALLTHROUGH */
                case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
                case BUS_DMASYNC_POSTREAD:
                        /*
                         * The contents will have changed, make sure to remove
                         * them from the cache.  Note: some implementations
                         * implement dcbi identically to dcbf.  Thus if the
                         * cacheline has data, it will be written to memory.
                         * If the DMA is updating the same cacheline at the
                         * same time, bad things can happen.
                         */
                        dcbi(addr, seglen, dcache_line_size);
                        break;
                case BUS_DMASYNC_POSTWRITE:
                        /*
                         * Do nothing.
                         */
                        break;
                case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
                        /*
                         * Force it to memory and remove from cache.
                         */
                        dcbf(addr, seglen, dcache_line_size);
                        break;
                }
        }
        __asm volatile("sync");
}
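
/*
 * Example (an illustrative sketch): the sync ops pair up around a
 * transfer.  For a device-to-memory (read) DMA:
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	// ... program the device; DMA runs; completion interrupt ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 *
 * PREREAD pushes out any partial cachelines at the edges and
 * invalidates the buffer so the CPU cannot see stale lines; POSTREAD
 * invalidates again in case lines were refetched while the DMA was
 * in flight.
 */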

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
        paddr_t start = 0xffffffff, end = 0;
        int bank;

        for (bank = 0; bank < vm_nphysseg; bank++) {
                if (start > ptoa(VM_PHYSMEM_PTR(bank)->avail_start))
                        start = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
                if (end < ptoa(VM_PHYSMEM_PTR(bank)->avail_end))
                        end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
        }

        return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs,
            nsegs, rsegs, flags, start, end - PAGE_SIZE);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
        struct vm_page *m;
        bus_addr_t addr;
        struct pglist mlist;
        int curseg;

        /*
         * Build a list of pages to free back to the VM system.
         */
        TAILQ_INIT(&mlist);
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr);
                    addr < (BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr)
                        + segs[curseg].ds_len);
                    addr += PAGE_SIZE) {
                        m = PHYS_TO_VM_PAGE(addr);
                        TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
                }
        }

        uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
        vaddr_t va;
        bus_addr_t addr;
        int curseg;
        const uvm_flag_t kmflags =
            (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

        size = round_page(size);

#ifdef PMAP_MAP_POOLPAGE
        /*
         * If we are mapping a cacheable physically contiguous segment, treat
         * it as if we are mapping a poolpage and avoid consuming any KVAs.
         */
        if (nsegs == 1 && (flags & BUS_DMA_DONTCACHE) == 0) {
                KASSERT(size == segs->ds_len);
                addr = BUS_MEM_TO_PHYS(t, segs->ds_addr);
                *kvap = (void *)PMAP_MAP_POOLPAGE(addr);
                return 0;
        }
#endif

        va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

        if (va == 0)
                return (ENOMEM);

        *kvap = (void *)va;

        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr);
                    addr < (BUS_MEM_TO_PHYS(t, segs[curseg].ds_addr)
                        + segs[curseg].ds_len);
                    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
                        if (size == 0)
                                panic("_bus_dmamem_map: size botch");
                        /*
                         * If we are mapping nocache, flush the page from
                         * the cache before we map it.
                         */
                        if (flags & BUS_DMA_DONTCACHE)
                                dcbf(addr, PAGE_SIZE,
                                    curcpu()->ci_ci.dcache_line_size);
                        pmap_kenter_pa(va, addr,
                            VM_PROT_READ | VM_PROT_WRITE,
                            PMAP_WIRED |
                            ((flags & BUS_DMA_DONTCACHE) ? PMAP_NOCACHE : 0));
                }
        }

        return (0);
}
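
/*
 * Example (an illustrative sketch): allocating and mapping a physically
 * contiguous descriptor ring; "ring_size" is an assumed caller-supplied
 * value.
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	void *kva;
 *
 *	if (bus_dmamem_alloc(t, ring_size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT) == 0 &&
 *	    bus_dmamem_map(t, &seg, rseg, ring_size, &kva,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		// kva is now usable; load it into a map for the device
 *	}
 */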

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
        vaddr_t va = (vaddr_t) kva;

#ifdef DIAGNOSTIC
        if (va & PGOFSET)
                panic("_bus_dmamem_unmap");
#endif

        if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) {
                size = round_page(size);
                pmap_kremove(va, size);
                uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY);
        }
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
        int i;

        for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
                if (off & PGOFSET)
                        panic("_bus_dmamem_mmap: offset unaligned");
                if (BUS_MEM_TO_PHYS(t, segs[i].ds_addr) & PGOFSET)
                        panic("_bus_dmamem_mmap: segment unaligned");
                if (segs[i].ds_len & PGOFSET)
                        panic("_bus_dmamem_mmap: segment size not multiple"
                            " of page size");
#endif
                if (off >= segs[i].ds_len) {
                        off -= segs[i].ds_len;
                        continue;
                }

                return (BUS_MEM_TO_PHYS(t, segs[i].ds_addr) + off);
        }

        /* Page not found. */
        return (-1);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(
        bus_dma_tag_t t,
        bus_size_t size,
        bus_size_t alignment,
        bus_size_t boundary,
        bus_dma_segment_t *segs,
        int nsegs,
        int *rsegs,
        int flags,
        paddr_t low,
        paddr_t high)
{
        paddr_t curaddr, lastaddr;
        struct vm_page *m;
        struct pglist mlist;
        int curseg, error;

        /* Always round the size. */
        size = round_page(size);

        /*
         * Allocate pages from the VM system.
         */
        error = uvm_pglistalloc(size, low, high, alignment, boundary,
            &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
        if (error)
                return (error);

        /*
         * Compute the location, size, and number of segments actually
         * returned by the VM code.
         */
        m = mlist.tqh_first;
        curseg = 0;
        lastaddr = VM_PAGE_TO_PHYS(m);
        segs[curseg].ds_addr = PHYS_TO_BUS_MEM(t, lastaddr);
        segs[curseg].ds_len = PAGE_SIZE;
        m = m->pageq.queue.tqe_next;

        for (; m != NULL; m = m->pageq.queue.tqe_next) {
                curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
                if (curaddr < low || curaddr >= high) {
                        printf("vm_page_alloc_memory returned non-sensical"
                            " address 0x%lx\n", curaddr);
                        panic("_bus_dmamem_alloc_range");
                }
#endif
                if (curaddr == (lastaddr + PAGE_SIZE))
                        segs[curseg].ds_len += PAGE_SIZE;
                else {
                        curseg++;
                        segs[curseg].ds_addr = PHYS_TO_BUS_MEM(t, curaddr);
                        segs[curseg].ds_len = PAGE_SIZE;
                }
                lastaddr = curaddr;
        }

        *rsegs = curseg + 1;

        return (0);
}

/*
 * Generic form of PHYS_TO_BUS_MEM().
 */
bus_addr_t
_bus_dma_phys_to_bus_mem_generic(bus_dma_tag_t t, bus_addr_t addr)
{

        return (addr);
}

/*
 * Generic form of BUS_MEM_TO_PHYS().
 */
bus_addr_t
_bus_dma_bus_mem_to_phys_generic(bus_dma_tag_t t, bus_addr_t addr)
{

        return (addr);
}