/*	$OpenBSD: bus_dma.c,v 1.14 2018/01/11 15:49:34 visa Exp $	*/
/*	$NetBSD: bus_dma.c,v 1.1 2006/09/01 21:26:18 uwe Exp $	*/

/*
 * Copyright (c) 2005 NONAKA Kimihiro
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <sh/cache.h>

#include <machine/autoconf.h>
#define	_LANDISK_BUS_DMA_PRIVATE
#include <machine/bus.h>

#if defined(DEBUG) && defined(BUSDMA_DEBUG)
#define	DPRINTF(a)	printf a
#else
#define	DPRINTF(a)
#endif

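/*
 * The bus_dma tag handed to landisk device drivers; every method simply
 * points at the generic implementations below.  A driver runs a map
 * through roughly the following sequence (an illustrative sketch only,
 * see bus_dma(9) for the authoritative interface):
 *
 *	bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, size, NULL, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(t, map, 0, size, BUS_DMASYNC_PREWRITE);
 *	... run the transfer ...
 *	bus_dmamap_sync(t, map, 0, size, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */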
struct _bus_dma_tag landisk_bus_dma = {
	._cookie = NULL,

	._dmamap_create = _bus_dmamap_create,
	._dmamap_destroy = _bus_dmamap_destroy,
	._dmamap_load = _bus_dmamap_load,
	._dmamap_load_mbuf = _bus_dmamap_load_mbuf,
	._dmamap_load_uio = _bus_dmamap_load_uio,
	._dmamap_load_raw = _bus_dmamap_load_raw,
	._dmamap_unload = _bus_dmamap_unload,
	._dmamap_sync = _bus_dmamap_sync,

	._dmamem_alloc = _bus_dmamem_alloc,
	._dmamem_free = _bus_dmamem_free,
	._dmamem_map = _bus_dmamem_map,
	._dmamem_unmap = _bus_dmamem_unmap,
	._dmamem_mmap = _bus_dmamem_mmap,
};

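/*
 * Mark a map as holding no valid mappings.
 */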
#define	DMAMAP_RESET(_m) do {						\
	(_m)->dm_mapsize = 0;						\
	(_m)->dm_nsegs = 0;						\
} while (0)

int	_bus_dmamap_load_vaddr(bus_dma_tag_t, bus_dmamap_t,
	    void *, bus_size_t, pmap_t);
int	_bus_dmamap_load_paddr(bus_dma_tag_t, bus_dmamap_t,
	    paddr_t, vaddr_t, bus_size_t);

/*
 * Create a DMA map.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	bus_dmamap_t map;
	void *mapstore;
	size_t mapsize;
	int error;

	DPRINTF(("bus_dmamap_create: t = %p, size = %ld, nsegments = %d, maxsegsz = %ld, boundary = %ld, flags = %x\n", t, size, nsegments, maxsegsz, boundary, flags));

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	error = 0;
	mapsize = sizeof(struct _bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF, (flags & BUS_DMA_NOWAIT) ?
	    (M_NOWAIT | M_ZERO) : (M_WAITOK | M_ZERO))) == NULL)
		return (ENOMEM);

	DPRINTF(("bus_dmamap_create: dmamp = %p\n", mapstore));

	map = (bus_dmamap_t)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);

	DMAMAP_RESET(map);	/* no valid mappings */

	*dmamp = map;

	return (0);
}

/*
 * Destroy a DMA map.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	size_t mapsize;

	DPRINTF(("bus_dmamap_destroy: t = %p, map = %p\n", t, map));

	mapsize = sizeof(struct _bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	free(map, M_DEVBUF, mapsize);
}

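/*
 * Add the physical range [paddr, paddr + size) to the map, splitting it
 * at boundary crossings and coalescing it with the previous segment when
 * the addresses are contiguous and the segment limits allow it.
 * Segment addresses are stored as P2 (uncached) segment addresses.
 */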
int
_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map,
    paddr_t paddr, vaddr_t vaddr, bus_size_t size)
{
	bus_dma_segment_t * const segs = map->dm_segs;
	bus_addr_t bmask = ~(map->_dm_boundary - 1);

	int first = map->dm_mapsize == 0;
	int nseg = map->dm_nsegs;
	paddr_t lastaddr = SH3_P2SEG_TO_PHYS(segs[nseg].ds_addr);

	map->dm_mapsize += size;

	do {
		bus_size_t sgsize = size;

		/* Make sure we don't cross any boundaries. */
		if (map->_dm_boundary > 0) {
			bus_addr_t baddr;	/* next boundary address */

			baddr = (paddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - paddr))
				sgsize = (baddr - paddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			/* first segment */
			segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
			segs[nseg].ds_len = sgsize;
			segs[nseg]._ds_vaddr = vaddr;
			first = 0;
		} else if ((paddr == lastaddr)
		    && (segs[nseg].ds_len + sgsize <= map->_dm_maxsegsz)
		    && (map->_dm_boundary == 0 ||
			(segs[nseg].ds_addr & bmask) == (paddr & bmask))) {
			/* coalesce */
			segs[nseg].ds_len += sgsize;
		} else {
			if (++nseg >= map->_dm_segcnt)
				return (EFBIG);

			/* new segment */
			segs[nseg].ds_addr = SH3_PHYS_TO_P2SEG(paddr);
			segs[nseg].ds_len = sgsize;
			segs[nseg]._ds_vaddr = vaddr;
		}

		lastaddr = paddr + sgsize;
		paddr += sgsize;
		vaddr += sgsize;
		size -= sgsize;
	} while (size > 0);

	map->dm_nsegs = nseg;

	return (0);
}

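/*
 * Add the buffer starting at the virtual address buf to the map.  Kernel
 * P1/P2 segment addresses translate directly to physical addresses; any
 * other address is resolved page by page with pmap_extract().
 */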
int
_bus_dmamap_load_vaddr(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t size, pmap_t pmap)
{
	vaddr_t vaddr;
	paddr_t paddr;
	vaddr_t next, end;
	int error;

	vaddr = (vaddr_t)buf;
	end = vaddr + size;

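	/*
	 * Kernel P1/P2 segment addresses map physical memory directly,
	 * so the whole buffer can be handed over in one go; otherwise
	 * walk the buffer one page at a time.
	 */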
	if (pmap == pmap_kernel() &&
	    vaddr >= SH3_P1SEG_BASE && end <= SH3_P2SEG_END)
		paddr = SH3_P1SEG_TO_PHYS(vaddr);
	else {
		for (next = (vaddr + PAGE_SIZE) & ~PAGE_MASK;
		    next < end; next += PAGE_SIZE) {
			pmap_extract(pmap, vaddr, &paddr);
			error = _bus_dmamap_load_paddr(t, map,
			    paddr, vaddr, next - vaddr);
			if (error != 0)
				return (error);

			vaddr = next;
		}

		pmap_extract(pmap, vaddr, &paddr);
		size = end - vaddr;
	}

	return (_bus_dmamap_load_paddr(t, map, paddr, vaddr, size));
}

/*
 * Load a DMA map with a linear buffer.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int error;

	DPRINTF(("bus_dmamap_load: t = %p, map = %p, buf = %p, buflen = %ld, p = %p, flags = %x\n", t, map, buf, buflen, p, flags));

	DMAMAP_RESET(map);

	if (buflen > map->_dm_size)
		return (EINVAL);

	error = _bus_dmamap_load_vaddr(t, map, buf, buflen,
	    p == NULL ? pmap_kernel() : p->p_vmspace->vm_map.pmap);
	if (error != 0) {
		DMAMAP_RESET(map);	/* no valid mappings */
		return (error);
	}

	map->dm_nsegs++;

	return (0);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct mbuf *m;
	int error;

	DMAMAP_RESET(map);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	for (m = m0; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;

		error = _bus_dmamap_load_vaddr(t, map, m->m_data, m->m_len,
		    pmap_kernel());
		if (error != 0) {
			DMAMAP_RESET(map);
			return (error);
		}
	}

	map->dm_nsegs++;

	return (0);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload a DMA map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

	DPRINTF(("bus_dmamap_unload: t = %p, map = %p\n", t, map));

	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
}

/*
 * Synchronize a DMA map.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;
	bus_addr_t addr, naddr;
	int i;

	DPRINTF(("bus_dmamap_sync: t = %p, map = %p, offset = %ld, len = %ld, ops = %x\n", t, map, offset, len, ops));

#ifdef DIAGNOSTIC
	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset");
	if ((offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	if (!sh_cache_enable_dcache) {
		/* Nothing to do */
		DPRINTF(("bus_dmamap_sync: disabled D-Cache\n"));
		return;
	}

	for (i = 0; i < map->dm_nsegs && len != 0; i++) {
		/* Find the beginning segment. */
		if (offset >= map->dm_segs[i].ds_len) {
			offset -= map->dm_segs[i].ds_len;
			continue;
		}

		/*
		 * Now at the first segment to sync; nail
		 * each segment until we have exhausted the
		 * length.
		 */
		minlen = len < map->dm_segs[i].ds_len - offset ?
		    len : map->dm_segs[i].ds_len - offset;

		addr = map->dm_segs[i]._ds_vaddr;
		naddr = addr + offset;

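		/*
		 * Addresses in the P2 segment bypass the cache, so there
		 * is nothing to write back or invalidate for them.
		 */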
		if ((naddr >= SH3_P2SEG_BASE)
		    && (naddr + minlen <= SH3_P2SEG_END)) {
			DPRINTF(("bus_dmamap_sync: P2SEG (0x%08lx)\n", naddr));
			offset = 0;
			len -= minlen;
			continue;
		}

		DPRINTF(("bus_dmamap_sync: flushing segment %d "
		    "(0x%lx+%lx, 0x%lx+0x%lx) (remain = %ld)\n",
		    i, addr, offset, addr, offset + minlen - 1, len));

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			if (SH_HAS_WRITEBACK_CACHE)
				sh_dcache_wbinv_range(naddr, minlen);
			else
				sh_dcache_inv_range(naddr, minlen);
			break;

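		/*
		 * Before a device-to-memory transfer, invalidating an
		 * unaligned range would also discard unrelated dirty data
		 * sharing the first or last cache line, so write back and
		 * invalidate in that case.
		 */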
		case BUS_DMASYNC_PREREAD:
			if (SH_HAS_WRITEBACK_CACHE &&
			    ((naddr | minlen) & (sh_cache_line_size - 1)) != 0)
				sh_dcache_wbinv_range(naddr, minlen);
			else
				sh_dcache_inv_range(naddr, minlen);
			break;

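		/*
		 * Before a memory-to-device transfer, write dirty data
		 * back so the device sees what the CPU wrote.
		 */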
		case BUS_DMASYNC_PREWRITE:
			if (SH_HAS_WRITEBACK_CACHE)
				sh_dcache_wb_range(naddr, minlen);
			break;

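		/*
		 * After a device-to-memory transfer, drop any stale
		 * cached copies of the buffer.
		 */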
		case BUS_DMASYNC_POSTREAD:
		case BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE:
			sh_dcache_inv_range(naddr, minlen);
			break;
		}
		offset = 0;
		len -= minlen;
	}
}

/*
 * Allocate memory safe for DMA.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error, plaflag;

	DPRINTF(("bus_dmamem_alloc: t = %p, size = %ld, alignment = %ld, boundary = %ld, segs = %p, nsegs = %d, rsegs = %p, flags = %x\n", t, size, alignment, boundary, segs, nsegs, rsegs, flags));

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, 0, -1, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;

	DPRINTF(("bus_dmamem_alloc: m = %p, lastaddr = 0x%08lx\n",m,lastaddr));

	while ((m = TAILQ_NEXT(m, pageq)) != NULL) {
		curaddr = VM_PAGE_TO_PHYS(m);
		DPRINTF(("bus_dmamem_alloc: m = %p, curaddr = 0x%08lx, lastaddr = 0x%08lx\n", m, curaddr, lastaddr));
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			DPRINTF(("bus_dmamem_alloc: new segment\n"));
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	DPRINTF(("bus_dmamem_alloc: curseg = %d, *rsegs = %d\n",curseg,*rsegs));

	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	DPRINTF(("bus_dmamem_free: t = %p, segs = %p, nsegs = %d\n", t, segs, nsegs));

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("bus_dmamem_free: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n", curseg, segs[curseg].ds_addr, segs[curseg].ds_len));
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			DPRINTF(("bus_dmamem_free: m = %p\n", m));
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}


int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const struct kmem_dyn_mode *kd;

	DPRINTF(("bus_dmamem_map: t = %p, segs = %p, nsegs = %d, size = %d, kvap = %p, flags = %x\n", t, segs, nsegs, size, kvap, flags));

	/*
	 * If we're only mapping 1 segment, use the P1 or P2 segment
	 * directly to avoid TLB thrashing: P2 (uncached) when
	 * BUS_DMA_COHERENT is requested, P1 (cached) otherwise.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT) {
			*kvap = (caddr_t)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		} else {
			*kvap = (caddr_t)SH3_PHYS_TO_P1SEG(segs[0].ds_addr);
		}
		DPRINTF(("bus_dmamem_map: addr = 0x%08lx, kva = %p\n", segs[0].ds_addr, *kvap));
		return 0;
	}

	/* Always round the size. */
	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("bus_dmamem_map: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n", curseg, segs[curseg].ds_addr, segs[curseg].ds_len));
		for (addr = segs[curseg].ds_addr;
		    addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    PROT_READ | PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}

void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

	DPRINTF(("bus_dmamem_unmap: t = %p, kva = %p, size = %d\n", t, kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with P[12]SEG.
	 */
	if ((kva >= (caddr_t)SH3_P1SEG_BASE)
	    && (kva <= (caddr_t)SH3_P2SEG_END)) {
		return;
	}

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	km_free(kva, size, &kv_any, &kp_none);
}

paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented. */
	return (paddr_t)(-1);
}