/*	$OpenBSD: bus_dma.c,v 1.59 2024/10/08 19:40:00 kettenis Exp $	*/
/*	$NetBSD: bus_dma.c,v 1.3 2003/05/07 21:33:58 fvdl Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The following is included because _bus_dma_uiomove is derived from
 * uiomove() in kern_subr.c.
 */

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <machine/bus.h>

#include <uvm/uvm_extern.h>

/* #define FORCE_BOUNCE_BUFFER	1 */
#ifndef FORCE_BOUNCE_BUFFER
#define FORCE_BOUNCE_BUFFER	0
#endif
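
/*
 * Note on bounce buffers (summary of the logic below): in an SEV guest,
 * guest pages are encrypted and a device cannot usefully DMA into them
 * directly.  Each map therefore carries a set of bounce pages that are
 * entered into the kernel pmap with PMAP_NOCRYPT; _bus_dmamap_load_buffer()
 * points the DMA segments at those pages, and _bus_dmamap_sync() copies
 * data between the caller's buffer and the bounce pages around each
 * transfer.
 */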

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
    struct proc *, int, paddr_t *, int *, int *, int);

/*
 * Common function for DMA map creation. May be called by bus-specific
 * DMA map creation functions.
 */
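/*
 * Illustrative caller sketch only ("sc" fields are hypothetical driver
 * softc members, error handling trimmed):
 *
 *	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap);
 */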
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
        struct bus_dmamap *map;
        struct pglist mlist;
        struct vm_page **pg, *pgnext;
        size_t mapsize, sz, ssize;
        vaddr_t va, sva;
        void *mapstore;
        int npages, error;
        const struct kmem_dyn_mode *kd;
        /* allocate and use bounce buffers when running as SEV guest */
        int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;

        /*
         * Allocate and initialize the DMA map. The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        mapsize = sizeof(struct bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (nsegments - 1));

        if (use_bounce_buffer) {
                /* this many pages plus one in case we get split */
                npages = round_page(size) / PAGE_SIZE + 1;
                if (npages < nsegments)
                        npages = nsegments;
                mapsize += sizeof(struct vm_page *) * npages;
        }

        mapstore = malloc(mapsize, M_DEVBUF,
            (flags & BUS_DMA_NOWAIT) ? (M_NOWAIT|M_ZERO) : (M_WAITOK|M_ZERO));
        if (mapstore == NULL)
                return (ENOMEM);

        map = (struct bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxsegsz = maxsegsz;
        map->_dm_boundary = boundary;
        if (use_bounce_buffer) {
                map->_dm_pages = (void *)&map->dm_segs[nsegments];
                map->_dm_npages = npages;
        }
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);

        if (!use_bounce_buffer) {
                *dmamp = map;
                return (0);
        }

        sz = npages << PGSHIFT;
        kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
        va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, kd);
        if (va == 0) {
                map->_dm_npages = 0;
                free(map, M_DEVBUF, mapsize);
                return (ENOMEM);
        }

        TAILQ_INIT(&mlist);
        error = uvm_pglistalloc(sz, 0, -1, PAGE_SIZE, 0, &mlist, nsegments,
            (flags & BUS_DMA_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK);
        if (error) {
                map->_dm_npages = 0;
                km_free((void *)va, sz, &kv_any, &kp_none);
                free(map, M_DEVBUF, mapsize);
                return (ENOMEM);
        }

        sva = va;
        ssize = sz;
        pgnext = TAILQ_FIRST(&mlist);
        for (pg = map->_dm_pages; npages--; va += PAGE_SIZE, pg++) {
                *pg = pgnext;
                error = pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(*pg),
                    PROT_READ | PROT_WRITE,
                    PROT_READ | PROT_WRITE | PMAP_WIRED |
                    PMAP_CANFAIL | PMAP_NOCRYPT);
                if (error) {
                        pmap_update(pmap_kernel());
                        map->_dm_npages = 0;
                        km_free((void *)sva, ssize, &kv_any, &kp_none);
                        free(map, M_DEVBUF, mapsize);
                        uvm_pglistfree(&mlist);
                        return (ENOMEM);
                }
                pgnext = TAILQ_NEXT(*pg, pageq);
                bzero((void *)va, PAGE_SIZE);
        }
        pmap_update(pmap_kernel());
        map->_dm_pgva = sva;

        *dmamp = map;
        return (0);
}

/*
 * Common function for DMA map destruction. May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
        size_t mapsize;
        struct vm_page **pg;
        struct pglist mlist;
        int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;

        if (map->_dm_pgva) {
                km_free((void *)map->_dm_pgva, map->_dm_npages << PGSHIFT,
                    &kv_any, &kp_none);
        }

        mapsize = sizeof(struct bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
        if (use_bounce_buffer)
                mapsize += sizeof(struct vm_page *) * map->_dm_npages;

        if (map->_dm_pages) {
                TAILQ_INIT(&mlist);
                for (pg = map->_dm_pages; map->_dm_npages--; pg++) {
                        TAILQ_INSERT_TAIL(&mlist, *pg, pageq);
                }
                uvm_pglistfree(&mlist);
        }

        free(map, M_DEVBUF, mapsize);
}

/*
 * Common function for loading a DMA map with a linear buffer. May
 * be called by bus-specific DMA map load functions.
 */
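/*
 * Illustrative sketch of the load/sync lifecycle a driver goes through
 * with a map created above (hypothetical names, error handling trimmed):
 *
 *	bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_PREWRITE);
 *	... start the transfer, wait for completion ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
 *	    BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 */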
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
        bus_addr_t lastaddr = 0;
        int seg, used, error;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        if (buflen > map->_dm_size)
                return (EINVAL);

        seg = 0;
        used = 0;
        error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
            &lastaddr, &seg, &used, 1);
        if (error == 0) {
                map->dm_mapsize = buflen;
                map->dm_nsegs = seg + 1;
                map->_dm_nused = used;
        }
        return (error);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
        paddr_t lastaddr = 0;
        int seg, used, error, first;
        struct mbuf *m;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_bus_dmamap_load_mbuf: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        first = 1;
        seg = 0;
        used = 0;
        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                if (m->m_len == 0)
                        continue;
                error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
                    NULL, flags, &lastaddr, &seg, &used, first);
                first = 0;
        }
        if (error == 0) {
                map->dm_mapsize = m0->m_pkthdr.len;
                map->dm_nsegs = seg + 1;
                map->_dm_nused = used;
        }
        return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
        paddr_t lastaddr = 0;
        int seg, used, i, error, first;
        bus_size_t minlen, resid;
        struct proc *p = NULL;
        struct iovec *iov;
        caddr_t addr;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                p = uio->uio_procp;
#ifdef DIAGNOSTIC
                if (p == NULL)
                        panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
        }

        first = 1;
        seg = 0;
        used = 0;
        error = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                /*
                 * Now at the first iovec to load. Load each iovec
                 * until we have exhausted the residual count.
                 */
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                addr = (caddr_t)iov[i].iov_base;

                error = _bus_dmamap_load_buffer(t, map, addr, minlen,
                    p, flags, &lastaddr, &seg, &used, first);
                first = 0;

                resid -= minlen;
        }
        if (error == 0) {
                map->dm_mapsize = uio->uio_resid;
                map->dm_nsegs = seg + 1;
                map->_dm_nused = used;
        }
        return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
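/*
 * The segments passed in come from bus_dmamem_alloc(); this function only
 * re-chops them against the map's maxsegsz/boundary constraints, so no
 * bounce pages are involved here.
 */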
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
        bus_addr_t paddr, baddr, bmask, lastaddr = 0;
        bus_size_t plen, sgsize, mapsize;
        int first = 1;
        int i, seg = 0;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;

        if (nsegs > map->_dm_segcnt || size > map->_dm_size)
                return (EINVAL);

        mapsize = size;
        bmask = ~(map->_dm_boundary - 1);

        for (i = 0; i < nsegs && size > 0; i++) {
                paddr = segs[i].ds_addr;
                plen = MIN(segs[i].ds_len, size);

                while (plen > 0) {
                        /*
                         * Compute the segment size, and adjust counts.
                         */
                        sgsize = PAGE_SIZE - ((u_long)paddr & PGOFSET);
                        if (plen < sgsize)
                                sgsize = plen;

                        if (paddr > dma_constraint.ucr_high &&
                            (map->_dm_flags & BUS_DMA_64BIT) == 0)
                                panic("Non dma-reachable buffer at "
                                    "paddr %#lx(raw)", paddr);

                        /*
                         * Make sure we don't cross any boundaries.
                         */
                        if (map->_dm_boundary > 0) {
                                baddr = (paddr + map->_dm_boundary) & bmask;
                                if (sgsize > (baddr - paddr))
                                        sgsize = (baddr - paddr);
                        }

                        /*
                         * Insert chunk into a segment, coalescing with
                         * previous segment if possible.
                         */
                        if (first) {
                                map->dm_segs[seg].ds_addr = paddr;
                                map->dm_segs[seg].ds_len = sgsize;
                                first = 0;
                        } else {
                                if (paddr == lastaddr &&
                                    (map->dm_segs[seg].ds_len + sgsize) <=
                                    map->_dm_maxsegsz &&
                                    (map->_dm_boundary == 0 ||
                                    (map->dm_segs[seg].ds_addr & bmask) ==
                                    (paddr & bmask)))
                                        map->dm_segs[seg].ds_len += sgsize;
                                else {
                                        if (++seg >= map->_dm_segcnt)
                                                return (EINVAL);
                                        map->dm_segs[seg].ds_addr = paddr;
                                        map->dm_segs[seg].ds_len = sgsize;
                                }
                        }

                        paddr += sgsize;
                        plen -= sgsize;
                        size -= sgsize;

                        lastaddr = paddr;
                }
        }

        map->dm_mapsize = mapsize;
        map->dm_nsegs = seg + 1;
        return (0);
}

/*
 * Common function for unloading a DMA map. May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
        /*
         * No resources to free; just mark the mappings as
         * invalid.
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        map->_dm_nused = 0;
}

/*
 * Common function for DMA map synchronization. May be called
 * by bus-specific DMA map synchronization functions.
 */
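/*
 * Without bounce buffers this is a no-op. With bounce buffers, PREWRITE
 * copies the caller's data into the bounce pages before the device reads
 * them, and POSTREAD copies the device's data back out of the bounce
 * pages; PREREAD and POSTWRITE need no copy.
 */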
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size, int op)
{
        bus_dma_segment_t *sg;
        int i, off = addr;
        bus_size_t l;
        int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;

        if (!use_bounce_buffer)
                return;

        for (i = map->_dm_segcnt, sg = map->dm_segs; size && i; i--, sg++) {
                if (off >= sg->ds_len) {
                        off -= sg->ds_len;
                        continue;
                }

                l = sg->ds_len - off;
                if (l > size)
                        l = size;
                size -= l;

                /* PREREAD and POSTWRITE are no-ops. */

                /* READ: device -> memory */
                if (op & BUS_DMASYNC_POSTREAD) {
                        bcopy((void *)(sg->_ds_bounce_va + off),
                            (void *)(sg->_ds_va + off), l);
                }

                /* WRITE: memory -> device */
                if (op & BUS_DMASYNC_PREWRITE) {
                        bcopy((void *)(sg->_ds_va + off),
                            (void *)(sg->_ds_bounce_va + off), l);
                }

                off = 0;
        }
}

/*
 * Common function for DMA-safe memory allocation. May be called
 * by bus-specific DMA memory allocation functions.
 */
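/*
 * Illustrative sequence for setting up a driver-owned DMA area
 * (hypothetical names, error handling trimmed):
 *
 *	bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
 *	bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_NOWAIT);
 *	bus_dmamap_load(t, map, kva, size, NULL, BUS_DMA_NOWAIT);
 */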
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
        paddr_t low, high;

        if (flags & BUS_DMA_64BIT) {
                low = no_constraint.ucr_low;
                high = no_constraint.ucr_high;
        } else {
                low = dma_constraint.ucr_low;
                high = dma_constraint.ucr_high;
        }

        return _bus_dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, low, high);
}

/*
 * Common function for freeing DMA-safe memory. May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
        struct vm_page *m;
        bus_addr_t addr;
        struct pglist mlist;
        int curseg;

        /*
         * Build a list of pages to free back to the VM system.
         */
        TAILQ_INIT(&mlist);
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE) {
                        m = PHYS_TO_VM_PAGE(addr);
                        TAILQ_INSERT_TAIL(&mlist, m, pageq);
                }
        }

        uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory. May be called by
 * bus-specific DMA memory map functions.
 */
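/*
 * A single cacheable segment is served out of the direct map, so no
 * kernel virtual address space is consumed; _bus_dmamem_unmap() below
 * recognizes such addresses and frees nothing.
 */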
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
        vaddr_t va, sva;
        size_t ssize;
        bus_addr_t addr;
        int curseg, pmapflags = 0, error;
        const struct kmem_dyn_mode *kd;

        if (nsegs == 1 && (flags & BUS_DMA_NOCACHE) == 0) {
                *kvap = (caddr_t)PMAP_DIRECT_MAP(segs[0].ds_addr);
                return (0);
        }

        if (flags & BUS_DMA_NOCACHE)
                pmapflags |= PMAP_NOCACHE;

        size = round_page(size);
        kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
        va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
        if (va == 0)
                return (ENOMEM);

        *kvap = (caddr_t)va;

        sva = va;
        ssize = size;
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
                        if (size == 0)
                                panic("_bus_dmamem_map: size botch");
                        error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
                            PROT_READ | PROT_WRITE,
                            PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
                        if (error) {
                                pmap_update(pmap_kernel());
                                km_free((void *)sva, ssize, &kv_any, &kp_none);
                                return (error);
                        }
                }
        }
        pmap_update(pmap_kernel());

        return (0);
}

/*
 * Common function for unmapping DMA-safe memory. May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DIAGNOSTIC
        if ((u_long)kva & PGOFSET)
                panic("_bus_dmamem_unmap");
#endif
        if (kva >= (caddr_t)PMAP_DIRECT_BASE && kva <= (caddr_t)PMAP_DIRECT_END)
                return;

        km_free(kva, round_page(size), &kv_any, &kp_none);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory. May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
        int i, pmapflags = 0;

        if (flags & BUS_DMA_NOCACHE)
                pmapflags |= PMAP_NOCACHE;

        for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
                if (off & PGOFSET)
                        panic("_bus_dmamem_mmap: offset unaligned");
                if (segs[i].ds_addr & PGOFSET)
                        panic("_bus_dmamem_mmap: segment unaligned");
                if (segs[i].ds_len & PGOFSET)
                        panic("_bus_dmamem_mmap: segment size not multiple"
                            " of page size");
#endif
                if (off >= segs[i].ds_len) {
                        off -= segs[i].ds_len;
                        continue;
                }

                return ((segs[i].ds_addr + off) | pmapflags);
        }

        /* Page not found. */
        return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/
/*
 * Utility function to load a linear buffer. lastaddrp holds state
 * between invocations (for multiple-buffer loads). segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
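/*
 * When bounce buffers are in use, the physical addresses stored in the
 * DMA segments refer to the map's bounce pages rather than the caller's
 * buffer; _ds_va remembers the caller's virtual address and _ds_bounce_va
 * the bounce page's, so that _bus_dmamap_sync() can copy between them.
 */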
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int *usedp, int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vaddr_t pgva = -1, vaddr = (vaddr_t)buf;
        int seg, page, off;
        pmap_t pmap;
        struct vm_page *pg;
        int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;

        if (p != NULL)
                pmap = p->p_vmspace->vm_map.pmap;
        else
                pmap = pmap_kernel();

        page = *usedp;
        lastaddr = *lastaddrp;
        bmask = ~(map->_dm_boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                pmap_extract(pmap, vaddr, (paddr_t *)&curaddr);

                if (curaddr > dma_constraint.ucr_high &&
                    (map->_dm_flags & BUS_DMA_64BIT) == 0)
                        panic("Non dma-reachable buffer at curaddr %#lx(raw)",
                            curaddr);

                if (use_bounce_buffer) {
                        if (page >= map->_dm_npages)
                                return (EFBIG);

                        off = vaddr & PAGE_MASK;
                        pg = map->_dm_pages[page];
                        curaddr = VM_PAGE_TO_PHYS(pg) + off;
                        pgva = map->_dm_pgva + (page << PGSHIFT) + off;
                        page++;
                }

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (map->_dm_boundary > 0) {
                        baddr = (curaddr + map->_dm_boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        map->dm_segs[seg].ds_addr = curaddr;
                        map->dm_segs[seg].ds_len = sgsize;
                        map->dm_segs[seg]._ds_va = vaddr;
                        map->dm_segs[seg]._ds_bounce_va = pgva;
                        first = 0;
                } else {
                        if (curaddr == lastaddr &&
                            (map->dm_segs[seg].ds_len + sgsize) <=
                            map->_dm_maxsegsz &&
                            (map->_dm_boundary == 0 ||
                            (map->dm_segs[seg].ds_addr & bmask) ==
                            (curaddr & bmask)) &&
                            (!use_bounce_buffer || (map->dm_segs[seg]._ds_va +
                            map->dm_segs[seg].ds_len) == vaddr)) {
                                map->dm_segs[seg].ds_len += sgsize;
                        } else {
                                if (++seg >= map->_dm_segcnt)
                                        break;
                                map->dm_segs[seg].ds_addr = curaddr;
                                map->dm_segs[seg].ds_len = sgsize;
                                map->dm_segs[seg]._ds_va = vaddr;
                                map->dm_segs[seg]._ds_bounce_va = pgva;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *usedp = page;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0)
                return (EFBIG);		/* XXX better return value here? */
        return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, bus_addr_t low, bus_addr_t high)
{
        paddr_t curaddr, lastaddr;
        struct vm_page *m;
        struct pglist mlist;
        int curseg, error, plaflag;

        /* Always round the size. */
        size = round_page(size);

        segs[0]._ds_boundary = boundary;
        segs[0]._ds_align = alignment;

        /*
         * Allocate pages from the VM system.
         */
        plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
        if (flags & BUS_DMA_ZERO)
                plaflag |= UVM_PLA_ZERO;

        TAILQ_INIT(&mlist);
        error = uvm_pglistalloc(size, low, high, alignment, boundary,
            &mlist, nsegs, plaflag);
        if (error)
                return (error);

        /*
         * Compute the location, size, and number of segments actually
         * returned by the VM code.
         */
        m = TAILQ_FIRST(&mlist);
        curseg = 0;
        lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
        segs[curseg].ds_len = PAGE_SIZE;

        for (m = TAILQ_NEXT(m, pageq); m != NULL; m = TAILQ_NEXT(m, pageq)) {
                curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
                if (curseg == nsegs) {
                        printf("uvm_pglistalloc returned too many\n");
                        panic("_bus_dmamem_alloc_range");
                }
                if (curaddr < low || curaddr >= high) {
                        printf("uvm_pglistalloc returned non-sensical"
                            " address 0x%lx\n", curaddr);
                        panic("_bus_dmamem_alloc_range");
                }
#endif
                if (curaddr == (lastaddr + PAGE_SIZE))
                        segs[curseg].ds_len += PAGE_SIZE;
                else {
                        curseg++;
                        segs[curseg].ds_addr = curaddr;
                        segs[curseg].ds_len = PAGE_SIZE;
                }
                lastaddr = curaddr;
        }

        *rsegs = curseg + 1;

        return (0);
}