/* $NetBSD: sgmap_typedep.c,v 1.44 2021/07/19 16:25:54 thorpej Exp $ */

/*-
 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgmap_typedep.c,v 1.44 2021/07/19 16:25:54 thorpej Exp $");

#include "opt_ddb.h"

#include <sys/evcnt.h>
#include <uvm/uvm_extern.h>

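/*
 * The DMA_COUNT* macros below expand to evcnt(9)-based event counters
 * (hence the <sys/evcnt.h> include above) that tally how often the
 * interesting SGMAP code paths are taken.
 */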
#define	DMA_COUNT_DECL(cnt)	_DMA_COUNT_DECL(dma_sgmap, cnt)
#define	DMA_COUNT(cnt)		_DMA_COUNT(dma_sgmap, cnt)

#ifdef SGMAP_DEBUG
int			__C(SGMAP_TYPE,_debug) = 0;
#endif

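/*
 * PTE value that maps the shared prefetch-spill page; computed once by
 * __C(SGMAP_TYPE,_init_spill_page_pte)() below, after the spill page's
 * physical address is known.
 */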
SGMAP_PTE_TYPE		__C(SGMAP_TYPE,_prefetch_spill_page_pte);

static void		__C(SGMAP_TYPE,_do_unload)(bus_dma_tag_t, bus_dmamap_t,
			    struct alpha_sgmap *);

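/*
 * Build the spill-page PTE in the chipset's format: the page frame
 * number shifted into position, with the valid bit set.
 */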
void
__C(SGMAP_TYPE,_init_spill_page_pte)(void)
{

	__C(SGMAP_TYPE,_prefetch_spill_page_pte) =
	    (alpha_sgmap_prefetch_spill_page_pa >>
	     SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
}

DMA_COUNT_DECL(spill_page);
DMA_COUNT_DECL(extra_segment);
DMA_COUNT_DECL(extra_segment_and_spill);

static int
__C(SGMAP_TYPE,_load_buffer)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, struct vmspace *vm, int flags, int * const segp,
    struct alpha_sgmap *sgmap)
{
	vaddr_t endva, va = (vaddr_t)buf;
	paddr_t pa;
	bus_addr_t dmaoffset, sgva, extra_sgva;
	bus_size_t sgvalen, extra_sgvalen, boundary, alignment;
	SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
	int pteidx, error, spill, seg = *segp;
	bool address_is_valid __diagused;

	/* Initialize the spill page PTE if it hasn't been already. */
	if (__C(SGMAP_TYPE,_prefetch_spill_page_pte) == 0)
		__C(SGMAP_TYPE,_init_spill_page_pte)();

	if (seg == map->_dm_segcnt) {
		/* Ran out of segments. */
		return EFBIG;
	}
	KASSERT(seg < map->_dm_segcnt);

	/*
	 * Remember the offset into the first page and the total
	 * transfer length.
	 */
	dmaoffset = ((u_long)buf) & PGOFSET;

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug)) {
		printf("sgmap_load: ----- buf = %p -----\n", buf);
		printf("sgmap_load: dmaoffset = 0x%lx, buflen = 0x%lx\n",
		    dmaoffset, buflen);
	}
#endif

	/*
	 * Allocate the necessary virtual address space for the
	 * mapping.  Round the size, since we deal with whole pages.
	 */

	/*
	 * XXX Always allocate a spill page for now.  Note
	 * the spill page is not needed for an in-bound-only
	 * transfer.
	 */
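	/*
	 * (The spill page gives the SGMAP hardware's prefetch a valid
	 * page to land on when it reads ahead of the end of the mapped
	 * buffer; see the "ARGH!" boundary discussion below.)
	 */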
	if ((flags & BUS_DMA_READ) == 0)
		spill = 1;
	else
		spill = 0;

	boundary = map->_dm_boundary;

	/*
	 * Caller's mistake if the requested length is larger than
	 * their own boundary constraint.
	 */
	if (__predict_false(boundary != 0 && buflen > boundary)) {
		return EINVAL;
	}

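	/*
	 * The SG map operates on whole pages; dmaoffset is added back
	 * into the DMA address below, so the device still sees the
	 * original byte offset within the first page.
	 */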
	endva = round_page(va + buflen);
	va = trunc_page(va);

	const vm_flag_t vmflags = VM_INSTANTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	KASSERT(t->_sgmap_minalign != 0);
	alignment = t->_sgmap_minalign;
	sgvalen = (endva - va);

	SGMAP_PTE_TYPE spill_pte_v = __C(SGMAP_TYPE,_prefetch_spill_page_pte);

	/*
	 * If we have a boundary constraint, it's possible to end up in
	 * a situation where sgvalen > boundary if the caller's buffer
	 * is not page aligned.  In this case, we will have to allocate
	 * an extra SG segment and split the buffer.
	 */
	if (__predict_false(boundary != 0 && boundary < sgvalen)) {
#ifdef SGMAP_DEBUG
		if (__C(SGMAP_TYPE,_debug)) {
			printf("sgmap_load: extra segment needed\n");
		}
#endif
		DMA_COUNT(extra_segment);

		/* This should only ever happen for unaligned buffers. */
		KASSERT(dmaoffset != 0);

		extra_sgvalen = sgvalen - boundary;
		KASSERT(extra_sgvalen == PAGE_SIZE);

		/*
		 * Adjust the lengths of the first segment.  The length
		 * of the second segment will be dmaoffset.
		 */
		sgvalen -= extra_sgvalen;
		endva -= extra_sgvalen;
		buflen -= dmaoffset;

		if (spill) {
			DMA_COUNT(extra_segment_and_spill);
			extra_sgvalen += PAGE_SIZE;
		}

		error = vmem_xalloc(sgmap->aps_arena, extra_sgvalen,
				    alignment,		/* alignment */
				    0,			/* phase */
				    boundary,		/* nocross */
				    VMEM_ADDR_MIN,	/* minaddr */
				    VMEM_ADDR_MAX,	/* maxaddr */
				    vmflags,
				    &extra_sgva);
		if (error) {
			return error;
		}
	} else {
		extra_sgvalen = 0;
		extra_sgva = 0;
	}


	if (spill) {
		DMA_COUNT(spill_page);
		sgvalen += PAGE_SIZE;

		/*
		 * ARGH!  If the addition of the spill page bumped us
		 * over our boundary, we have to 2x the boundary limit.
		 * To compensate (and enforce the original boundary
		 * constraint), we force our alignment to be at least the
		 * previous boundary, thus ensuring that the only boundary
		 * violation is the pre-fetch that the SGMAP controller
		 * performs that necessitates the spill page in the first
		 * place.
		 */
		if (boundary && boundary < sgvalen) {
			if (alignment < boundary) {
				alignment = boundary;
			}
			do {
				boundary <<= 1;
			} while (boundary < sgvalen);
		}
	}
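
	/*
	 * Worked example (illustrative numbers): with 8KB pages, a 64KB
	 * boundary, and a page-aligned 64KB buffer, adding the spill
	 * page makes sgvalen 72KB.  We then force 64KB alignment and
	 * double the vmem "nocross" boundary to 128KB; the aligned 64KB
	 * of payload can no longer cross a 64KB line internally, so the
	 * only crossing of the original boundary is the trailing spill
	 * page, which only the controller's prefetch ever touches.
	 */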

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug)) {
		printf("sgmap_load: va:endva = 0x%lx:0x%lx\n", va, endva);
		printf("sgmap_load: sgvalen = 0x%lx, boundary = 0x%lx\n",
		       sgvalen, boundary);
	}
#endif

	error = vmem_xalloc(sgmap->aps_arena, sgvalen,
			    alignment,		/* alignment */
			    0,			/* phase */
			    boundary,		/* nocross */
			    VMEM_ADDR_MIN,	/* minaddr */
			    VMEM_ADDR_MAX,	/* maxaddr */
			    vmflags,
			    &sgva);
	if (error) {
		if (extra_sgvalen != 0) {
			vmem_xfree(sgmap->aps_arena, extra_sgva, extra_sgvalen);
		}
		return error;
	}

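	/*
	 * Locate the first PTE for this allocation.  One PTE describes
	 * each SGMAP page; SGMAP_PTE_SPACING allows for chipsets whose
	 * page-table entries are laid out more than one SGMAP_PTE_TYPE
	 * apart.
	 */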
	pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
	pte = &page_table[pteidx * SGMAP_PTE_SPACING];

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug))
		printf("sgmap_load: sgva = 0x%lx, pteidx = %d, "
		    "pte = %p (pt = %p)\n", sgva, pteidx, pte,
		    page_table);
#endif

	/* Generate the DMA address. */
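	/*
	 * The address the device sees is the window base, plus the page
	 * offset of the allocated SG virtual space, plus the byte offset
	 * into the first page; e.g. (illustrative numbers only) wbase
	 * 0x800000, sgva 0x6000 and dmaoffset 0x1c8 yield ds_addr
	 * 0x8061c8.
	 */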
	map->dm_segs[seg].ds_addr = sgmap->aps_wbase | sgva | dmaoffset;
	map->dm_segs[seg].ds_len = buflen;
	if (__predict_false(extra_sgvalen != 0)) {
		if (++seg == map->_dm_segcnt) {
			/* Boo! Ran out of segments! */
			vmem_xfree(sgmap->aps_arena, extra_sgva, extra_sgvalen);
			vmem_xfree(sgmap->aps_arena, sgva, sgvalen);
			return EFBIG;
		}
		map->dm_segs[seg].ds_addr = sgmap->aps_wbase | extra_sgva;
		map->dm_segs[seg].ds_len = dmaoffset;
		*segp = seg;
	}

#ifdef SGMAP_DEBUG
	if (__C(SGMAP_TYPE,_debug))
		printf("sgmap_load: wbase = 0x%lx, vpage = 0x%lx, "
		    "DMA addr = 0x%lx\n", sgmap->aps_wbase, (uint64_t)sgva,
		    map->dm_segs[seg].ds_addr);
#endif

	for (; va < endva; va += PAGE_SIZE, pteidx++,
	     pte = &page_table[pteidx * SGMAP_PTE_SPACING]) {
		/* Get the physical address for this segment. */
		address_is_valid = pmap_extract(vm->vm_map.pmap, va, &pa);
		KASSERT(address_is_valid);

		/* Load the current PTE with this page. */
		*pte = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
#ifdef SGMAP_DEBUG
		if (__C(SGMAP_TYPE,_debug))
			printf("sgmap_load:     pa = 0x%lx, pte = %p, "
			    "*pte = 0x%lx\n", pa, pte, (u_long)(*pte));
#endif
	}

	if (__predict_false(extra_sgvalen != 0)) {
		int extra_pteidx = extra_sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
		SGMAP_PTE_TYPE *extra_pte =
		    &page_table[extra_pteidx * SGMAP_PTE_SPACING];

		/* va == endva == address of extra page */
		KASSERT(va == endva);
		address_is_valid = pmap_extract(vm->vm_map.pmap, va, &pa);
		KASSERT(address_is_valid);

		/*
		 * If a spill page is needed, the previous segment will
		 * need to use this PTE value for it.
		 */
		spill_pte_v = (pa >> SGPTE_PGADDR_SHIFT) | SGPTE_VALID;
		*extra_pte = spill_pte_v;

		/* ...but the extra segment uses the real spill PTE. */
		if (spill) {
			extra_pteidx++;
			extra_pte =
			    &page_table[extra_pteidx * SGMAP_PTE_SPACING];
			*extra_pte = __C(SGMAP_TYPE,_prefetch_spill_page_pte);
		}
	}

	if (spill) {
		/* ...and the prefetch-spill page. */
		*pte = spill_pte_v;
#ifdef SGMAP_DEBUG
		if (__C(SGMAP_TYPE,_debug)) {
			printf("sgmap_load:     spill page, pte = %p, "
			    "*pte = 0x%lx\n", pte, (uint64_t)*pte);
		}
#endif
	}

	return (0);
}

DMA_COUNT_DECL(load);
DMA_COUNT_DECL(load_next_window);

int
__C(SGMAP_TYPE,_load)(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, struct alpha_sgmap *sgmap)
{
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}
	seg = 0;
	error = __C(SGMAP_TYPE,_load_buffer)(t, map, buf, buflen, vm,
	    flags, &seg, sgmap);

	alpha_mb();

#if defined(SGMAP_DEBUG) && defined(DDB)
	if (__C(SGMAP_TYPE,_debug) > 1)
		Debugger();
#endif

	if (error == 0) {
		DMA_COUNT(load);
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_window = t;
	} else {
		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
		if (t->_next_window != NULL) {
			/* Give the next window a chance. */
			DMA_COUNT(load_next_window);
			error = bus_dmamap_load(t->_next_window, map, buf,
			    buflen, p, flags);
		}
	}
	return (error);
}

DMA_COUNT_DECL(load_mbuf);
DMA_COUNT_DECL(load_mbuf_next_window);

int
__C(SGMAP_TYPE,_load_mbuf)(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags, struct alpha_sgmap *sgmap)
{
	struct mbuf *m;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic(__S(__C(SGMAP_TYPE,_load_mbuf)) ": no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);

	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = __C(SGMAP_TYPE,_load_buffer)(t, map,
		    m->m_data, m->m_len, vmspace_kernel(), flags, &seg, sgmap);
		seg++;
	}

	alpha_mb();

#if defined(SGMAP_DEBUG) && defined(DDB)
	if (__C(SGMAP_TYPE,_debug) > 1)
		Debugger();
#endif

	if (error == 0) {
		DMA_COUNT(load_mbuf);
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg;
		map->_dm_window = t;
	} else {
		/* Need to back out what we've done so far. */
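		/*
		 * (seg was already advanced past the buffer that failed,
		 * and a failed __C(SGMAP_TYPE,_load_buffer)() call frees
		 * its own SG space, so only the preceding seg - 1
		 * segments need to be torn down here.)
		 */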
		map->dm_nsegs = seg - 1;
		__C(SGMAP_TYPE,_do_unload)(t, map, sgmap);
		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
		if (t->_next_window != NULL) {
			/* Give the next window a chance. */
			DMA_COUNT(load_mbuf_next_window);
			error = bus_dmamap_load_mbuf(t->_next_window, map,
			    m0, flags);
		}
	}

	return (error);
}

DMA_COUNT_DECL(load_uio);
DMA_COUNT_DECL(load_uio_next_window);

int
__C(SGMAP_TYPE,_load_uio)(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags, struct alpha_sgmap *sgmap)
{
	bus_size_t minlen, resid;
	struct vmspace *vm;
	struct iovec *iov;
	void *addr;
	int i, seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	map->_dm_flags |= flags & (BUS_DMA_READ|BUS_DMA_WRITE);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	vm = uio->uio_vmspace;

	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = __C(SGMAP_TYPE,_load_buffer)(t, map,
		    addr, minlen, vm, flags, &seg, sgmap);
		seg++;

		resid -= minlen;
	}

	alpha_mb();

#if defined(SGMAP_DEBUG) && defined(DDB)
	if (__C(SGMAP_TYPE,_debug) > 1)
		Debugger();
#endif

	if (error == 0) {
		DMA_COUNT(load_uio);
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg;
		map->_dm_window = t;
	} else {
		/* Need to back out what we've done so far. */
		map->dm_nsegs = seg - 1;
		__C(SGMAP_TYPE,_do_unload)(t, map, sgmap);
		map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
		if (t->_next_window != NULL) {
			/* Give the next window a chance. */
			DMA_COUNT(load_uio_next_window);
			error = bus_dmamap_load_uio(t->_next_window, map,
			    uio, flags);
		}
	}

	return (error);
}

int
__C(SGMAP_TYPE,_load_raw)(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags,
    struct alpha_sgmap *sgmap)
{

	KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);
	KASSERT((flags & (BUS_DMA_READ|BUS_DMA_WRITE)) !=
	    (BUS_DMA_READ|BUS_DMA_WRITE));

	panic(__S(__C(SGMAP_TYPE,_load_raw)) ": not implemented");
}

static void
__C(SGMAP_TYPE,_do_unload)(bus_dma_tag_t t, bus_dmamap_t map,
    struct alpha_sgmap *sgmap)
{
	SGMAP_PTE_TYPE *pte, *page_table = sgmap->aps_pt;
	bus_addr_t osgva, sgva, esgva;
	int spill, seg, pteidx;

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		/*
		 * XXX Always allocate a spill page for now.  Note
		 * the spill page is not needed for an in-bound-only
		 * transfer.
		 */
		if ((map->_dm_flags & BUS_DMA_READ) == 0)
			spill = 1;
		else
			spill = 0;

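		/*
		 * ds_addr was built as (wbase | sgva | offset), so masking
		 * off the window base and truncating/rounding to page
		 * boundaries recovers the SG virtual extent allocated for
		 * this segment (plus one page if a spill page was
		 * appended).
		 */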
		sgva = map->dm_segs[seg].ds_addr & ~sgmap->aps_wbase;

		esgva = round_page(sgva + map->dm_segs[seg].ds_len);
		osgva = sgva = trunc_page(sgva);

		if (spill)
			esgva += PAGE_SIZE;

		/* Invalidate the PTEs for the mapping. */
		for (pteidx = sgva >> SGMAP_ADDR_PTEIDX_SHIFT;
		     sgva < esgva; sgva += PAGE_SIZE, pteidx++) {
			pte = &page_table[pteidx * SGMAP_PTE_SPACING];
#ifdef SGMAP_DEBUG
			if (__C(SGMAP_TYPE,_debug))
				printf("sgmap_unload:     pte = %p, "
				    "*pte = 0x%lx\n", pte, (u_long)(*pte));
#endif
			*pte = 0;
		}

		alpha_mb();

		/* Free the virtual address space used by the mapping. */
		vmem_xfree(sgmap->aps_arena, osgva, (esgva - osgva));
	}

	map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);

	/* Mark the mapping invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_window = NULL;
}

DMA_COUNT_DECL(unload);

void
__C(SGMAP_TYPE,_unload)(bus_dma_tag_t t, bus_dmamap_t map,
    struct alpha_sgmap *sgmap)
{
	KASSERT(map->_dm_window == t);
	DMA_COUNT(unload);
	__C(SGMAP_TYPE,_do_unload)(t, map, sgmap);
}