/*	$OpenBSD: isa_machdep.c,v 1.27 2015/03/14 03:38:46 jsg Exp $	*/
/*	$NetBSD: isa_machdep.c,v 1.22 1997/06/12 23:57:32 thorpej Exp $	*/

#define ISA_DMA_STATS

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1993, 1994, 1996, 1997
 *	Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include "ioapic.h"

#if NIOAPIC > 0
#include <machine/i82093var.h>
#include <machine/mpbiosvar.h>
#endif

#include <machine/bus.h>

#include <machine/intr.h>
#include <machine/pio.h>
#include <machine/cpufunc.h>
#include <machine/i8259.h>

#include <dev/isa/isavar.h>
#if 0
#include <dev/isa/isadmavar.h>
#endif
#include <i386/isa/isa_machdep.h>

#include "isadma.h"

extern	paddr_t avail_end;

#define	IDTVEC(name)	__CONCAT(X,name)
/* default interrupt vector table entries */
typedef int (*vector)(void);
extern vector IDTVEC(intr)[];
void isa_strayintr(int);
int fakeintr(void *);

#if NISADMA > 0
int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_check_buffer(void *, bus_size_t, int, bus_size_t,
	    struct proc *);
int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct bus_dma_tag isa_bus_dma_tag = {
	NULL,			/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_bus_dmamem_alloc_range,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
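
/*
 * A minimal usage sketch, not compiled into the kernel: a hypothetical
 * ISA driver drives this tag through the generic bus_dma(9) entry
 * points.  "foo_softc", "foo_dma_setup" and "sc_dmamap" are
 * illustrative names, not part of this file.
 */
#if 0
int
foo_dma_setup(struct foo_softc *sc, void *buf, bus_size_t len)
{
	int error;

	/* One segment of at most len bytes, no boundary constraint. */
	error = bus_dmamap_create(&isa_bus_dma_tag, len, 1, len, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap);
	if (error)
		return (error);

	/*
	 * _isa_bus_dmamap_load() may transparently substitute a
	 * bounce buffer below the 16M line for "buf".
	 */
	error = bus_dmamap_load(&isa_bus_dma_tag, sc->sc_dmamap, buf,
	    len, NULL, BUS_DMA_NOWAIT);
	if (error)
		bus_dmamap_destroy(&isa_bus_dma_tag, sc->sc_dmamap);
	return (error);
}
#endif	/* example */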
#endif /* NISADMA > 0 */

#define GICODE_SEL	10

u_long	intrstray[ICU_LEN];

/*
 * Caught a stray interrupt; log it, but stop after the first five
 * occurrences on any given IRQ.
 */
void
isa_strayintr(int irq)
{
	/*
	 * Stray interrupts on irq 7 occur when an interrupt line is raised
	 * and then lowered before the CPU acknowledges it.  This generally
	 * means either the device is screwed or something is cli'ing too
	 * long and it's timing out.
	 */
	if (++intrstray[irq] <= 5)
		log(LOG_ERR, "stray interrupt %d%s\n", irq,
		    intrstray[irq] >= 5 ? "; stopped logging" : "");
}

int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
int iminlevel[ICU_LEN], imaxlevel[ICU_LEN];
struct intrhand *intrhand[ICU_LEN];

int
fakeintr(void *arg)
{
	return 0;
}

#define	LEGAL_IRQ(x)	((x) >= 0 && (x) < ICU_LEN && (x) != 2)

int
isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
{
	int i, bestirq, count;
	int tmp;
	struct intrhand **p, *q;

	if (type == IST_NONE)
		panic("intr_alloc: bogus type");

	bestirq = -1;
	count = -1;

	/* some interrupts should never be dynamically allocated */
	mask &= 0xdef8;

	/*
	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
	 * the right answer is to do "breadth-first" searching of devices.
	 */
	mask &= 0xefbf;

	for (i = 0; i < ICU_LEN; i++) {
		if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
			continue;

		switch(intrtype[i]) {
		case IST_NONE:
			/*
			 * if nothing's using the irq, just return it
			 */
			*irq = i;
			return (0);

		case IST_EDGE:
		case IST_LEVEL:
			if (type != intrtype[i])
				continue;
			/*
			 * if the irq is shareable, count the number of other
			 * handlers, and if it's smaller than the last irq like
			 * this, remember it
			 *
			 * XXX We should probably also consider the
			 * interrupt level and stick IPL_TTY with other
			 * IPL_TTY, etc.
			 */
			for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
			     p = &q->ih_next, tmp++)
				;
			if ((bestirq == -1) || (count > tmp)) {
				bestirq = i;
				count = tmp;
			}
			break;

		case IST_PULSE:
			/* this just isn't shareable */
			continue;
		}
	}

	if (bestirq == -1)
		return (1);

	*irq = bestirq;

	return (0);
}
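
/*
 * Usage sketch, not compiled: a hypothetical driver asks for any one
 * of IRQs 3, 4 or 5 and then wires up its handler.  "foo_intr" and
 * the softc fields are illustrative names only.
 */
#if 0
	int irq;

	/* Candidate IRQs are passed as a bitmask: here 3, 4 and 5. */
	if (isa_intr_alloc(ic, (1 << 3) | (1 << 4) | (1 << 5),
	    IST_EDGE, &irq) != 0)
		return;		/* nothing free or compatibly shareable */

	sc->sc_ih = isa_intr_establish(ic, irq, IST_EDGE, IPL_NET,
	    foo_intr, sc, sc->sc_dev.dv_xname);
#endif	/* example */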

/*
 * Just check to see if an IRQ is available/can be shared.
 * 0 = interrupt not available
 * 1 = interrupt shareable
 * 2 = interrupt all to ourself
 */
int
isa_intr_check(isa_chipset_tag_t ic, int irq, int type)
{
	if (!LEGAL_IRQ(irq) || type == IST_NONE)
		return (0);

	switch (intrtype[irq]) {
	case IST_NONE:
		return (2);
	case IST_LEVEL:
		if (type != intrtype[irq])
			return (0);
		return (1);
	case IST_EDGE:
	case IST_PULSE:
		if (type != IST_NONE)
			return (0);
	}
	return (1);
}
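
/*
 * Sketch, not compiled: the three return values map naturally onto a
 * switch in a hypothetical probe routine.
 */
#if 0
	switch (isa_intr_check(ic, irq, IST_LEVEL)) {
	case 0:
		return (0);	/* IRQ taken, or trigger type clashes */
	case 1:
		/* would share the IRQ with another level handler */
		break;
	case 2:
		/* we would be the IRQ's only user */
		break;
	}
#endif	/* example */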

/*
 * Set up an interrupt handler to start being called.
 * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
 */
void *
isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, int level,
    int (*ih_fun)(void *), void *ih_arg, char *ih_what)
{
	struct pic *pic = &i8259_pic;
	int pin = irq;

#if NIOAPIC > 0
	struct mp_intr_map *mip;

	if (mp_busses != NULL) {
		if (mp_isa_bus == NULL)
			panic("no isa bus");

		for (mip = mp_isa_bus->mb_intrs; mip != NULL;
		    mip = mip->next) {
			if (mip->bus_pin == pin) {
				pin = APIC_IRQ_PIN(mip->ioapic_ih);
				pic = &mip->ioapic->sc_pic;
				break;
			}
		}
	}
#endif

	KASSERT(pic);

	return intr_establish(irq, pic, pin, type, level, ih_fun,
	    ih_arg, ih_what);
}

/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(isa_chipset_tag_t ic, void *arg)
{
	intr_disestablish(arg);
}

void
isa_attach_hook(struct device *parent, struct device *self,
    struct isabus_attach_args *iba)
{
	extern int isa_has_been_seen;

	/*
	 * Notify others that might need to know that the ISA bus
	 * has now been attached.
	 */
	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");
	isa_has_been_seen = 1;
}

#if NISADMA > 0
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
	    (flags & ISABUS_DMA_32BIT) == 0) ||
	    ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ?
	        (M_NOWAIT|M_ZERO) : (M_WAITOK|M_ZERO))) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DEVBUF, 0);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
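
/*
 * Sketch, not compiled: a driver that cannot tolerate a load-time
 * allocation failure passes BUS_DMA_ALLOCNOW at creation time, so any
 * bounce pages are reserved here and held for its exclusive use.
 * "sc_dmamap" is an illustrative softc field.
 */
#if 0
	error = bus_dmamap_create(&isa_bus_dma_tag, MAXPHYS, 1, MAXPHYS,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_dmamap);
#endif	/* example */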

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DEVBUF, 0);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Check to see if we might need to bounce the transfer.
	 */
	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Check if all pages are below the bounce
		 * threshold.  If they are, don't bother bouncing.
		 */
		if (_isa_dma_check_buffer(buf, buflen,
		    map->_dm_segcnt, map->_dm_boundary, p) == 0)
			return (_bus_dmamap_load(t, map, buf, buflen,
			    p, flags));

		STAT_INCR(isa_dma_stats_bounces);

		/*
		 * Allocate bounce pages, if necessary.
		 */
		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
			    flags);
			if (error)
				return (error);
		}

		/*
		 * Cache a pointer to the caller's buffer and
		 * load the DMA map with the bounce buffer.
		 */
		cookie->id_origbuf = buf;
		cookie->id_origbuflen = buflen;
		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
		    buflen, p, flags);

		if (error) {
			/*
			 * Free the bounce pages, unless our resources
			 * are reserved for our exclusive use.
			 */
			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
				_isa_dma_free_bouncebuf(t, map);
		}

		/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
		cookie->id_flags |= ID_IS_BOUNCING;
	} else {
		/*
		 * Just use the generic load function.
		 */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	}

	return (error);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags)
{
	panic("_isa_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

#ifdef DEBUG
	if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif
#ifdef DIAGNOSTIC
	if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0 &&
	    (op & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");
#endif /* DIAGNOSTIC */

	/* PREREAD and POSTWRITE are no-ops */
	if (op & BUS_DMASYNC_PREWRITE) {
		/*
		 * If we're bouncing this transfer, copy the
		 * caller's buffer to the bounce buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy(cookie->id_bouncebuf + offset,
			    cookie->id_origbuf + offset, len);
	}

	_bus_dmamap_sync(t, map, offset, len, op);

	if (op & BUS_DMASYNC_POSTREAD) {
		/*
		 * If we're bouncing this transfer, copy the
		 * bounce buffer to the caller's buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy(cookie->id_origbuf + offset,
			    cookie->id_bouncebuf + offset, len);
	}
}
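
/*
 * Sketch, not compiled: the ordering a caller must follow, one sync
 * per direction.  PREWRITE pushes the caller's data into the bounce
 * buffer before the device reads memory; POSTREAD pulls the device's
 * data back out afterwards.  Mixing PRE and POST flags in one call
 * trips the DIAGNOSTIC panic above.
 */
#if 0
	/* before a memory-to-device transfer */
	bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len,
	    BUS_DMASYNC_PREWRITE);
	/* ... start DMA, wait for the device's completion interrupt ... */

	/* after a device-to-memory transfer */
	bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len,
	    BUS_DMASYNC_POSTREAD);
#endif	/* example */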

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	int error;

	/* Try in ISA addressable region first */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, ISA_DMA_BOUNCE_THRESHOLD);
	if (!error)
		return (error);

	/* Otherwise try anywhere (we'll bounce later) */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (bus_addr_t)0, (bus_addr_t)-1);
	return (error);
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

/*
 * Return 0 if all pages in the passed buffer lie within the DMA'able
 * range of RAM.
 */
int
_isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
    bus_size_t boundary, struct proc *p)
{
	vaddr_t vaddr = (vaddr_t)buf;
	vaddr_t endva;
	paddr_t pa, lastpa;
	u_long pagemask = ~(boundary - 1);
	pmap_t pmap;
	int nsegs;

	endva = round_page(vaddr + buflen);

	nsegs = 1;
	lastpa = 0;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; vaddr < endva; vaddr += NBPG) {
		/*
		 * Get physical address for this segment.
		 */
		pmap_extract(pmap, (vaddr_t)vaddr, &pa);
		pa = trunc_page(pa);

		/*
		 * Is it below the DMA'able threshold?
		 */
		if (pa > ISA_DMA_BOUNCE_THRESHOLD)
			return (EINVAL);

		if (lastpa) {
			/*
			 * Check excessive segment count.
			 */
			if (lastpa + NBPG != pa) {
				if (++nsegs > segcnt)
					return (EFBIG);
			}

			/*
			 * Check boundary restriction.
			 */
			if (boundary) {
				if ((lastpa ^ pa) & pagemask)
					return (EINVAL);
			}
		}
		lastpa = pa;
	}

	return (0);
}

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc_range(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags,
	    0, ISA_DMA_BOUNCE_THRESHOLD);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
#endif /* NISADMA > 0 */