/*	$OpenBSD: isa_machdep.c,v 1.72 2011/04/16 00:40:58 deraadt Exp $	*/
/*	$NetBSD: isa_machdep.c,v 1.22 1997/06/12 23:57:32 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1993, 1994, 1996, 1997
 *	Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include "ioapic.h"

#if NIOAPIC > 0
#include <machine/i82093var.h>
#include <machine/mpbiosvar.h>
#endif

#include <machine/bus.h>

#include <machine/intr.h>
#include <machine/pio.h>
#include <machine/cpufunc.h>
#include <machine/i8259.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>
#include <dev/isa/isadmavar.h>
#include <i386/isa/isa_machdep.h>

#include "isadma.h"

extern	paddr_t avail_end;

#define	IDTVEC(name)	__CONCAT(X,name)
/* default interrupt vector table entries */
typedef int (*vector)(void);
extern vector IDTVEC(intr)[];
void isa_strayintr(int);
void intr_calculatemasks(void);
int fakeintr(void *);

#if NISADMA > 0
int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_check_buffer(void *, bus_size_t, int, bus_size_t,
	    struct proc *);
int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct bus_dma_tag isa_bus_dma_tag = {
	NULL,			/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
#endif /* NISADMA > 0 */
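
#if 0
/*
 * Illustrative sketch (not compiled) of the driver-side life cycle the
 * entry points above implement, assuming a hypothetical softc with an
 * sc_dmamap member and a page-sized kernel buffer sc_buf; error
 * handling is omitted for brevity.
 */
void
example_dma_cycle(struct example_softc *sc)
{
	bus_dma_tag_t t = &isa_bus_dma_tag;

	/* Create a single-segment map; this may record bounce state. */
	bus_dmamap_create(t, NBPG, 1, NBPG, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap);

	/* Load the buffer; it is bounced if any page sits above 16M. */
	bus_dmamap_load(t, sc->sc_dmamap, sc->sc_buf, NBPG, NULL,
	    BUS_DMA_NOWAIT);

	/* ... bus_dmamap_sync(), run the transfer, sync again ... */

	bus_dmamap_unload(t, sc->sc_dmamap);
	bus_dmamap_destroy(t, sc->sc_dmamap);
}
#endif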

/*
 * Fill in default interrupt table (in case of spurious interrupt
 * during configuration of kernel), set up interrupt control unit.
 */
void
isa_defaultirq(void)
{
	int i;

	/* icu vectors */
	for (i = 0; i < ICU_LEN; i++)
		setgate(&idt[ICU_OFFSET + i], IDTVEC(intr)[i], 0,
		    SDT_SYS386IGT, SEL_KPL, GICODE_SEL);

	/* initialize 8259's */
	outb(IO_ICU1, 0x11);		/* reset; program device, four bytes */
	outb(IO_ICU1+1, ICU_OFFSET);	/* starting at this vector index */
	outb(IO_ICU1+1, 1 << IRQ_SLAVE); /* slave on line 2 */
#ifdef AUTO_EOI_1
	outb(IO_ICU1+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU1+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU1+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU1, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU1, 0x0a);		/* Read IRR by default. */
#ifdef REORDER_IRQ
	outb(IO_ICU1, 0xc0 | (3 - 1));	/* pri order 3-7, 0-2 (com2 first) */
#endif

	outb(IO_ICU2, 0x11);		/* reset; program device, four bytes */
	outb(IO_ICU2+1, ICU_OFFSET+8);	/* starting at this vector index */
	outb(IO_ICU2+1, IRQ_SLAVE);
#ifdef AUTO_EOI_2
	outb(IO_ICU2+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU2+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU2+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU2, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU2, 0x0a);		/* Read IRR by default. */
}

void
isa_nodefaultirq(void)
{
	int i;

	/* icu vectors */
	for (i = 0; i < ICU_LEN; i++)
		unsetgate(&idt[ICU_OFFSET + i]);
}

/*
 * Handle an NMI, possibly a machine check.
 * Return true to panic the system, false to ignore.
 */
int
isa_nmi(void)
{
	/* This is historic garbage; these ports are not readable */
	log(LOG_CRIT, "Non-maskable interrupt, may be parity error\n");
	return(0);
}

u_long  intrstray[ICU_LEN];

/*
 * Caught a stray interrupt, notify
 */
void
isa_strayintr(int irq)
{
	/*
	 * Stray interrupts on irq 7 occur when an interrupt line is raised
	 * and then lowered before the CPU acknowledges it.  This generally
	 * means either the device is screwed or something is cli'ing too
	 * long and it's timing out.
	 */
	if (++intrstray[irq] <= 5)
		log(LOG_ERR, "stray interrupt %d%s\n", irq,
		    intrstray[irq] >= 5 ? "; stopped logging" : "");
}

int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
int iminlevel[ICU_LEN], imaxlevel[ICU_LEN];
struct intrhand *intrhand[ICU_LEN];

int imask[NIPL];	/* Bitmask telling what interrupts are blocked. */
int iunmask[NIPL];	/* Bitmask telling what interrupts are accepted. */

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
void
intr_calculatemasks(void)
{
	int irq, level, unusedirqs;
	struct intrhand *q;

	/* First, figure out which levels each IRQ uses. */
	unusedirqs = 0xffff;
	for (irq = 0; irq < ICU_LEN; irq++) {
		int levels = 0;
		for (q = intrhand[irq]; q; q = q->ih_next)
			levels |= 1 << IPL(q->ih_level);
		intrlevel[irq] = levels;
		if (levels)
			unusedirqs &= ~(1 << irq);
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrlevel[irq] & (1 << level))
				irqs |= 1 << irq;
		imask[level] = irqs | unusedirqs;
	}

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	IMASK(IPL_SOFTCLOCK) |= 1 << SIR_CLOCK;
	IMASK(IPL_SOFTNET) |= 1 << SIR_NET;
	IMASK(IPL_SOFTTTY) |= 1 << SIR_TTY;

	/*
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
	for (level = 0; level < NIPL - 1; level++)
		imask[level + 1] |= imask[level];

	/* And eventually calculate the complete masks. */
	for (irq = 0; irq < ICU_LEN; irq++) {
		int irqs = 1 << irq;
		int minlevel = IPL_NONE;
		int maxlevel = IPL_NONE;

		if (intrhand[irq] == NULL) {
			maxlevel = IPL_HIGH;
			irqs = IMASK(IPL_HIGH);
		} else {
			for (q = intrhand[irq]; q; q = q->ih_next) {
				irqs |= IMASK(q->ih_level);
				if (minlevel == IPL_NONE ||
				    q->ih_level < minlevel)
					minlevel = q->ih_level;
				if (q->ih_level > maxlevel)
					maxlevel = q->ih_level;
			}
		}
		if (irqs != IMASK(maxlevel))
			panic("irq %d level %x mask mismatch: %x vs %x", irq,
			    maxlevel, irqs, IMASK(maxlevel));

		intrmask[irq] = irqs;
		iminlevel[irq] = minlevel;
		imaxlevel[irq] = maxlevel;

#if 0
		printf("irq %d: level %x, mask 0x%x (%x)\n", irq,
		    imaxlevel[irq], intrmask[irq], IMASK(imaxlevel[irq]));
#endif
	}

	/* Lastly, determine which IRQs are actually in use. */
	{
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrhand[irq])
				irqs |= 1 << irq;
		if (irqs >= 0x100) /* any IRQs >= 8 in use */
			irqs |= 1 << IRQ_SLAVE;
		imen = ~irqs;
		SET_ICUS();
	}

	/* For speed of splx, provide the inverse of the interrupt masks. */
	for (irq = 0; irq < ICU_LEN; irq++)
		iunmask[irq] = ~imask[irq];
}
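
#if 0
/*
 * Illustrative sketch (not compiled) of how the precomputed masks are
 * meant to be consumed.  The real spl code lives in the spl/splx
 * machinery elsewhere; "cpl" here stands in loosely for the current
 * priority mask.  Raising the level is a single OR with imask[], and
 * iunmask[] exists so lowering can be a single AND instead of a
 * recomputation.
 */
static int
example_splraise(int level)
{
	int ocpl = cpl;

	cpl = ocpl | imask[level];
	return (ocpl);
}
#endif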

int
fakeintr(void *arg)
{
	return 0;
}

#define	LEGAL_IRQ(x)	((x) >= 0 && (x) < ICU_LEN && (x) != 2)

int
isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
{
	int i, bestirq, count;
	int tmp;
	struct intrhand **p, *q;

	if (type == IST_NONE)
		panic("intr_alloc: bogus type");

	bestirq = -1;
	count = -1;

	/* some interrupts should never be dynamically allocated */
	mask &= 0xdef8;

	/*
	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
	 * the right answer is to do "breadth-first" searching of devices.
	 */
	mask &= 0xefbf;

	for (i = 0; i < ICU_LEN; i++) {
		if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
			continue;

		switch(intrtype[i]) {
		case IST_NONE:
			/*
			 * if nothing's using the irq, just return it
			 */
			*irq = i;
			return (0);

		case IST_EDGE:
		case IST_LEVEL:
			if (type != intrtype[i])
				continue;
			/*
			 * if the irq is shareable, count the number of other
			 * handlers, and if it's smaller than the last irq like
			 * this, remember it
			 *
			 * XXX We should probably also consider the
			 * interrupt level and stick IPL_TTY with other
			 * IPL_TTY, etc.
			 */
			for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
			     p = &q->ih_next, tmp++)
				;
			if ((bestirq == -1) || (count > tmp)) {
				bestirq = i;
				count = tmp;
			}
			break;

		case IST_PULSE:
			/* this just isn't shareable */
			continue;
		}
	}

	if (bestirq == -1)
		return (1);

	*irq = bestirq;

	return (0);
}
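
#if 0
/*
 * Hypothetical caller sketch (not compiled): ask for any edge-triggered
 * interrupt out of IRQs 3, 4 and 5.  On success, *irq holds either a
 * completely unused line or the compatible shared line with the fewest
 * handlers on it.
 */
static void
example_alloc(isa_chipset_tag_t ic)
{
	int irq;

	if (isa_intr_alloc(ic, (1 << 3) | (1 << 4) | (1 << 5), IST_EDGE,
	    &irq))
		printf("example: no suitable irq available\n");
}
#endif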

/*
 * Just check to see if an IRQ is available/can be shared.
 * 0 = interrupt not available
 * 1 = interrupt shareable
 * 2 = interrupt all to ourself
 */
int
isa_intr_check(isa_chipset_tag_t ic, int irq, int type)
{
	if (!LEGAL_IRQ(irq) || type == IST_NONE)
		return (0);

	switch (intrtype[irq]) {
	case IST_NONE:
		return (2);
		break;
	case IST_LEVEL:
		if (type != intrtype[irq])
			return (0);
		return (1);
		break;
	case IST_EDGE:
	case IST_PULSE:
		if (type != IST_NONE)
			return (0);
	}
	return (1);
}

/*
 * Set up an interrupt handler to start being called.
 * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
 */
void *
isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	struct intrhand **p, *q, *ih;
	static struct intrhand fakehand = {fakeintr};

#if NIOAPIC > 0
	struct mp_intr_map *mip;

	if (mp_busses != NULL) {
		int mpspec_pin = irq;
		int airq;

		if (mp_isa_bus == NULL)
			panic("no isa bus");

		for (mip = mp_isa_bus->mb_intrs; mip != NULL;
		    mip = mip->next) {
			if (mip->bus_pin == mpspec_pin) {
				airq = mip->ioapic_ih | irq;
				break;
			}
		}
		if (mip == NULL && mp_eisa_bus) {
			for (mip = mp_eisa_bus->mb_intrs; mip != NULL;
			    mip = mip->next) {
				if (mip->bus_pin == mpspec_pin) {
					airq = mip->ioapic_ih | irq;
					break;
				}
			}
		}

		/* no MP mapping found -- invent! */
		if (mip == NULL)
			airq = mpbios_invent(irq, type, mp_isa_bus->mb_idx);

		return (apic_intr_establish(airq, type, level, ih_fun,
		    ih_arg, ih_what));
	}
#endif
	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL) {
		printf("%s: isa_intr_establish: can't malloc handler info\n",
		    ih_what);
		return (NULL);
	}

	if (!LEGAL_IRQ(irq) || type == IST_NONE) {
		printf("%s: isa_intr_establish: bogus irq or type\n", ih_what);
		free(ih, M_DEVBUF);
		return (NULL);
	}
	switch (intrtype[irq]) {
	case IST_NONE:
		intrtype[irq] = type;
		break;
	case IST_EDGE:
		intr_shared_edge = 1;
		/* FALLTHROUGH */
	case IST_LEVEL:
		if (type == intrtype[irq])
			break;
	case IST_PULSE:
		if (type != IST_NONE) {
			/*printf("%s: intr_establish: can't share %s with %s, irq %d\n",
			    ih_what, isa_intr_typename(intrtype[irq]),
			    isa_intr_typename(type), irq);*/
			free(ih, M_DEVBUF);
			return (NULL);
		}
		break;
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
		;

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	*p = &fakehand;

	intr_calculatemasks();

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_irq = irq;
	evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq);
	*p = ih;

	return (ih);
}
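
#if 0
/*
 * Hypothetical attach-time sketch (not compiled): hook up an
 * edge-triggered handler at IPL_NET.  "sc", "ia" and example_intr are
 * illustrative names; a real driver takes the chipset tag and irq from
 * its isa attach arguments.
 */
static void
example_attach(struct example_softc *sc, struct isa_attach_args *ia)
{
	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE,
	    IPL_NET, example_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		printf("%s: couldn't establish interrupt\n",
		    sc->sc_dev.dv_xname);
}
#endif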

/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(isa_chipset_tag_t ic, void *arg)
{
	struct intrhand *ih = arg;
	int irq = ih->ih_irq;
	struct intrhand **p, *q;

#if NIOAPIC > 0
	if (irq & APIC_INT_VIA_APIC) {
		apic_intr_disestablish(arg);
		return;
	}
#endif

	if (!LEGAL_IRQ(irq))
		panic("intr_disestablish: bogus irq %d", irq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL && q != ih; p = &q->ih_next)
		;
	if (q)
		*p = q->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF);

	intr_calculatemasks();

	if (intrhand[irq] == NULL)
		intrtype[irq] = IST_NONE;
}

void
isa_attach_hook(struct device *parent, struct device *self,
    struct isabus_attach_args *iba)
{
	extern int isa_has_been_seen;

	/*
	 * Notify others that might need to know that the ISA bus
	 * has now been attached.
	 */
	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");
	isa_has_been_seen = 1;
}

#if NISADMA > 0
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
	    (flags & ISABUS_DMA_32BIT) == 0) ||
	    ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF,
	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)|M_ZERO)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DEVBUF);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
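
#if 0
/*
 * Hypothetical creation sketch (not compiled): a VL-bus device capable
 * of full 32-bit addressing passes ISABUS_DMA_32BIT so the high-memory
 * test above never sets ID_MIGHT_NEED_BOUNCE for it (though a map can
 * still bounce if its segment count is too small for the transfer).
 * "sc_dmamap" is an illustrative softc member.
 */
static int
example_create_32bit(struct example_softc *sc)
{
	return (bus_dmamap_create(&isa_bus_dma_tag, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_NOWAIT | ISABUS_DMA_32BIT, &sc->sc_dmamap));
}
#endif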

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DEVBUF);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Check to see if we might need to bounce the transfer.
	 */
	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Check if all pages are below the bounce
		 * threshold.  If they are, don't bother bouncing.
		 */
		if (_isa_dma_check_buffer(buf, buflen,
		    map->_dm_segcnt, map->_dm_boundary, p) == 0)
			return (_bus_dmamap_load(t, map, buf, buflen,
			    p, flags));

		STAT_INCR(isa_dma_stats_bounces);

		/*
		 * Allocate bounce pages, if necessary.
		 */
		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
			    flags);
			if (error)
				return (error);
		}

		/*
		 * Cache a pointer to the caller's buffer and
		 * load the DMA map with the bounce buffer.
		 */
		cookie->id_origbuf = buf;
		cookie->id_origbuflen = buflen;
		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
		    buflen, p, flags);

		if (error) {
			/*
			 * Free the bounce pages, unless our resources
			 * are reserved for our exclusive use.
			 */
			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
				_isa_dma_free_bouncebuf(t, map);
		}

		/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
		cookie->id_flags |= ID_IS_BOUNCING;
	} else {
		/*
		 * Just use the generic load function.
		 */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	}

	return (error);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags)
{

	panic("_isa_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

#ifdef DEBUG
	if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif
#ifdef DIAGNOSTIC
	if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0 &&
	    (op & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");
#endif /* DIAGNOSTIC */

	/* PREREAD and POSTWRITE are no-ops */
	if (op & BUS_DMASYNC_PREWRITE) {
		/*
		 * If we're bouncing this transfer, copy the
		 * caller's buffer to the bounce buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			bcopy((char *)cookie->id_origbuf + offset,
			    cookie->id_bouncebuf + offset,
			    len);
	} else if (op & BUS_DMASYNC_POSTREAD) {
		/*
		 * If we're bouncing this transfer, copy the
		 * bounce buffer to the caller's buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			bcopy((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origbuf + offset,
			    len);
	}

#if 0
	/* This is a noop anyhow, so why bother calling it? */
	_bus_dmamap_sync(t, map, op);
#endif
}
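
#if 0
/*
 * Illustrative driver-side pattern (not compiled): since the bounce
 * copies above happen only on PREWRITE and POSTREAD, every transfer
 * must be bracketed with the matching pair or bounced data is never
 * copied.  "t", "map" and "len" are placeholders.
 */
static void
example_sync_usage(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t len)
{
	/* memory -> device: copy into the bounce buffer, then run DMA */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
	/* ... start transfer, wait for completion ... */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);

	/* device -> memory: run DMA first, then copy out of the bounce */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
	/* ... start transfer, wait for completion ... */
	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
}
#endif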

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	int error;

	/* Try in ISA addressable region first */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, ISA_DMA_BOUNCE_THRESHOLD);
	if (!error)
		return (error);

	/* Otherwise try anywhere (we'll bounce later) */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)-1);
	return (error);
}


/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

/*
 * Return 0 if all pages in the passed buffer lie within the DMA'able
 * range of RAM.
 */
int
_isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
    bus_size_t boundary, struct proc *p)
{
	vaddr_t vaddr = (vaddr_t)buf;
	vaddr_t endva;
	paddr_t pa, lastpa;
	u_long pagemask = ~(boundary - 1);
	pmap_t pmap;
	int nsegs;

	endva = round_page(vaddr + buflen);

	nsegs = 1;
	lastpa = 0;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; vaddr < endva; vaddr += NBPG) {
		/*
		 * Get physical address for this segment.
		 */
		pmap_extract(pmap, (vaddr_t)vaddr, &pa);
		pa = trunc_page(pa);

		/*
		 * Is it below the DMA'able threshold?
		 */
		if (pa > ISA_DMA_BOUNCE_THRESHOLD)
			return (EINVAL);

		if (lastpa) {
			/*
			 * Check excessive segment count.
			 */
			if (lastpa + NBPG != pa) {
				if (++nsegs > segcnt)
					return (EFBIG);
			}

			/*
			 * Check boundary restriction.
			 */
			if (boundary) {
				if ((lastpa ^ pa) & pagemask)
					return (EINVAL);
			}
		}
		lastpa = pa;
	}

	return (0);
}
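
/*
 * Worked example of the boundary test above: with boundary = 0x10000
 * (64K), pagemask = ~0xffff.  Consecutive pages at physical 0x0f000 and
 * 0x10000 give (0x0f000 ^ 0x10000) & ~0xffff = 0x10000 != 0, so the
 * buffer straddles a 64K boundary and the caller must bounce it.
 */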

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc_range(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags,
	    0, ISA_DMA_BOUNCE_THRESHOLD);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
#endif /* NISADMA > 0 */
1054