1 /*	$OpenBSD: isa_machdep.c,v 1.84 2021/03/06 09:20:50 jsg Exp $	*/
2 /*	$NetBSD: isa_machdep.c,v 1.22 1997/06/12 23:57:32 thorpej Exp $	*/
3 
4 /*-
5  * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10  * NASA Ames Research Center.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*-
35  * Copyright (c) 1993, 1994, 1996, 1997
36  *	Charles M. Hannum.  All rights reserved.
37  * Copyright (c) 1991 The Regents of the University of California.
38  * All rights reserved.
39  *
40  * This code is derived from software contributed to Berkeley by
41  * William Jolitz.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. Neither the name of the University nor the names of its contributors
52  *    may be used to endorse or promote products derived from this software
53  *    without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  *
67  *	@(#)isa.c	7.2 (Berkeley) 5/13/91
68  */
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/syslog.h>
73 #include <sys/device.h>
74 #include <sys/malloc.h>
75 #include <sys/proc.h>
76 
77 #include <uvm/uvm_extern.h>
78 
79 #include "ioapic.h"
80 
81 #if NIOAPIC > 0
82 #include <machine/i82093var.h>
83 #include <machine/mpbiosvar.h>
84 #endif
85 
86 #include <machine/bus.h>
87 
88 #include <machine/intr.h>
89 #include <machine/pio.h>
90 #include <machine/cpufunc.h>
91 #include <machine/i8259.h>
92 
93 #include <dev/isa/isareg.h>
94 #include <dev/isa/isavar.h>
95 #include <dev/isa/isadmavar.h>
96 #include <i386/isa/isa_machdep.h>
97 
98 #include "isadma.h"
99 
100 extern	paddr_t avail_end;
101 
102 #define	IDTVEC(name)	__CONCAT(X,name)
103 /* default interrupt vector table entries */
104 typedef int (*vector)(void);
105 extern vector IDTVEC(intr)[];
106 void isa_strayintr(int);
107 void intr_calculatemasks(void);
108 int fakeintr(void *);
109 
110 #if NISADMA > 0
111 int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
112 	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
113 void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
114 int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
115 	    bus_size_t, struct proc *, int);
116 int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
117 	    struct mbuf *, int);
118 int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
119 	    struct uio *, int);
120 int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
121 	    bus_dma_segment_t *, int, bus_size_t, int);
122 void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
123 void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
124 	    bus_addr_t, bus_size_t, int);
125 
126 int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
127 	    bus_size_t, bus_dma_segment_t *, int, int *, int);
128 
129 int	_isa_dma_check_buffer(void *, bus_size_t, int, bus_size_t,
130 	    struct proc *);
131 int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
132 	    bus_size_t, int);
133 void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
134 
135 /*
136  * Entry points for ISA DMA.  These are mostly wrappers around
137  * the generic functions that understand how to deal with bounce
138  * buffers, if necessary.
139  */
140 struct bus_dma_tag isa_bus_dma_tag = {
141 	NULL,			/* _cookie */
142 	_isa_bus_dmamap_create,
143 	_isa_bus_dmamap_destroy,
144 	_isa_bus_dmamap_load,
145 	_isa_bus_dmamap_load_mbuf,
146 	_isa_bus_dmamap_load_uio,
147 	_isa_bus_dmamap_load_raw,
148 	_isa_bus_dmamap_unload,
149 	_isa_bus_dmamap_sync,
150 	_isa_bus_dmamem_alloc,
151 	_bus_dmamem_alloc_range,
152 	_bus_dmamem_free,
153 	_bus_dmamem_map,
154 	_bus_dmamem_unmap,
155 	_bus_dmamem_mmap,
156 };
157 #endif /* NISADMA > 0 */
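
/*
 * A minimal usage sketch (hypothetical driver code, not from this file):
 * roughly how a transfer flows through isa_bus_dma_tag.  "buf", "buflen"
 * and the error handling are placeholders.
 */
#if 0
	bus_dmamap_t map;

	if (bus_dmamap_create(&isa_bus_dma_tag, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_NOWAIT, &map) != 0)
		return (ENOMEM);
	if (bus_dmamap_load(&isa_bus_dma_tag, map, buf, buflen, NULL,
	    BUS_DMA_NOWAIT) != 0)
		return (EIO);
	/* Memory-to-device: fill the bounce buffer (if one is in use). */
	bus_dmamap_sync(&isa_bus_dma_tag, map, 0, buflen, BUS_DMASYNC_PREWRITE);
	/* ... program the ISA DMA controller and start the device ... */
#endif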
158 
159 /*
160  * Fill in default interrupt table (in case of spurious interrupt
161  * during configuration of the kernel), set up the interrupt control units.
162  */
163 void
164 isa_defaultirq(void)
165 {
166 	int i;
167 
168 	/* icu vectors */
169 	for (i = 0; i < ICU_LEN; i++)
170 		setgate(&idt[ICU_OFFSET + i], IDTVEC(intr)[i], 0,
171 		    SDT_SYS386IGT, SEL_KPL, GICODE_SEL);
172 
173 	/* initialize 8259's */
174 	outb(IO_ICU1, 0x11);		/* reset; program device, four bytes */
175 	outb(IO_ICU1+1, ICU_OFFSET);	/* starting at this vector index */
176 	outb(IO_ICU1+1, 1 << IRQ_SLAVE); /* slave on line 2 */
177 #ifdef AUTO_EOI_1
178 	outb(IO_ICU1+1, 2 | 1);		/* auto EOI, 8086 mode */
179 #else
180 	outb(IO_ICU1+1, 1);		/* 8086 mode */
181 #endif
182 	outb(IO_ICU1+1, 0xff);		/* leave interrupts masked */
183 	outb(IO_ICU1, 0x68);		/* special mask mode (if available) */
184 	outb(IO_ICU1, 0x0a);		/* Read IRR by default. */
185 #ifdef REORDER_IRQ
186 	outb(IO_ICU1, 0xc0 | (3 - 1));	/* pri order 3-7, 0-2 (com2 first) */
187 #endif
188 
189 	outb(IO_ICU2, 0x11);		/* reset; program device, four bytes */
190 	outb(IO_ICU2+1, ICU_OFFSET+8);	/* starting at this vector index */
191 	outb(IO_ICU2+1, IRQ_SLAVE);
192 #ifdef AUTO_EOI_2
193 	outb(IO_ICU2+1, 2 | 1);		/* auto EOI, 8086 mode */
194 #else
195 	outb(IO_ICU2+1, 1);		/* 8086 mode */
196 #endif
197 	outb(IO_ICU2+1, 0xff);		/* leave interrupts masked */
198 	outb(IO_ICU2, 0x68);		/* special mask mode (if available) */
199 	outb(IO_ICU2, 0x0a);		/* Read IRR by default. */
200 }
201 
202 /*
203  * Handle an NMI, possibly a machine check.
204  * Return true to panic the system, false to ignore.
205  */
206 int
207 isa_nmi(void)
208 {
209 	/* This is historic garbage; these ports are not readable */
210 	log(LOG_CRIT, "Non-maskable interrupt, may be a parity error\n");
211 	return(0);
212 }
213 
214 u_long  intrstray[ICU_LEN];
215 
216 /*
217  * Caught a stray interrupt, notify
218  */
219 void
220 isa_strayintr(int irq)
221 {
222         /*
223          * Stray interrupts on irq 7 occur when an interrupt line is raised
224          * and then lowered before the CPU acknowledges it.  This generally
225          * means either the device is screwed or something is cli'ing too
226          * long and it's timing out.
227          */
228 	if (++intrstray[irq] <= 5)
229 		log(LOG_ERR, "stray interrupt %d%s\n", irq,
230 		    intrstray[irq] >= 5 ? "; stopped logging" : "");
231 }
232 
233 int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
234 int iminlevel[ICU_LEN], imaxlevel[ICU_LEN];
235 struct intrhand *intrhand[ICU_LEN];
236 
237 int imask[NIPL];	/* Bitmask telling what interrupts are blocked. */
238 int iunmask[NIPL];	/* Bitmask telling what interrupts are accepted. */
239 
240 /*
241  * Recalculate the interrupt masks from scratch.
242  * We could code special registry and deregistry versions of this function that
243  * would be faster, but the code would be nastier, and we don't expect this to
244  * happen very much anyway.
245  */
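/*
 * For example, with only an IPL_TTY handler on IRQ 4 and an IPL_CLOCK
 * handler on IRQ 0, imask[IPL_TTY] contains bit 4 and imask[IPL_CLOCK]
 * contains bit 0 (unused IRQs are masked at every level).  The hierarchy
 * pass below then folds lower levels into higher ones, so imask[IPL_CLOCK]
 * also picks up bit 4: splclock() blocks the tty interrupt, while spltty()
 * leaves the clock unmasked.
 */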
246 void
247 intr_calculatemasks(void)
248 {
249 	int irq, level, unusedirqs;
250 	struct intrhand *q;
251 
252 	/* First, figure out which levels each IRQ uses. */
253 	unusedirqs = 0xffff;
254 	for (irq = 0; irq < ICU_LEN; irq++) {
255 		int levels = 0;
256 		for (q = intrhand[irq]; q; q = q->ih_next)
257 			levels |= 1 << IPL(q->ih_level);
258 		intrlevel[irq] = levels;
259 		if (levels)
260 			unusedirqs &= ~(1 << irq);
261 	}
262 
263 	/* Then figure out which IRQs use each level. */
264 	for (level = 0; level < NIPL; level++) {
265 		int irqs = 0;
266 		for (irq = 0; irq < ICU_LEN; irq++)
267 			if (intrlevel[irq] & (1 << level))
268 				irqs |= 1 << irq;
269 		imask[level] = irqs | unusedirqs;
270 	}
271 
272 	/*
273 	 * Initialize soft interrupt masks to block themselves.
274 	 */
275 	IMASK(IPL_SOFTCLOCK) |= 1 << SIR_CLOCK;
276 	IMASK(IPL_SOFTNET) |= 1 << SIR_NET;
277 	IMASK(IPL_SOFTTTY) |= 1 << SIR_TTY;
278 
279 	/*
280 	 * Enforce a hierarchy that gives slow devices a better chance at not
281 	 * dropping data.
282 	 */
283 	for (level = 0; level < NIPL - 1; level++)
284 		imask[level + 1] |= imask[level];
285 
286 	/* And eventually calculate the complete masks. */
287 	for (irq = 0; irq < ICU_LEN; irq++) {
288 		int irqs = 1 << irq;
289 		int minlevel = IPL_NONE;
290 		int maxlevel = IPL_NONE;
291 
292 		if (intrhand[irq] == NULL) {
293 			maxlevel = IPL_HIGH;
294 			irqs = IMASK(IPL_HIGH);
295 		} else {
296 			for (q = intrhand[irq]; q; q = q->ih_next) {
297 				irqs |= IMASK(q->ih_level);
298 				if (minlevel == IPL_NONE ||
299 				    q->ih_level < minlevel)
300 					minlevel = q->ih_level;
301 				if (q->ih_level > maxlevel)
302 					maxlevel = q->ih_level;
303 			}
304 		}
305 		if (irqs != IMASK(maxlevel))
306 			panic("irq %d level %x mask mismatch: %x vs %x", irq,
307 			    maxlevel, irqs, IMASK(maxlevel));
308 
309 		intrmask[irq] = irqs;
310 		iminlevel[irq] = minlevel;
311 		imaxlevel[irq] = maxlevel;
312 
313 #if 0
314 		printf("irq %d: level %x, mask 0x%x (%x)\n", irq,
315 		    imaxlevel[irq], intrmask[irq], IMASK(imaxlevel[irq]));
316 #endif
317 	}
318 
319 	/* Lastly, determine which IRQs are actually in use. */
320 	{
321 		int irqs = 0;
322 		for (irq = 0; irq < ICU_LEN; irq++)
323 			if (intrhand[irq])
324 				irqs |= 1 << irq;
325 		if (irqs >= 0x100) /* any IRQs >= 8 in use */
326 			irqs |= 1 << IRQ_SLAVE;
327 		imen = ~irqs;
328 		SET_ICUS();
329 	}
330 
331 	/* For speed of splx, provide the inverse of the interrupt masks. */
332 	for (irq = 0; irq < ICU_LEN; irq++)
333 		iunmask[irq] = ~imask[irq];
334 }
335 
336 int
337 fakeintr(void *arg)
338 {
339 	return 0;
340 }
341 
342 #define	LEGAL_IRQ(x)	((x) >= 0 && (x) < ICU_LEN && (x) != 2)
343 
344 int
345 isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
346 {
347 	int i, bestirq, count;
348 	int tmp;
349 	struct intrhand **p, *q;
350 
351 	if (type == IST_NONE)
352 		panic("intr_alloc: bogus type");
353 
354 	bestirq = -1;
355 	count = -1;
356 
357 	/* some interrupts should never be dynamically allocated */
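	/*
	 * 0xdef8 drops IRQs 0 (clock), 1 (keyboard), 2 (cascade), 8 (rtc)
	 * and 13 (npx) from the candidate set.
	 */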
358 	mask &= 0xdef8;
359 
360 	/*
361 	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
362 	 * the right answer is to do "breadth-first" searching of devices.
363 	 */
364 	mask &= 0xefbf;
365 
366 	for (i = 0; i < ICU_LEN; i++) {
367 		if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
368 			continue;
369 
370 		switch(intrtype[i]) {
371 		case IST_NONE:
372 			/*
373 			 * if nothing's using the irq, just return it
374 			 */
375 			*irq = i;
376 			return (0);
377 
378 		case IST_EDGE:
379 		case IST_LEVEL:
380 			if (type != intrtype[i])
381 				continue;
382 			/*
383 			 * if the irq is shareable, count the handlers already
384 			 * attached to it; if there are fewer than on the best
385 			 * candidate found so far, remember this irq
386 			 *
387 			 * XXX We should probably also consider the
388 			 * interrupt level and stick IPL_TTY with other
389 			 * IPL_TTY, etc.
390 			 */
391 			for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
392 			     p = &q->ih_next, tmp++)
393 				;
394 			if ((bestirq == -1) || (count > tmp)) {
395 				bestirq = i;
396 				count = tmp;
397 			}
398 			break;
399 
400 		case IST_PULSE:
401 			/* this just isn't shareable */
402 			continue;
403 		}
404 	}
405 
406 	if (bestirq == -1)
407 		return (1);
408 
409 	*irq = bestirq;
410 
411 	return (0);
412 }
413 
414 /*
415  * Just check to see if an IRQ is available/can be shared.
416  * 0 = interrupt not available
417  * 1 = interrupt shareable
418  * 2 = interrupt all to ourself
419  */
420 int
421 isa_intr_check(isa_chipset_tag_t ic, int irq, int type)
422 {
423 	if (!LEGAL_IRQ(irq) || type == IST_NONE)
424 		return (0);
425 
426 	switch (intrtype[irq]) {
427 	case IST_NONE:
428 		return (2);
429 		break;
430 	case IST_LEVEL:
431 		if (type != intrtype[irq])
432 			return (0);
433 		return (1);
434 		break;
435 	case IST_EDGE:
436 	case IST_PULSE:
437 		if (type != IST_NONE)
438 			return (0);
439 	}
440 	return (1);
441 }
442 
443 /*
444  * Set up an interrupt handler to start being called.
445  * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
446  */
447 void *
448 isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, int level,
449     int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
450 {
451 	struct intrhand **p, *q, *ih;
452 	static struct intrhand fakehand = {fakeintr};
453 	int flags;
454 
455 #if NIOAPIC > 0
456 	struct mp_intr_map *mip;
457 
458 	if (mp_busses != NULL) {
459 		int mpspec_pin = irq;
460 		int airq;
461 
462 		if (mp_isa_bus == NULL)
463 			panic("no isa bus");
464 
465 		for (mip = mp_isa_bus->mb_intrs; mip != NULL;
466 		    mip = mip->next) {
467 			if (mip->bus_pin == mpspec_pin) {
468 				airq = mip->ioapic_ih | irq;
469 				break;
470 			}
471 		}
472 		if (mip == NULL && mp_eisa_bus) {
473 			for (mip = mp_eisa_bus->mb_intrs; mip != NULL;
474 			    mip = mip->next) {
475 				if (mip->bus_pin == mpspec_pin) {
476 					airq = mip->ioapic_ih | irq;
477 					break;
478 				}
479 			}
480 		}
481 
482 		/* no MP mapping found -- invent! */
483 		if (mip == NULL)
484 			airq = mpbios_invent(irq, type, mp_isa_bus->mb_idx);
485 
486 		return (apic_intr_establish(airq, type, level, ih_fun,
487 		    ih_arg, ih_what));
488 	}
489 #endif
490 
491 	flags = level & IPL_MPSAFE;
492 	level &= ~IPL_MPSAFE;
493 
494 	KASSERT(level <= IPL_TTY || level >= IPL_CLOCK || flags & IPL_MPSAFE);
495 
496 	/* no point in sleeping unless someone can free memory. */
497 	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
498 	if (ih == NULL) {
499 		printf("%s: isa_intr_establish: can't malloc handler info\n",
500 		    ih_what);
501 		return (NULL);
502 	}
503 
504 	if (!LEGAL_IRQ(irq) || type == IST_NONE) {
505 		printf("%s: isa_intr_establish: bogus irq or type\n", ih_what);
506 		free(ih, M_DEVBUF, sizeof *ih);
507 		return (NULL);
508 	}
509 	switch (intrtype[irq]) {
510 	case IST_NONE:
511 		intrtype[irq] = type;
512 		break;
513 	case IST_EDGE:
514 		intr_shared_edge = 1;
515 		/* FALLTHROUGH */
516 	case IST_LEVEL:
517 		if (type == intrtype[irq])
518 			break;
519 	case IST_PULSE:
520 		if (type != IST_NONE) {
521 			/*printf("%s: intr_establish: can't share %s with %s, irq %d\n",
522 			    ih_what, isa_intr_typename(intrtype[irq]),
523 			    isa_intr_typename(type), irq);*/
524 			free(ih, M_DEVBUF, sizeof *ih);
525 			return (NULL);
526 		}
527 		break;
528 	}
529 
530 	/*
531 	 * Figure out where to put the handler.
532 	 * This is O(N^2), but we want to preserve the order, and N is
533 	 * generally small.
534 	 */
535 	for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
536 		;
537 
538 	/*
539 	 * Actually install a fake handler momentarily, since we might be doing
540 	 * this with interrupts enabled and don't want the real routine called
541 	 * until masking is set up.
542 	 */
543 	fakehand.ih_level = level;
544 	*p = &fakehand;
545 
546 	intr_calculatemasks();
547 
548 	/*
549 	 * Poke the real handler in now.
550 	 */
551 	ih->ih_fun = ih_fun;
552 	ih->ih_arg = ih_arg;
553 	ih->ih_next = NULL;
554 	ih->ih_level = level;
555 	ih->ih_flags = flags;
556 	ih->ih_irq = irq;
557 	evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq);
558 	*p = ih;
559 
560 	return (ih);
561 }
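
/*
 * A minimal usage sketch (hypothetical driver attach code, not from this
 * file): "sc", "ia" and "foo_intr" are placeholders.
 */
#if 0
	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE,
	    IPL_NET, foo_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt\n");
		return;
	}
#endif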
562 
563 /*
564  * Deregister an interrupt handler.
565  */
566 void
567 isa_intr_disestablish(isa_chipset_tag_t ic, void *arg)
568 {
569 	struct intrhand *ih = arg;
570 	int irq = ih->ih_irq;
571 	struct intrhand **p, *q;
572 
573 #if NIOAPIC > 0
574 	if (irq & APIC_INT_VIA_APIC) {
575 		apic_intr_disestablish(arg);
576 		return;
577 	}
578 #endif
579 
580 	if (!LEGAL_IRQ(irq))
581 		panic("intr_disestablish: bogus irq %d", irq);
582 
583 	/*
584 	 * Remove the handler from the chain.
585 	 * This is O(n^2), too.
586 	 */
587 	for (p = &intrhand[irq]; (q = *p) != NULL && q != ih; p = &q->ih_next)
588 		;
589 	if (q)
590 		*p = q->ih_next;
591 	else
592 		panic("intr_disestablish: handler not registered");
593 	evcount_detach(&ih->ih_count);
594 	free(ih, M_DEVBUF, sizeof *ih);
595 
596 	intr_calculatemasks();
597 
598 	if (intrhand[irq] == NULL)
599 		intrtype[irq] = IST_NONE;
600 }
601 
602 void
603 isa_attach_hook(struct device *parent, struct device *self,
604     struct isabus_attach_args *iba)
605 {
606 	extern int isa_has_been_seen;
607 
608 	/*
609 	 * Notify others that might need to know that the ISA bus
610 	 * has now been attached.
611 	 */
612 	if (isa_has_been_seen)
613 		panic("isaattach: ISA bus already seen!");
614 	isa_has_been_seen = 1;
615 }
616 
617 #if NISADMA > 0
618 /**********************************************************************
619  * bus.h dma interface entry points
620  **********************************************************************/
621 
622 #ifdef ISA_DMA_STATS
623 #define	STAT_INCR(v)	(v)++
624 #define	STAT_DECR(v)	do { \
625 		if ((v) == 0) \
626 			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
627 		else \
628 			(v)--; \
629 		} while (0)
630 u_long	isa_dma_stats_loads;
631 u_long	isa_dma_stats_bounces;
632 u_long	isa_dma_stats_nbouncebufs;
633 #else
634 #define	STAT_INCR(v)
635 #define	STAT_DECR(v)
636 #endif
637 
638 /*
639  * Create an ISA DMA map.
640  */
641 int
642 _isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
643     bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
644 {
645 	struct isa_dma_cookie *cookie;
646 	bus_dmamap_t map;
647 	int error, cookieflags;
648 	void *cookiestore;
649 	size_t cookiesize;
650 
651 	/* Call common function to create the basic map. */
652 	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
653 	    flags, dmamp);
654 	if (error)
655 		return (error);
656 
657 	map = *dmamp;
658 	map->_dm_cookie = NULL;
659 
660 	cookiesize = sizeof(struct isa_dma_cookie);
661 
662 	/*
663 	 * ISA only has 24-bits of address space.  This means
664 	 * we can't DMA to pages over 16M.  In order to DMA to
665 	 * arbitrary buffers, we use "bounce buffers" - pages
666 	 * in memory below the 16M boundary.  On DMA reads,
667 	 * DMA happens to the bounce buffers, and is copied into
668 	 * the caller's buffer.  On writes, data is copied into
669 	 * the bounce buffer, and the DMA happens from those
670 	 * pages.  To software using the DMA mapping interface,
671 	 * this looks simply like a data cache.
672 	 *
673 	 * If we have more than 16M of RAM in the system, we may
674 	 * need bounce buffers.  We check and remember that here.
675 	 *
676 	 * There are exceptions, however.  VLB devices can do
677 	 * 32-bit DMA, and indicate that here.
678 	 *
679 	 * ...or, there is an opposite case.  The most segments
680 	 * a transfer will require is (maxxfer / NBPG) + 1.  If
681 	 * the caller can't handle that many segments (e.g. the
682 	 * ISA DMA controller), we may have to bounce it as well.
683 	 */
684 	cookieflags = 0;
685 	if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
686 	    (flags & ISABUS_DMA_32BIT) == 0) ||
687 	    ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
688 		cookieflags |= ID_MIGHT_NEED_BOUNCE;
689 		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
690 	}
691 
692 	/*
693 	 * Allocate our cookie.
694 	 */
695 	if ((cookiestore = malloc(cookiesize, M_DEVBUF,
696 	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)|M_ZERO)) == NULL) {
697 		error = ENOMEM;
698 		goto out;
699 	}
700 	cookie = (struct isa_dma_cookie *)cookiestore;
701 	cookie->id_flags = cookieflags;
702 	map->_dm_cookie = cookie;
703 
704 	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
705 		/*
706 		 * Allocate the bounce pages now if the caller
707 		 * wishes us to do so.
708 		 */
709 		if ((flags & BUS_DMA_ALLOCNOW) == 0)
710 			goto out;
711 
712 		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
713 	}
714 
715  out:
716 	if (error) {
717 		free(map->_dm_cookie, M_DEVBUF, cookiesize);
718 		_bus_dmamap_destroy(t, map);
719 	}
720 	return (error);
721 }
722 
723 /*
724  * Destroy an ISA DMA map.
725  */
726 void
727 _isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
728 {
729 	struct isa_dma_cookie *cookie = map->_dm_cookie;
730 
731 	/*
732 	 * Free any bounce pages this map might hold.
733 	 */
734 	if (cookie->id_flags & ID_HAS_BOUNCE)
735 		_isa_dma_free_bouncebuf(t, map);
736 
737 	free(cookie, M_DEVBUF, 0);
738 	_bus_dmamap_destroy(t, map);
739 }
740 
741 /*
742  * Load an ISA DMA map with a linear buffer.
743  */
744 int
745 _isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
746     bus_size_t buflen, struct proc *p, int flags)
747 {
748 	struct isa_dma_cookie *cookie = map->_dm_cookie;
749 	int error;
750 
751 	STAT_INCR(isa_dma_stats_loads);
752 
753 	/*
754 	 * Check to see if we might need to bounce the transfer.
755 	 */
756 	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
757 		/*
758 		 * Check if all pages are below the bounce
759 		 * threshold.  If they are, don't bother bouncing.
760 		 */
761 		if (_isa_dma_check_buffer(buf, buflen,
762 		    map->_dm_segcnt, map->_dm_boundary, p) == 0)
763 			return (_bus_dmamap_load(t, map, buf, buflen,
764 			    p, flags));
765 
766 		STAT_INCR(isa_dma_stats_bounces);
767 
768 		/*
769 		 * Allocate bounce pages, if necessary.
770 		 */
771 		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
772 			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
773 			    flags);
774 			if (error)
775 				return (error);
776 		}
777 
778 		/*
779 		 * Cache a pointer to the caller's buffer and
780 		 * load the DMA map with the bounce buffer.
781 		 */
782 		cookie->id_origbuf = buf;
783 		cookie->id_origbuflen = buflen;
784 		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
785 		    buflen, p, flags);
786 
787 		if (error) {
788 			/*
789 			 * Free the bounce pages, unless our resources
790 			 * are reserved for our exclusive use.
791 			 */
792 			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
793 				_isa_dma_free_bouncebuf(t, map);
794 		}
795 
796 		/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
797 		cookie->id_flags |= ID_IS_BOUNCING;
798 	} else {
799 		/*
800 		 * Just use the generic load function.
801 		 */
802 		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
803 	}
804 
805 	return (error);
806 }
807 
808 /*
809  * Like _isa_bus_dmamap_load(), but for mbufs.
810  */
811 int
812 _isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
813     int flags)
814 {
815 
816 	panic("_isa_bus_dmamap_load_mbuf: not implemented");
817 }
818 
819 /*
820  * Like _isa_bus_dmamap_load(), but for uios.
821  */
822 int
823 _isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
824     int flags)
825 {
826 
827 	panic("_isa_bus_dmamap_load_uio: not implemented");
828 }
829 
830 /*
831  * Like _isa_bus_dmamap_load(), but for raw memory allocated with
832  * bus_dmamem_alloc().
833  */
834 int
835 _isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
836     bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
837 {
838 
839 	panic("_isa_bus_dmamap_load_raw: not implemented");
840 }
841 
842 /*
843  * Unload an ISA DMA map.
844  */
845 void
846 _isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
847 {
848 	struct isa_dma_cookie *cookie = map->_dm_cookie;
849 
850 	/*
851 	 * If we have bounce pages, free them, unless they're
852 	 * reserved for our exclusive use.
853 	 */
854 	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
855 	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
856 		_isa_dma_free_bouncebuf(t, map);
857 
858 	cookie->id_flags &= ~ID_IS_BOUNCING;
859 
860 	/*
861 	 * Do the generic bits of the unload.
862 	 */
863 	_bus_dmamap_unload(t, map);
864 }
865 
866 /*
867  * Synchronize an ISA DMA map.
868  */
869 void
870 _isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
871     bus_size_t len, int op)
872 {
873 	struct isa_dma_cookie *cookie = map->_dm_cookie;
874 
875 #ifdef DEBUG
876 	if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
877 		if (offset >= map->dm_mapsize)
878 			panic("_isa_bus_dmamap_sync: bad offset");
879 		if (len == 0 || (offset + len) > map->dm_mapsize)
880 			panic("_isa_bus_dmamap_sync: bad length");
881 	}
882 #endif
883 #ifdef DIAGNOSTIC
884 	if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0 &&
885 	    (op & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) != 0)
886 		panic("_isa_bus_dmamap_sync: mix PRE and POST");
887 #endif /* DIAGNOSTIC */
888 
889 	/* PREREAD and POSTWRITE are no-ops */
890 	if (op & BUS_DMASYNC_PREWRITE) {
891 		/*
892 		 * If we're bouncing this transfer, copy the
893 		 * caller's buffer to the bounce buffer.
894 		 */
895 		if (cookie->id_flags & ID_IS_BOUNCING)
896 			memcpy(cookie->id_bouncebuf + offset,
897 			    (char *)cookie->id_origbuf + offset, len);
898 	}
899 
900 	_bus_dmamap_sync(t, map, offset, len, op);
901 
902 	if (op & BUS_DMASYNC_POSTREAD) {
903 		/*
904 		 * If we're bouncing this transfer, copy the
905 		 * bounce buffer to the caller's buffer.
906 		 */
907 		if (cookie->id_flags & ID_IS_BOUNCING)
908 			memcpy(cookie->id_origbuf + offset,
909 			    (char *)cookie->id_bouncebuf + offset, len);
910 	}
911 }
912 
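/*
 * For a device-to-memory transfer a driver syncs with BUS_DMASYNC_PREREAD
 * before starting the DMA and BUS_DMASYNC_POSTREAD once it completes; when
 * a bounce buffer is in use, the POSTREAD pass above is what copies the
 * data back into the caller's buffer.
 */
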
913 /*
914  * Allocate memory safe for ISA DMA.
915  */
916 int
917 _isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
918     bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
919     int flags)
920 {
921 	int error;
922 
923 	/* Try in ISA addressable region first */
924 	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
925 	    segs, nsegs, rsegs, flags, 0, ISA_DMA_BOUNCE_THRESHOLD);
926 	if (!error)
927 		return (error);
928 
929 	/* Otherwise try anywhere (we'll bounce later) */
930 	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
931 	    segs, nsegs, rsegs, flags, (bus_addr_t)0, (bus_addr_t)-1);
932 	return (error);
933 }
934 
935 
936 /**********************************************************************
937  * ISA DMA utility functions
938  **********************************************************************/
939 
940 /*
941  * Return 0 if all pages in the passed buffer lie within the DMA'able
942  * range of RAM.
943  */
944 int
945 _isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
946     bus_size_t boundary, struct proc *p)
947 {
948 	vaddr_t vaddr = (vaddr_t)buf;
949 	vaddr_t endva;
950 	paddr_t pa, lastpa;
951 	u_long pagemask = ~(boundary - 1);
952 	pmap_t pmap;
953 	int nsegs;
954 
955 	endva = round_page(vaddr + buflen);
956 
957 	nsegs = 1;
958 	lastpa = 0;
959 
960 	if (p != NULL)
961 		pmap = p->p_vmspace->vm_map.pmap;
962 	else
963 		pmap = pmap_kernel();
964 
965 	for (; vaddr < endva; vaddr += NBPG) {
966 		/*
967 		 * Get physical address for this segment.
968 		 */
969 		pmap_extract(pmap, (vaddr_t)vaddr, &pa);
970 		pa = trunc_page(pa);
971 
972 		/*
973 		 * Is it below the DMA'able threshold?
974 		 */
975 		if (pa > ISA_DMA_BOUNCE_THRESHOLD)
976 			return (EINVAL);
977 
978 		if (lastpa) {
979 			/*
980 			 * Check excessive segment count.
981 			 */
982 			if (lastpa + NBPG != pa) {
983 				if (++nsegs > segcnt)
984 					return (EFBIG);
985 			}
986 
987 			/*
988 			 * Check boundary restriction.
989 			 */
990 			if (boundary) {
991 				if ((lastpa ^ pa) & pagemask)
992 					return (EINVAL);
993 			}
994 		}
995 		lastpa = pa;
996 	}
997 
998 	return (0);
999 }
1000 
1001 int
1002 _isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size, int flags)
1003 {
1004 	struct isa_dma_cookie *cookie = map->_dm_cookie;
1005 	int error = 0;
1006 
1007 	cookie->id_bouncebuflen = round_page(size);
1008 	error = _bus_dmamem_alloc_range(t, cookie->id_bouncebuflen,
1009 	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
1010 	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags,
1011 	    0, ISA_DMA_BOUNCE_THRESHOLD);
1012 	if (error)
1013 		goto out;
1014 	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
1015 	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
1016 	    (caddr_t *)&cookie->id_bouncebuf, flags);
1017 
1018  out:
1019 	if (error) {
1020 		_bus_dmamem_free(t, cookie->id_bouncesegs,
1021 		    cookie->id_nbouncesegs);
1022 		cookie->id_bouncebuflen = 0;
1023 		cookie->id_nbouncesegs = 0;
1024 	} else {
1025 		cookie->id_flags |= ID_HAS_BOUNCE;
1026 		STAT_INCR(isa_dma_stats_nbouncebufs);
1027 	}
1028 
1029 	return (error);
1030 }
1031 
1032 void
1033 _isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
1034 {
1035 	struct isa_dma_cookie *cookie = map->_dm_cookie;
1036 
1037 	STAT_DECR(isa_dma_stats_nbouncebufs);
1038 
1039 	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
1040 	    cookie->id_bouncebuflen);
1041 	_bus_dmamem_free(t, cookie->id_bouncesegs,
1042 	    cookie->id_nbouncesegs);
1043 	cookie->id_bouncebuflen = 0;
1044 	cookie->id_nbouncesegs = 0;
1045 	cookie->id_flags &= ~ID_HAS_BOUNCE;
1046 }
1047 #endif /* NISADMA > 0 */
1048