/*	$OpenBSD: isa_machdep.c,v 1.81 2015/09/01 06:01:26 deraadt Exp $	*/
/*	$NetBSD: isa_machdep.c,v 1.22 1997/06/12 23:57:32 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1993, 1994, 1996, 1997
 *	Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include "ioapic.h"

#if NIOAPIC > 0
#include <machine/i82093var.h>
#include <machine/mpbiosvar.h>
#endif

#include <machine/bus.h>

#include <machine/intr.h>
#include <machine/pio.h>
#include <machine/cpufunc.h>
#include <machine/i8259.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>
#include <dev/isa/isadmavar.h>
#include <i386/isa/isa_machdep.h>

#include "isadma.h"

extern	paddr_t avail_end;

#define	IDTVEC(name)	__CONCAT(X,name)
/* default interrupt vector table entries */
typedef int (*vector)(void);
extern vector IDTVEC(intr)[];
void isa_strayintr(int);
void intr_calculatemasks(void);
int fakeintr(void *);

#if NISADMA > 0
int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);

int	_isa_dma_check_buffer(void *, bus_size_t, int, bus_size_t,
	    struct proc *);
int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct bus_dma_tag isa_bus_dma_tag = {
	NULL,			/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_bus_dmamem_alloc_range,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
#endif /* NISADMA > 0 */
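
#if 0
/*
 * Illustrative sketch only (not compiled): the life cycle a
 * hypothetical ISA driver would put this tag through via the generic
 * bus_dma(9) entry points.  The function name and the "dmat", "buf"
 * and "len" parameters are placeholders, and error handling is
 * collapsed to early returns.
 */
static int
example_isa_dma_write(bus_dma_tag_t dmat, void *buf, bus_size_t len)
{
	bus_dmamap_t map;

	/* One segment, no boundary; create may reserve bounce state. */
	if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT, &map))
		return (ENOMEM);
	if (bus_dmamap_load(dmat, map, buf, len, NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(dmat, map);
		return (EFAULT);
	}
	/* PREWRITE copies into the bounce buffer when one is in use. */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_PREWRITE);
	/* ... hand map->dm_segs[0].ds_addr to the device, run the DMA ... */
	bus_dmamap_sync(dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dmat, map);
	bus_dmamap_destroy(dmat, map);
	return (0);
}
#endif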

/*
 * Fill in the default interrupt table (in case of spurious interrupts
 * during configuration of the kernel) and set up the interrupt control
 * unit.
 */
void
isa_defaultirq(void)
{
	int i;

	/* icu vectors */
	for (i = 0; i < ICU_LEN; i++)
		setgate(&idt[ICU_OFFSET + i], IDTVEC(intr)[i], 0,
		    SDT_SYS386IGT, SEL_KPL, GICODE_SEL);

	/* initialize 8259's */
	outb(IO_ICU1, 0x11);		/* reset; program device, four bytes */
	outb(IO_ICU1+1, ICU_OFFSET);	/* starting at this vector index */
	outb(IO_ICU1+1, 1 << IRQ_SLAVE); /* slave on line 2 */
#ifdef AUTO_EOI_1
	outb(IO_ICU1+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU1+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU1+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU1, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU1, 0x0a);		/* Read IRR by default. */
#ifdef REORDER_IRQ
	outb(IO_ICU1, 0xc0 | (3 - 1));	/* pri order 3-7, 0-2 (com2 first) */
#endif

	outb(IO_ICU2, 0x11);		/* reset; program device, four bytes */
	outb(IO_ICU2+1, ICU_OFFSET+8);	/* starting at this vector index */
	outb(IO_ICU2+1, IRQ_SLAVE);
#ifdef AUTO_EOI_2
	outb(IO_ICU2+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU2+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU2+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU2, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU2, 0x0a);		/* Read IRR by default. */
}

/*
 * Handle an NMI, possibly a machine check.
 * Return true to panic the system, false to ignore.
 */
int
isa_nmi(void)
{
	/* This is historic garbage; these ports are not readable */
	log(LOG_CRIT, "Non-maskable interrupt, may be parity error\n");
	return (0);
}

u_long  intrstray[ICU_LEN];

/*
 * Caught a stray interrupt, notify
 */
void
isa_strayintr(int irq)
{
	/*
	 * Stray interrupts on irq 7 occur when an interrupt line is raised
	 * and then lowered before the CPU acknowledges it.  This generally
	 * means either the device is screwed or something is cli'ing too
	 * long and it's timing out.
	 */
	if (++intrstray[irq] <= 5)
		log(LOG_ERR, "stray interrupt %d%s\n", irq,
		    intrstray[irq] >= 5 ? "; stopped logging" : "");
}

int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
int iminlevel[ICU_LEN], imaxlevel[ICU_LEN];
struct intrhand *intrhand[ICU_LEN];

int imask[NIPL];	/* Bitmask telling what interrupts are blocked. */
int iunmask[NIPL];	/* Bitmask telling what interrupts are accepted. */

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
void
intr_calculatemasks(void)
{
	int irq, level, unusedirqs;
	struct intrhand *q;

	/* First, figure out which levels each IRQ uses. */
	unusedirqs = 0xffff;
	for (irq = 0; irq < ICU_LEN; irq++) {
		int levels = 0;
		for (q = intrhand[irq]; q; q = q->ih_next)
			levels |= 1 << IPL(q->ih_level);
		intrlevel[irq] = levels;
		if (levels)
			unusedirqs &= ~(1 << irq);
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrlevel[irq] & (1 << level))
				irqs |= 1 << irq;
		imask[level] = irqs | unusedirqs;
	}

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	IMASK(IPL_SOFTCLOCK) |= 1 << SIR_CLOCK;
	IMASK(IPL_SOFTNET) |= 1 << SIR_NET;
	IMASK(IPL_SOFTTTY) |= 1 << SIR_TTY;

	/*
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
	for (level = 0; level < NIPL - 1; level++)
		imask[level + 1] |= imask[level];

	/* Then calculate the complete masks. */
	for (irq = 0; irq < ICU_LEN; irq++) {
		int irqs = 1 << irq;
		int minlevel = IPL_NONE;
		int maxlevel = IPL_NONE;

		if (intrhand[irq] == NULL) {
			maxlevel = IPL_HIGH;
			irqs = IMASK(IPL_HIGH);
		} else {
			for (q = intrhand[irq]; q; q = q->ih_next) {
				irqs |= IMASK(q->ih_level);
				if (minlevel == IPL_NONE ||
				    q->ih_level < minlevel)
					minlevel = q->ih_level;
				if (q->ih_level > maxlevel)
					maxlevel = q->ih_level;
			}
		}
		if (irqs != IMASK(maxlevel))
			panic("irq %d level %x mask mismatch: %x vs %x", irq,
			    maxlevel, irqs, IMASK(maxlevel));

		intrmask[irq] = irqs;
		iminlevel[irq] = minlevel;
		imaxlevel[irq] = maxlevel;

#if 0
		printf("irq %d: level %x, mask 0x%x (%x)\n", irq,
		    imaxlevel[irq], intrmask[irq], IMASK(imaxlevel[irq]));
#endif
	}

	/* Lastly, determine which IRQs are actually in use. */
	{
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrhand[irq])
				irqs |= 1 << irq;
		if (irqs >= 0x100) /* any IRQs >= 8 in use */
			irqs |= 1 << IRQ_SLAVE;
		imen = ~irqs;
		SET_ICUS();
	}

	/* For speed of splx, provide the inverse of the interrupt masks. */
	for (irq = 0; irq < ICU_LEN; irq++)
		iunmask[irq] = ~imask[irq];
}
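
/*
 * Worked example (hypothetical numbers, for illustration only): if IRQ 4
 * has a single handler at IPL_TTY, intr_calculatemasks() sets the
 * IPL_TTY bit in intrlevel[4] and bit 4 in imask[IPL_TTY].  The
 * hierarchy loop then propagates bit 4 into every higher imask[], so
 * raising spl to or above IPL_TTY blocks IRQ 4, while lower levels
 * leave it open (apart from the unused-IRQ bits OR'd into every level).
 */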

int
fakeintr(void *arg)
{
	return (0);
}

#define	LEGAL_IRQ(x)	((x) >= 0 && (x) < ICU_LEN && (x) != 2)

int
isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
{
	int i, bestirq, count;
	int tmp;
	struct intrhand **p, *q;

	if (type == IST_NONE)
		panic("intr_alloc: bogus type");

	bestirq = -1;
	count = -1;

	/* some interrupts should never be dynamically allocated */
	mask &= 0xdef8;

	/*
	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
	 * the right answer is to do "breadth-first" searching of devices.
	 */
	mask &= 0xefbf;

	for (i = 0; i < ICU_LEN; i++) {
		if (LEGAL_IRQ(i) == 0 || (mask & (1<<i)) == 0)
			continue;

		switch (intrtype[i]) {
		case IST_NONE:
			/*
			 * if nothing's using the irq, just return it
			 */
			*irq = i;
			return (0);

		case IST_EDGE:
		case IST_LEVEL:
			if (type != intrtype[i])
				continue;
			/*
			 * if the irq is shareable, count the number of other
			 * handlers, and if it's smaller than the last irq like
			 * this, remember it
			 *
			 * XXX We should probably also consider the
			 * interrupt level and stick IPL_TTY with other
			 * IPL_TTY, etc.
			 */
			for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
			     p = &q->ih_next, tmp++)
				;
			if ((bestirq == -1) || (count > tmp)) {
				bestirq = i;
				count = tmp;
			}
			break;

		case IST_PULSE:
			/* this just isn't shareable */
			continue;
		}
	}

	if (bestirq == -1)
		return (1);

	*irq = bestirq;

	return (0);
}
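
#if 0
/*
 * Illustrative sketch only (not compiled): a hypothetical caller asks
 * for any free edge-triggered IRQ among lines 5, 9, 10 and 11.  The
 * mask argument is a bitmap of acceptable lines; isa_intr_alloc()
 * additionally filters it against LEGAL_IRQ() and its reserved lines.
 */
void
example_pick_irq(isa_chipset_tag_t ic)
{
	int irq;

	if (isa_intr_alloc(ic, (1 << 5) | (1 << 9) | (1 << 10) | (1 << 11),
	    IST_EDGE, &irq) == 0)
		printf("allocated irq %d\n", irq);
}
#endif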

/*
 * Just check to see if an IRQ is available/can be shared.
 * 0 = interrupt not available
 * 1 = interrupt shareable
 * 2 = interrupt all to ourself
 */
int
isa_intr_check(isa_chipset_tag_t ic, int irq, int type)
{
	if (!LEGAL_IRQ(irq) || type == IST_NONE)
		return (0);

	switch (intrtype[irq]) {
	case IST_NONE:
		return (2);
	case IST_LEVEL:
		if (type != intrtype[irq])
			return (0);
		return (1);
	case IST_EDGE:
	case IST_PULSE:
		if (type != IST_NONE)
			return (0);
	}
	return (1);
}

/*
 * Set up an interrupt handler to start being called.
 * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
 */
void *
isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	struct intrhand **p, *q, *ih;
	static struct intrhand fakehand = {fakeintr};
	int flags;

#if NIOAPIC > 0
	struct mp_intr_map *mip;

	if (mp_busses != NULL) {
		int mpspec_pin = irq;
		int airq;

		if (mp_isa_bus == NULL)
			panic("no isa bus");

		for (mip = mp_isa_bus->mb_intrs; mip != NULL;
		    mip = mip->next) {
			if (mip->bus_pin == mpspec_pin) {
				airq = mip->ioapic_ih | irq;
				break;
			}
		}
		if (mip == NULL && mp_eisa_bus) {
			for (mip = mp_eisa_bus->mb_intrs; mip != NULL;
			    mip = mip->next) {
				if (mip->bus_pin == mpspec_pin) {
					airq = mip->ioapic_ih | irq;
					break;
				}
			}
		}

		/* no MP mapping found -- invent! */
		if (mip == NULL)
			airq = mpbios_invent(irq, type, mp_isa_bus->mb_idx);

		return (apic_intr_establish(airq, type, level, ih_fun,
		    ih_arg, ih_what));
	}
#endif

	flags = level & IPL_MPSAFE;
	level &= ~IPL_MPSAFE;

	KASSERT(level <= IPL_TTY || level >= IPL_CLOCK || flags & IPL_MPSAFE);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL) {
		printf("%s: isa_intr_establish: can't malloc handler info\n",
		    ih_what);
		return (NULL);
	}

	if (!LEGAL_IRQ(irq) || type == IST_NONE) {
		printf("%s: isa_intr_establish: bogus irq or type\n", ih_what);
		free(ih, M_DEVBUF, sizeof *ih);
		return (NULL);
	}
	switch (intrtype[irq]) {
	case IST_NONE:
		intrtype[irq] = type;
		break;
	case IST_EDGE:
		intr_shared_edge = 1;
		/* FALLTHROUGH */
	case IST_LEVEL:
		if (type == intrtype[irq])
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE) {
			/*printf("%s: intr_establish: can't share %s with %s, irq %d\n",
			    ih_what, isa_intr_typename(intrtype[irq]),
			    isa_intr_typename(type), irq);*/
			free(ih, M_DEVBUF, sizeof *ih);
			return (NULL);
		}
		break;
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
		;

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	*p = &fakehand;

	intr_calculatemasks();

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_irq = irq;
	evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq);
	*p = ih;

	return (ih);
}
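
#if 0
/*
 * Illustrative sketch only (not compiled): how a hypothetical ISA
 * driver's attach routine hooks its interrupt.  "xx_softc", "xxintr"
 * and "xxattach" are placeholders; real drivers take ia_ic/ia_irq from
 * the struct isa_attach_args handed to their attach function.
 */
struct xx_softc {
	struct device sc_dev;
	void *sc_ih;
};

int xxintr(void *);

void
xxattach(struct xx_softc *sc, struct isa_attach_args *ia)
{
	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE,
	    IPL_NET, xxintr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		printf("%s: couldn't establish interrupt\n",
		    sc->sc_dev.dv_xname);
}
#endif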

/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(isa_chipset_tag_t ic, void *arg)
{
	struct intrhand *ih = arg;
	int irq = ih->ih_irq;
	struct intrhand **p, *q;

#if NIOAPIC > 0
	if (irq & APIC_INT_VIA_APIC) {
		apic_intr_disestablish(arg);
		return;
	}
#endif

	if (!LEGAL_IRQ(irq))
		panic("intr_disestablish: bogus irq %d", irq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL && q != ih; p = &q->ih_next)
		;
	if (q)
		*p = q->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof *ih);

	intr_calculatemasks();

	if (intrhand[irq] == NULL)
		intrtype[irq] = IST_NONE;
}

void
isa_attach_hook(struct device *parent, struct device *self,
    struct isabus_attach_args *iba)
{
	extern int isa_has_been_seen;

	/*
	 * Notify others that might need to know that the ISA bus
	 * has now been attached.
	 */
	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");
	isa_has_been_seen = 1;
}

#if NISADMA > 0
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
		} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct isa_dma_cookie);

	/*
	 * ISA only has 24 bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
	    (flags & ISABUS_DMA_32BIT) == 0) ||
	    ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}
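
	/*
	 * Concrete example (numbers assumed for illustration): on a
	 * machine with 64M of RAM, avail_end lies past the 16M ISA
	 * limit, so a plain ISA device gets ID_MIGHT_NEED_BOUNCE plus
	 * room in the cookie for a private segment list; a VLB device
	 * that passed ISABUS_DMA_32BIT avoids both, unless its segment
	 * count can't cover the worst-case (size / NBPG) + 1 split.
	 */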

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF,
	    ((flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)|M_ZERO)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DEVBUF, 0);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DEVBUF, 0);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Check to see if we might need to bounce the transfer.
	 */
	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Check if all pages are below the bounce
		 * threshold.  If they are, don't bother bouncing.
		 */
		if (_isa_dma_check_buffer(buf, buflen,
		    map->_dm_segcnt, map->_dm_boundary, p) == 0)
			return (_bus_dmamap_load(t, map, buf, buflen,
			    p, flags));

		STAT_INCR(isa_dma_stats_bounces);

		/*
		 * Allocate bounce pages, if necessary.
		 */
		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
			    flags);
			if (error)
				return (error);
		}

		/*
		 * Cache a pointer to the caller's buffer and
		 * load the DMA map with the bounce buffer.
		 */
		cookie->id_origbuf = buf;
		cookie->id_origbuflen = buflen;
		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
		    buflen, p, flags);

		if (error) {
			/*
			 * Free the bounce pages, unless our resources
			 * are reserved for our exclusive use.
			 */
			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
				_isa_dma_free_bouncebuf(t, map);
		} else {
			/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
			cookie->id_flags |= ID_IS_BOUNCING;
		}
	} else {
		/*
		 * Just use the generic load function.
		 */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	}

	return (error);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags)
{

	panic("_isa_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

#ifdef DEBUG
	if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif
#ifdef DIAGNOSTIC
	if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0 &&
	    (op & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_isa_bus_dmamap_sync: mix PRE and POST");
#endif /* DIAGNOSTIC */

	/* PREREAD and POSTWRITE are no-ops */
	if (op & BUS_DMASYNC_PREWRITE) {
		/*
		 * If we're bouncing this transfer, copy the
		 * caller's buffer to the bounce buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy(cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
	}

	_bus_dmamap_sync(t, map, offset, len, op);

	if (op & BUS_DMASYNC_POSTREAD) {
		/*
		 * If we're bouncing this transfer, copy the
		 * bounce buffer to the caller's buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
	}
}
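
#if 0
/*
 * Illustrative sketch only (not compiled): the sync discipline a
 * hypothetical driver follows around one device-to-memory transfer,
 * which is what makes the bouncing above transparent.  For a
 * memory-to-device transfer the pairing is PREWRITE before the DMA
 * (copying into the bounce buffer) and POSTWRITE afterwards.
 */
void
example_dma_read(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_PREREAD);
	/* ... start the device-to-memory DMA and wait for it to finish ... */
	bus_dmamap_sync(t, map, 0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
}
#endif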

/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	int error;

	/* Try in ISA addressable region first */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, ISA_DMA_BOUNCE_THRESHOLD);
	if (!error)
		return (error);

	/* Otherwise try anywhere (we'll bounce later) */
	error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, (bus_addr_t)0, (bus_addr_t)-1);
	return (error);
}


/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

/*
 * Return 0 if all pages in the passed buffer lie within the DMA'able
 * range of RAM.
 */
int
_isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
    bus_size_t boundary, struct proc *p)
{
	vaddr_t vaddr = (vaddr_t)buf;
	vaddr_t endva;
	paddr_t pa, lastpa;
	u_long pagemask = ~(boundary - 1);
	pmap_t pmap;
	int nsegs;

	endva = round_page(vaddr + buflen);

	nsegs = 1;
	lastpa = 0;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; vaddr < endva; vaddr += NBPG) {
		/*
		 * Get physical address for this segment.
		 */
		pmap_extract(pmap, (vaddr_t)vaddr, &pa);
		pa = trunc_page(pa);

		/*
		 * Is it below the DMA'able threshold?
		 */
		if (pa > ISA_DMA_BOUNCE_THRESHOLD)
			return (EINVAL);

		if (lastpa) {
			/*
			 * Check excessive segment count.
			 */
			if (lastpa + NBPG != pa) {
				if (++nsegs > segcnt)
					return (EFBIG);
			}

			/*
			 * Check boundary restriction.
			 */
			if (boundary) {
				if ((lastpa ^ pa) & pagemask)
					return (EINVAL);
			}
		}
		lastpa = pa;
	}

	return (0);
}

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc_range(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags,
	    0, ISA_DMA_BOUNCE_THRESHOLD);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
#endif /* NISADMA > 0 */