xref: /netbsd/sys/arch/amiga/amiga/machdep.c (revision c4a72b64)
1 /*	$NetBSD: machdep.c,v 1.177 2002/11/03 15:39:39 aymeric Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah $Hdr: machdep.c 1.63 91/04/24$
41  *
42  *	@(#)machdep.c	7.16 (Berkeley) 6/3/91
43  */
44 
45 #include "opt_ddb.h"
46 #include "opt_compat_netbsd.h"
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.177 2002/11/03 15:39:39 aymeric Exp $");
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/callout.h>
54 #include <sys/signalvar.h>
55 #include <sys/kernel.h>
56 #include <sys/proc.h>
57 #include <sys/buf.h>
58 #include <sys/reboot.h>
59 #include <sys/conf.h>
60 #include <sys/file.h>
61 #include <sys/malloc.h>
62 #include <sys/mbuf.h>
63 #include <sys/msgbuf.h>
64 #include <sys/user.h>
65 #include <sys/vnode.h>
66 #include <sys/device.h>
67 #include <sys/queue.h>
68 #include <sys/mount.h>
69 #include <sys/core.h>
70 #include <sys/kcore.h>
71 
72 #include <sys/exec.h>
73 
74 #if defined(DDB) && defined(__ELF__)
75 #include <sys/exec_elf.h>
76 #endif
77 
78 #include <sys/exec_aout.h>
79 
80 #include <net/netisr.h>
81 #undef PS	/* XXX netccitt/pk.h conflict with machine/reg.h? */
82 
83 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
84 #include <uvm/uvm_extern.h>
85 
86 #include <sys/sysctl.h>
87 
88 #include <machine/db_machdep.h>
89 #include <ddb/db_sym.h>
90 #include <ddb/db_extern.h>
91 
92 #include <machine/cpu.h>
93 #include <machine/reg.h>
94 #include <machine/psl.h>
95 #include <machine/pte.h>
96 #include <machine/kcore.h>
97 #include <dev/cons.h>
98 #include <amiga/amiga/isr.h>
99 #include <amiga/amiga/custom.h>
100 #ifdef DRACO
101 #include <amiga/amiga/drcustom.h>
102 #include <m68k/include/asm_single.h>
103 #endif
104 #include <amiga/amiga/cia.h>
105 #include <amiga/amiga/cc.h>
106 #include <amiga/amiga/memlist.h>
107 
108 #include "fd.h"
109 #include "ser.h"
110 
111 /* prototypes */
112 void identifycpu(void);
113 vm_offset_t reserve_dumppages(vm_offset_t);
114 void dumpsys(void);
115 void initcpu(void);
116 void straytrap(int, u_short);
117 static void netintr(void);
118 static void call_sicallbacks(void);
119 static void _softintr_callit(void *, void *);
120 void intrhand(int);
121 #if NSER > 0
122 void ser_outintr(void);
123 #endif
124 #if NFD > 0
125 void fdintr(int);
126 #endif
127 
128 volatile unsigned int interrupt_depth = 0;
129 
130 /*
131  * patched by some devices at attach time (currently, only the coms)
132  */
133 u_int16_t amiga_serialspl = PSL_S|PSL_IPL4;
134 
135 struct vm_map *exec_map = NULL;
136 struct vm_map *mb_map = NULL;
137 struct vm_map *phys_map = NULL;
138 
139 caddr_t	msgbufaddr;
140 paddr_t msgbufpa;
141 
142 int	machineid;
143 int	maxmem;			/* max memory per process */
144 int	physmem = MAXMEM;	/* max supported memory, changes to actual */
145 /*
146  * extender "register" for software interrupts. Moved here
147  * from locore.s, since softints are no longer dealt with
148  * in locore.s.
149  */
150 unsigned char ssir;
151 /*
152  * safepri is a safe priority for sleep to set for a spin-wait
153  * during autoconfiguration or after a panic.
154  */
155 int	safepri = PSL_LOWIPL;
156 extern  int   freebufspace;
157 extern	u_int lowram;
158 
159 /* used in init_main.c */
160 char	*cpu_type = "m68k";
161 /* the following is used externally (sysctl_hw) */
162 char	machine[] = MACHINE;	/* from <machine/param.h> */
163 
164 /* Our exported CPU info; we can have only one. */
165 struct cpu_info cpu_info_store;
166 
167 /*
168  * current open serial device speed;  used by some SCSI drivers to reduce
169  * DMA transfer lengths.
170  */
171 int	ser_open_speed;
172 
173 #ifdef DRACO
174 vaddr_t DRCCADDR;
175 
176 volatile u_int8_t *draco_intena, *draco_intpen, *draco_intfrc;
177 volatile u_int8_t *draco_misc;
178 volatile struct drioct *draco_ioct;
179 #endif
180 
181 /*
182  * Console initialization: called early on from main,
183  * before vm init or startup.  Do enough configuration
184  * to choose and initialize a console.
185  */
186 void
187 consinit()
188 {
189 	/* initialize custom chip interface */
190 #ifdef DRACO
191 	if (is_draco()) {
192 		/* XXX to be done */
193 	} else
194 #endif
195 		custom_chips_init();
196 	/*
197 	 * Initialize the console before we print anything out.
198 	 */
199 	cninit();
200 
201 #if defined (DDB)
202 	{
203 		extern int end[];
204 		extern int *esym;
205 
206 #ifndef __ELF__
207 		ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
208 #else
209 		ddb_init((int)esym - (int)&end - sizeof(Elf32_Ehdr),
210 		    (void *)&end, esym);
211 #endif
212 	}
213 	if (boothowto & RB_KDB)
214 		Debugger();
215 #endif
216 }
217 
218 /*
219  * cpu_startup: allocate memory for variable-sized tables,
220  * initialize cpu, and do autoconfiguration.
221  */
222 void
223 cpu_startup()
224 {
225 	caddr_t v;
226 	u_int i, base, residual;
227 	char pbuf[9];
228 #ifdef DEBUG
229 	extern int pmapdebug;
230 	int opmapdebug = pmapdebug;
231 #endif
232 	paddr_t minaddr, maxaddr;
233 	paddr_t size = 0;
234 
235 	/*
236 	 * Initialize error message buffer (at end of core).
237 	 */
238 #ifdef DEBUG
239 	pmapdebug = 0;
240 #endif
241 	/*
242 	 * pmap_bootstrap has positioned this at the end of the kernel
243 	 * memory segment - map and initialize it now.
244 	 */
245 
246 	for (i = 0; i < btoc(MSGBUFSIZE); i++)
247 		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
248 		    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
249 		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
250 	pmap_update(pmap_kernel());
251 	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
252 
253 	/*
254 	 * Good {morning,afternoon,evening,night}.
255 	 */
256 	printf(version);
257 	identifycpu();
258 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
259 	printf("total memory = %s\n", pbuf);
260 
261 	/*
262 	 * Find out how much space we need, allocate it,
263 	 * and then give everything true virtual addresses.
264 	 */
265 	size = (vm_size_t)allocsys(NULL, NULL);
266 	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
267 		panic("startup: no room for tables");
268 	if (allocsys(v, NULL) - v != size)
269 		panic("startup: table size inconsistency");
270 
271 	/*
272 	 * Now allocate buffers proper.  They are different from the above
273 	 * in that they usually occupy more virtual memory than physical.
274 	 */
275 	size = MAXBSIZE * nbuf;
276 	if (uvm_map(kernel_map, (vm_offset_t *)&buffers, round_page(size),
277 	    NULL, UVM_UNKNOWN_OFFSET, 0,
278 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
279 	    UVM_ADV_NORMAL, 0)) != 0)
280 		panic("startup: cannot allocate VM for buffers");
281 	minaddr = (vm_offset_t) buffers;
282 	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
283 		/* don't want to alloc more physical mem than needed */
284 		bufpages = btoc(MAXBSIZE) * nbuf;
285 	}
286 	base = bufpages / nbuf;
287 	residual = bufpages % nbuf;
288 	for (i = 0; i < nbuf; i++) {
289 		vm_size_t curbufsize;
290 		vm_offset_t curbuf;
291 		struct vm_page *pg;
292 
293 		/*
294 		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
295 		 * that MAXBSIZE space, we allocate and map (base+1) pages
296 		 * for the first "residual" buffers, and then we allocate
297 		 * "base" pages for the rest.
298 		 */
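		/*
		 * For example, with bufpages = 110 and nbuf = 16, base = 6
		 * and residual = 14: the first 14 buffers get 7 pages each
		 * and the last two get 6 pages each (14*7 + 2*6 = 110).
		 */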
299 		curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
300 		curbufsize = NBPG * ((i < residual) ? (base+1) : base);
301 
302 		while (curbufsize) {
303 			pg = uvm_pagealloc(NULL, 0, NULL, 0);
304 			if (pg == NULL)
305 				panic("cpu_startup: not enough memory for "
306 				    "buffer cache");
307 			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
308 				       VM_PROT_READ|VM_PROT_WRITE);
309 			curbuf += PAGE_SIZE;
310 			curbufsize -= PAGE_SIZE;
311 		}
312 	}
313 	pmap_update(pmap_kernel());
314 
315 	/*
316 	 * Allocate a submap for exec arguments.  This map effectively
317 	 * limits the number of processes exec'ing at any time.
318 	 */
319 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
320 				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
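	/*
	 * (16*NCARGS bytes of KVA: roughly 16 execs can be in progress
	 * at once.)
	 */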
321 
322 	/*
323 	 * Allocate a submap for physio
324 	 */
325 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
326 				   VM_PHYS_SIZE, 0, FALSE, NULL);
327 
328 	/*
329 	 * Finally, allocate mbuf cluster submap.
330 	 */
331 	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
332 				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
333 				 FALSE, NULL);
334 
335 #ifdef DEBUG
336 	pmapdebug = opmapdebug;
337 #endif
338 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
339 	printf("avail memory = %s\n", pbuf);
340 	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
341 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
342 
343 	/*
344 	 * display memory configuration passed from loadbsd
345 	 */
346 	if (memlist->m_nseg > 0 && memlist->m_nseg < 16)
347 		for (i = 0; i < memlist->m_nseg; i++)
348 			printf("memory segment %d at %08x size %08x\n", i,
349 			    memlist->m_seg[i].ms_start,
350 			    memlist->m_seg[i].ms_size);
351 
352 #ifdef DEBUG_KERNEL_START
353 	printf("calling initcpu...\n");
354 #endif
355 	/*
356 	 * Set up CPU-specific registers, cache, etc.
357 	 */
358 	initcpu();
359 
360 #ifdef DEBUG_KERNEL_START
361 	printf("survived initcpu...\n");
362 #endif
363 
364 	/*
365 	 * Set up buffers, so they can be used to read disk labels.
366 	 */
367 	bufinit();
368 
369 #ifdef DEBUG_KERNEL_START
370 	printf("survived bufinit...\n");
371 #endif
372 }
373 
374 /*
375  * Set registers on exec.
376  */
377 void
378 setregs(p, pack, stack)
379 	register struct proc *p;
380 	struct exec_package *pack;
381 	u_long stack;
382 {
383 	struct frame *frame = (struct frame *)p->p_md.md_regs;
384 
385 	frame->f_sr = PSL_USERSET;
386 	frame->f_pc = pack->ep_entry & ~1;
387 	frame->f_regs[D0] = 0;
388 	frame->f_regs[D1] = 0;
389 	frame->f_regs[D2] = 0;
390 	frame->f_regs[D3] = 0;
391 	frame->f_regs[D4] = 0;
392 	frame->f_regs[D5] = 0;
393 	frame->f_regs[D6] = 0;
394 	frame->f_regs[D7] = 0;
395 	frame->f_regs[A0] = 0;
396 	frame->f_regs[A1] = 0;
397 	frame->f_regs[A2] = (int)p->p_psstr;
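	/* a2 is set to the ps_strings address for the new process image */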
398 	frame->f_regs[A3] = 0;
399 	frame->f_regs[A4] = 0;
400 	frame->f_regs[A5] = 0;
401 	frame->f_regs[A6] = 0;
402 	frame->f_regs[SP] = stack;
403 
404 	/* restore a null state frame */
405 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
406 #ifdef FPU_EMULATE
407 	if (!fputype)
408 		bzero(&p->p_addr->u_pcb.pcb_fpregs, sizeof(struct fpframe));
409 	else
410 #endif
411 		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
412 }
413 
414 /*
415  * Info for CTL_HW
416  */
417 char cpu_model[120];
418 
419 #if defined(M68060)
420 int m68060_pcr_init = 0x21;	/* make this patchable */
421 #endif
422 
423 
424 void
425 identifycpu()
426 {
427 	/* there's a lot of XXX in here... */
428 	char *mach, *mmu, *fpu;
429 
430 #ifdef M68060
431 	char cpubuf[16];
432 	u_int32_t pcr;
433 #endif
434 
435 #ifdef DRACO
436 	char machbuf[16];
437 
438 	if (is_draco()) {
439 		sprintf(machbuf, "DraCo rev.%d", is_draco());
440 		mach = machbuf;
441 	} else
442 #endif
443 	if (is_a4000())
444 		mach = "Amiga 4000";
445 	else if (is_a3000())
446 		mach = "Amiga 3000";
447 	else if (is_a1200())
448 		mach = "Amiga 1200";
449 	else
450 		mach = "Amiga 500/2000";
451 
452 	fpu = NULL;
453 #ifdef M68060
454 	if (machineid & AMIGA_68060) {
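		/*
		 * The .word pair 0x4e7a,0x0808 is a hand-assembled
		 * "movec %pcr,%d0": it reads the 68060 Processor
		 * Configuration Register into d0.
		 */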
455 		asm(".word 0x4e7a,0x0808; movl %%d0,%0" : "=d"(pcr) : : "d0");
456 		sprintf(cpubuf, "68%s060 rev.%d",
457 		    pcr & 0x10000 ? "LC/EC" : "", (pcr>>8)&0xff);
458 		cpu_type = cpubuf;
459 		mmu = "/MMU";
460 		if (pcr & 2) {
461 			fpu = "/FPU disabled";
462 			fputype = FPU_NONE;
463 		} else if (m68060_pcr_init & 2){
464 			fpu = "/FPU will be disabled";
465 			fputype = FPU_NONE;
466 		} else  if (machineid & AMIGA_FPU40) {
467 			fpu = "/FPU";
468 			fputype = FPU_68040; /* XXX */
469 		}
470 	} else
471 #endif
472 	if (machineid & AMIGA_68040) {
473 		cpu_type = "m68040";
474 		mmu = "/MMU";
475 		fpu = "/FPU";
476 		fputype = FPU_68040; /* XXX */
477 	} else if (machineid & AMIGA_68030) {
478 		cpu_type = "m68030";	/* XXX */
479 		mmu = "/MMU";
480 	} else {
481 		cpu_type = "m68020";
482 		mmu = " m68851 MMU";
483 	}
484 	if (fpu == NULL) {
485 		if (machineid & AMIGA_68882) {
486 			fpu = " m68882 FPU";
487 			fputype = FPU_68882;
488 		} else if (machineid & AMIGA_68881) {
489 			fpu = " m68881 FPU";
490 			fputype = FPU_68881;
491 		} else {
492 			fpu = " no FPU";
493 			fputype = FPU_NONE;
494 		}
495 	}
496 	sprintf(cpu_model, "%s (%s CPU%s%s)", mach, cpu_type, mmu, fpu);
497 	printf("%s\n", cpu_model);
498 }
499 
500 /*
501  * machine dependent system variables.
502  */
503 int
504 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
505 	int *name;
506 	u_int namelen;
507 	void *oldp;
508 	size_t *oldlenp;
509 	void *newp;
510 	size_t newlen;
511 	struct proc *p;
512 {
513 	dev_t consdev;
514 
515 	/* all sysctl names at this level are terminal */
516 	if (namelen != 1)
517 		return(ENOTDIR);               /* overloaded */
518 
519 	switch (name[0]) {
520 	case CPU_CONSDEV:
521 		if (cn_tab != NULL)
522 			consdev = cn_tab->cn_dev;
523 		else
524 			consdev = NODEV;
525 		return(sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
526 		    sizeof(consdev)));
527 	default:
528 		return(EOPNOTSUPP);
529 	}
530 	/* NOTREACHED */
531 }
532 
533 static int waittime = -1;
534 
535 void
536 bootsync(void)
537 {
538 	if (waittime < 0) {
539 		waittime = 0;
540 		vfs_shutdown();
541 		/*
542 		 * If we've been adjusting the clock, the todr
543 		 * will be out of synch; adjust it now.
544 		 */
545 		resettodr();
546 	}
547 }
548 
549 
550 void
551 cpu_reboot(howto, bootstr)
552 	register int howto;
553 	char *bootstr;
554 {
555 	/* take a snapshot before clobbering any registers */
556 	if (curproc)
557 		savectx(&curproc->p_addr->u_pcb);
558 
559 	boothowto = howto;
560 	if ((howto & RB_NOSYNC) == 0)
561 		bootsync();
562 
563 	/* Disable interrupts. */
564 	spl7();
565 
566 	/* If rebooting and a dump is requested do it. */
567 	if (howto & RB_DUMP)
568 		dumpsys();
569 
570 	if (howto & RB_HALT) {
571 		printf("\n");
572 		printf("The operating system has halted.\n");
573 		printf("Please press any key to reboot.\n\n");
574 		cngetc();
575 	}
576 
577 	printf("rebooting...\n");
578 	DELAY(1000000);
579 	doboot();
580 	/*NOTREACHED*/
581 }
582 
583 
584 u_int32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
585 int	dumpsize = 0;		/* also for savecore */
586 long	dumplo = 0;
587 cpu_kcore_hdr_t cpu_kcore_hdr;
588 
589 void
590 cpu_dumpconf()
591 {
592 	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
593 	struct m68k_kcore_hdr *m = &h->un._m68k;
594 	const struct bdevsw *bdev;
595 	int nblks;
596 	int i;
597 	extern u_int Sysseg_pa;
598 	extern int end[];
599 
600 	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));
601 
602 	/*
603 	 * Initialize the `dispatcher' portion of the header.
604 	 */
605 	strcpy(h->name, machine);
606 	h->page_size = NBPG;
607 	h->kernbase = KERNBASE;
608 
609 	/*
610 	 * Fill in information about our MMU configuration.
611 	 */
612 	m->mmutype	= mmutype;
613 	m->sg_v		= SG_V;
614 	m->sg_frame	= SG_FRAME;
615 	m->sg_ishift	= SG_ISHIFT;
616 	m->sg_pmask	= SG_PMASK;
617 	m->sg40_shift1	= SG4_SHIFT1;
618 	m->sg40_mask2	= SG4_MASK2;
619 	m->sg40_shift2	= SG4_SHIFT2;
620 	m->sg40_mask3	= SG4_MASK3;
621 	m->sg40_shift3	= SG4_SHIFT3;
622 	m->sg40_addr1	= SG4_ADDR1;
623 	m->sg40_addr2	= SG4_ADDR2;
624 	m->pg_v		= PG_V;
625 	m->pg_frame	= PG_FRAME;
626 
627 	/*
628 	 * Initialize the pointer to the kernel segment table.
629 	 */
630 	m->sysseg_pa = Sysseg_pa;
631 
632 	/*
633 	 * Initialize relocation value such that:
634 	 *
635 	 *	pa = (va - KERNBASE) + reloc
636 	 */
637 	m->reloc = lowram;
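	/*
	 * With reloc = lowram this means, for example, that kernel va
	 * KERNBASE + 0x1000 corresponds to physical address lowram + 0x1000.
	 */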
638 
639 	/*
640 	 * Define the end of the relocatable range.
641 	 */
642 	m->relocend = (u_int32_t)&end;
643 
644 	/* XXX new corefile format, single segment + chipmem */
645 	dumpsize = physmem;
646 	m->ram_segs[0].start = lowram;
647 	m->ram_segs[0].size  = ctob(physmem);
648 	for (i = 0; i < memlist->m_nseg; i++) {
649 		if ((memlist->m_seg[i].ms_attrib & MEMF_CHIP) == 0)
650 			continue;
651 		dumpsize += btoc(memlist->m_seg[i].ms_size);
652 		m->ram_segs[1].start = 0;
653 		m->ram_segs[1].size  = memlist->m_seg[i].ms_size;
654 		break;
655 	}
656 	if ((bdev = bdevsw_lookup(dumpdev)) != NULL &&
657 	    bdev->d_psize != NULL) {
658 		nblks = (*bdev->d_psize)(dumpdev);
659 		if (dumpsize > btoc(dbtob(nblks - dumplo)))
660 			dumpsize = btoc(dbtob(nblks - dumplo));
661 		else if (dumplo == 0)
662 			dumplo = nblks - btodb(ctob(dumpsize));
663 	}
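	/*
	 * If dumplo was 0 the dump is placed at the very end of the device:
	 * nblks - btodb(ctob(dumpsize)) leaves just enough blocks for the
	 * memory image (the header block is accounted for below).
	 */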
664 	--dumplo;	/* XXX assume header fits in one block */
665 	/*
666 	 * Don't dump on the first NBPG (why NBPG?)
667 	 * in case the dump device includes a disk label.
668 	 */
669 	if (dumplo < btodb(NBPG))
670 		dumplo = btodb(NBPG);
671 }
672 
673 /*
674  * Doadump comes here after turning off memory management and
675  * getting on the dump stack, either when called above, or by
676  * the auto-restart code.
677  */
678 #define BYTES_PER_DUMP MAXPHYS	/* Must be a multiple of pagesize XXX small */
679 static vm_offset_t dumpspace;
680 
681 vm_offset_t
682 reserve_dumppages(p)
683 	vm_offset_t p;
684 {
685 	dumpspace = p;
686 	return (p + BYTES_PER_DUMP);
687 }
688 
689 void
690 dumpsys()
691 {
692 	unsigned bytes, i, n, seg;
693 	int     maddr, psize;
694 	daddr_t blkno;
695 	int     (*dump)(dev_t, daddr_t, caddr_t, size_t);
696 	int     error = 0;
697 	kcore_seg_t *kseg_p;
698 	cpu_kcore_hdr_t *chdr_p;
699 	char	dump_hdr[dbtob(1)];	/* XXX assume hdr fits in 1 block */
700 	const struct bdevsw *bdev;
701 
702 	if (dumpdev == NODEV)
703 		return;
704 	bdev = bdevsw_lookup(dumpdev);
705 	if (bdev == NULL || bdev->d_psize == NULL)
706 		return;
707 	/*
708 	 * For dumps during autoconfiguration,
709 	 * if the dump device has already been configured...
710 	 */
711 	if (dumpsize == 0)
712 		cpu_dumpconf();
713 	if (dumplo <= 0) {
714 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
715 		    minor(dumpdev));
716 		return;
717 	}
718 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
719 	    minor(dumpdev), dumplo);
720 
721 	psize = (*bdev->d_psize)(dumpdev);
722 	printf("dump ");
723 	if (psize == -1) {
724 		printf("area unavailable.\n");
725 		return;
726 	}
727 	kseg_p = (kcore_seg_t *)dump_hdr;
728 	chdr_p = (cpu_kcore_hdr_t *)&dump_hdr[ALIGN(sizeof(*kseg_p))];
729 	bzero(dump_hdr, sizeof(dump_hdr));
730 
731 	/*
732 	 * Generate a segment header
733 	 */
734 	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
735 	kseg_p->c_size = dbtob(1) - ALIGN(sizeof(*kseg_p));
736 
737 	/*
738 	 * Add the md header
739 	 */
740 
741 	*chdr_p = cpu_kcore_hdr;
742 
743 	bytes = ctob(dumpsize);
744 	maddr = cpu_kcore_hdr.un._m68k.ram_segs[0].start;
745 	seg = 0;
746 	blkno = dumplo;
747 	dump = bdev->d_dump;
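	/*
	 * The first block at dumplo holds the kcore segment header plus the
	 * cpu_kcore_hdr; the memory image follows from the next block on.
	 */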
748 	error = (*dump) (dumpdev, blkno++, (caddr_t)dump_hdr, dbtob(1));
749 	for (i = 0; i < bytes && error == 0; i += n) {
750 		/* Print out how many MBs we have to go. */
751 		n = bytes - i;
752 		if (n && (n % (1024 * 1024)) == 0)
753 			printf("%d ", n / (1024 * 1024));
754 
755 		/* Limit size for next transfer. */
756 		if (n > BYTES_PER_DUMP)
757 			n = BYTES_PER_DUMP;
758 
759 		if (maddr == 0) {	/* XXX kvtop chokes on this */
760 			maddr += NBPG;
761 			n -= NBPG;
762 			i += NBPG;
763 			++blkno;	/* XXX skip physical page 0 */
764 		}
765 		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
766 		error = (*dump) (dumpdev, blkno, (caddr_t) dumpspace, n);
767 		if (error)
768 			break;
769 		maddr += n;
770 		blkno += btodb(n);	/* XXX? */
771 		if (maddr >= (cpu_kcore_hdr.un._m68k.ram_segs[seg].start +
772 		    cpu_kcore_hdr.un._m68k.ram_segs[seg].size)) {
773 			++seg;
774 			maddr = cpu_kcore_hdr.un._m68k.ram_segs[seg].start;
775 			if (cpu_kcore_hdr.un._m68k.ram_segs[seg].size == 0)
776 				break;
777 		}
778 	}
779 
780 	switch (error) {
781 
782 	case ENXIO:
783 		printf("device bad\n");
784 		break;
785 
786 	case EFAULT:
787 		printf("device not ready\n");
788 		break;
789 
790 	case EINVAL:
791 		printf("area improper\n");
792 		break;
793 
794 	case EIO:
795 		printf("i/o error\n");
796 		break;
797 
798 	default:
799 		printf("succeeded\n");
800 		break;
801 	}
802 	printf("\n\n");
803 	delay(5000000);		/* 5 seconds */
804 }
805 
806 /*
807  * Return the best possible estimate of the time in the timeval
808  * to which tvp points.  We do this by returning the current time
809  * plus the amount of time since the last clock interrupt (clock.c:clkread).
810  *
811  * Check that this time is no less than any previously-reported time,
812  * which could happen around the time of a clock adjustment.  Just for fun,
813  * we guarantee that the time will be greater than the value obtained by a
814  * previous call.
815  */
816 void
817 microtime(tvp)
818 	register struct timeval *tvp;
819 {
820 	int s = spl7();
821 	static struct timeval lasttime;
822 
823 	*tvp = time;
824 	tvp->tv_usec += clkread();
825 	while (tvp->tv_usec >= 1000000) {
826 		tvp->tv_sec++;
827 		tvp->tv_usec -= 1000000;
828 	}
829 	if (tvp->tv_sec == lasttime.tv_sec &&
830 	    tvp->tv_usec <= lasttime.tv_usec &&
831 	    (tvp->tv_usec = lasttime.tv_usec + 1) >= 1000000) {
832 		tvp->tv_sec++;
833 		tvp->tv_usec -= 1000000;
834 	}
835 	lasttime = *tvp;
836 	splx(s);
837 }
838 
839 void
840 initcpu()
841 {
842 	typedef void trapfun(void);
843 
844 	/* XXX should init '40 vecs here, too */
845 #if defined(M68060) || defined(M68040) || defined(DRACO) || defined(FPU_EMULATE)
846 	extern trapfun *vectab[256];
847 #endif
848 
849 #if defined(M68060) || defined(M68040)
850 	extern trapfun addrerr4060;
851 #endif
852 
853 #ifdef M68060
854 	extern trapfun buserr60;
855 #if defined(M060SP)
856 	/*extern u_int8_t I_CALL_TOP[];*/
857 	extern trapfun intemu60, fpiemu60, fpdemu60, fpeaemu60;
858 	extern u_int8_t FP_CALL_TOP[];
859 #else
860 	extern trapfun illinst;
861 #endif
862 	extern trapfun fpfault;
863 #endif
864 
865 #ifdef M68040
866 	extern trapfun buserr40;
867 #endif
868 
869 #ifdef DRACO
870 	extern trapfun DraCoIntr, DraCoLev1intr, DraCoLev2intr;
871 	u_char dracorev;
872 #endif
873 
874 #ifdef FPU_EMULATE
875 	extern trapfun fpemuli;
876 #endif
877 
878 #ifdef M68060
879 	if (machineid & AMIGA_68060) {
880 		if (machineid & AMIGA_FPU40 && m68060_pcr_init & 2) {
881 			/*
882 			 * in this case, we're about to switch the FPU off;
883 			 * do a FNOP to avoid stray FP traps later
884 			 */
885 			__asm("fnop");
886 			/* ... and mark FPU as absent for identifyfpu() */
887 			machineid &= ~(AMIGA_FPU40|AMIGA_68882|AMIGA_68881);
888 		}
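		/*
		 * 0x4e7b,0x0808 is a hand-assembled "movec %d0,%pcr":
		 * write m68060_pcr_init to the 68060 Processor
		 * Configuration Register.
		 */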
889 		asm volatile ("movl %0,%%d0; .word 0x4e7b,0x0808" : :
890 			"d"(m68060_pcr_init):"d0" );
891 
892 		/* bus/addrerr vectors */
893 		vectab[2] = buserr60;
894 		vectab[3] = addrerr4060;
895 #if defined(M060SP)
896 
897 		/* integer support */
898 		vectab[61] = intemu60/*(trapfun *)&I_CALL_TOP[128 + 0x00]*/;
899 
900 		/* floating point support */
901 		/*
902 		 * XXX maybe we really should run-time check for the
903 		 * stack frame format here:
904 		 */
905 		vectab[11] = fpiemu60/*(trapfun *)&FP_CALL_TOP[128 + 0x30]*/;
906 
907 		vectab[55] = fpdemu60/*(trapfun *)&FP_CALL_TOP[128 + 0x38]*/;
908 		vectab[60] = fpeaemu60/*(trapfun *)&FP_CALL_TOP[128 + 0x40]*/;
909 
910 		vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
911 		vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
912 		vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
913 		vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
914 		vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
915 		vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
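		/*
		 * Vectors 48-54 are the m68k FP exception vectors (BSUN,
		 * inexact, divide by zero, underflow, operand error,
		 * overflow, SNAN); 55, 60 and 61 are the 68060
		 * unimplemented data type, effective address and integer
		 * instruction traps handled by the 060 support package.
		 */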
916 
917 #else
918 		vectab[61] = illinst;
919 #endif
920 		vectab[48] = fpfault;
921 	}
922 #endif
923 
924 /*
925  * Vector initialization for special motherboards
926  */
927 #ifdef M68040
928 #ifdef M68060
929 	else
930 #endif
931 	if (machineid & AMIGA_68040) {
932 		/* addrerr vector */
933 		vectab[2] = buserr40;
934 		vectab[3] = addrerr4060;
935 	}
936 #endif
937 
938 #ifdef FPU_EMULATE
939 	if (!(machineid & (AMIGA_68881|AMIGA_68882|AMIGA_FPU40))) {
940 		vectab[11] = fpemuli;
941 		printf("FPU software emulation initialized.\n");
942 	}
943 #endif
944 
945 /*
946  * Vector initialization for special motherboards
947  */
948 
949 #ifdef DRACO
950 	dracorev = is_draco();
951 	if (dracorev) {
952 		if (dracorev >= 4) {
953 			vectab[24+1] = DraCoLev1intr;
954 			vectab[24+2] = DraCoIntr;
955 		} else {
956 			vectab[24+1] = DraCoIntr;
957 			vectab[24+2] = DraCoLev2intr;
958 		}
959 		vectab[24+3] = DraCoIntr;
960 		vectab[24+4] = DraCoIntr;
961 		vectab[24+5] = DraCoIntr;
962 		vectab[24+6] = DraCoIntr;
963 	}
964 #endif
965 }
966 
967 void
968 straytrap(pc, evec)
969 	int pc;
970 	u_short evec;
971 {
972 	printf("unexpected trap format %x (vector offset %x) from %x\n",
973 	       evec>>12, evec & 0xFFF, pc);
974 /*XXX*/	panic("straytrap");
975 }
976 
977 int	*nofault;
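/*
 * badaddr()/badbaddr() probe an address for a bus error: nofault points at
 * a jump buffer while the probe access is made, and the bus error trap
 * handler is expected to longjmp() back here instead of panicking if the
 * access faults.
 */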
978 
979 int
980 badaddr(addr)
981 	register caddr_t addr;
982 {
983 	register int i;
984 	label_t	faultbuf;
985 
986 #ifdef lint
987 	i = *addr; if (i) return(0);
988 #endif
989 	nofault = (int *) &faultbuf;
990 	if (setjmp((label_t *)nofault)) {
991 		nofault = (int *) 0;
992 		return(1);
993 	}
994 	i = *(volatile short *)addr;
995 	nofault = (int *) 0;
996 	return(0);
997 }
998 
999 int
1000 badbaddr(addr)
1001 	register caddr_t addr;
1002 {
1003 	register int i;
1004 	label_t	faultbuf;
1005 
1006 #ifdef lint
1007 	i = *addr; if (i) return(0);
1008 #endif
1009 	nofault = (int *) &faultbuf;
1010 	if (setjmp((label_t *)nofault)) {
1011 		nofault = (int *) 0;
1012 		return(1);
1013 	}
1014 	i = *(volatile char *)addr;
1015 	nofault = (int *) 0;
1016 	return(0);
1017 }
1018 
1019 static void
1020 netintr()
1021 {
1022 
1023 #define DONETISR(bit, fn) do {		\
1024 	if (netisr & (1 << bit)) {	\
1025 		netisr &= ~(1 << bit);	\
1026 		fn();			\
1027 	}				\
1028 } while (0)
1029 
1030 #include <net/netisr_dispatch.h>
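/*
 * netisr_dispatch.h expands DONETISR() once per configured protocol
 * (e.g. DONETISR(NETISR_ARP, arpintr)), so each pending protocol
 * input queue gets drained here.
 */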
1031 
1032 #undef DONETISR
1033 }
1034 
1035 
1036 /*
1037  * This is a handy package for having function calls executed
1038  * asynchronously at very low interrupt priority.
1039  * An example use is keyboard repeat, where the repeat
1040  * handler running at splclock() triggers such a (hardware
1041  * aided) software interrupt.
1042  * Note: the installed functions are currently called in
1043  * LIFO fashion; we might want to change this to FIFO
1044  * later.
1045  */
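/*
 * Illustrative (hypothetical) use from such a handler:
 *
 *	add_sicallback(kbd_repeat, kbd_softc, NULL);
 *
 * kbd_repeat() then runs later, at software interrupt priority, with the
 * two "rock" arguments it was queued with.
 */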
1046 struct si_callback {
1047 	struct si_callback *next;
1048 	void (*function)(void *rock1, void *rock2);
1049 	void *rock1, *rock2;
1050 };
1051 static struct si_callback *si_callbacks;
1052 static struct si_callback *si_free;
1053 #ifdef DIAGNOSTIC
1054 static int ncb;		/* number of callback blocks allocated */
1055 static int ncbd;	/* number of callback blocks dynamically allocated */
1056 #endif
1057 
1058 /*
1059  * These are __GENERIC_SOFT_INTERRUPT wrappers; they will be replaced
1060  * by the real thing once all drivers are converted.
1061  *
1062  * To help performance for converted drivers, the YYY_sicallback() function
1063  * family can be implemented in terms of softintr_XXX() as an intermediate
1064  * measure.
1065  */
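/*
 * A converted driver would use these roughly as follows (a sketch; xxhandler
 * and sc are hypothetical, and the ipl argument is ignored by this
 * implementation):
 *
 *	void *sih = softintr_establish(IPL_SOFTNET, xxhandler, sc);
 *	...
 *	softintr_schedule(sih);		(from the hard interrupt handler)
 */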
1066 
1067 static void
1068 _softintr_callit(rock1, rock2)
1069 	void *rock1, *rock2;
1070 {
1071 	(*(void (*)(void *))rock1)(rock2);
1072 }
1073 
1074 void *
1075 softintr_establish(ipl, func, arg)
1076 	int ipl;
1077 	void func(void *);
1078 	void *arg;
1079 {
1080 	struct si_callback *si;
1081 
1082 	(void)ipl;
1083 
1084 	si = (struct si_callback *)malloc(sizeof(*si), M_TEMP, M_NOWAIT);
1085 	if (si == NULL)
1086 		return (si);
1087 
1088 	si->function = (void *)0;
1089 	si->rock1 = (void *)func;
1090 	si->rock2 = arg;
1091 
1092 	alloc_sicallback();
1093 	return ((void *)si);
1094 }
1095 
1096 void
1097 softintr_disestablish(hook)
1098 	void *hook;
1099 {
1100 	/*
1101 	 * XXX currently, there is a memory leak here; we can't free the
1102 	 * sicallback structure.
1103 	 * This will be automatically repaired once we rewrite the soft
1104 	 * interrupt functions.
1105 	 */
1106 
1107 	free(hook, M_TEMP);
1108 }
1109 
1110 void
1111 alloc_sicallback()
1112 {
1113 	struct si_callback *si;
1114 	int s;
1115 
1116 	si = (struct si_callback *)malloc(sizeof(*si), M_TEMP, M_NOWAIT);
1117 	if (si == NULL)
1118 		return;
1119 	s = splhigh();
1120 	si->next = si_free;
1121 	si_free = si;
1122 	splx(s);
1123 #ifdef DIAGNOSTIC
1124 	++ncb;
1125 #endif
1126 }
1127 
1128 void
1129 softintr_schedule(vsi)
1130 	void *vsi;
1131 {
1132 	struct si_callback *si;
1133 	si = vsi;
1134 
1135 	add_sicallback(_softintr_callit, si->rock1, si->rock2);
1136 }
1137 
1138 void
1139 add_sicallback (function, rock1, rock2)
1140 	void (*function)(void *rock1, void *rock2);
1141 	void *rock1, *rock2;
1142 {
1143 	struct si_callback *si;
1144 	int s;
1145 
1146 	/*
1147 	 * This function may be called from high-priority interrupt handlers.
1148 	 * We may NOT block for memory allocation in here!
1149 	 */
1150 	s = splhigh();
1151 	si = si_free;
1152 	if (si != NULL)
1153 		si_free = si->next;
1154 	splx(s);
1155 
1156 	if (si == NULL) {
1157 		si = (struct si_callback *)malloc(sizeof(*si), M_TEMP, M_NOWAIT);
1158 #ifdef DIAGNOSTIC
1159 		if (si)
1160 			++ncbd;		/* count # dynamically allocated */
1161 #endif
1162 
1163 		if (!si)
1164 			return;
1165 	}
1166 
1167 	si->function = function;
1168 	si->rock1 = rock1;
1169 	si->rock2 = rock2;
1170 
1171 	s = splhigh();
1172 	si->next = si_callbacks;
1173 	si_callbacks = si;
1174 	splx(s);
1175 
1176 	/*
1177 	 * Cause a software interrupt (spl1). This interrupt might
1178 	 * happen immediately, or after returning to a safe enough level.
1179 	 */
1180 	setsoftcback();
1181 }
1182 
1183 
1184 void
1185 rem_sicallback(function)
1186 	void (*function)(void *rock1, void *rock2);
1187 {
1188 	struct si_callback *si, *psi, *nsi;
1189 	int s;
1190 
1191 	s = splhigh();
1192 	for (psi = 0, si = si_callbacks; si; ) {
1193 		nsi = si->next;
1194 
1195 		if (si->function != function)
1196 			psi = si;
1197 		else {
1198 /*			free(si, M_TEMP); */
1199 			si->next = si_free;
1200 			si_free = si;
1201 			if (psi)
1202 				psi->next = nsi;
1203 			else
1204 				si_callbacks = nsi;
1205 		}
1206 		si = nsi;
1207 	}
1208 	splx(s);
1209 }
1210 
1211 /* purge the list */
1212 static void
1213 call_sicallbacks()
1214 {
1215 	struct si_callback *si;
1216 	int s;
1217 	void *rock1, *rock2;
1218 	void (*function)(void *, void *);
1219 
1220 	do {
1221 		s = splhigh ();
1222 		if ((si = si_callbacks) != 0)
1223 			si_callbacks = si->next;
1224 		splx(s);
1225 
1226 		if (si) {
1227 			function = si->function;
1228 			rock1 = si->rock1;
1229 			rock2 = si->rock2;
1230 /*			si->function(si->rock1, si->rock2); */
1231 /*			free(si, M_TEMP); */
1232 			s = splhigh ();
1233 			si->next = si_free;
1234 			si_free = si;
1235 			splx(s);
1236 			function (rock1, rock2);
1237 		}
1238 	} while (si);
1239 #ifdef DIAGNOSTIC
1240 	if (ncbd) {
1241 		ncb += ncbd;
1242 		printf("call_sicallback: %d more dynamic structures %d total\n",
1243 		    ncbd, ncb);
1244 		ncbd = 0;
1245 	}
1246 #endif
1247 }
1248 
1249 struct isr *isr_ports;
1250 #ifdef DRACO
1251 struct isr *isr_slot3;
1252 struct isr *isr_supio;
1253 #endif
1254 struct isr *isr_exter;
1255 
1256 void
1257 add_isr(isr)
1258 	struct isr *isr;
1259 {
1260 	struct isr **p, *q;
1261 
1262 #ifdef DRACO
1263 	switch (isr->isr_ipl) {
1264 	case 2:
1265 		p = &isr_ports;
1266 		break;
1267 	case 3:
1268 		p = &isr_slot3;
1269 		break;
1270 	case 5:
1271 		p = &isr_supio;
1272 		break;
1273 	default:	/* was case 6:; make gcc -Wall quiet */
1274 		p = &isr_exter;
1275 		break;
1276 	}
1277 #else
1278 	p = isr->isr_ipl == 2 ? &isr_ports : &isr_exter;
1279 #endif
1280 	while ((q = *p) != NULL)
1281 		p = &q->isr_forw;
1282 	isr->isr_forw = NULL;
1283 	*p = isr;
1284 	/* enable interrupt */
1285 #ifdef DRACO
1286 	if (is_draco())
1287 		switch(isr->isr_ipl) {
1288 			case 6:
1289 				single_inst_bset_b(*draco_intena, DRIRQ_INT6);
1290 				break;
1291 			case 2:
1292 				single_inst_bset_b(*draco_intena, DRIRQ_INT2);
1293 				break;
1294 			default:
1295 				break;
1296 		}
1297 	else
1298 #endif
1299 		custom.intena = isr->isr_ipl == 2 ?
1300 		    INTF_SETCLR | INTF_PORTS :
1301 		    INTF_SETCLR | INTF_EXTER;
1302 }
1303 
1304 void
1305 remove_isr(isr)
1306 	struct isr *isr;
1307 {
1308 	struct isr **p, *q;
1309 
1310 #ifdef DRACO
1311 	switch (isr->isr_ipl) {
1312 	case 2:
1313 		p = &isr_ports;
1314 		break;
1315 	case 3:
1316 		p = &isr_slot3;
1317 		break;
1318 	case 5:
1319 		p = &isr_supio;
1320 		break;
1321 	default:	/* XXX to make gcc -Wall quiet, was 6: */
1322 		p = &isr_exter;
1323 		break;
1324 	}
1325 #else
1326 	p = isr->isr_ipl == 6 ? &isr_exter : &isr_ports;
1327 #endif
1328 
1329 	while ((q = *p) != NULL && q != isr)
1330 		p = &q->isr_forw;
1331 	if (q)
1332 		*p = q->isr_forw;
1333 	else
1334 		panic("remove_isr: handler not registered");
1335 	/* disable interrupt if no more handlers */
1336 #ifdef DRACO
1337 	switch (isr->isr_ipl) {
1338 	case 2:
1339 		p = &isr_ports;
1340 		break;
1341 	case 3:
1342 		p = &isr_slot3;
1343 		break;
1344 	case 5:
1345 		p = &isr_supio;
1346 		break;
1347 	case 6:
1348 		p = &isr_exter;
1349 		break;
1350 	}
1351 #else
1352 	p = isr->isr_ipl == 6 ? &isr_exter : &isr_ports;
1353 #endif
1354 	if (*p == NULL) {
1355 #ifdef DRACO
1356 		if (is_draco()) {
1357 			switch(isr->isr_ipl) {
1358 				case 2:
1359 					single_inst_bclr_b(*draco_intena,
1360 					    DRIRQ_INT2);
1361 					break;
1362 				case 6:
1363 					single_inst_bclr_b(*draco_intena,
1364 					    DRIRQ_INT6);
1365 					break;
1366 				default:
1367 					break;
1368 			}
1369 		} else
1370 #endif
1371 			custom.intena = isr->isr_ipl == 6 ?
1372 			    INTF_EXTER : INTF_PORTS;
1373 	}
1374 }
1375 
1376 void
1377 intrhand(sr)
1378 	int sr;
1379 {
1380 	register unsigned int ipl;
1381 	register unsigned short ireq;
1382 	register struct isr **p, *q;
1383 
1384 	ipl = (sr >> 8) & 7;
1385 #ifdef REALLYDEBUG
1386 	printf("intrhand: got int. %d\n", ipl);
1387 #endif
1388 #ifdef DRACO
1389 	if (is_draco())
1390 		ireq = ((ipl == 1)  && (*draco_intfrc & DRIRQ_SOFT) ?
1391 		    INTF_SOFTINT : 0);
1392 	else
1393 #endif
1394 		ireq = custom.intreqr;
1395 
1396 	switch (ipl) {
1397 	case 1:
1398 #ifdef DRACO
1399 		if (is_draco() && (draco_ioct->io_status & DRSTAT_KBDRECV))
1400 			drkbdintr();
1401 #endif
1402 		if (ireq & INTF_TBE) {
1403 #if NSER > 0
1404 			ser_outintr();
1405 #else
1406 			custom.intreq = INTF_TBE;
1407 #endif
1408 		}
1409 
1410 		if (ireq & INTF_DSKBLK) {
1411 #if NFD > 0
1412 			fdintr(0);
1413 #endif
1414 			custom.intreq = INTF_DSKBLK;
1415 		}
1416 		if (ireq & INTF_SOFTINT) {
1417 			unsigned char ssir_active;
1418 			int s;
1419 
1420 			/*
1421 			 * First clear the softint bit,
1422 			 * then process all classes of softints.
1423 			 * This order is dictated by the nature of
1424 			 * software interrupts.  The other order
1425 			 * allows software interrupts to be missed.
1426 			 * Also copy and clear ssir to prevent
1427 			 * interrupt loss.
1428 			 */
1429 			clrsoftint();
1430 			s = splhigh();
1431 			ssir_active = ssir;
1432 			siroff(SIR_NET | SIR_CBACK);
1433 			splx(s);
1434 			if (ssir_active & SIR_NET) {
1435 #ifdef REALLYDEBUG
1436 				printf("calling netintr\n");
1437 #endif
1438 				uvmexp.softs++;
1439 				netintr();
1440 			}
1441 			if (ssir_active & SIR_CBACK) {
1442 #ifdef REALLYDEBUG
1443 				printf("calling softcallbacks\n");
1444 #endif
1445 				uvmexp.softs++;
1446 				call_sicallbacks();
1447 			}
1448 		}
1449 		break;
1450 
1451 	case 2:
1452 		p = &isr_ports;
1453 		while ((q = *p) != NULL) {
1454 			if ((q->isr_intr)(q->isr_arg))
1455 				break;
1456 			p = &q->isr_forw;
1457 		}
1458 		if (q == NULL)
1459 			ciaa_intr ();
1460 #ifdef DRACO
1461 		if (is_draco())
1462 			single_inst_bclr_b(*draco_intpen, DRIRQ_INT2);
1463 		else
1464 #endif
1465 			custom.intreq = INTF_PORTS;
1466 
1467 		break;
1468 
1469 #ifdef DRACO
1470 	/* only handled here for DraCo */
1471 	case 6:
1472 		p = &isr_exter;
1473 		while ((q = *p) != NULL) {
1474 			if ((q->isr_intr)(q->isr_arg))
1475 				break;
1476 			p = &q->isr_forw;
1477 		}
1478 		single_inst_bclr_b(*draco_intpen, DRIRQ_INT6);
1479 		break;
1480 #endif
1481 
1482 	case 3:
1483 	/* VBL */
1484 		if (ireq & INTF_BLIT)
1485 			blitter_handler();
1486 		if (ireq & INTF_COPER)
1487 			copper_handler();
1488 		if (ireq & INTF_VERTB)
1489 			vbl_handler();
1490 		break;
1491 #ifdef DRACO
1492 	case 5:
1493 		p = &isr_supio;
1494 		while ((q = *p) != NULL) {
1495 			if ((q->isr_intr)(q->isr_arg))
1496 				break;
1497 			p = &q->isr_forw;
1498 		}
1499 		break;
1500 #endif
1501 #if 0
1502 /* now dealt with in locore.s for speed reasons */
1503 	case 5:
1504 		/* check RS232 RBF */
1505 		serintr (0);
1506 
1507 		custom.intreq = INTF_DSKSYNC;
1508 		break;
1509 #endif
1510 
1511 	case 4:
1512 #ifdef DRACO
1513 #include "drsc.h"
1514 		if (is_draco())
1515 #if NDRSC > 0
1516 			drsc_handler();
1517 #else
1518 			single_inst_bclr_b(*draco_intpen, DRIRQ_SCSI);
1519 #endif
1520 		else
1521 #endif
1522 		audio_handler();
1523 		break;
1524 	default:
1525 		printf("intrhand: unexpected sr 0x%x, intreq = 0x%x\n",
1526 		    sr, ireq);
1527 		break;
1528 	}
1529 #ifdef REALLYDEBUG
1530 	printf("intrhand: leaving.\n");
1531 #endif
1532 }
1533 
1534 #if defined(DEBUG) && !defined(PANICBUTTON)
1535 #define PANICBUTTON
1536 #endif
1537 
1538 #ifdef PANICBUTTON
1539 int panicbutton = 1;	/* non-zero if panic buttons are enabled */
1540 int crashandburn = 0;
1541 int candbdelay = 50;	/* give em half a second */
1542 void candbtimer(void);
1543 struct callout candbtimer_ch = CALLOUT_INITIALIZER;
1544 
1545 void
1546 candbtimer()
1547 {
1548 	crashandburn = 0;
1549 }
1550 #endif
1551 
1552 #if 0
1553 /*
1554  * Level 7 interrupts can be caused by the keyboard or parity errors.
1555  */
1556 nmihand(frame)
1557 	struct frame frame;
1558 {
1559 	if (kbdnmi()) {
1560 #ifdef PANICBUTTON
1561 		static int innmihand = 0;
1562 
1563 		/*
1564 		 * Attempt to reduce the window of vulnerability for recursive
1565 		 * NMIs (e.g. someone holding down the keyboard reset button).
1566 		 */
1567 		if (innmihand == 0) {
1568 			innmihand = 1;
1569 			printf("Got a keyboard NMI\n");
1570 			innmihand = 0;
1571 		}
1572 		if (panicbutton) {
1573 			if (crashandburn) {
1574 				crashandburn = 0;
1575 				panic(panicstr ?
1576 				      "forced crash, nosync" : "forced crash");
1577 			}
1578 			crashandburn++;
1579 			callout_reset(&candbtimer_ch, candbdelay,
1580 			    candbtimer, NULL);
1581 		}
1582 #endif
1583 		return;
1584 	}
1585 	if (parityerror(&frame))
1586 		return;
1587 	/* panic?? */
1588 	printf("unexpected level 7 interrupt ignored\n");
1589 }
1590 #endif
1591 
1592 /*
1593  * We should only get here if there is no standard executable. This can
1594  * currently only mean we're reading an old ZMAGIC file without a MID, but
1595  * since Amiga ZMAGIC always worked the `right' way (;-)), just ignore the
1596  * missing MID and proceed to the new zmagic code ;-)
1597  */
1598 int
1599 cpu_exec_aout_makecmds(p, epp)
1600 	struct proc *p;
1601 	struct exec_package *epp;
1602 {
1603 	int error = ENOEXEC;
1604 #ifdef COMPAT_NOMID
1605 	struct exec *execp = epp->ep_hdr;
1606 #endif
1607 
1608 #ifdef COMPAT_NOMID
1609 	if (!((execp->a_midmag >> 16) & 0x0fff)
1610 	    && execp->a_midmag == ZMAGIC)
1611 		return(exec_aout_prep_zmagic(p, epp));
1612 #endif
1613 	return(error);
1614 }
1615 
1616 #ifdef LKM
1617 
1618 int _spllkm6(void);
1619 int _spllkm7(void);
1620 
1621 #ifdef LEV6_DEFER
1622 int _spllkm6() {
1623 	return spl4();
1624 };
1625 
1626 int _spllkm7() {
1627 	return spl4();
1628 };
1629 
1630 #else
1631 
1632 int _spllkm6() {
1633 	return spl6();
1634 };
1635 
1636 int _spllkm7() {
1637 	return spl7();
1638 };
1639 
1640 #endif
1641 
1642 #endif
1643