xref: /netbsd/sys/arch/amiga/amiga/machdep.c (revision bf9ec67e)
1 /*	$NetBSD: machdep.c,v 1.172 2002/05/14 00:08:21 matt Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah $Hdr: machdep.c 1.63 91/04/24$
41  *
42  *	@(#)machdep.c	7.16 (Berkeley) 6/3/91
43  */
44 
45 #include "opt_ddb.h"
46 #include "opt_compat_netbsd.h"
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.172 2002/05/14 00:08:21 matt Exp $");
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/callout.h>
54 #include <sys/signalvar.h>
55 #include <sys/kernel.h>
56 #include <sys/map.h>
57 #include <sys/proc.h>
58 #include <sys/buf.h>
59 #include <sys/reboot.h>
60 #include <sys/conf.h>
61 #include <sys/file.h>
62 #include <sys/clist.h>
63 #include <sys/malloc.h>
64 #include <sys/mbuf.h>
65 #include <sys/msgbuf.h>
66 #include <sys/user.h>
67 #include <sys/vnode.h>
68 #include <sys/device.h>
69 #include <sys/queue.h>
70 #include <sys/mount.h>
71 #include <sys/syscallargs.h>
72 #include <sys/core.h>
73 #include <sys/kcore.h>
74 
75 #include <sys/exec.h>
76 
77 #if defined(DDB) && defined(__ELF__)
78 #include <sys/exec_elf.h>
79 #endif
80 
81 #include <sys/exec_aout.h>
82 
83 #include <net/netisr.h>
84 #undef PS	/* XXX netccitt/pk.h conflict with machine/reg.h? */
85 
86 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
87 #include <uvm/uvm_extern.h>
88 
89 #include <sys/sysctl.h>
90 
91 #include <machine/db_machdep.h>
92 #include <ddb/db_sym.h>
93 #include <ddb/db_extern.h>
94 
95 #include <machine/cpu.h>
96 #include <machine/reg.h>
97 #include <machine/psl.h>
98 #include <machine/pte.h>
99 #include <machine/kcore.h>
100 #include <dev/cons.h>
101 #include <amiga/amiga/isr.h>
102 #include <amiga/amiga/custom.h>
103 #ifdef DRACO
104 #include <amiga/amiga/drcustom.h>
105 #include <m68k/include/asm_single.h>
106 #endif
107 #include <amiga/amiga/cia.h>
108 #include <amiga/amiga/cc.h>
109 #include <amiga/amiga/memlist.h>
110 
111 #include "fd.h"
112 #include "ser.h"
113 
114 /* prototypes */
115 void identifycpu(void);
116 vm_offset_t reserve_dumppages(vm_offset_t);
117 void dumpsys(void);
118 void initcpu(void);
119 void straytrap(int, u_short);
120 static void netintr(void);
121 static void call_sicallbacks(void);
122 static void _softintr_callit(void *, void *);
123 void intrhand(int);
124 #if NSER > 0
125 void ser_outintr(void);
126 #endif
127 #if NFD > 0
128 void fdintr(int);
129 #endif
130 
131 volatile unsigned int interrupt_depth = 0;
132 
133 /*
134  * patched by some devices at attach time (currently, only the coms)
135  */
136 u_int16_t amiga_serialspl = PSL_S|PSL_IPL4;
137 
138 struct vm_map *exec_map = NULL;
139 struct vm_map *mb_map = NULL;
140 struct vm_map *phys_map = NULL;
141 
142 caddr_t	msgbufaddr;
143 paddr_t msgbufpa;
144 
145 int	machineid;
146 int	maxmem;			/* max memory per process */
147 int	physmem = MAXMEM;	/* max supported memory, changes to actual */
148 /*
149  * extender "register" for software interrupts. Moved here
150  * from locore.s, since softints are no longer dealt with
151  * in locore.s.
152  */
153 unsigned char ssir;
154 /*
155  * safepri is a safe priority for sleep to set for a spin-wait
156  * during autoconfiguration or after a panic.
157  */
158 int	safepri = PSL_LOWIPL;
159 extern  int   freebufspace;
160 extern	u_int lowram;
161 
162 /* used in init_main.c */
163 char	*cpu_type = "m68k";
164 /* the following is used externally (sysctl_hw) */
165 char	machine[] = MACHINE;	/* from <machine/param.h> */
166 
167 /* Our exported CPU info; we can have only one. */
168 struct cpu_info cpu_info_store;
169 
170 /*
171  * current open serial device speed;  used by some SCSI drivers to reduce
172  * DMA transfer lengths.
173  */
174 int	ser_open_speed;
175 
176 #ifdef DRACO
177 vaddr_t DRCCADDR;
178 
179 volatile u_int8_t *draco_intena, *draco_intpen, *draco_intfrc;
180 volatile u_int8_t *draco_misc;
181 volatile struct drioct *draco_ioct;
182 #endif
183 
184 /*
185  * Console initialization: called early on from main,
186  * before vm init or startup.  Do enough configuration
187  * to choose and initialize a console.
188  */
189 void
190 consinit()
191 {
192 	/* initialize custom chip interface */
193 #ifdef DRACO
194 	if (is_draco()) {
195 		/* XXX to be done */
196 	} else
197 #endif
198 		custom_chips_init();
199 	/*
200 	 * Initialize the console before we print anything out.
201 	 */
202 	cninit();
203 
204 #if defined (DDB)
205 	{
206 		extern int end[];
207 		extern int *esym;
208 
209 #ifndef __ELF__
210 		ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
211 #else
212 		ddb_init((int)esym - (int)&end - sizeof(Elf32_Ehdr),
213 		    (void *)&end, esym);
214 #endif
215 	}
216         if (boothowto & RB_KDB)
217                 Debugger();
218 #endif
219 }
220 
221 /*
222  * cpu_startup: allocate memory for variable-sized tables,
223  * initialize cpu, and do autoconfiguration.
224  */
225 void
226 cpu_startup()
227 {
228 	register unsigned i;
229 	caddr_t v;
230 	int base, residual;
231 	char pbuf[9];
232 #ifdef DEBUG
233 	extern int pmapdebug;
234 	int opmapdebug = pmapdebug;
235 #endif
236 	paddr_t minaddr, maxaddr;
237 	paddr_t size = 0;
238 
239 	/*
240 	 * Initialize error message buffer (at end of core).
241 	 */
242 #ifdef DEBUG
243 	pmapdebug = 0;
244 #endif
245 	/*
246 	 * pmap_bootstrap has positioned this at the end of kernel
247 	 * memory segment - map and initialize it now.
248 	 */
249 
250 	for (i = 0; i < btoc(MSGBUFSIZE); i++)
251 		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
252 		    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
253 		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
254 	pmap_update(pmap_kernel());
255 	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
256 
257 	/*
258 	 * Good {morning,afternoon,evening,night}.
259 	 */
260 	printf(version);
261 	identifycpu();
262 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
263 	printf("total memory = %s\n", pbuf);
264 
265 	/*
266 	 * Find out how much space we need, allocate it,
267 	 * and then give everything true virtual addresses.
268 	 */
269 	size = (vm_size_t)allocsys(NULL, NULL);
270 	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
271 		panic("startup: no room for tables");
272 	if (allocsys(v, NULL) - v != size)
273 		panic("startup: table size inconsistency");
274 
275 	/*
276 	 * Now allocate buffers proper.  They are different from the above
277 	 * in that they usually occupy more virtual memory than physical.
278 	 */
279 	size = MAXBSIZE * nbuf;
280 	if (uvm_map(kernel_map, (vm_offset_t *)&buffers, round_page(size),
281 	    NULL, UVM_UNKNOWN_OFFSET, 0,
282 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
283 	    UVM_ADV_NORMAL, 0)) != 0)
284 		panic("startup: cannot allocate VM for buffers");
285 	minaddr = (vm_offset_t) buffers;
286 	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
287 		/* don't want to alloc more physical mem than needed */
288 		bufpages = btoc(MAXBSIZE) * nbuf;
289 	}
290 	base = bufpages / nbuf;
291 	residual = bufpages % nbuf;
292 	for (i = 0; i < nbuf; i++) {
293 		vm_size_t curbufsize;
294 		vm_offset_t curbuf;
295 		struct vm_page *pg;
296 
297 		/*
298 		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
299 		 * that MAXBSIZE space, we allocate and map (base+1) pages
300 		 * for the first "residual" buffers, and then we allocate
301 		 * "base" pages for the rest.
302 		 */
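		/*
		 * For example (purely illustrative numbers): with
		 * bufpages = 100 and nbuf = 16, base = 6 and residual = 4,
		 * so the first 4 buffers get 7 pages each and the remaining
		 * 12 get 6 pages each (4*7 + 12*6 = 100 pages total).
		 */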
303 		curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
304 		curbufsize = NBPG * ((i < residual) ? (base+1) : base);
305 
306 		while (curbufsize) {
307 			pg = uvm_pagealloc(NULL, 0, NULL, 0);
308 			if (pg == NULL)
309 				panic("cpu_startup: not enough memory for "
310 				    "buffer cache");
311 			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
312 				       VM_PROT_READ|VM_PROT_WRITE);
313 			curbuf += PAGE_SIZE;
314 			curbufsize -= PAGE_SIZE;
315 		}
316 	}
317 	pmap_update(pmap_kernel());
318 
319 	/*
320 	 * Allocate a submap for exec arguments.  This map effectively
321 	 * limits the number of processes exec'ing at any time.
322 	 */
323 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
324 				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
325 
326 	/*
327 	 * Allocate a submap for physio
328 	 */
329 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
330 				   VM_PHYS_SIZE, 0, FALSE, NULL);
331 
332 	/*
333 	 * Finally, allocate mbuf cluster submap.
334 	 */
335 	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
336 				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
337 				 FALSE, NULL);
338 
339 #ifdef DEBUG
340 	pmapdebug = opmapdebug;
341 #endif
342 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
343 	printf("avail memory = %s\n", pbuf);
344 	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
345 	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);
346 
347 	/*
348 	 * display memory configuration passed from loadbsd
349 	 */
350 	if (memlist->m_nseg > 0 && memlist->m_nseg < 16)
351 		for (i = 0; i < memlist->m_nseg; i++)
352 			printf("memory segment %d at %08x size %08x\n", i,
353 			    memlist->m_seg[i].ms_start,
354 			    memlist->m_seg[i].ms_size);
355 
356 #ifdef DEBUG_KERNEL_START
357 	printf("calling initcpu...\n");
358 #endif
359 	/*
360 	 * Set up CPU-specific registers, cache, etc.
361 	 */
362 	initcpu();
363 
364 #ifdef DEBUG_KERNEL_START
365 	printf("survived initcpu...\n");
366 #endif
367 
368 	/*
369 	 * Set up buffers, so they can be used to read disk labels.
370 	 */
371 	bufinit();
372 
373 #ifdef DEBUG_KERNEL_START
374 	printf("survived bufinit...\n");
375 #endif
376 }
377 
378 /*
379  * Set registers on exec.
380  */
381 void
382 setregs(p, pack, stack)
383 	register struct proc *p;
384 	struct exec_package *pack;
385 	u_long stack;
386 {
387 	struct frame *frame = (struct frame *)p->p_md.md_regs;
388 
389 	frame->f_sr = PSL_USERSET;
390 	frame->f_pc = pack->ep_entry & ~1;
391 	frame->f_regs[D0] = 0;
392 	frame->f_regs[D1] = 0;
393 	frame->f_regs[D2] = 0;
394 	frame->f_regs[D3] = 0;
395 	frame->f_regs[D4] = 0;
396 	frame->f_regs[D5] = 0;
397 	frame->f_regs[D6] = 0;
398 	frame->f_regs[D7] = 0;
399 	frame->f_regs[A0] = 0;
400 	frame->f_regs[A1] = 0;
401 	frame->f_regs[A2] = (int)p->p_psstr;
402 	frame->f_regs[A3] = 0;
403 	frame->f_regs[A4] = 0;
404 	frame->f_regs[A5] = 0;
405 	frame->f_regs[A6] = 0;
406 	frame->f_regs[SP] = stack;
407 
408 	/* restore a null state frame */
409 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
410 #ifdef FPU_EMULATE
411 	if (!fputype)
412 		bzero(&p->p_addr->u_pcb.pcb_fpregs, sizeof(struct fpframe));
413 	else
414 #endif
415 		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
416 }
417 
418 /*
419  * Info for CTL_HW
420  */
421 char cpu_model[120];
422 
423 #if defined(M68060)
424 int m68060_pcr_init = 0x21;	/* make this patchable */
425 #endif
426 
427 
428 void
429 identifycpu()
430 {
431 	/* there's a lot of XXX in here... */
432 	char *mach, *mmu, *fpu;
433 
434 #ifdef M68060
435 	char cpubuf[16];
436 	u_int32_t pcr;
437 #endif
438 
439 #ifdef DRACO
440 	char machbuf[16];
441 
442 	if (is_draco()) {
443 		sprintf(machbuf, "DraCo rev.%d", is_draco());
444 		mach = machbuf;
445 	} else
446 #endif
447 	if (is_a4000())
448 		mach = "Amiga 4000";
449 	else if (is_a3000())
450 		mach = "Amiga 3000";
451 	else if (is_a1200())
452 		mach = "Amiga 1200";
453 	else
454 		mach = "Amiga 500/2000";
455 
456 	fpu = NULL;
457 #ifdef M68060
458 	if (machineid & AMIGA_68060) {
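		/*
		 * Read the 68060 Processor Configuration Register;
		 * .word 0x4e7a,0x0808 is the encoding of "movec %pcr,%d0".
		 */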
459 		asm(".word 0x4e7a,0x0808; movl %%d0,%0" : "=d"(pcr) : : "d0");
460 		sprintf(cpubuf, "68%s060 rev.%d",
461 		    pcr & 0x10000 ? "LC/EC" : "", (pcr>>8)&0xff);
462 		cpu_type = cpubuf;
463 		mmu = "/MMU";
464 		if (pcr & 2) {
465 			fpu = "/FPU disabled";
466 			fputype = FPU_NONE;
467 		} else if (m68060_pcr_init & 2){
468 			fpu = "/FPU will be disabled";
469 			fputype = FPU_NONE;
470 		} else  if (machineid & AMIGA_FPU40) {
471 			fpu = "/FPU";
472 			fputype = FPU_68040; /* XXX */
473 		}
474 	} else
475 #endif
476 	if (machineid & AMIGA_68040) {
477 		cpu_type = "m68040";
478 		mmu = "/MMU";
479 		fpu = "/FPU";
480 		fputype = FPU_68040; /* XXX */
481 	} else if (machineid & AMIGA_68030) {
482 		cpu_type = "m68030";	/* XXX */
483 		mmu = "/MMU";
484 	} else {
485 		cpu_type = "m68020";
486 		mmu = " m68851 MMU";
487 	}
488 	if (fpu == NULL) {
489 		if (machineid & AMIGA_68882) {
490 			fpu = " m68882 FPU";
491 			fputype = FPU_68882;
492 		} else if (machineid & AMIGA_68881) {
493 			fpu = " m68881 FPU";
494 			fputype = FPU_68881;
495 		} else {
496 			fpu = " no FPU";
497 			fputype = FPU_NONE;
498 		}
499 	}
500 	sprintf(cpu_model, "%s (%s CPU%s%s)", mach, cpu_type, mmu, fpu);
501 	printf("%s\n", cpu_model);
502 }
503 
504 /*
505  * machine dependent system variables.
506  */
507 int
508 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
509 	int *name;
510 	u_int namelen;
511 	void *oldp;
512 	size_t *oldlenp;
513 	void *newp;
514 	size_t newlen;
515 	struct proc *p;
516 {
517 	dev_t consdev;
518 
519 	/* all sysctl names at this level are terminal */
520 	if (namelen != 1)
521 		return(ENOTDIR);               /* overloaded */
522 
523 	switch (name[0]) {
524 	case CPU_CONSDEV:
525 		if (cn_tab != NULL)
526 			consdev = cn_tab->cn_dev;
527 		else
528 			consdev = NODEV;
529 		return(sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
530 		    sizeof(consdev)));
531 	default:
532 		return(EOPNOTSUPP);
533 	}
534 	/* NOTREACHED */
535 }
536 
537 static int waittime = -1;
538 
539 void
540 bootsync(void)
541 {
542 	if (waittime < 0) {
543 		waittime = 0;
544 		vfs_shutdown();
545 		/*
546 		 * If we've been adjusting the clock, the todr
547 		 * will be out of synch; adjust it now.
548 		 */
549 		resettodr();
550 	}
551 }
552 
553 
554 void
555 cpu_reboot(howto, bootstr)
556 	register int howto;
557 	char *bootstr;
558 {
559 	/* take a snapshot before clobbering any registers */
560 	if (curproc)
561 		savectx(&curproc->p_addr->u_pcb);
562 
563 	boothowto = howto;
564 	if ((howto & RB_NOSYNC) == 0)
565 		bootsync();
566 
567 	/* Disable interrupts. */
568 	spl7();
569 
570 	/* If rebooting and a dump is requested do it. */
571 	if (howto & RB_DUMP)
572 		dumpsys();
573 
574 	if (howto & RB_HALT) {
575 		printf("\n");
576 		printf("The operating system has halted.\n");
577 		printf("Please press any key to reboot.\n\n");
578 		cngetc();
579 	}
580 
581 	printf("rebooting...\n");
582 	DELAY(1000000);
583 	doboot();
584 	/*NOTREACHED*/
585 }
586 
587 
588 u_int32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
589 int	dumpsize = 0;		/* also for savecore */
590 long	dumplo = 0;
591 cpu_kcore_hdr_t cpu_kcore_hdr;
592 
593 void
594 cpu_dumpconf()
595 {
596 	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
597 	struct m68k_kcore_hdr *m = &h->un._m68k;
598 	int nblks;
599 	int i;
600 	extern u_int Sysseg_pa;
601 	extern int end[];
602 
603 	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));
604 
605 	/*
606 	 * Initialize the `dispatcher' portion of the header.
607 	 */
608 	strcpy(h->name, machine);
609 	h->page_size = NBPG;
610 	h->kernbase = KERNBASE;
611 
612 	/*
613 	 * Fill in information about our MMU configuration.
614 	 */
615 	m->mmutype	= mmutype;
616 	m->sg_v		= SG_V;
617 	m->sg_frame	= SG_FRAME;
618 	m->sg_ishift	= SG_ISHIFT;
619 	m->sg_pmask	= SG_PMASK;
620 	m->sg40_shift1	= SG4_SHIFT1;
621 	m->sg40_mask2	= SG4_MASK2;
622 	m->sg40_shift2	= SG4_SHIFT2;
623 	m->sg40_mask3	= SG4_MASK3;
624 	m->sg40_shift3	= SG4_SHIFT3;
625 	m->sg40_addr1	= SG4_ADDR1;
626 	m->sg40_addr2	= SG4_ADDR2;
627 	m->pg_v		= PG_V;
628 	m->pg_frame	= PG_FRAME;
629 
630 	/*
631 	 * Initialize the pointer to the kernel segment table.
632 	 */
633 	m->sysseg_pa = Sysseg_pa;
634 
635 	/*
636 	 * Initialize relocation value such that:
637 	 *
638 	 *	pa = (va - KERNBASE) + reloc
639 	 */
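	/*
	 * Illustrative only: if KERNBASE were 0 and lowram were 0x07c00000,
	 * a kernel va of 0x1000 would correspond to
	 * pa = (0x1000 - 0) + 0x07c00000 = 0x07c01000.
	 */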
640 	m->reloc = lowram;
641 
642 	/*
643 	 * Define the end of the relocatable range.
644 	 */
645 	m->relocend = (u_int32_t)&end;
646 
647 	/* XXX new corefile format, single segment + chipmem */
648 	dumpsize = physmem;
649 	m->ram_segs[0].start = lowram;
650 	m->ram_segs[0].size  = ctob(physmem);
651 	for (i = 0; i < memlist->m_nseg; i++) {
652 		if ((memlist->m_seg[i].ms_attrib & MEMF_CHIP) == 0)
653 			continue;
654 		dumpsize += btoc(memlist->m_seg[i].ms_size);
655 		m->ram_segs[1].start = 0;
656 		m->ram_segs[1].size  = memlist->m_seg[i].ms_size;
657 		break;
658 	}
659 	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
660 		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
661 		if (dumpsize > btoc(dbtob(nblks - dumplo)))
662 			dumpsize = btoc(dbtob(nblks - dumplo));
663 		else if (dumplo == 0)
664 			dumplo = nblks - btodb(ctob(dumpsize));
665 	}
666 	--dumplo;	/* XXX assume header fits in one block */
667 	/*
668 	 * Don't dump on the first NBPG (why NBPG?)
669 	 * in case the dump device includes a disk label.
670 	 */
671 	if (dumplo < btodb(NBPG))
672 		dumplo = btodb(NBPG);
673 }
674 
675 /*
676  * Doadump comes here after turning off memory management and
677  * getting on the dump stack, either when called above, or by
678  * the auto-restart code.
679  */
680 #define BYTES_PER_DUMP MAXPHYS	/* Must be a multiple of pagesize XXX small */
681 static vm_offset_t dumpspace;
682 
683 vm_offset_t
684 reserve_dumppages(p)
685 	vm_offset_t p;
686 {
687 	dumpspace = p;
688 	return (p + BYTES_PER_DUMP);
689 }
690 
691 void
692 dumpsys()
693 {
694 	unsigned bytes, i, n, seg;
695 	int     maddr, psize;
696 	daddr_t blkno;
697 	int     (*dump)(dev_t, daddr_t, caddr_t, size_t);
698 	int     error = 0;
699 	kcore_seg_t *kseg_p;
700 	cpu_kcore_hdr_t *chdr_p;
701 	char	dump_hdr[dbtob(1)];	/* XXX assume hdr fits in 1 block */
702 
703 	if (dumpdev == NODEV)
704 		return;
705 	/*
706 	 * For dumps during autoconfiguration:
707 	 * if the dump device has not been configured yet, do it now.
708 	 */
709 	if (dumpsize == 0)
710 		cpu_dumpconf();
711 	if (dumplo <= 0) {
712 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
713 		    minor(dumpdev));
714 		return;
715 	}
716 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
717 	    minor(dumpdev), dumplo);
718 
719 	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
720 	printf("dump ");
721 	if (psize == -1) {
722 		printf("area unavailable.\n");
723 		return;
724 	}
725 	kseg_p = (kcore_seg_t *)dump_hdr;
726 	chdr_p = (cpu_kcore_hdr_t *)&dump_hdr[ALIGN(sizeof(*kseg_p))];
727 	bzero(dump_hdr, sizeof(dump_hdr));
728 
729 	/*
730 	 * Generate a segment header
731 	 */
732 	CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
733 	kseg_p->c_size = dbtob(1) - ALIGN(sizeof(*kseg_p));
734 
735 	/*
736 	 * Add the md header
737 	 */
738 
739 	*chdr_p = cpu_kcore_hdr;
740 
741 	bytes = ctob(dumpsize);
742 	maddr = cpu_kcore_hdr.un._m68k.ram_segs[0].start;
743 	seg = 0;
744 	blkno = dumplo;
745 	dump = bdevsw[major(dumpdev)].d_dump;
746 	error = (*dump) (dumpdev, blkno++, (caddr_t)dump_hdr, dbtob(1));
747 	for (i = 0; i < bytes && error == 0; i += n) {
748 		/* Print out how many MBs we have to go. */
749 		n = bytes - i;
750 		if (n && (n % (1024 * 1024)) == 0)
751 			printf("%d ", n / (1024 * 1024));
752 
753 		/* Limit size for next transfer. */
754 		if (n > BYTES_PER_DUMP)
755 			n = BYTES_PER_DUMP;
756 
757 		if (maddr == 0) {	/* XXX kvtop chokes on this */
758 			maddr += NBPG;
759 			n -= NBPG;
760 			i += NBPG;
761 			++blkno;	/* XXX skip physical page 0 */
762 		}
763 		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
764 		error = (*dump) (dumpdev, blkno, (caddr_t) dumpspace, n);
765 		if (error)
766 			break;
767 		maddr += n;
768 		blkno += btodb(n);	/* XXX? */
769 		if (maddr >= (cpu_kcore_hdr.un._m68k.ram_segs[seg].start +
770 		    cpu_kcore_hdr.un._m68k.ram_segs[seg].size)) {
771 			++seg;
772 			maddr = cpu_kcore_hdr.un._m68k.ram_segs[seg].start;
773 			if (cpu_kcore_hdr.un._m68k.ram_segs[seg].size == 0)
774 				break;
775 		}
776 	}
777 
778 	switch (error) {
779 
780 	case ENXIO:
781 		printf("device bad\n");
782 		break;
783 
784 	case EFAULT:
785 		printf("device not ready\n");
786 		break;
787 
788 	case EINVAL:
789 		printf("area improper\n");
790 		break;
791 
792 	case EIO:
793 		printf("i/o error\n");
794 		break;
795 
796 	default:
797 		printf("succeeded\n");
798 		break;
799 	}
800 	printf("\n\n");
801 	delay(5000000);		/* 5 seconds */
802 }
803 
804 /*
805  * Return the best possible estimate of the time in the timeval
806  * to which tvp points.  We do this by returning the current time
807  * plus the amount of time since the last clock interrupt (clock.c:clkread).
808  *
809  * Check that this time is no less than any previously-reported time,
810  * which could happen around the time of a clock adjustment.  Just for fun,
811  * we guarantee that the time will be greater than the value obtained by a
812  * previous call.
813  */
814 void
815 microtime(tvp)
816 	register struct timeval *tvp;
817 {
818 	int s = spl7();
819 	static struct timeval lasttime;
820 
821 	*tvp = time;
822 	tvp->tv_usec += clkread();
823 	while (tvp->tv_usec >= 1000000) {
824 		tvp->tv_sec++;
825 		tvp->tv_usec -= 1000000;
826 	}
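	/*
	 * If the result is not past the last value we handed out, bump it
	 * one microsecond beyond lasttime (the assignment in the condition
	 * does this) and carry into tv_sec on microsecond overflow.
	 */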
827 	if (tvp->tv_sec == lasttime.tv_sec &&
828 	    tvp->tv_usec <= lasttime.tv_usec &&
829 	    (tvp->tv_usec = lasttime.tv_usec + 1) >= 1000000) {
830 		tvp->tv_sec++;
831 		tvp->tv_usec -= 1000000;
832 	}
833 	lasttime = *tvp;
834 	splx(s);
835 }
836 
837 void
838 initcpu()
839 {
840 	typedef void trapfun(void);
841 
842 	/* XXX should init '40 vecs here, too */
843 #if defined(M68060) || defined(M68040) || defined(DRACO) || defined(FPU_EMULATE)
844 	extern trapfun *vectab[256];
845 #endif
846 
847 #if defined(M68060) || defined(M68040)
848 	extern trapfun addrerr4060;
849 #endif
850 
851 #ifdef M68060
852 	extern trapfun buserr60;
853 #if defined(M060SP)
854 	/*extern u_int8_t I_CALL_TOP[];*/
855 	extern trapfun intemu60, fpiemu60, fpdemu60, fpeaemu60;
856 	extern u_int8_t FP_CALL_TOP[];
857 #else
858 	extern trapfun illinst;
859 #endif
860 	extern trapfun fpfault;
861 #endif
862 
863 #ifdef M68040
864 	extern trapfun buserr40;
865 #endif
866 
867 #ifdef DRACO
868 	extern trapfun DraCoIntr, DraCoLev1intr, DraCoLev2intr;
869 	u_char dracorev;
870 #endif
871 
872 #ifdef FPU_EMULATE
873 	extern trapfun fpemuli;
874 #endif
875 
876 #ifdef M68060
877 	if (machineid & AMIGA_68060) {
878 		if (machineid & AMIGA_FPU40 && m68060_pcr_init & 2) {
879 			/*
880 			 * in this case, we're about to switch the FPU off;
881 			 * do a FNOP to avoid stray FP traps later
882 			 */
883 			__asm("fnop");
884 			/* ... and mark FPU as absent for identifyfpu() */
885 			machineid &= ~(AMIGA_FPU40|AMIGA_68882|AMIGA_68881);
886 		}
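		/*
		 * Load m68060_pcr_init into the PCR:
		 * .word 0x4e7b,0x0808 encodes "movec %d0,%pcr".
		 */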
887 		asm volatile ("movl %0,%%d0; .word 0x4e7b,0x0808" : :
888 			"d"(m68060_pcr_init):"d0" );
889 
890 		/* bus/addrerr vectors */
891 		vectab[2] = buserr60;
892 		vectab[3] = addrerr4060;
893 #if defined(M060SP)
894 
895 		/* integer support */
896 		vectab[61] = intemu60/*(trapfun *)&I_CALL_TOP[128 + 0x00]*/;
897 
898 		/* floating point support */
899 		/*
900 		 * XXX maybe we really should check the stack frame
901 		 * format at run time here:
902 		 */
903 		vectab[11] = fpiemu60/*(trapfun *)&FP_CALL_TOP[128 + 0x30]*/;
904 
905 		vectab[55] = fpdemu60/*(trapfun *)&FP_CALL_TOP[128 + 0x38]*/;
906 		vectab[60] = fpeaemu60/*(trapfun *)&FP_CALL_TOP[128 + 0x40]*/;
907 
908 		vectab[54] = (trapfun *)&FP_CALL_TOP[128 + 0x00];
909 		vectab[52] = (trapfun *)&FP_CALL_TOP[128 + 0x08];
910 		vectab[53] = (trapfun *)&FP_CALL_TOP[128 + 0x10];
911 		vectab[51] = (trapfun *)&FP_CALL_TOP[128 + 0x18];
912 		vectab[50] = (trapfun *)&FP_CALL_TOP[128 + 0x20];
913 		vectab[49] = (trapfun *)&FP_CALL_TOP[128 + 0x28];
914 
915 #else
916 		vectab[61] = illinst;
917 #endif
918 		vectab[48] = fpfault;
919 	}
920 #endif
921 
922 /*
923  * Vector initialization for special motherboards
924  */
925 #ifdef M68040
926 #ifdef M68060
927 	else
928 #endif
929 	if (machineid & AMIGA_68040) {
930 		/* addrerr vector */
931 		vectab[2] = buserr40;
932 		vectab[3] = addrerr4060;
933 	}
934 #endif
935 
936 #ifdef FPU_EMULATE
937 	if (!(machineid & (AMIGA_68881|AMIGA_68882|AMIGA_FPU40))) {
938 		vectab[11] = fpemuli;
939 		printf("FPU software emulation initialized.\n");
940 	}
941 #endif
942 
943 /*
944  * Vector initialization for special motherboards
945  */
946 
947 #ifdef DRACO
948 	dracorev = is_draco();
949 	if (dracorev) {
950 		if (dracorev >= 4) {
951 			vectab[24+1] = DraCoLev1intr;
952 			vectab[24+2] = DraCoIntr;
953 		} else {
954 			vectab[24+1] = DraCoIntr;
955 			vectab[24+2] = DraCoLev2intr;
956 		}
957 		vectab[24+3] = DraCoIntr;
958 		vectab[24+4] = DraCoIntr;
959 		vectab[24+5] = DraCoIntr;
960 		vectab[24+6] = DraCoIntr;
961 	}
962 #endif
963 }
964 
965 void
966 straytrap(pc, evec)
967 	int pc;
968 	u_short evec;
969 {
970 	printf("unexpected trap format %x (vector offset %x) from %x\n",
971 	       evec>>12, evec & 0xFFF, pc);
972 /*XXX*/	panic("straytrap");
973 }
974 
975 int	*nofault;
976 
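/*
 * Probe addr for responding hardware: nofault is set up so that the bus
 * error handler is expected to longjmp() back here instead of panicking,
 * and we return non-zero if the (word-sized) access faulted.  badbaddr()
 * below does the same with a byte-sized access.
 */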
977 int
978 badaddr(addr)
979 	register caddr_t addr;
980 {
981 	register int i;
982 	label_t	faultbuf;
983 
984 #ifdef lint
985 	i = *addr; if (i) return(0);
986 #endif
987 	nofault = (int *) &faultbuf;
988 	if (setjmp((label_t *)nofault)) {
989 		nofault = (int *) 0;
990 		return(1);
991 	}
992 	i = *(volatile short *)addr;
993 	nofault = (int *) 0;
994 	return(0);
995 }
996 
997 int
998 badbaddr(addr)
999 	register caddr_t addr;
1000 {
1001 	register int i;
1002 	label_t	faultbuf;
1003 
1004 #ifdef lint
1005 	i = *addr; if (i) return(0);
1006 #endif
1007 	nofault = (int *) &faultbuf;
1008 	if (setjmp((label_t *)nofault)) {
1009 		nofault = (int *) 0;
1010 		return(1);
1011 	}
1012 	i = *(volatile char *)addr;
1013 	nofault = (int *) 0;
1014 	return(0);
1015 }
1016 
1017 static void
1018 netintr()
1019 {
1020 
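/*
 * <net/netisr_dispatch.h> expands into one DONETISR(bit, fn) entry per
 * configured protocol, so every pending netisr bit is cleared and its
 * handler run once per software network interrupt.
 */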
1021 #define DONETISR(bit, fn) do {		\
1022 	if (netisr & (1 << bit)) {	\
1023 		netisr &= ~(1 << bit);	\
1024 		fn();			\
1025 	}				\
1026 } while (0)
1027 
1028 #include <net/netisr_dispatch.h>
1029 
1030 #undef DONETISR
1031 }
1032 
1033 
1034 /*
1035  * This is a handy package for having function calls executed
1036  * asynchronously at very low interrupt priority.
1037  * An example use is keyboard repeat, where the repeat
1038  * handler running at splclock() triggers such a (hardware
1039  * aided) software interrupt.
1040  * Note: the installed functions are currently called in
1041  * LIFO order; we might want to change this to FIFO
1042  * later.
1043  */
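/*
 * Typical (hypothetical) use from a hardware interrupt handler, assuming
 * a driver soft handler "void foo_softint(void *sc, void *dummy)":
 *
 *	add_sicallback(foo_softint, sc, NULL);
 *
 * foo_softint() then runs later at software interrupt priority, from
 * call_sicallbacks() below; foo_softint and sc are made-up names here.
 */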
1044 struct si_callback {
1045 	struct si_callback *next;
1046 	void (*function)(void *rock1, void *rock2);
1047 	void *rock1, *rock2;
1048 };
1049 static struct si_callback *si_callbacks;
1050 static struct si_callback *si_free;
1051 #ifdef DIAGNOSTIC
1052 static int ncb;		/* number of callback blocks allocated */
1053 static int ncbd;	/* number of callback blocks dynamically allocated */
1054 #endif
1055 
1056 /*
1057  * These are __GENERIC_SOFT_INTERRUPT wrappers; they will be replaced
1058  * by the real thing once all drivers are converted.
1059  *
1060  * to help performance for converted drivers, the YYY_sicallback() function
1061  * family can be implemented in terms of softintr_XXX() as an intermediate
1062  * measure.
1063  */
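/*
 * Sketch of the wrapper interface (hypothetical driver code); note that
 * the ipl argument is currently ignored by softintr_establish():
 *
 *	cookie = softintr_establish(IPL_SOFTNET, foo_softint, sc);
 *	...
 *	softintr_schedule(cookie);	(from the hardware handler)
 *
 * which ends up calling foo_softint(sc) through add_sicallback().
 */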
1064 
1065 static void
1066 _softintr_callit(rock1, rock2)
1067 	void *rock1, *rock2;
1068 {
1069 	(*(void (*)(void *))rock1)(rock2);
1070 }
1071 
1072 void *
1073 softintr_establish(ipl, func, arg)
1074 	int ipl;
1075 	void func(void *);
1076 	void *arg;
1077 {
1078 	struct si_callback *si;
1079 
1080 	(void)ipl;
1081 
1082 	si = (struct si_callback *)malloc(sizeof(*si), M_TEMP, M_NOWAIT);
1083 	if (si == NULL)
1084 		return (si);
1085 
1086 	si->function = (void *)0;
1087 	si->rock1 = (void *)func;
1088 	si->rock2 = arg;
1089 
1090 	alloc_sicallback();
1091 	return ((void *)si);
1092 }
1093 
1094 void
1095 softintr_disestablish(hook)
1096 	void *hook;
1097 {
1098 	/*
1099 	 * XXX currently, there is a memory leak here; we can't free the
1100 	 * sicallback structure.
1101 	 * This will be automatically repaired once we rewrite the soft
1102 	 * interrupt functions.
1103 	 */
1104 
1105 	free(hook, M_TEMP);
1106 }
1107 
1108 void
1109 alloc_sicallback()
1110 {
1111 	struct si_callback *si;
1112 	int s;
1113 
1114 	si = (struct si_callback *)malloc(sizeof(*si), M_TEMP, M_NOWAIT);
1115 	if (si == NULL)
1116 		return;
1117 	s = splhigh();
1118 	si->next = si_free;
1119 	si_free = si;
1120 	splx(s);
1121 #ifdef DIAGNOSTIC
1122 	++ncb;
1123 #endif
1124 }
1125 
1126 void
1127 softintr_schedule(vsi)
1128 	void *vsi;
1129 {
1130 	struct si_callback *si;
1131 	si = vsi;
1132 
1133 	add_sicallback(_softintr_callit, si->rock1, si->rock2);
1134 }
1135 
1136 void
1137 add_sicallback (function, rock1, rock2)
1138 	void (*function)(void *rock1, void *rock2);
1139 	void *rock1, *rock2;
1140 {
1141 	struct si_callback *si;
1142 	int s;
1143 
1144 	/*
1145 	 * this function may be called from high-priority interrupt handlers.
1146 	 * We may NOT block for memory allocation in here!
1147 	 */
1148 	s = splhigh();
1149 	si = si_free;
1150 	if (si != NULL)
1151 		si_free = si->next;
1152 	splx(s);
1153 
1154 	if (si == NULL) {
1155 		si = (struct si_callback *)malloc(sizeof(*si), M_TEMP, M_NOWAIT);
1156 #ifdef DIAGNOSTIC
1157 		if (si)
1158 			++ncbd;		/* count # dynamically allocated */
1159 #endif
1160 
1161 		if (!si)
1162 			return;
1163 	}
1164 
1165 	si->function = function;
1166 	si->rock1 = rock1;
1167 	si->rock2 = rock2;
1168 
1169 	s = splhigh();
1170 	si->next = si_callbacks;
1171 	si_callbacks = si;
1172 	splx(s);
1173 
1174 	/*
1175 	 * Cause a software interrupt (spl1). This interrupt might
1176 	 * happen immediately, or after returning to a safe enough level.
1177 	 */
1178 	setsoftcback();
1179 }
1180 
1181 
1182 void
1183 rem_sicallback(function)
1184 	void (*function)(void *rock1, void *rock2);
1185 {
1186 	struct si_callback *si, *psi, *nsi;
1187 	int s;
1188 
1189 	s = splhigh();
1190 	for (psi = 0, si = si_callbacks; si; ) {
1191 		nsi = si->next;
1192 
1193 		if (si->function != function)
1194 			psi = si;
1195 		else {
1196 /*			free(si, M_TEMP); */
1197 			si->next = si_free;
1198 			si_free = si;
1199 			if (psi)
1200 				psi->next = nsi;
1201 			else
1202 				si_callbacks = nsi;
1203 		}
1204 		si = nsi;
1205 	}
1206 	splx(s);
1207 }
1208 
1209 /* purge the list */
1210 static void
1211 call_sicallbacks()
1212 {
1213 	struct si_callback *si;
1214 	int s;
1215 	void *rock1, *rock2;
1216 	void (*function)(void *, void *);
1217 
1218 	do {
1219 		s = splhigh ();
1220 		if ((si = si_callbacks) != 0)
1221 			si_callbacks = si->next;
1222 		splx(s);
1223 
1224 		if (si) {
1225 			function = si->function;
1226 			rock1 = si->rock1;
1227 			rock2 = si->rock2;
1228 /*			si->function(si->rock1, si->rock2); */
1229 /*			free(si, M_TEMP); */
1230 			s = splhigh ();
1231 			si->next = si_free;
1232 			si_free = si;
1233 			splx(s);
1234 			function (rock1, rock2);
1235 		}
1236 	} while (si);
1237 #ifdef DIAGNOSTIC
1238 	if (ncbd) {
1239 		ncb += ncbd;
1240 		printf("call_sicallback: %d more dynamic structures %d total\n",
1241 		    ncbd, ncb);
1242 		ncbd = 0;
1243 	}
1244 #endif
1245 }
1246 
1247 struct isr *isr_ports;
1248 #ifdef DRACO
1249 struct isr *isr_slot3;
1250 struct isr *isr_supio;
1251 #endif
1252 struct isr *isr_exter;
1253 
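/*
 * Sketch of how a driver registers a handler (hypothetical names):
 *
 *	sc->sc_isr.isr_intr = foo_intr;
 *	sc->sc_isr.isr_arg  = sc;
 *	sc->sc_isr.isr_ipl  = 2;	(a PORTS-level interrupt)
 *	add_isr(&sc->sc_isr);
 *
 * foo_intr() should return non-zero once it has handled the interrupt,
 * so that intrhand() stops walking the chain.
 */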
1254 void
1255 add_isr(isr)
1256 	struct isr *isr;
1257 {
1258 	struct isr **p, *q;
1259 
1260 #ifdef DRACO
1261 	switch (isr->isr_ipl) {
1262 	case 2:
1263 		p = &isr_ports;
1264 		break;
1265 	case 3:
1266 		p = &isr_slot3;
1267 		break;
1268 	case 5:
1269 		p = &isr_supio;
1270 		break;
1271 	default:	/* was case 6:; make gcc -Wall quiet */
1272 		p = &isr_exter;
1273 		break;
1274 	}
1275 #else
1276 	p = isr->isr_ipl == 2 ? &isr_ports : &isr_exter;
1277 #endif
1278 	while ((q = *p) != NULL)
1279 		p = &q->isr_forw;
1280 	isr->isr_forw = NULL;
1281 	*p = isr;
1282 	/* enable interrupt */
1283 #ifdef DRACO
1284 	if (is_draco())
1285 		switch(isr->isr_ipl) {
1286 			case 6:
1287 				single_inst_bset_b(*draco_intena, DRIRQ_INT6);
1288 				break;
1289 			case 2:
1290 				single_inst_bset_b(*draco_intena, DRIRQ_INT2);
1291 				break;
1292 			default:
1293 				break;
1294 		}
1295 	else
1296 #endif
1297 		custom.intena = isr->isr_ipl == 2 ?
1298 		    INTF_SETCLR | INTF_PORTS :
1299 		    INTF_SETCLR | INTF_EXTER;
1300 }
1301 
1302 void
1303 remove_isr(isr)
1304 	struct isr *isr;
1305 {
1306 	struct isr **p, *q;
1307 
1308 #ifdef DRACO
1309 	switch (isr->isr_ipl) {
1310 	case 2:
1311 		p = &isr_ports;
1312 		break;
1313 	case 3:
1314 		p = &isr_slot3;
1315 		break;
1316 	case 5:
1317 		p = &isr_supio;
1318 		break;
1319 	default:	/* XXX to make gcc -Wall quiet, was 6: */
1320 		p = &isr_exter;
1321 		break;
1322 	}
1323 #else
1324 	p = isr->isr_ipl == 6 ? &isr_exter : &isr_ports;
1325 #endif
1326 
1327 	while ((q = *p) != NULL && q != isr)
1328 		p = &q->isr_forw;
1329 	if (q)
1330 		*p = q->isr_forw;
1331 	else
1332 		panic("remove_isr: handler not registered");
1333 	/* disable interrupt if no more handlers */
1334 #ifdef DRACO
1335 	switch (isr->isr_ipl) {
1336 	case 2:
1337 		p = &isr_ports;
1338 		break;
1339 	case 3:
1340 		p = &isr_slot3;
1341 		break;
1342 	case 5:
1343 		p = &isr_supio;
1344 		break;
1345 	case 6:
1346 		p = &isr_exter;
1347 		break;
1348 	}
1349 #else
1350 	p = isr->isr_ipl == 6 ? &isr_exter : &isr_ports;
1351 #endif
1352 	if (*p == NULL) {
1353 #ifdef DRACO
1354 		if (is_draco()) {
1355 			switch(isr->isr_ipl) {
1356 				case 2:
1357 					single_inst_bclr_b(*draco_intena,
1358 					    DRIRQ_INT2);
1359 					break;
1360 				case 6:
1361 					single_inst_bclr_b(*draco_intena,
1362 					    DRIRQ_INT6);
1363 					break;
1364 				default:
1365 					break;
1366 			}
1367 		} else
1368 #endif
1369 			custom.intena = isr->isr_ipl == 6 ?
1370 			    INTF_EXTER : INTF_PORTS;
1371 	}
1372 }
1373 
1374 void
1375 intrhand(sr)
1376 	int sr;
1377 {
1378 	register unsigned int ipl;
1379 	register unsigned short ireq;
1380 	register struct isr **p, *q;
1381 
1382 	ipl = (sr >> 8) & 7;
1383 #ifdef REALLYDEBUG
1384 	printf("intrhand: got int. %d\n", ipl);
1385 #endif
1386 #ifdef DRACO
1387 	if (is_draco())
1388 		ireq = ((ipl == 1)  && (*draco_intfrc & DRIRQ_SOFT) ?
1389 		    INTF_SOFTINT : 0);
1390 	else
1391 #endif
1392 		ireq = custom.intreqr;
1393 
1394 	switch (ipl) {
1395 	case 1:
1396 #ifdef DRACO
1397 		if (is_draco() && (draco_ioct->io_status & DRSTAT_KBDRECV))
1398 			drkbdintr();
1399 #endif
1400 		if (ireq & INTF_TBE) {
1401 #if NSER > 0
1402 			ser_outintr();
1403 #else
1404 			custom.intreq = INTF_TBE;
1405 #endif
1406 		}
1407 
1408 		if (ireq & INTF_DSKBLK) {
1409 #if NFD > 0
1410 			fdintr(0);
1411 #endif
1412 			custom.intreq = INTF_DSKBLK;
1413 		}
1414 		if (ireq & INTF_SOFTINT) {
1415 			unsigned char ssir_active;
1416 			int s;
1417 
1418 			/*
1419 			 * First clear the softint bit,
1420 			 * then process all classes of softints.
1421 			 * This order is dictated by the nature of
1422 			 * software interrupts.  The other order
1423 			 * allows software interrupts to be missed.
1424 			 * Also copy and clear ssir to prevent
1425 			 * interrupt loss.
1426 			 */
1427 			clrsoftint();
1428 			s = splhigh();
1429 			ssir_active = ssir;
1430 			siroff(SIR_NET | SIR_CBACK);
1431 			splx(s);
1432 			if (ssir_active & SIR_NET) {
1433 #ifdef REALLYDEBUG
1434 				printf("calling netintr\n");
1435 #endif
1436 				uvmexp.softs++;
1437 				netintr();
1438 			}
1439 			if (ssir_active & SIR_CBACK) {
1440 #ifdef REALLYDEBUG
1441 				printf("calling softcallbacks\n");
1442 #endif
1443 				uvmexp.softs++;
1444 				call_sicallbacks();
1445 			}
1446 		}
1447 		break;
1448 
1449 	case 2:
1450 		p = &isr_ports;
1451 		while ((q = *p) != NULL) {
1452 			if ((q->isr_intr)(q->isr_arg))
1453 				break;
1454 			p = &q->isr_forw;
1455 		}
1456 		if (q == NULL)
1457 			ciaa_intr ();
1458 #ifdef DRACO
1459 		if (is_draco())
1460 			single_inst_bclr_b(*draco_intpen, DRIRQ_INT2);
1461 		else
1462 #endif
1463 			custom.intreq = INTF_PORTS;
1464 
1465 		break;
1466 
1467 #ifdef DRACO
1468 	/* only handled here for DraCo */
1469 	case 6:
1470 		p = &isr_exter;
1471 		while ((q = *p) != NULL) {
1472 			if ((q->isr_intr)(q->isr_arg))
1473 				break;
1474 			p = &q->isr_forw;
1475 		}
1476 		single_inst_bclr_b(*draco_intpen, DRIRQ_INT6);
1477 		break;
1478 #endif
1479 
1480 	case 3:
1481 	/* VBL */
1482 		if (ireq & INTF_BLIT)
1483 			blitter_handler();
1484 		if (ireq & INTF_COPER)
1485 			copper_handler();
1486 		if (ireq & INTF_VERTB)
1487 			vbl_handler();
1488 		break;
1489 #ifdef DRACO
1490 	case 5:
1491 		p = &isr_supio;
1492 		while ((q = *p) != NULL) {
1493 			if ((q->isr_intr)(q->isr_arg))
1494 				break;
1495 			p = &q->isr_forw;
1496 		}
1497 		break;
1498 #endif
1499 #if 0
1500 /* now dealt with in locore.s for speed reasons */
1501 	case 5:
1502 		/* check RS232 RBF */
1503 		serintr (0);
1504 
1505 		custom.intreq = INTF_DSKSYNC;
1506 		break;
1507 #endif
1508 
1509 	case 4:
1510 #ifdef DRACO
1511 #include "drsc.h"
1512 		if (is_draco())
1513 #if NDRSC > 0
1514 			drsc_handler();
1515 #else
1516 			single_inst_bclr_b(*draco_intpen, DRIRQ_SCSI);
1517 #endif
1518 		else
1519 #endif
1520 		audio_handler();
1521 		break;
1522 	default:
1523 		printf("intrhand: unexpected sr 0x%x, intreq = 0x%x\n",
1524 		    sr, ireq);
1525 		break;
1526 	}
1527 #ifdef REALLYDEBUG
1528 	printf("intrhand: leaving.\n");
1529 #endif
1530 }
1531 
1532 #if defined(DEBUG) && !defined(PANICBUTTON)
1533 #define PANICBUTTON
1534 #endif
1535 
1536 #ifdef PANICBUTTON
1537 int panicbutton = 1;	/* non-zero if panic buttons are enabled */
1538 int crashandburn = 0;
1539 int candbdelay = 50;	/* give them half a second */
1540 void candbtimer(void);
1541 struct callout candbtimer_ch = CALLOUT_INITIALIZER;
1542 
1543 void
1544 candbtimer()
1545 {
1546 	crashandburn = 0;
1547 }
1548 #endif
1549 
1550 #if 0
1551 /*
1552  * Level 7 interrupts can be caused by the keyboard or parity errors.
1553  */
1554 nmihand(frame)
1555 	struct frame frame;
1556 {
1557 	if (kbdnmi()) {
1558 #ifdef PANICBUTTON
1559 		static int innmihand = 0;
1560 
1561 		/*
1562 		 * Attempt to reduce the window of vulnerability for recursive
1563 		 * NMIs (e.g. someone holding down the keyboard reset button).
1564 		 */
1565 		if (innmihand == 0) {
1566 			innmihand = 1;
1567 			printf("Got a keyboard NMI\n");
1568 			innmihand = 0;
1569 		}
1570 		if (panicbutton) {
1571 			if (crashandburn) {
1572 				crashandburn = 0;
1573 				panic(panicstr ?
1574 				      "forced crash, nosync" : "forced crash");
1575 			}
1576 			crashandburn++;
1577 			callout_reset(&candbtimer_ch, candbdelay,
1578 			    candbtimer, NULL);
1579 		}
1580 #endif
1581 		return;
1582 	}
1583 	if (parityerror(&frame))
1584 		return;
1585 	/* panic?? */
1586 	printf("unexpected level 7 interrupt ignored\n");
1587 }
1588 #endif
1589 
1590 /*
1591  * We should only get here if there is no standard executable.  This can
1592  * currently only mean we're reading an old ZMAGIC file without a MID, but
1593  * since Amiga ZMAGIC always worked the `right' way (;-)) just ignore the
1594  * missing MID and proceed to the new zmagic code ;-)
1595  */
1596 int
1597 cpu_exec_aout_makecmds(p, epp)
1598 	struct proc *p;
1599 	struct exec_package *epp;
1600 {
1601 	int error = ENOEXEC;
1602 #ifdef COMPAT_NOMID
1603 	struct exec *execp = epp->ep_hdr;
1604 #endif
1605 
1606 #ifdef COMPAT_NOMID
1607 	if (!((execp->a_midmag >> 16) & 0x0fff)
1608 	    && execp->a_midmag == ZMAGIC)
1609 		return(exec_aout_prep_zmagic(p, epp));
1610 #endif
1611 	return(error);
1612 }
1613 
1614 #ifdef LKM
1615 
1616 int _spllkm6(void);
1617 int _spllkm7(void);
1618 
1619 #ifdef LEV6_DEFER
1620 int _spllkm6() {
1621 	return spl4();
1622 };
1623 
1624 int _spllkm7() {
1625 	return spl4();
1626 };
1627 
1628 #else
1629 
1630 int _spllkm6() {
1631 	return spl6();
1632 };
1633 
1634 int _spllkm7() {
1635 	return spl7();
1636 };
1637 
1638 #endif
1639 
1640 #endif
1641