1 /*	$NetBSD: machdep.c,v 1.119 2002/09/25 22:21:05 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah $Hdr: machdep.c 1.63 91/04/24$
41  *
42  *	@(#)machdep.c	7.16 (Berkeley) 6/3/91
43  */
44 
45 #include "opt_ddb.h"
46 #include "opt_compat_netbsd.h"
47 #include "opt_mbtype.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/signalvar.h>
52 #include <sys/kernel.h>
53 #include <sys/proc.h>
54 #include <sys/buf.h>
55 #include <sys/reboot.h>
56 #include <sys/conf.h>
57 #include <sys/file.h>
58 #include <sys/device.h>
59 #include <sys/malloc.h>
60 #include <sys/mbuf.h>
61 #include <sys/msgbuf.h>
62 #include <sys/user.h>
63 #include <sys/vnode.h>
64 #include <sys/queue.h>
65 #include <sys/mount.h>
66 #include <sys/syscallargs.h>
67 
68 #include <sys/exec.h>
69 #if defined(DDB) && defined(__ELF__)
70 #include <sys/exec_elf.h>
71 #endif
72 
73 #include <net/netisr.h>
74 #undef PS	/* XXX netccitt/pk.h conflict with machine/reg.h? */
75 
76 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
77 #include <uvm/uvm_extern.h>
78 
79 #include <sys/sysctl.h>
80 
81 #include <machine/db_machdep.h>
82 #include <ddb/db_sym.h>
83 #include <ddb/db_extern.h>
84 
85 #include <machine/cpu.h>
86 #include <machine/reg.h>
87 #include <machine/psl.h>
88 #include <machine/pte.h>
89 
90 #include <dev/cons.h>
91 
92 static void bootsync __P((void));
93 static void call_sicallbacks __P((void));
94 static void identifycpu __P((void));
95 static void netintr __P((void));
96 void	straymfpint __P((int, u_short));
97 void	straytrap __P((int, u_short));
98 
99 #ifdef _MILANHW_
100 void	nmihandler __P((void));
101 #endif
102 
103 struct vm_map *exec_map = NULL;
104 struct vm_map *mb_map = NULL;
105 struct vm_map *phys_map = NULL;
106 
107 caddr_t	msgbufaddr;
108 vaddr_t	msgbufpa;
109 
110 int	physmem = MAXMEM;	/* max supported memory, changes to actual */
111 /*
112  * safepri is a safe priority for sleep to set for a spin-wait
113  * during autoconfiguration or after a panic.
114  */
115 int	safepri = PSL_LOWIPL;
116 extern  int   freebufspace;
117 extern	u_int lowram;
118 
119 /*
120  * For the fpu emulation and the fpu driver
121  */
122 int	fputype = 0;
123 
124 /* the following is used externally (sysctl_hw) */
125 char	machine[] = MACHINE;	/* from <machine/param.h> */
126 
127 /* Our exported CPU info; we can have only one. */
128 struct cpu_info cpu_info_store;
129 
130 /*
131  * Console initialization: called early on from main,
132  * before vm init or startup.  Do enough configuration
133  * to choose and initialize a console.
134  */
135 void
136 consinit()
137 {
138 	int	i;
139 
140 	/*
141 	 * Initialize the error message buffer.  pmap_bootstrap() has
142 	 * positioned it at the end of the kernel memory segment; map
143 	 * and initialize it now.
144 	 */
145 	for (i = 0; i < btoc(MSGBUFSIZE); i++)
146 		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
147 		    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
148 		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
149 	pmap_update(pmap_kernel());
150 	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
151 
152 	/*
153 	 * Initialize hardware that supports the various console types,
154 	 * such as the grf and PCI busses.
155 	 */
156 	config_console();
157 
158 	/*
159 	 * Now pick the best console candidate.
160 	 */
161 	cninit();
162 
163 #if defined (DDB)
164 	{
165 		extern int end;
166 		extern int *esym;
167 
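		/*
		 * Hand the kernel symbol table to DDB.  For a.out kernels
		 * the symbol table size is the word at `end' and the
		 * symbols follow it; for ELF kernels the size is taken as
		 * the span from `end' to `esym' minus the size of an ELF
		 * header.
		 */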
168 #ifndef __ELF__
169 		ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
170 #else
171 		ddb_init((int)esym - (int)&end - sizeof(Elf32_Ehdr),
172 			(void *)&end, esym);
173 #endif
174 	}
175 	if (boothowto & RB_KDB)
176 		Debugger();
177 #endif
178 }
179 
180 /*
181  * cpu_startup: allocate memory for variable-sized tables,
182  * initialize cpu, and do autoconfiguration.
183  */
184 void
185 cpu_startup()
186 {
187 	extern	 void		etext __P((void));
188 	extern	 int		iomem_malloc_safe;
189 		 caddr_t	v;
190 		 u_int		i, base, residual;
191 		 char		pbuf[9];
192 
193 #ifdef DEBUG
194 	extern	 int		pmapdebug;
195 		 int		opmapdebug = pmapdebug;
196 #endif
197 		 vaddr_t	minaddr, maxaddr;
198 		 vsize_t	size = 0;
199 	extern	 vsize_t	mem_size;	/* from pmap.c */
200 
201 #ifdef DEBUG
202 	pmapdebug = 0;
203 #endif
204 
205 	/*
206 	 * Good {morning,afternoon,evening,night}.
207 	 */
208 	printf(version);
209 	identifycpu();
210 
211 	format_bytes(pbuf, sizeof(pbuf), mem_size);
212 	printf("total memory = %s\n", pbuf);
213 
214 	/*
215 	 * Find out how much space we need, allocate it,
216 	 * and then give everything true virtual addresses.
217 	 */
218 	size = (int)allocsys(NULL, NULL);
219 	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
220 		panic("startup: no room for tables");
221 	if (allocsys(v, NULL) - v != size)
222 		panic("startup: table size inconsistency");
223 
224 	/*
225 	 * Now allocate the buffers proper.  They differ from the above
226 	 * in that they usually occupy more virtual memory than physical.
227 	 */
228 	size = MAXBSIZE * nbuf;
229 	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
230 		    NULL, UVM_UNKNOWN_OFFSET, 0,
231 		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
232 				UVM_ADV_NORMAL, 0)) != 0)
233 		panic("startup: cannot allocate VM for buffers");
234 	minaddr = (vaddr_t)buffers;
235 	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
236 		/* don't want to alloc more physical mem than needed */
237 		bufpages = btoc(MAXBSIZE) * nbuf;
238 	}
239 	base = bufpages / nbuf;
240 	residual = bufpages % nbuf;
241 	for (i = 0; i < nbuf; i++) {
242 		vsize_t curbufsize;
243 		vaddr_t curbuf;
244 		struct vm_page *pg;
245 
246 		/*
247 		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
248 		 * that MAXBSIZE space, we allocate and map (base+1) pages
249 		 * for the first "residual" buffers, and then we allocate
250 		 * "base" pages for the rest.
251 		 */
252 		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
253 		curbufsize = NBPG * ((i < residual) ? (base+1) : base);
254 
255 		while (curbufsize) {
256 			pg = uvm_pagealloc(NULL, 0, NULL, 0);
257 			if (pg == NULL)
258 				panic("cpu_startup: not enough memory for "
259 				    "buffer cache");
260 			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
261 			    VM_PROT_READ | VM_PROT_WRITE);
262 			curbuf += PAGE_SIZE;
263 			curbufsize -= PAGE_SIZE;
264 		}
265 	}
266 	pmap_update(kernel_map->pmap);
267 
268 	/*
269 	 * Allocate a submap for exec arguments.  This map effectively
270 	 * limits the number of processes exec'ing at any time.
271 	 */
272 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
273 				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
274 
275 	/*
276 	 * Allocate a submap for physio
277 	 */
278 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
279 				   VM_PHYS_SIZE, 0, FALSE, NULL);
280 
281 	/*
282 	 * Finally, allocate mbuf cluster submap.
283 	 */
284 	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
285 				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
286 				 FALSE, NULL);
287 
288 	/*
289 	 * Tell the VM system that page 0 isn't mapped.
290 	 *
291 	 * XXX This is bogus; should just fix KERNBASE and
292 	 * XXX VM_MIN_KERNEL_ADDRESS, but not right now.
293 	 */
294 	if (uvm_map_protect(kernel_map, 0, NBPG, UVM_PROT_NONE, TRUE) != 0)
295 		panic("can't mark page 0 off-limits");
296 
297 	/*
298 	 * Tell the VM system that writing to kernel text isn't allowed.
299 	 * If we don't, we might end up COW'ing the text segment!
300 	 *
301 	 * XXX Should be m68k_trunc_page(&kernel_text) instead
302 	 * XXX of NBPG.
303 	 */
304 	if (uvm_map_protect(kernel_map, NBPG, m68k_round_page(&etext),
305 	    UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0)
306 		panic("can't protect kernel text");
307 
308 #ifdef DEBUG
309 	pmapdebug = opmapdebug;
310 #endif
311 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
312 	printf("avail memory = %s\n", pbuf);
313 	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
314 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
315 
316 	/*
317 	 * Set up buffers, so they can be used to read disk labels.
318 	 */
319 	bufinit();
320 
321 	/*
322 	 * Allow extent allocation to use malloc() from now on.
323 	 */
324 	iomem_malloc_safe = 1;
325 }
326 
327 /*
328  * Set registers on exec.
329  */
330 void
331 setregs(p, pack, stack)
332 	register struct proc *p;
333 	struct exec_package *pack;
334 	u_long stack;
335 {
336 	struct frame *frame = (struct frame *)p->p_md.md_regs;
337 
338 	frame->f_sr = PSL_USERSET;
339 	frame->f_pc = pack->ep_entry & ~1;
340 	frame->f_regs[D0] = 0;
341 	frame->f_regs[D1] = 0;
342 	frame->f_regs[D2] = 0;
343 	frame->f_regs[D3] = 0;
344 	frame->f_regs[D4] = 0;
345 	frame->f_regs[D5] = 0;
346 	frame->f_regs[D6] = 0;
347 	frame->f_regs[D7] = 0;
348 	frame->f_regs[A0] = 0;
349 	frame->f_regs[A1] = 0;
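	/* pass the address of the process's ps_strings structure in a2 */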
350 	frame->f_regs[A2] = (int)p->p_psstr;
351 	frame->f_regs[A3] = 0;
352 	frame->f_regs[A4] = 0;
353 	frame->f_regs[A5] = 0;
354 	frame->f_regs[A6] = 0;
355 	frame->f_regs[SP] = stack;
356 
357 	/* restore a null state frame */
358 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
359 	if (fputype)
360 		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
361 }
362 
363 /*
364  * Info for CTL_HW
365  */
366 char cpu_model[120];
367 
368 static void
369 identifycpu()
370 {
371 	char	*mach, *mmu, *fpu, *cpu;
372 
373 	switch (machineid & ATARI_ANYMACH) {
374 	case ATARI_TT:
375 		mach = "Atari TT";
376 		break;
377 	case ATARI_FALCON:
378 		mach = "Atari Falcon";
379 		break;
380 	case ATARI_HADES:
381 		mach = "Atari Hades";
382 		break;
383 	case ATARI_MILAN:
384 		mach = "Atari Milan";
385 		break;
386 	default:
387 		mach = "Atari UNKNOWN";
388 		break;
389 	}
390 
391 	cpu     = "m68k";
392 	fputype = fpu_probe();
393 	fpu     = fpu_describe(fputype);
394 
395 	switch (cputype) {
396 
397 	    case CPU_68060:
398 		{
399 			u_int32_t	pcr;
400 			char		cputxt[30];
401 
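			/*
			 * Fetch the 68060 Processor Configuration Register
			 * (PCR).  The raw opcode words encode
			 * `movec %pcr,%d0'; the PCR (control register
			 * 0x808) exists only on the 68060, so it is
			 * hand-assembled here.  Bits 15-8 hold the chip
			 * revision and bit 16 marks the LC/EC variants.
			 */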
402 			asm(".word 0x4e7a,0x0808;"
403 			    "movl %%d0,%0" : "=d"(pcr) : : "d0");
404 			sprintf(cputxt, "68%s060 rev.%d",
405 				pcr & 0x10000 ? "LC/EC" : "", (pcr>>8)&0xff);
406 			cpu = cputxt;
407 			mmu = "/MMU";
408 		}
409 		break;
410 	case CPU_68040:
411 		cpu = "m68040";
412 		mmu = "/MMU";
413 		break;
414 	case CPU_68030:
415 		cpu = "m68030";
416 		mmu = "/MMU";
417 		break;
418 	default: /* XXX */
419 		cpu = "m68020";
420 		mmu = " m68851 MMU";
421 	}
422 	sprintf(cpu_model, "%s (%s CPU%s%sFPU)", mach, cpu, mmu, fpu);
423 	printf("%s\n", cpu_model);
424 }
425 
426 /*
427  * machine dependent system variables.
428  */
429 int
430 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
431 	int *name;
432 	u_int namelen;
433 	void *oldp;
434 	size_t *oldlenp;
435 	void *newp;
436 	size_t newlen;
437 	struct proc *p;
438 {
439 	dev_t consdev;
440 
441 	/* all sysctl names at this level are terminal */
442 	if (namelen != 1)
443 		return(ENOTDIR);               /* overloaded */
444 
445 	switch (name[0]) {
446 	case CPU_CONSDEV:
447 		if (cn_tab != NULL)
448 			consdev = cn_tab->cn_dev;
449 		else
450 			consdev = NODEV;
451 		return(sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
452 					sizeof(consdev)));
453 	default:
454 		return(EOPNOTSUPP);
455 	}
456 	/* NOTREACHED */
457 }
458 
459 static int waittime = -1;
460 
461 static void
462 bootsync(void)
463 {
464 	if (waittime < 0) {
465 		waittime = 0;
466 
467 		vfs_shutdown();
468 
469 		/*
470 		 * If we've been adjusting the clock, the todr
471 		 * will be out of synch; adjust it now.
472 		 */
473 		resettodr();
474 	}
475 }
476 
477 void
478 cpu_reboot(howto, bootstr)
479 	int	howto;
480 	char	*bootstr;
481 {
482 	/* take a snapshot before clobbering any registers */
483 	if (curproc && curproc->p_addr)
484 		savectx(&curproc->p_addr->u_pcb);
485 
486 	boothowto = howto;
487 	if((howto & RB_NOSYNC) == 0)
488 		bootsync();
489 
490 	/*
491 	 * Call shutdown hooks. Do this _before_ anything might be
492 	 * asked of the user, in case nobody is there....
493 	 */
494 	doshutdownhooks();
495 
496 	splhigh();			/* extreme priority */
497 	if(howto & RB_HALT) {
498 		printf("halted\n\n");
499 		asm("	stop	#0x2700");
500 	}
501 	else {
502 		if(howto & RB_DUMP)
503 			dumpsys();
504 
505 		doboot();
506 		/*NOTREACHED*/
507 	}
508 	panic("Boot() should never come here");
509 	/*NOTREACHED*/
510 }
511 
512 #define	BYTES_PER_DUMP	NBPG		/* Must be a multiple of NBPG	*/
513 static vaddr_t	dumpspace;	/* Virt. space to map dumppages	*/
514 
515 /*
516  * Reserve _virtual_ memory to map in the page to be dumped
517  */
518 vaddr_t
519 reserve_dumppages(p)
520 vaddr_t	p;
521 {
522 	dumpspace = p;
523 	return(p + BYTES_PER_DUMP);
524 }
525 
526 u_int32_t	dumpmag  = 0x8fca0101;	/* magic number for savecore	*/
527 int		dumpsize = 0;		/* also for savecore (pages)	*/
528 long		dumplo   = 0;		/* (disk blocks)		*/
529 
530 void
531 cpu_dumpconf()
532 {
533 	const struct bdevsw *bdev;
534 	int	nblks, i;
535 
536 	for (i = dumpsize = 0; i < NMEM_SEGS; i++) {
537 		if (boot_segs[i].start == boot_segs[i].end)
538 			break;
539 		dumpsize += boot_segs[i].end - boot_segs[i].start;
540 	}
541 	dumpsize = btoc(dumpsize);
542 
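	/*
	 * If a dump device is configured, clip the dump to what fits in
	 * its partition, or, when dumplo was not set explicitly, place
	 * the dump at the end of the partition.  The cpu_dumpsize()
	 * blocks subtracted below leave room for the dump header that
	 * cpu_dump() writes in front of the memory image.
	 */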
543 	if (dumpdev != NODEV) {
544 		bdev = bdevsw_lookup(dumpdev);
545 		if (bdev != NULL && bdev->d_psize != NULL) {
546 			nblks = (*bdev->d_psize)(dumpdev);
547 			if (dumpsize > btoc(dbtob(nblks - dumplo)))
548 				dumpsize = btoc(dbtob(nblks - dumplo));
549 			else if (dumplo == 0)
550 				dumplo = nblks - btodb(ctob(dumpsize));
551 		}
552 	}
553 	dumplo -= cpu_dumpsize();
554 
555 	/*
556 	 * Don't dump on the first NBPG (why NBPG?)
557 	 * in case the dump device includes a disk label.
558 	 */
559 	if (dumplo < btodb(NBPG))
560 		dumplo = btodb(NBPG);
561 }
562 
563 /*
564  * Doadump comes here after turning off memory management and
565  * getting on the dump stack, either when called above, or by
566  * the auto-restart code.
567  */
568 void
569 dumpsys()
570 {
571 	const struct bdevsw *bdev;
572 	daddr_t	blkno;		/* Current block to write	*/
573 	int	(*dump) __P((dev_t, daddr_t, caddr_t, size_t));
574 				/* Dumping function		*/
575 	u_long	maddr;		/* PA being dumped		*/
576 	int	segbytes;	/* Number of bytes in this seg.	*/
577 	int	segnum;		/* Segment we are dumping	*/
578 	int	nbytes;		/* Bytes left to dump		*/
579 	int	i, n, error;
580 
581 	error = segnum = 0;
582 	if (dumpdev == NODEV)
583 		return;
584 	bdev = bdevsw_lookup(dumpdev);
585 	if (bdev == NULL)
586 		return;
587 	/*
588 	 * For dumps during autoconfiguration, the dump device may not
589 	 * have been configured yet; do that now if necessary.
590 	 */
591 	if (dumpsize == 0)
592 		cpu_dumpconf();
593 	if (dumplo <= 0) {
594 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
595 		    minor(dumpdev));
596 		return;
597 	}
598 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
599 	    minor(dumpdev), dumplo);
600 
601 #if defined(DDB) || defined(PANICWAIT)
602 	printf("Do you want to dump memory? [y]");
603 	cnputc(i = cngetc());
604 	switch (i) {
605 		case 'n':
606 		case 'N':
607 			return;
608 		case '\n':
609 			break;
610 		default:
611 			cnputc('\n');
612 	}
613 #endif /* defined(DDB) || defined(PANICWAIT) */
614 
615 	maddr    = 0;
616 	segbytes = boot_segs[0].end;
617 	blkno    = dumplo;
618 	dump     = bdev->d_dump;
619 	nbytes   = dumpsize * NBPG;
620 
621 	printf("dump ");
622 
623 	error = cpu_dump(dump, &blkno);
624 	if (!error) {
625 	    for (i = 0; i < nbytes; i += n, segbytes -= n) {
626 		/*
627 		 * Skip the hole
628 		 */
629 		if (segbytes == 0) {
630 		    segnum++;
631 		    maddr    = boot_segs[segnum].start;
632 		    segbytes = boot_segs[segnum].end - boot_segs[segnum].start;
633 		}
634 		/*
635 		 * Print the number of MB still to go
636 		 */
637 		n = nbytes - i;
638 		if (n && (n % (1024*1024)) == 0)
639 			printf("%d ", n / (1024 * 1024));
640 
641 		/*
642 		 * Limit transfer to BYTES_PER_DUMP
643 		 */
644 		if (n > BYTES_PER_DUMP)
645 			n = BYTES_PER_DUMP;
646 
647 		/*
648 		 * Map to a VA and write it
649 		 */
650 		if (maddr != 0) { /* XXX kvtop chokes on this	*/
651 			(void)pmap_map(dumpspace, maddr, maddr+n, VM_PROT_READ);
652 			error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
653 			if (error)
654 				break;
655 		}
656 
657 		maddr += n;
658 		blkno += btodb(n);
659 	    }
660 	}
661 	switch (error) {
662 
663 	case ENXIO:
664 		printf("device bad\n");
665 		break;
666 
667 	case EFAULT:
668 		printf("device not ready\n");
669 		break;
670 
671 	case EINVAL:
672 		printf("area improper\n");
673 		break;
674 
675 	case EIO:
676 		printf("i/o error\n");
677 		break;
678 
679 	default:
680 		printf("succeeded\n");
681 		break;
682 	}
683 	printf("\n\n");
684 	delay(5000000);		/* 5 seconds */
685 }
686 
687 /*
688  * Return the best possible estimate of the time in the timeval
689  * to which tvp points.  We do this by returning the current time
690  * plus the amount of time since the last clock interrupt (clock.c:clkread).
691  *
692  * Check that this time is no less than any previously-reported time,
693  * which could happen around the time of a clock adjustment.  Just for fun,
694  * we guarantee that the time will be greater than the value obtained by a
695  * previous call.
696  */
697 void microtime(tvp)
698 	register struct timeval *tvp;
699 {
700 	int s = splhigh();
701 	static struct timeval lasttime;
702 
703 	*tvp = time;
704 	tvp->tv_usec += clkread();
705 	while (tvp->tv_usec >= 1000000) {
706 		tvp->tv_sec++;
707 		tvp->tv_usec -= 1000000;
708 	}
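	/*
	 * Enforce monotonicity: if we have not moved past the value
	 * returned last time (same second, microseconds not larger),
	 * the assignment inside the condition below bumps tv_usec one
	 * past the previous value, and the body handles the carry in
	 * case that overflows a full second.
	 */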
709 	if (tvp->tv_sec == lasttime.tv_sec &&
710 	    tvp->tv_usec <= lasttime.tv_usec &&
711 	    (tvp->tv_usec = lasttime.tv_usec + 1) >= 1000000) {
712 		tvp->tv_sec++;
713 		tvp->tv_usec -= 1000000;
714 	}
715 	lasttime = *tvp;
716 	splx(s);
717 }
718 
719 void
720 straytrap(pc, evec)
721 int pc;
722 u_short evec;
723 {
724 	static int	prev_evec;
725 
726 	printf("unexpected trap (vector offset 0x%x) from 0x%x\n",
727 						evec & 0xFFF, pc);
728 
729 	if(prev_evec == evec) {
730 		delay(1000000);
731 		prev_evec = 0;
732 	}
733 	else prev_evec = evec;
734 }
735 
736 void
737 straymfpint(pc, evec)
738 int		pc;
739 u_short	evec;
740 {
741 	printf("unexpected mfp-interrupt (vector offset 0x%x) from 0x%x\n",
742 	       evec & 0xFFF, pc);
743 }
744 
745 /*
746  * Simulated software interrupt handler
747  */
748 void
749 softint()
750 {
751 	if(ssir & SIR_NET) {
752 		siroff(SIR_NET);
753 		uvmexp.softs++;
754 		netintr();
755 	}
756 	if(ssir & SIR_CLOCK) {
757 		siroff(SIR_CLOCK);
758 		uvmexp.softs++;
759 		/* XXXX softclock(&frame.f_stackadj); */
760 		softclock(NULL);
761 	}
762 	if (ssir & SIR_CBACK) {
763 		siroff(SIR_CBACK);
764 		uvmexp.softs++;
765 		call_sicallbacks();
766 	}
767 }
768 
769 int	*nofault;
770 
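/*
 * Probe an address for the presence of hardware: read `size' bytes from
 * `addr' while `nofault' points at a local jump buffer.  A bus error
 * during the access makes the trap handler longjmp() back here, so we
 * return 1 for a bad address and 0 if the read succeeded.
 */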
771 int
772 badbaddr(addr, size)
773 	register caddr_t addr;
774 	int		 size;
775 {
776 	register int i;
777 	label_t	faultbuf;
778 
779 #ifdef lint
780 	i = *addr; if (i) return(0);
781 #endif
782 	nofault = (int *) &faultbuf;
783 	if (setjmp((label_t *)nofault)) {
784 		nofault = (int *) 0;
785 		return(1);
786 	}
787 	switch (size) {
788 		case 1:
789 			i = *(volatile char *)addr;
790 			break;
791 		case 2:
792 			i = *(volatile short *)addr;
793 			break;
794 		case 4:
795 			i = *(volatile long *)addr;
796 			break;
797 		default:
798 			panic("badbaddr: unknown size");
799 	}
800 	nofault = (int *) 0;
801 	return(0);
802 }
803 
804 /*
805  * Network interrupt handling
806  */
807 static void
808 netintr()
809 {
810 #define DONETISR(bit, fn) do {			\
811 	if (netisr & (1 << bit)) {		\
812 		netisr &= ~(1 << bit);		\
813 		fn();				\
814 	}					\
815 } while (0)
816 
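	/*
	 * <net/netisr_dispatch.h> expands to one DONETISR(bit, fn) line
	 * per protocol configured into the kernel, so each pending
	 * netisr bit is cleared and its input routine called in turn.
	 */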
817 #include <net/netisr_dispatch.h>
818 
819 #undef DONETISR
820 }
821 
822 
823 /*
824  * This is a handy package for having function calls executed
825  * asynchronously at very low interrupt priority.  An example use
826  * is keyboard repeat, where the repeat handler running at
827  * splclock() triggers such a (hardware-aided) software
828  * interrupt.
829  * Note: the installed functions are currently called in a
830  * LIFO fashion; we might want to change this to FIFO
831  * later.
832  */
833 struct si_callback {
834 	struct si_callback *next;
835 	void (*function) __P((void *rock1, void *rock2));
836 	void *rock1, *rock2;
837 };
838 static struct si_callback *si_callbacks;
839 static struct si_callback *si_free;
840 #ifdef DIAGNOSTIC
841 static int ncbd;	/* number of callback blocks dynamically allocated */
842 #endif
843 
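/*
 * Callback blocks are kept on a private free list so that
 * add_sicallback() can normally avoid calling malloc() at interrupt
 * time; a new block is allocated (with M_NOWAIT) only when the free
 * list is empty, and used blocks are returned to the free list by
 * call_sicallbacks() and rem_sicallback().
 */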
844 void add_sicallback (function, rock1, rock2)
845 void	(*function) __P((void *rock1, void *rock2));
846 void	*rock1, *rock2;
847 {
848 	struct si_callback	*si;
849 	int			s;
850 
851 	/*
852 	 * This function may be called from high-priority interrupt handlers;
853 	 * we may NOT block for memory allocation in here!
854 	 */
855 	s  = splhigh();
856 	if((si = si_free) != NULL)
857 		si_free = si->next;
858 	splx(s);
859 
860 	if(si == NULL) {
861 		si = (struct si_callback *)malloc(sizeof(*si),M_TEMP,M_NOWAIT);
862 #ifdef DIAGNOSTIC
863 		if(si)
864 			++ncbd;		/* count # dynamically allocated */
865 #endif
866 		if(!si)
867 			return;
868 	}
869 
870 	si->function = function;
871 	si->rock1    = rock1;
872 	si->rock2    = rock2;
873 
874 	s = splhigh();
875 	si->next     = si_callbacks;
876 	si_callbacks = si;
877 	splx(s);
878 
879 	/*
880 	 * and cause a software interrupt (spl1). This interrupt might
881 	 * happen immediately, or after returning to a safe enough level.
882 	 */
883 	setsoftcback();
884 }
885 
886 void rem_sicallback(function)
887 void (*function) __P((void *rock1, void *rock2));
888 {
889 	struct si_callback	*si, *psi, *nsi;
890 	int			s;
891 
892 	s = splhigh();
893 	for(psi = 0, si = si_callbacks; si; ) {
894 		nsi = si->next;
895 
896 		if(si->function != function)
897 			psi = si;
898 		else {
899 			si->next = si_free;
900 			si_free  = si;
901 			if(psi)
902 				psi->next = nsi;
903 			else si_callbacks = nsi;
904 		}
905 		si = nsi;
906 	}
907 	splx(s);
908 }
909 
910 /* purge the list */
911 static void call_sicallbacks()
912 {
913 	struct si_callback	*si;
914 	int			s;
915 	void			*rock1, *rock2;
916 	void			(*function) __P((void *, void *));
917 
918 	do {
919 		s = splhigh ();
920 		if ((si = si_callbacks) != NULL)
921 			si_callbacks = si->next;
922 		splx(s);
923 
924 		if (si) {
925 			function = si->function;
926 			rock1    = si->rock1;
927 			rock2    = si->rock2;
928 			s = splhigh ();
929 			if(si_callbacks)
930 				setsoftcback();
931 			si->next = si_free;
932 			si_free  = si;
933 			splx(s);
934 			function(rock1, rock2);
935 		}
936 	} while (si);
937 #ifdef DIAGNOSTIC
938 	if (ncbd) {
939 #ifdef DEBUG
940 		printf("call_sicallback: %d more dynamic structures\n", ncbd);
941 #endif
942 		ncbd = 0;
943 	}
944 #endif
945 }
946 
947 #if defined(DEBUG) && !defined(PANICBUTTON)
948 #define PANICBUTTON
949 #endif
950 
951 #ifdef PANICBUTTON
952 int panicbutton = 1;	/* non-zero if panic buttons are enabled */
953 int crashandburn = 0;
954 int candbdelay = 50;	/* give em half a second */
955 
956 void candbtimer __P((void));
957 
958 void
959 candbtimer()
960 {
961 	crashandburn = 0;
962 }
963 #endif
964 
965 /*
966  * We should only get here if there is no standard executable header.
967  * Currently that can only mean we're reading an old ZMAGIC file without
968  * a MID, but since Atari ZMAGIC always worked the `right' way (;-)),
969  * just ignore the missing MID and proceed to the new zmagic code ;-)
970  */
971 int
972 cpu_exec_aout_makecmds(p, epp)
973 	struct proc *p;
974 	struct exec_package *epp;
975 {
976 	int error = ENOEXEC;
977 #ifdef COMPAT_NOMID
978 	struct exec *execp = epp->ep_hdr;
979 #endif
980 
981 #ifdef COMPAT_NOMID
982 	if (!((execp->a_midmag >> 16) & 0x0fff)
983 	    && execp->a_midmag == ZMAGIC)
984 		return(exec_aout_prep_zmagic(p, epp));
985 #endif
986 	return(error);
987 }
988 
989 #ifdef _MILANHW_
990 
991 /*
992  * Currently the only source of NMI interrupts on the Milan is the PLX9080.
993  * On access errors to the PCI bus, an NMI is generated. This NMI is shorted
994  * in locore in case of a PCI config cycle to a non-existing address to allow
995  * for probes.  On other occasions, it ShouldNotHappen(TM).
996  * Note: The handler in locore clears the errors, to make further PCI access
997  * possible.
998  */
999 void
1000 nmihandler()
1001 {
1002 	extern unsigned long	plx_status;
1003 
1004 	printf("nmihandler: plx_status = 0x%08lx\n", plx_status);
1005 }
1006 #endif
1007