xref: /netbsd/sys/arch/atari/atari/machdep.c (revision bf9ec67e)
1 /*	$NetBSD: machdep.c,v 1.115 2002/04/09 13:04:43 leo Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah $Hdr: machdep.c 1.63 91/04/24$
41  *
42  *	@(#)machdep.c	7.16 (Berkeley) 6/3/91
43  */
44 
45 #include "opt_ddb.h"
46 #include "opt_compat_netbsd.h"
47 #include "opt_mbtype.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/signalvar.h>
52 #include <sys/kernel.h>
53 #include <sys/map.h>
54 #include <sys/proc.h>
55 #include <sys/buf.h>
56 #include <sys/reboot.h>
57 #include <sys/conf.h>
58 #include <sys/file.h>
59 #include <sys/clist.h>
60 #include <sys/device.h>
61 #include <sys/malloc.h>
62 #include <sys/mbuf.h>
63 #include <sys/msgbuf.h>
64 #include <sys/user.h>
65 #include <sys/vnode.h>
66 #include <sys/queue.h>
67 #include <sys/mount.h>
68 #include <sys/syscallargs.h>
69 
70 #include <sys/exec.h>
71 #if defined(DDB) && defined(__ELF__)
72 #include <sys/exec_elf.h>
73 #endif
74 
75 #include <net/netisr.h>
76 #undef PS	/* XXX netccitt/pk.h conflict with machine/reg.h? */
77 
78 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
79 #include <uvm/uvm_extern.h>
80 
81 #include <sys/sysctl.h>
82 
83 #include <machine/db_machdep.h>
84 #include <ddb/db_sym.h>
85 #include <ddb/db_extern.h>
86 
87 #include <machine/cpu.h>
88 #include <machine/reg.h>
89 #include <machine/psl.h>
90 #include <machine/pte.h>
91 
92 #include <dev/cons.h>
93 
94 static void bootsync __P((void));
95 static void call_sicallbacks __P((void));
96 static void identifycpu __P((void));
97 static void netintr __P((void));
98 void	straymfpint __P((int, u_short));
99 void	straytrap __P((int, u_short));
100 
101 #ifdef _MILANHW_
102 void	nmihandler __P((void));
103 #endif
104 
/* Submaps of kernel_map; carved out in cpu_startup() below. */
struct vm_map *exec_map = NULL;		/* exec argument space */
struct vm_map *mb_map = NULL;		/* mbuf cluster pool */
struct vm_map *phys_map = NULL;		/* physio transfer space */

/* Kernel message buffer: virtual address and physical backing
 * (mapped together in consinit()). */
caddr_t	msgbufaddr;
vaddr_t	msgbufpa;

int	physmem = MAXMEM;	/* max supported memory, changes to actual */
/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;
extern  int   freebufspace;
extern	u_int lowram;

/*
 * For the fpu emulation and the fpu driver.
 * Set from fpu_probe() in identifycpu().
 */
int	fputype = 0;

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;	/* from <machine/param.h> */

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;
131 
132  /*
133  * Console initialization: called early on from main,
134  * before vm init or startup.  Do enough configuration
135  * to choose and initialize a console.
136  */
137 void
138 consinit()
139 {
140 	int	i;
141 
142 	/*
143 	 * Initialize error message buffer. pmap_bootstrap() has
144 	 * positioned this at the end of kernel memory segment - map
145 	 * and initialize it now.
146 	 */
147 	for (i = 0; i < btoc(MSGBUFSIZE); i++)
148 		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
149 		    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
150 		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
151 	pmap_update(pmap_kernel());
152 	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
153 
154 	/*
155 	 * Initialize hardware that support various console types like
156 	 * the grf and PCI busses.
157 	 */
158 	config_console();
159 
160 	/*
161 	 * Now pick the best console candidate.
162 	 */
163 	cninit();
164 
165 #if defined (DDB)
166 	{
167 		extern int end;
168 		extern int *esym;
169 
170 #ifndef __ELF__
171 		ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
172 #else
173 		ddb_init((int)esym - (int)&end - sizeof(Elf32_Ehdr),
174 			(void *)&end, esym);
175 #endif
176 	}
177         if(boothowto & RB_KDB)
178                 Debugger();
179 #endif
180 }
181 
182 /*
183  * cpu_startup: allocate memory for variable-sized tables,
184  * initialize cpu, and do autoconfiguration.
185  */
186 void
187 cpu_startup()
188 {
189 	extern	 void		etext __P((void));
190 	extern	 int		iomem_malloc_safe;
191 	register unsigned	i;
192 		 caddr_t	v;
193 		 int		base, residual;
194 		 char		pbuf[9];
195 
196 #ifdef DEBUG
197 	extern	 int		pmapdebug;
198 		 int		opmapdebug = pmapdebug;
199 #endif
200 		 vaddr_t	minaddr, maxaddr;
201 		 vsize_t	size = 0;
202 	extern	 vsize_t	mem_size;	/* from pmap.c */
203 
204 #ifdef DEBUG
205 	pmapdebug = 0;
206 #endif
207 
208 	/*
209 	 * Good {morning,afternoon,evening,night}.
210 	 */
211 	printf(version);
212 	identifycpu();
213 
214 	format_bytes(pbuf, sizeof(pbuf), mem_size);
215 	printf("total memory = %s\n", pbuf);
216 
217 	/*
218 	 * Find out how much space we need, allocate it,
219 	 * and then give everything true virtual addresses.
220 	 */
221 	size = (int)allocsys(NULL, NULL);
222 	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
223 		panic("startup: no room for tables");
224 	if (allocsys(v, NULL) - v != size)
225 		panic("startup: table size inconsistency");
226 
227 	/*
228 	 * Now allocate buffers proper.  They are different than the above
229 	 * in that they usually occupy more virtual memory than physical.
230 	 */
231 	size = MAXBSIZE * nbuf;
232 	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
233 		    NULL, UVM_UNKNOWN_OFFSET, 0,
234 		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
235 				UVM_ADV_NORMAL, 0)) != 0)
236 		panic("startup: cannot allocate VM for buffers");
237 	minaddr = (vaddr_t)buffers;
238 	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
239 		/* don't want to alloc more physical mem than needed */
240 		bufpages = btoc(MAXBSIZE) * nbuf;
241 	}
242 	base = bufpages / nbuf;
243 	residual = bufpages % nbuf;
244 	for (i = 0; i < nbuf; i++) {
245 		vsize_t curbufsize;
246 		vaddr_t curbuf;
247 		struct vm_page *pg;
248 
249 		/*
250 		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
251 		 * that MAXBSIZE space, we allocate and map (base+1) pages
252 		 * for the first "residual" buffers, and then we allocate
253 		 * "base" pages for the rest.
254 		 */
255 		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
256 		curbufsize = NBPG * ((i < residual) ? (base+1) : base);
257 
258 		while (curbufsize) {
259 			pg = uvm_pagealloc(NULL, 0, NULL, 0);
260 			if (pg == NULL)
261 				panic("cpu_startup: not enough memory for "
262 				    "buffer cache");
263 			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
264 			    VM_PROT_READ | VM_PROT_WRITE);
265 			curbuf += PAGE_SIZE;
266 			curbufsize -= PAGE_SIZE;
267 		}
268 	}
269 	pmap_update(kernel_map->pmap);
270 
271 	/*
272 	 * Allocate a submap for exec arguments.  This map effectively
273 	 * limits the number of processes exec'ing at any time.
274 	 */
275 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
276 				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
277 
278 	/*
279 	 * Allocate a submap for physio
280 	 */
281 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
282 				   VM_PHYS_SIZE, 0, FALSE, NULL);
283 
284 	/*
285 	 * Finally, allocate mbuf cluster submap.
286 	 */
287 	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
288 				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
289 				 FALSE, NULL);
290 
291 	/*
292 	 * Tell the VM system that page 0 isn't mapped.
293 	 *
294 	 * XXX This is bogus; should just fix KERNBASE and
295 	 * XXX VM_MIN_KERNEL_ADDRESS, but not right now.
296 	 */
297 	if (uvm_map_protect(kernel_map, 0, NBPG, UVM_PROT_NONE, TRUE) != 0)
298 		panic("can't mark page 0 off-limits");
299 
300 	/*
301 	 * Tell the VM system that writing to kernel text isn't allowed.
302 	 * If we don't, we might end up COW'ing the text segment!
303 	 *
304 	 * XXX Should be m68k_trunc_page(&kernel_text) instead
305 	 * XXX of NBPG.
306 	 */
307 	if (uvm_map_protect(kernel_map, NBPG, m68k_round_page(&etext),
308 	    UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != 0)
309 		panic("can't protect kernel text");
310 
311 #ifdef DEBUG
312 	pmapdebug = opmapdebug;
313 #endif
314 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
315 	printf("avail memory = %s\n", pbuf);
316 	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
317 	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);
318 
319 	/*
320 	 * Set up buffers, so they can be used to read disk labels.
321 	 */
322 	bufinit();
323 
324 	/*
325 	 * Alloc extent allocation to use malloc
326 	 */
327 	iomem_malloc_safe = 1;
328 }
329 
330 /*
331  * Set registers on exec.
332  */
333 void
334 setregs(p, pack, stack)
335 	register struct proc *p;
336 	struct exec_package *pack;
337 	u_long stack;
338 {
339 	struct frame *frame = (struct frame *)p->p_md.md_regs;
340 
341 	frame->f_sr = PSL_USERSET;
342 	frame->f_pc = pack->ep_entry & ~1;
343 	frame->f_regs[D0] = 0;
344 	frame->f_regs[D1] = 0;
345 	frame->f_regs[D2] = 0;
346 	frame->f_regs[D3] = 0;
347 	frame->f_regs[D4] = 0;
348 	frame->f_regs[D5] = 0;
349 	frame->f_regs[D6] = 0;
350 	frame->f_regs[D7] = 0;
351 	frame->f_regs[A0] = 0;
352 	frame->f_regs[A1] = 0;
353 	frame->f_regs[A2] = (int)p->p_psstr;
354 	frame->f_regs[A3] = 0;
355 	frame->f_regs[A4] = 0;
356 	frame->f_regs[A5] = 0;
357 	frame->f_regs[A6] = 0;
358 	frame->f_regs[SP] = stack;
359 
360 	/* restore a null state frame */
361 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
362 	if (fputype)
363 		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
364 }
365 
366 /*
367  * Info for CTL_HW
368  */
369 char cpu_model[120];
370 
371 static void
372 identifycpu()
373 {
374        char	*mach, *mmu, *fpu, *cpu;
375 
376 	switch (machineid & ATARI_ANYMACH) {
377 		case ATARI_TT:
378 				mach = "Atari TT";
379 				break;
380 		case ATARI_FALCON:
381 				mach = "Atari Falcon";
382 				break;
383 		case ATARI_HADES:
384 				mach = "Atari Hades";
385 				break;
386 		case ATARI_MILAN:
387 				mach = "Atari Milan";
388 				break;
389 		default:
390 				mach = "Atari UNKNOWN";
391 				break;
392 	}
393 
394 	cpu     = "m68k";
395 	fputype = fpu_probe();
396 	fpu     = fpu_describe(fputype);
397 
398 	switch (cputype) {
399 
400 	    case CPU_68060:
401 		{
402 			u_int32_t	pcr;
403 			char		cputxt[30];
404 
405 			asm(".word 0x4e7a,0x0808;"
406 			    "movl %%d0,%0" : "=d"(pcr) : : "d0");
407 			sprintf(cputxt, "68%s060 rev.%d",
408 				pcr & 0x10000 ? "LC/EC" : "", (pcr>>8)&0xff);
409 			cpu = cputxt;
410 			mmu = "/MMU";
411 		}
412 		break;
413 	case CPU_68040:
414 		cpu = "m68040";
415 		mmu = "/MMU";
416 		break;
417 	case CPU_68030:
418 		cpu = "m68030";
419 		mmu = "/MMU";
420 		break;
421 	default: /* XXX */
422 		cpu = "m68020";
423 		mmu = " m68851 MMU";
424 	}
425 	sprintf(cpu_model, "%s (%s CPU%s%sFPU)", mach, cpu, mmu, fpu);
426 	printf("%s\n", cpu_model);
427 }
428 
429 /*
430  * machine dependent system variables.
431  */
432 int
433 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
434 	int *name;
435 	u_int namelen;
436 	void *oldp;
437 	size_t *oldlenp;
438 	void *newp;
439 	size_t newlen;
440 	struct proc *p;
441 {
442 	dev_t consdev;
443 
444 	/* all sysctl names at this level are terminal */
445 	if (namelen != 1)
446 		return(ENOTDIR);               /* overloaded */
447 
448 	switch (name[0]) {
449 	case CPU_CONSDEV:
450 		if (cn_tab != NULL)
451 			consdev = cn_tab->cn_dev;
452 		else
453 			consdev = NODEV;
454 		return(sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
455 					sizeof(consdev)));
456 	default:
457 		return(EOPNOTSUPP);
458 	}
459 	/* NOTREACHED */
460 }
461 
static int waittime = -1;	/* >= 0 once the shutdown sync has run */

/*
 * Sync the filesystems and bring the todr clock back in step,
 * exactly once per shutdown; later calls are no-ops.
 */
static void
bootsync(void)
{
	if (waittime >= 0)
		return;
	waittime = 0;

	vfs_shutdown();

	/*
	 * If we've been adjusting the clock, the todr
	 * will be out of synch; adjust it now.
	 */
	resettodr();
}
479 
/*
 * Machine-dependent reboot/halt.  `howto' carries the RB_* flags
 * (RB_NOSYNC, RB_HALT, RB_DUMP); `bootstr' is unused on this port.
 */
void
cpu_reboot(howto, bootstr)
	int	howto;
	char	*bootstr;
{
	/* take a snap shot before clobbering any registers */
	if (curproc && curproc->p_addr)
		savectx(&curproc->p_addr->u_pcb);

	boothowto = howto;
	/* Sync the disks unless the caller explicitly said not to. */
	if((howto & RB_NOSYNC) == 0)
		bootsync();

	/*
	 * Call shutdown hooks. Do this _before_ anything might be
	 * asked to the user in case nobody is there....
	 */
	doshutdownhooks();

	splhigh();			/* extreme priority */
	if(howto & RB_HALT) {
		printf("halted\n\n");
		/* Park the CPU: stop with all interrupts masked. */
		asm("	stop	#0x2700");
	}
	else {
		/* Optionally dump memory before rebooting. */
		if(howto & RB_DUMP)
			dumpsys();

		doboot();
		/*NOTREACHED*/
	}
	panic("Boot() should never come here");
	/*NOTREACHED*/
}
514 
515 #define	BYTES_PER_DUMP	NBPG		/* Must be a multiple of NBPG	*/
516 static vaddr_t	dumpspace;	/* Virt. space to map dumppages	*/
517 
518 /*
519  * Reserve _virtual_ memory to map in the page to be dumped
520  */
521 vaddr_t
522 reserve_dumppages(p)
523 vaddr_t	p;
524 {
525 	dumpspace = p;
526 	return(p + BYTES_PER_DUMP);
527 }
528 
u_int32_t	dumpmag  = 0x8fca0101;	/* magic number for savecore	*/
int		dumpsize = 0;		/* also for savecore (pages)	*/
long		dumplo   = 0;		/* (disk blocks)		*/

/*
 * Compute the size (pages) and starting disk-block offset of a
 * crash dump.  The dump covers every configured RAM segment in
 * boot_segs[] and is placed at the end of the dump device,
 * preceded by the cpu_dump() header.
 */
void
cpu_dumpconf()
{
	int	nblks, i;

	/* Sum the byte sizes of all memory segments, then convert
	 * to pages; an empty segment terminates the list. */
	for (i = dumpsize = 0; i < NMEM_SEGS; i++) {
		if (boot_segs[i].start == boot_segs[i].end)
			break;
		dumpsize += boot_segs[i].end - boot_segs[i].start;
	}
	dumpsize = btoc(dumpsize);

	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		/* Clip the dump to the device, or place it at the end. */
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			dumplo = nblks - btodb(ctob(dumpsize));
	}
	/* Leave room in front for the machine-dependent dump header. */
	dumplo -= cpu_dumpsize();

	/*
	 * Don't dump on the first NBPG (why NBPG?)
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < btodb(NBPG))
		dumplo = btodb(NBPG);
}
561 
562 /*
563  * Doadump comes here after turning off memory management and
564  * getting on the dump stack, either when called above, or by
565  * the auto-restart code.
566  */
567 void
568 dumpsys()
569 {
570 	daddr_t	blkno;		/* Current block to write	*/
571 	int	(*dump) __P((dev_t, daddr_t, caddr_t, size_t));
572 				/* Dumping function		*/
573 	u_long	maddr;		/* PA being dumped		*/
574 	int	segbytes;	/* Number of bytes in this seg.	*/
575 	int	segnum;		/* Segment we are dumping	*/
576 	int	nbytes;		/* Bytes left to dump		*/
577 	int	i, n, error;
578 
579 	error = segnum = 0;
580 	if (dumpdev == NODEV)
581 		return;
582 	/*
583 	 * For dumps during autoconfiguration,
584 	 * if dump device has already configured...
585 	 */
586 	if (dumpsize == 0)
587 		cpu_dumpconf();
588 	if (dumplo <= 0) {
589 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
590 		    minor(dumpdev));
591 		return;
592 	}
593 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
594 	    minor(dumpdev), dumplo);
595 
596 #if defined(DDB) || defined(PANICWAIT)
597 	printf("Do you want to dump memory? [y]");
598 	cnputc(i = cngetc());
599 	switch (i) {
600 		case 'n':
601 		case 'N':
602 			return;
603 		case '\n':
604 			break;
605 		default :
606 			cnputc('\n');
607 	}
608 #endif /* defined(DDB) || defined(PANICWAIT) */
609 
610 	maddr    = 0;
611 	segbytes = boot_segs[0].end;
612 	blkno    = dumplo;
613 	dump     = bdevsw[major(dumpdev)].d_dump;
614 	nbytes   = dumpsize * NBPG;
615 
616 	printf("dump ");
617 
618 	error = cpu_dump(dump, &blkno);
619 	if (!error) {
620 	    for (i = 0; i < nbytes; i += n, segbytes -= n) {
621 		/*
622 		 * Skip the hole
623 		 */
624 		if (segbytes == 0) {
625 		    segnum++;
626 		    maddr    = boot_segs[segnum].start;
627 		    segbytes = boot_segs[segnum].end - boot_segs[segnum].start;
628 		}
629 		/*
630 		 * Print Mb's to go
631 		 */
632 		n = nbytes - i;
633 		if (n && (n % (1024*1024)) == 0)
634 			printf("%d ", n / (1024 * 1024));
635 
636 		/*
637 		 * Limit transfer to BYTES_PER_DUMP
638 		 */
639 		if (n > BYTES_PER_DUMP)
640 			n = BYTES_PER_DUMP;
641 
642 		/*
643 		 * Map to a VA and write it
644 		 */
645 		if (maddr != 0) { /* XXX kvtop chokes on this	*/
646 			(void)pmap_map(dumpspace, maddr, maddr+n, VM_PROT_READ);
647 			error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
648 			if (error)
649 				break;
650 		}
651 
652 		maddr += n;
653 		blkno += btodb(n);
654 	    }
655 	}
656 	switch (error) {
657 
658 	case ENXIO:
659 		printf("device bad\n");
660 		break;
661 
662 	case EFAULT:
663 		printf("device not ready\n");
664 		break;
665 
666 	case EINVAL:
667 		printf("area improper\n");
668 		break;
669 
670 	case EIO:
671 		printf("i/o error\n");
672 		break;
673 
674 	default:
675 		printf("succeeded\n");
676 		break;
677 	}
678 	printf("\n\n");
679 	delay(5000000);		/* 5 seconds */
680 }
681 
682 /*
683  * Return the best possible estimate of the time in the timeval
684  * to which tvp points.  We do this by returning the current time
685  * plus the amount of time since the last clock interrupt (clock.c:clkread).
686  *
687  * Check that this time is no less than any previously-reported time,
688  * which could happen around the time of a clock adjustment.  Just for fun,
689  * we guarantee that the time will be greater than the value obtained by a
690  * previous call.
691  */
692 void microtime(tvp)
693 	register struct timeval *tvp;
694 {
695 	int s = splhigh();
696 	static struct timeval lasttime;
697 
698 	*tvp = time;
699 	tvp->tv_usec += clkread();
700 	while (tvp->tv_usec >= 1000000) {
701 		tvp->tv_sec++;
702 		tvp->tv_usec -= 1000000;
703 	}
704 	if (tvp->tv_sec == lasttime.tv_sec &&
705 	    tvp->tv_usec <= lasttime.tv_usec &&
706 	    (tvp->tv_usec = lasttime.tv_usec + 1) >= 1000000) {
707 		tvp->tv_sec++;
708 		tvp->tv_usec -= 1000000;
709 	}
710 	lasttime = *tvp;
711 	splx(s);
712 }
713 
714 void
715 straytrap(pc, evec)
716 int pc;
717 u_short evec;
718 {
719 	static int	prev_evec;
720 
721 	printf("unexpected trap (vector offset 0x%x) from 0x%x\n",
722 						evec & 0xFFF, pc);
723 
724 	if(prev_evec == evec) {
725 		delay(1000000);
726 		prev_evec = 0;
727 	}
728 	else prev_evec = evec;
729 }
730 
/*
 * Catch-all for unexpected MFP interrupts: log the vector offset
 * and the PC at the time of the interrupt, then carry on.
 */
void
straymfpint(pc, evec)
int		pc;
u_short	evec;
{
	printf("unexpected mfp-interrupt (vector offset 0x%x) from 0x%x\n",
	       evec & 0xFFF, pc);
}
739 
740 /*
741  * Simulated software interrupt handler
742  */
743 void
744 softint()
745 {
746 	if(ssir & SIR_NET) {
747 		siroff(SIR_NET);
748 		uvmexp.softs++;
749 		netintr();
750 	}
751 	if(ssir & SIR_CLOCK) {
752 		siroff(SIR_CLOCK);
753 		uvmexp.softs++;
754 		/* XXXX softclock(&frame.f_stackadj); */
755 		softclock(NULL);
756 	}
757 	if (ssir & SIR_CBACK) {
758 		siroff(SIR_CBACK);
759 		uvmexp.softs++;
760 		call_sicallbacks();
761 	}
762 }
763 
int	*nofault;	/* non-NULL: jump buffer for bus-error recovery */

/*
 * Probe `addr' with a read of the given width (1, 2 or 4 bytes)
 * without letting a bus error be fatal.  Returns 1 if the access
 * faulted, 0 if it succeeded.  While `nofault' points at our local
 * jump buffer, the trap code (elsewhere) is expected to longjmp()
 * back here on a bus error instead of panicking.
 */
int
badbaddr(addr, size)
	register caddr_t addr;
	int		 size;
{
	register int i;
	label_t	faultbuf;

#ifdef lint
	i = *addr; if (i) return(0);
#endif
	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		/* We got here via longjmp: the probe faulted. */
		nofault = (int *) 0;
		return(1);
	}
	switch (size) {
		case 1:
			i = *(volatile char *)addr;
			break;
		case 2:
			i = *(volatile short *)addr;
			break;
		case 4:
			i = *(volatile long *)addr;
			break;
		default:
			panic("badbaddr: unknown size");
	}
	/* No fault occurred; disarm the recovery hook. */
	nofault = (int *) 0;
	return(0);
}
798 
799 /*
800  * Network interrupt handling
801  */
802 static void
803 netintr()
804 {
805 #define DONETISR(bit, fn) do {			\
806 	if (netisr & (1 << bit)) {		\
807 		netisr &= ~(1 << bit);		\
808 		fn();				\
809 	}					\
810 } while (0)
811 
812 #include <net/netisr_dispatch.h>
813 
814 #undef DONETISR
815 }
816 
817 
818 /*
819  * this is a handy package to have asynchronously executed
820  * function calls executed at very low interrupt priority.
821  * Example for use is keyboard repeat, where the repeat
822  * handler running at splclock() triggers such a (hardware
823  * aided) software interrupt.
824  * Note: the installed functions are currently called in a
825  * LIFO fashion, might want to change this to FIFO
826  * later.
827  */
828 struct si_callback {
829 	struct si_callback *next;
830 	void (*function) __P((void *rock1, void *rock2));
831 	void *rock1, *rock2;
832 };
833 static struct si_callback *si_callbacks;
834 static struct si_callback *si_free;
835 #ifdef DIAGNOSTIC
836 static int ncbd;	/* number of callback blocks dynamically allocated */
837 #endif
838 
839 void add_sicallback (function, rock1, rock2)
840 void	(*function) __P((void *rock1, void *rock2));
841 void	*rock1, *rock2;
842 {
843 	struct si_callback	*si;
844 	int			s;
845 
846 	/*
847 	 * this function may be called from high-priority interrupt handlers.
848 	 * We may NOT block for  memory-allocation in here!.
849 	 */
850 	s  = splhigh();
851 	if((si = si_free) != NULL)
852 		si_free = si->next;
853 	splx(s);
854 
855 	if(si == NULL) {
856 		si = (struct si_callback *)malloc(sizeof(*si),M_TEMP,M_NOWAIT);
857 #ifdef DIAGNOSTIC
858 		if(si)
859 			++ncbd;		/* count # dynamically allocated */
860 #endif
861 		if(!si)
862 			return;
863 	}
864 
865 	si->function = function;
866 	si->rock1    = rock1;
867 	si->rock2    = rock2;
868 
869 	s = splhigh();
870 	si->next     = si_callbacks;
871 	si_callbacks = si;
872 	splx(s);
873 
874 	/*
875 	 * and cause a software interrupt (spl1). This interrupt might
876 	 * happen immediately, or after returning to a safe enough level.
877 	 */
878 	setsoftcback();
879 }
880 
/*
 * Remove every pending callback whose function pointer matches
 * `function', returning its block to the free list.  The whole
 * scan runs at splhigh() so the list cannot change underneath us.
 */
void rem_sicallback(function)
void (*function) __P((void *rock1, void *rock2));
{
	struct si_callback	*si, *psi, *nsi;
	int			s;

	s = splhigh();
	/* psi trails si so we can unlink matches in mid-list. */
	for(psi = 0, si = si_callbacks; si; ) {
		nsi = si->next;

		if(si->function != function)
			psi = si;
		else {
			/* Matched: move the block to the free list. */
			si->next = si_free;
			si_free  = si;
			if(psi)
				psi->next = nsi;
			else si_callbacks = nsi;
		}
		si = nsi;
	}
	splx(s);
}
904 
/*
 * Purge the list: pop and run each pending callback.  Each entry is
 * detached and recycled under splhigh(), but the callback itself is
 * invoked with interrupts enabled.  If more work remains when an
 * entry is recycled, the soft interrupt is re-armed first.
 */
static void call_sicallbacks()
{
	struct si_callback	*si;
	int			s;
	void			*rock1, *rock2;
	void			(*function) __P((void *, void *));

	do {
		/* Pop one entry off the pending list atomically. */
		s = splhigh ();
		if ((si = si_callbacks) != NULL)
			si_callbacks = si->next;
		splx(s);

		if (si) {
			/* Copy out the call before recycling the block. */
			function = si->function;
			rock1    = si->rock1;
			rock2    = si->rock2;
			s = splhigh ();
			if(si_callbacks)
				setsoftcback();
			si->next = si_free;
			si_free  = si;
			splx(s);
			function(rock1, rock2);
		}
	} while (si);
#ifdef DIAGNOSTIC
	if (ncbd) {
#ifdef DEBUG
		printf("call_sicallback: %d more dynamic structures\n", ncbd);
#endif
		ncbd = 0;
	}
#endif
}
941 
/* Panic-button support is always enabled in DEBUG kernels. */
#if defined(DEBUG) && !defined(PANICBUTTON)
#define PANICBUTTON
#endif

#ifdef PANICBUTTON
int panicbutton = 1;	/* non-zero if panic buttons are enabled */
int crashandburn = 0;	/* cleared again by candbtimer() below */
int candbdelay = 50;	/* give em half a second */

void candbtimer __P((void));

/*
 * Timeout handler: cancel a pending crash-and-burn request.
 */
void
candbtimer()
{
	crashandburn = 0;
}
#endif
959 
960 /*
961  * should only get here, if no standard executable. This can currently
962  * only mean, we're reading an old ZMAGIC file without MID, but since Atari
963  * ZMAGIC always worked the `right' way (;-)) just ignore the missing
964  * MID and proceed to new zmagic code ;-)
965  */
966 int
967 cpu_exec_aout_makecmds(p, epp)
968 	struct proc *p;
969 	struct exec_package *epp;
970 {
971 	int error = ENOEXEC;
972 #ifdef COMPAT_NOMID
973 	struct exec *execp = epp->ep_hdr;
974 #endif
975 
976 #ifdef COMPAT_NOMID
977 	if (!((execp->a_midmag >> 16) & 0x0fff)
978 	    && execp->a_midmag == ZMAGIC)
979 		return(exec_aout_prep_zmagic(p, epp));
980 #endif
981 	return(error);
982 }
983 
#ifdef _MILANHW_

/*
 * Currently the only source of NMI interrupts on the Milan is the PLX9080.
 * On access errors to the PCI bus, an NMI is generated. This NMI is shorted
 * in locore in case of a PCI config cycle to a non-existing address to allow
 * for probes. On other occaisions, it ShouldNotHappen(TM).
 * Note: The handler in locore clears the errors, to make further PCI access
 * possible.
 */
void
nmihandler()
{
	/* Saved PLX9080 status, captured by the locore NMI stub. */
	extern unsigned long	plx_status;

	printf("nmihandler: plx_status = 0x%08lx\n", plx_status);
}
#endif
1002