xref: /netbsd/sys/arch/x68k/x68k/machdep.c (revision bf9ec67e)
1 /*	$NetBSD: machdep.c,v 1.105 2002/03/20 17:59:27 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
41  *
42  *	@(#)machdep.c	8.10 (Berkeley) 4/20/94
43  */
44 
45 #include "opt_ddb.h"
46 #include "opt_kgdb.h"
47 #include "opt_compat_netbsd.h"
48 #include "opt_m680x0.h"
49 #include "opt_fpu_emulate.h"
50 #include "opt_m060sp.h"
51 #include "opt_panicbutton.h"
52 #include "opt_extmem.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/callout.h>
57 #include <sys/signalvar.h>
58 #include <sys/kernel.h>
59 #include <sys/map.h>
60 #include <sys/proc.h>
61 #include <sys/buf.h>
62 #include <sys/reboot.h>
63 #include <sys/conf.h>
64 #include <sys/file.h>
65 #include <sys/clist.h>
66 #include <sys/malloc.h>
67 #include <sys/mbuf.h>
68 #include <sys/msgbuf.h>
69 #include <sys/ioctl.h>
70 #include <sys/tty.h>
71 #include <sys/mount.h>
72 #include <sys/user.h>
73 #include <sys/exec.h>
74 #include <sys/vnode.h>
75 #include <sys/syscallargs.h>
76 #include <sys/core.h>
77 #include <sys/kcore.h>
78 
79 #if defined(DDB) && defined(__ELF__)
80 #include <sys/exec_elf.h>
81 #endif
82 
83 #include <net/netisr.h>
84 #undef PS	/* XXX netccitt/pk.h conflict with machine/reg.h? */
85 
86 #include <machine/db_machdep.h>
87 #include <ddb/db_sym.h>
88 #include <ddb/db_extern.h>
89 
90 #include <machine/cpu.h>
91 #include <machine/reg.h>
92 #include <machine/psl.h>
93 #include <machine/pte.h>
94 #include <machine/kcore.h>
95 
96 #include <dev/cons.h>
97 
98 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
99 #include <uvm/uvm_extern.h>
100 
101 #include <sys/sysctl.h>
102 
103 #include <sys/device.h>
104 
105 #include <machine/bus.h>
106 #include <arch/x68k/dev/intiovar.h>
107 
/* Forward declarations for routines defined below / in locore.s. */
void initcpu __P((void));
void identifycpu __P((void));
void doboot __P((void))
    __attribute__((__noreturn__));
int badaddr __P((caddr_t));
int badbaddr __P((caddr_t));

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;	/* from <machine/param.h> */

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

/* Kernel submaps carved out of kernel_map in cpu_startup(). */
struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

/* Physical/virtual memory bounds established by pmap_bootstrap(). */
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail;
extern u_int lowram;
extern int end, *esym;		/* kernel image end / symbol table end */

caddr_t	msgbufaddr;		/* VA of the kernel message buffer */
int	maxmem;			/* max memory per process */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

/*
 * prototypes for local functions
 * NOTE(review): identifycpu/initcpu are also declared above — the
 * duplication is harmless but redundant.
 */
void    identifycpu __P((void));
void    initcpu __P((void));
int	cpu_dumpsize __P((void));
int	cpu_dump __P((int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t *));
void	cpu_init_kcore_hdr __P((void));
#ifdef EXTENDED_MEMORY
static int mem_exists __P((caddr_t, u_long));
static void setmemrange __P((void));
#endif

/* functions called from locore.s */
void	dumpsys __P((void));
void    straytrap __P((int, u_short));
void	nmihand __P((struct frame));
void	intrhand __P((int));

/*
 * On the 68020/68030, the value of delay_divisor is roughly
 * 2048 / cpuspeed (where cpuspeed is in MHz).
 *
 * On the 68040, the value of delay_divisor is roughly
 * 759 / cpuspeed (where cpuspeed is in MHz).
 *
 * On the 68060, the value of delay_divisor is reported to be
 * 128 / cpuspeed (where cpuspeed is in MHz).
 */
int	delay_divisor = 140;	/* assume some reasonable value to start */
static int cpuspeed;		/* MPU clock (in MHz) */

/*
 * Machine-dependent crash dump header info.
 */
cpu_kcore_hdr_t cpu_kcore_hdr;
174 
175 /*
176  * Console initialization: called early on from main,
177  * before vm init or startup.  Do enough configuration
178  * to choose and initialize a console.
179  */
void
consinit()
{
	/*
	 * bring graphics layer up.
	 */
	config_console();

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#ifdef KGDB
	zs_kgdb_init();			/* XXX */
#endif
#ifdef DDB
#ifndef __ELF__
	/* a.out: symbol count at `end', symbols follow immediately */
	ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
#else
	/*
	 * ELF: symbol data lives between `end' and esym, preceded by
	 * an ELF header.  NOTE(review): assumes esym points past a
	 * valid symbol image set up by the boot loader — confirm
	 * against locore.s.
	 */
	ddb_init((int)esym - (int)&end - sizeof(Elf32_Ehdr),
		 (void *)&end, esym);
#endif
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/*
	 * Tell the VM system about available physical memory.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
			atop(avail_start), atop(avail_end),
			VM_FREELIST_DEFAULT);
#ifdef EXTENDED_MEMORY
	/* Probe for and register add-on RAM boards as well. */
	setmemrange();
#endif
}
217 
218 /*
219  * cpu_startup: allocate memory for variable-sized tables,
220  * initialize cpu, and do autoconfiguration.
221  */
void
cpu_startup()
{
	unsigned i;
	caddr_t v;
	int base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char pbuf[9];		/* scratch for format_bytes() output */
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;		/* quiet pmap tracing during startup */
#endif
#if 0
	rtclockinit(); /* XXX */
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
		    avail_end + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));

	/*
	 * Initialize the kernel crash dump header.
	 */
	cpu_init_kcore_hdr();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 * (First allocsys() call only sizes; second actually carves up v.)
	 */
	size = (vm_size_t)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != size)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
#if 0
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}
#endif
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vsize_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
				       VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 FALSE, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
359 
360 /*
361  * Set registers on exec.
362  */
363 void
364 setregs(p, pack, stack)
365 	struct proc *p;
366 	struct exec_package *pack;
367 	u_long stack;
368 {
369 	struct frame *frame = (struct frame *)p->p_md.md_regs;
370 
371 	frame->f_sr = PSL_USERSET;
372 	frame->f_pc = pack->ep_entry & ~1;
373 	frame->f_regs[D0] = 0;
374 	frame->f_regs[D1] = 0;
375 	frame->f_regs[D2] = 0;
376 	frame->f_regs[D3] = 0;
377 	frame->f_regs[D4] = 0;
378 	frame->f_regs[D5] = 0;
379 	frame->f_regs[D6] = 0;
380 	frame->f_regs[D7] = 0;
381 	frame->f_regs[A0] = 0;
382 	frame->f_regs[A1] = 0;
383 	frame->f_regs[A2] = (int)p->p_psstr;
384 	frame->f_regs[A3] = 0;
385 	frame->f_regs[A4] = 0;
386 	frame->f_regs[A5] = 0;
387 	frame->f_regs[A6] = 0;
388 	frame->f_regs[SP] = stack;
389 
390 	/* restore a null state frame */
391 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
392 	if (fputype)
393 		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
394 }
395 
396 /*
397  * Info for CTL_HW
398  */
char	cpu_model[96];		/* max 85 chars */
/* FPU description strings, indexed by fputype (see identifycpu()). */
static char *fpu_descr[] = {
#ifdef	FPU_EMULATE
	", emulator FPU", 	/* 0 */
#else
	", no math support",	/* 0 */
#endif
	", m68881 FPU",		/* 1 */
	", m68882 FPU",		/* 2 */
	"/FPU",			/* 3 */
	"/FPU",			/* 4 */
	};
411 
412 void
413 identifycpu()
414 {
415         /* there's alot of XXX in here... */
416 	char *cpu_type, *mach, *mmu, *fpu;
417 	char clock[16];
418 
419 	/*
420 	 * check machine type constant
421 	 */
422 	switch (intio_get_sysport_mpustat()) {
423 	case 0xdc:
424 		/*
425 		 * CPU Type == 68030, Clock == 25MHz
426 		 */
427 		mach = "030";
428 		break;
429 	case 0xfe:
430 		/*
431 		 * CPU Type == 68000, Clock == 16MHz
432 		 */
433 		mach = "000XVI";
434 		break;
435 	case 0xff:
436 		/*
437 		 * CPU Type == 68000, Clock == 10MHz
438 		 */
439 		mach = "000/ACE/PRO/EXPERT/SUPER";
440 		break;
441 	default:
442 		/*
443 		 * unknown type
444 		 */
445 		mach = "000?(unknown model)";
446 		break;
447 	}
448 
449 	cpuspeed = 2048 / delay_divisor;
450 	sprintf(clock, "%dMHz", cpuspeed);
451 	switch (cputype) {
452 	case CPU_68060:
453 		cpu_type = "m68060";
454 		mmu = "/MMU";
455 		cpuspeed = 128 / delay_divisor;
456 		sprintf(clock, "%d/%dMHz", cpuspeed*2, cpuspeed);
457 		break;
458 	case CPU_68040:
459 		cpu_type = "m68040";
460 		mmu = "/MMU";
461 		cpuspeed = 759 / delay_divisor;
462 		sprintf(clock, "%d/%dMHz", cpuspeed*2, cpuspeed);
463 		break;
464 	case CPU_68030:
465 		cpu_type = "m68030";
466 		mmu = "/MMU";
467 		break;
468 	case CPU_68020:
469 		cpu_type = "m68020";
470 		mmu = ", m68851 MMU";
471 		break;
472 	default:
473 		cpu_type = "unknown";
474 		mmu = ", unknown MMU";
475 		break;
476 	}
477 	if (fputype >= 0 && fputype < sizeof(fpu_descr)/sizeof(fpu_descr[0]))
478 		fpu = fpu_descr[fputype];
479 	else
480 		fpu = ", unknown FPU";
481 	sprintf(cpu_model, "X68%s (%s CPU%s%s, %s clock)",
482 		mach, cpu_type, mmu, fpu, clock);
483 	printf("%s\n", cpu_model);
484 }
485 
486 /*
487  * machine dependent system variables.
488  */
489 int
490 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
491 	int *name;
492 	u_int namelen;
493 	void *oldp;
494 	size_t *oldlenp;
495 	void *newp;
496 	size_t newlen;
497 	struct proc *p;
498 {
499 	dev_t consdev;
500 
501 	/* all sysctl names at this level are terminal */
502 	if (namelen != 1)
503 		return (ENOTDIR);		/* overloaded */
504 
505 	switch (name[0]) {
506 	case CPU_CONSDEV:
507 		if (cn_tab != NULL)
508 			consdev = cn_tab->cn_dev;
509 		else
510 			consdev = NODEV;
511 		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
512 		    sizeof consdev));
513 	default:
514 		return (EOPNOTSUPP);
515 	}
516 	/* NOTREACHED */
517 }
518 
int	waittime = -1;		/* -1 until cpu_reboot() has synced disks */
int	power_switch_is_off = 0; /* set by shutdown hook when switch is off */
521 
void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{
	/* take a snap shot before clobbering any registers */
	if (curproc && curproc->p_addr)
		savectx(&curproc->p_addr->u_pcb);

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		/*resettodr();*/
	}

	/* Disable interrupts. */
	splhigh();

	if (howto & RB_DUMP)
		dumpsys();

	/* Run any shutdown hooks. */
	doshutdownhooks();

#if defined(PANICWAIT) && !defined(DDB)
	if ((howto & RB_HALT) == 0 && panicstr) {
		printf("hit any key to reboot...\n");
		(void)cngetc();
		printf("\n");
	}
#endif

	/* Finally, halt/reboot the system. */
	/* a) RB_POWERDOWN
	 *  a1: the power switch is still on
	 *	Power cannot be removed; simply halt the system (b)
	 *	Power switch state is checked in shutdown hook
	 *  a2: the power switch is off
	 *	Remove the power; the simplest way is go back to ROM eg. reboot
	 * b) RB_HALT
	 *      call cngetc
         * c) otherwise
	 *	Reboot
	*/
	if (((howto & RB_POWERDOWN) == RB_POWERDOWN) && power_switch_is_off)
		doboot();
	else if (/*((howto & RB_POWERDOWN) == RB_POWERDOWN) ||*/
		 ((howto & RB_HALT) == RB_HALT)) {
		printf("System halted.  Hit any key to reboot.\n\n");
		(void)cngetc();
	}

	printf("rebooting...\n");
	DELAY(1000000);
	doboot();
	/*NOTREACHED*/
}
584 
585 /*
586  * Initialize the kernel crash dump header.
587  */
void
cpu_init_kcore_hdr()
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	int i;

	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = NBPG;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int32_t)(pmap_kernel()->pm_stpa);

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = lowram;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)&end;

	/*
	 * X68k has multiple RAM segments on some models.
	 * Segment 0 is main RAM; the rest come from vm_physmem
	 * (extended memory registered by setmemrange()).
	 */
	m->ram_segs[0].start = lowram;
	m->ram_segs[0].size = mem_size - lowram;
	for (i = 1; i < vm_nphysseg; i++) {
		m->ram_segs[i].start = ctob(vm_physmem[i].start);
		m->ram_segs[i].size  = ctob(vm_physmem[i].end
					    - vm_physmem[i].start);
	}
}
650 
651 /*
652  * Compute the size of the machine-dependent crash dump header.
653  * Returns size in disk blocks.
654  */
655 int
656 cpu_dumpsize()
657 {
658 	int size;
659 
660 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
661 	return (btodb(roundup(size, dbtob(1))));
662 }
663 
664 /*
665  * Called by dumpsys() to dump the machine-dependent header.
666  */
/*
 * dump:   block-device dump routine to write through.
 * blknop: in/out disk block cursor; advanced past the header.
 * Returns the error code from the dump routine (0 on success).
 */
int
cpu_dump(dump, blknop)
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	daddr_t *blknop;
{
	/* One disk block's worth of buffer holds both headers. */
	int buf[dbtob(1) / sizeof(int)];
	cpu_kcore_hdr_t *chdr;
	kcore_seg_t *kseg;
	int error;

	kseg = (kcore_seg_t *)buf;
	chdr = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(kcore_seg_t)) /
	    sizeof(int)];

	/* Create the segment header. */
	CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg->c_size = dbtob(1) - ALIGN(sizeof(kcore_seg_t));

	memcpy(chdr, &cpu_kcore_hdr, sizeof(cpu_kcore_hdr_t));
	error = (*dump)(dumpdev, *blknop, (caddr_t)buf, sizeof(buf));
	*blknop += btodb(sizeof(buf));
	return (error);
}
690 
691 /*
692  * These variables are needed by /sbin/savecore
693  */
u_int32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages to dump; set by cpu_dumpconf() */
long	dumplo = 0;		/* start block; -1 if dump won't fit */
697 
698 /*
699  * This is called by main to set dumplo and dumpsize.
700  * Dumps always skip the first NBPG of disk space in
701  * case there might be a disk label stored there.  If there
702  * is extra space, put dump at the end to reduce the chance
703  * that swapping trashes it.
704  */
705 void
706 cpu_dumpconf()
707 {
708 	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
709 	struct m68k_kcore_hdr *m = &h->un._m68k;
710 	int chdrsize;	/* size of dump header */
711 	int nblks;	/* size of dump area */
712 	int maj;
713 	int i;
714 
715 	if (dumpdev == NODEV)
716 		return;
717 
718 	maj = major(dumpdev);
719 	if (maj < 0 || maj >= nblkdev)
720 		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
721 	if (bdevsw[maj].d_psize == NULL)
722 		return;
723 	nblks = (*bdevsw[maj].d_psize)(dumpdev);
724 	chdrsize = cpu_dumpsize();
725 
726 	dumpsize = 0;
727 	for (i = 0; m->ram_segs[i].size && i < M68K_NPHYS_RAM_SEGS; i++)
728 		dumpsize += btoc(m->ram_segs[i].size);
729 	/*
730 	 * Check to see if we will fit.  Note we always skip the
731 	 * first NBPG in case there is a disk label there.
732 	 */
733 	if (nblks < (ctod(dumpsize) + chdrsize + ctod(1))) {
734 		dumpsize = 0;
735 		dumplo = -1;
736 		return;
737 	}
738 
739 	/*
740 	 * Put dump at the end of the partition.
741 	 */
742 	dumplo = (nblks - 1) - ctod(dumpsize) - chdrsize;
743 }
744 
void
dumpsys()
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	daddr_t blkno;		/* current block to write */
				/* dump routine */
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int pg;			/* page being dumped */
	paddr_t maddr;		/* PA being dumped */
	int seg;		/* RAM segment being dumped */
	int error;		/* error code from (*dump)() */

	/* XXX initialized here because of gcc lossage */
	seg = 0;
	maddr = m->ram_segs[seg].start;
	pg = 0;

	/* Make sure dump device is valid. */
	if (dumpdev == NODEV)
		return;
	if (dumpsize == 0) {
		cpu_dumpconf();
		if (dumpsize == 0)
			return;
	}
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	dump = bdevsw[major(dumpdev)].d_dump;
	blkno = dumplo;

	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	printf("dump ");

	/* Write the dump header. */
	error = cpu_dump(dump, &blkno);
	if (error)
		goto bad;	/* jumps into the loop's error switch below */

	for (pg = 0; pg < dumpsize; pg++) {
#define NPGMB	(1024*1024/NBPG)
		/* print out how many MBs we have dumped */
		if (pg && (pg % NPGMB) == 0)
			printf("%d ", pg / NPGMB);
#undef NPGMB
		if (maddr == 0) {
			/* Skip first page */
			maddr += NBPG;
			blkno += btodb(NBPG);
			continue;
		}
		/* Advance to the segment containing maddr. */
		while (maddr >=
		    (m->ram_segs[seg].start + m->ram_segs[seg].size)) {
			if (++seg >= M68K_NPHYS_RAM_SEGS ||
			    m->ram_segs[seg].size == 0) {
				error = EINVAL;		/* XXX ?? */
				goto bad;
			}
			maddr = m->ram_segs[seg].start;
		}
		/* Map the page at vmmap so (*dump)() can read it. */
		pmap_enter(pmap_kernel(), (vaddr_t)vmmap, maddr,
		    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
		pmap_update(pmap_kernel());

		error = (*dump)(dumpdev, blkno, vmmap, NBPG);
		/*
		 * NOTE: the `bad' label is deliberately INSIDE the loop:
		 * error==0 falls through to advance maddr/blkno; any
		 * other value prints a diagnostic and returns.
		 */
 bad:
		switch (error) {
		case 0:
			maddr += NBPG;
			blkno += btodb(NBPG);
			break;

		case ENXIO:
			printf("device bad\n");
			return;

		case EFAULT:
			printf("device not ready\n");
			return;

		case EINVAL:
			printf("area improper\n");
			return;

		case EIO:
			printf("i/o error\n");
			return;

		case EINTR:
			printf("aborted from console\n");
			return;

		default:
			printf("error %d\n", error);
			return;
		}
	}
	printf("succeeded\n");
}
849 
void
initcpu()
{
	/* XXX should init '40 vecs here, too */
#if defined(M68060)
	extern caddr_t vectab[256];
#if defined(M060SP)
	/* 68060 software support package entry tables (from locore) */
	extern u_int8_t I_CALL_TOP[];
	extern u_int8_t FP_CALL_TOP[];
#else
	extern u_int8_t illinst;
#endif
	extern u_int8_t fpfault;
#endif

#ifdef MAPPEDCOPY

	/*
	 * Initialize lower bound for doing copyin/copyout using
	 * page mapping (if not already set).  We don't do this on
	 * VAC machines as it loses big time.
	 */
	if ((int) mappedcopysize == -1) {
		mappedcopysize = NBPG;
	}
#endif

#if defined(M68060)
	if (cputype == CPU_68060) {
		/*
		 * Point the unimplemented-instruction/FP vectors at the
		 * 060 software support package.  The offsets are the
		 * entry points within I_CALL_TOP/FP_CALL_TOP —
		 * NOTE(review): assumed to match the M060SP layout;
		 * confirm against the 060sp sources.
		 */
#if defined(M060SP)
		/* integer support */
		vectab[61] = &I_CALL_TOP[128 + 0x00];

		/* floating point support */
		vectab[11] = &FP_CALL_TOP[128 + 0x30];
		vectab[55] = &FP_CALL_TOP[128 + 0x38];
		vectab[60] = &FP_CALL_TOP[128 + 0x40];

		vectab[54] = &FP_CALL_TOP[128 + 0x00];
		vectab[52] = &FP_CALL_TOP[128 + 0x08];
		vectab[53] = &FP_CALL_TOP[128 + 0x10];
		vectab[51] = &FP_CALL_TOP[128 + 0x18];
		vectab[50] = &FP_CALL_TOP[128 + 0x20];
		vectab[49] = &FP_CALL_TOP[128 + 0x28];
#else
		vectab[61] = &illinst;
#endif
		vectab[48] = &fpfault;
	}
	DCIS();
#endif
}
902 
903 void
904 straytrap(pc, evec)
905 	int pc;
906 	u_short evec;
907 {
908 	printf("unexpected trap (vector offset %x) from %x\n",
909 	       evec & 0xFFF, pc);
910 #if defined(DDB)
911 	Debugger();
912 #endif
913 }
914 
/*
 * While non-NULL, points at a label_t that the bus-error handler is
 * expected to longjmp() through instead of panicking — see badaddr(),
 * badbaddr() and mem_exists().  (Handler itself lives outside this file.)
 */
int	*nofault;
916 
917 int
918 badaddr(addr)
919 	caddr_t addr;
920 {
921 	int i;
922 	label_t	faultbuf;
923 
924 	nofault = (int *) &faultbuf;
925 	if (setjmp((label_t *)nofault)) {
926 		nofault = (int *) 0;
927 		return(1);
928 	}
929 	i = *(volatile short *)addr;
930 	nofault = (int *) 0;
931 	return(0);
932 }
933 
934 int
935 badbaddr(addr)
936 	caddr_t addr;
937 {
938 	int i;
939 	label_t	faultbuf;
940 
941 	nofault = (int *) &faultbuf;
942 	if (setjmp((label_t *)nofault)) {
943 		nofault = (int *) 0;
944 		return(1);
945 	}
946 	i = *(volatile char *)addr;
947 	nofault = (int *) 0;
948 	return(0);
949 }
950 
void netintr __P((void));

/*
 * Software network interrupt dispatcher: for each protocol whose bit
 * is set in `netisr', clear the bit and call its handler.  The actual
 * list of (bit, fn) pairs is expanded from <net/netisr_dispatch.h>
 * via the DONETISR macro below.
 */
void
netintr()
{

#define DONETISR(bit, fn) do {		\
	if (netisr & (1 << bit)) {	\
		netisr &= ~(1 << bit);	\
		fn();			\
	}				\
} while (0)

#include <net/netisr_dispatch.h>

#undef DONETISR
}
968 
void
intrhand(sr)
	int sr;
{
	/* Called from locore for an interrupt nobody claims. */
	printf("intrhand: unexpected sr 0x%x\n", sr);
}
975 
/* Panic button ("crash and burn") support is implied by DDB/DEBUG. */
#if (defined(DDB) || defined(DEBUG)) && !defined(PANICBUTTON)
#define PANICBUTTON
#endif

#ifdef PANICBUTTON
int panicbutton = 1;	/* non-zero if panic buttons are enabled */
int crashandburn = 0;	/* armed by first NMI; second NMI panics */
int candbdelay = 50;	/* give em half a second */
void candbtimer __P((void *));

#ifndef DDB
static struct callout candbtimer_ch = CALLOUT_INITIALIZER;
#endif

/*
 * Callout handler: disarm the panic button if the second press
 * didn't arrive within candbdelay ticks.
 */
void
candbtimer(arg)
	void *arg;
{

	crashandburn = 0;
}
#endif
998 
999 /*
1000  * Level 7 interrupts can be caused by the keyboard or parity errors.
1001  */
void
nmihand(frame)
	struct frame frame;
{
	/* Acknowledge/unmask the keyboard NMI via the system port.
	 * NOTE(review): bit 0x04 meaning taken on faith — confirm
	 * against intiovar.h / hardware docs. */
	intio_set_sysport_keyctrl(intio_get_sysport_keyctrl() | 0x04);

	/* NOTE(review): `if (1)' makes the trailing printf below
	 * unreachable; kept as-is. */
	if (1) {
#ifdef PANICBUTTON
		static int innmihand = 0;

		/*
		 * Attempt to reduce the window of vulnerability for recursive
		 * NMIs (e.g. someone holding down the keyboard reset button).
		 */
		if (innmihand == 0) {
			innmihand = 1;
			printf("Got a keyboard NMI\n");
			innmihand = 0;
		}
#ifdef DDB
		Debugger();
#else
		if (panicbutton) {
			if (crashandburn) {
				crashandburn = 0;
				panic(panicstr ?
				      "forced crash, nosync" : "forced crash");
			}
			crashandburn++;
			callout_reset(&candbtimer_ch, candbdelay,
			    candbtimer, NULL);
		}
#endif /* DDB */
#endif /* PANICBUTTON */
		return;
	}
	/* panic?? */
	printf("unexpected level 7 interrupt ignored\n");
}
1041 
1042 /*
1043  * cpu_exec_aout_makecmds():
1044  *	cpu-dependent a.out format hook for execve().
1045  *
1046  * Determine of the given exec package refers to something which we
1047  * understand and, if so, set up the vmcmds for it.
1048  *
1049  * XXX what are the special cases for the hp300?
1050  * XXX why is this COMPAT_NOMID?  was something generating
1051  *	hp300 binaries with an a_mid of 0?  i thought that was only
1052  *	done on little-endian machines...  -- cgd
1053  */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
#if defined(COMPAT_NOMID) || defined(COMPAT_44)
	u_long midmag, magic;
	u_short mid;
	int error;
	struct exec *execp = epp->ep_hdr;

	/* Split the a.out midmag field into machine-id and magic. */
	midmag = ntohl(execp->a_midmag);
	mid = (midmag >> 16) & 0xffff;
	magic = midmag & 0xffff;

	/* Recombine in host order for the switch below. */
	midmag = mid << 16 | magic;

	switch (midmag) {
#ifdef COMPAT_NOMID
	case (MID_ZERO << 16) | ZMAGIC:
		error = exec_aout_prep_oldzmagic(p, epp);
		break;
#endif
#ifdef COMPAT_44
	case (MID_HP300 << 16) | ZMAGIC:
		error = exec_aout_prep_oldzmagic(p, epp);
		break;
#endif
	default:
		error = ENOEXEC;
	}

	return error;
#else /* !(defined(COMPAT_NOMID) || defined(COMPAT_44)) */
	return ENOEXEC;
#endif
}
1091 
1092 #ifdef EXTENDED_MEMORY
#ifdef EM_DEBUG
static int em_debug = 0;
#define DPRINTF(str) do{ if (em_debug) printf str; } while (0);
#else
#define DPRINTF(str)
#endif

/*
 * Known extended-memory boards: physical base address plus the
 * minimum and maximum sizes to probe (stepped in setmemrange()).
 */
static struct memlist {
	caddr_t base;
	psize_t min;
	psize_t max;
} memlist[] = {
	/* TS-6BE16 16MB memory */
	{(caddr_t)0x01000000, 0x01000000, 0x01000000},
	/* 060turbo SIMM slot (4--128MB) */
	{(caddr_t)0x10000000, 0x00400000, 0x08000000},
};
/* Scratch VAs used by mem_exists() for the target and shadow pages. */
static vaddr_t mem_v, base_v;
1111 
1112 /*
1113  * check memory existency
1114  */
/*
 * Probe whether real memory exists at physical address `mem'.
 * `basemax' is the top of known base RAM; the 24-bit alias of `mem'
 * (x68k hardware normally decodes only 24 address bits) is checked
 * against it so we can tell true extended RAM from a mirror of base
 * RAM.  Returns 1 if distinct memory responds at `mem', else 0.
 */
static int
mem_exists(mem, basemax)
	caddr_t mem;
	u_long basemax;
{
	/* most variables must be register! */
	register volatile unsigned char *m, *b;
	register unsigned char save_m, save_b;
	register int baseismem;
	register int exists = 0;
	caddr_t base;
	caddr_t begin_check, end_check;
	label_t	faultbuf;

	DPRINTF (("Enter mem_exists(%p, %x)\n", mem, basemax));
	DPRINTF ((" pmap_enter(%p, %p) for target... ", mem_v, mem));
	pmap_enter(pmap_kernel(), mem_v, (paddr_t)mem,
		   VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|PMAP_WIRED);
	pmap_update(pmap_kernel());
	DPRINTF ((" done.\n"));

	/* only 24bits are significant on normal X680x0 systems */
	base = (caddr_t)((u_long)mem & 0x00FFFFFF);
	DPRINTF ((" pmap_enter(%p, %p) for shadow... ", base_v, base));
	pmap_enter(pmap_kernel(), base_v, (paddr_t)base,
		   VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|PMAP_WIRED);
	pmap_update(pmap_kernel());
	DPRINTF ((" done.\n"));

	m = (void*)mem_v;
	b = (void*)base_v;

	/* This is somewhat paranoid -- avoid overwriting myself */
	asm("lea %%pc@(begin_check_mem),%0" : "=a"(begin_check));
	asm("lea %%pc@(end_check_mem),%0" : "=a"(end_check));
	if (base >= begin_check && base < end_check) {
		size_t off = end_check - begin_check;

		DPRINTF ((" Adjusting the testing area.\n"));
		m -= off;
		b -= off;
	}

	/* Arm the bus-error recovery point; a fault means no memory. */
	nofault = (int *) &faultbuf;
	if (setjmp ((label_t *)nofault)) {
		nofault = (int *) 0;
		pmap_remove(pmap_kernel(), mem_v, mem_v+NBPG);
		pmap_remove(pmap_kernel(), base_v, base_v+NBPG);
		pmap_update(pmap_kernel());
		DPRINTF (("Fault!!! Returning 0.\n"));
		return 0;
	}

	DPRINTF ((" Let's begin. mem=%p, base=%p, m=%p, b=%p\n",
		  mem, base, m, b));

	(void) *m;
	/*
	 * Can't check by writing if the corresponding
	 * base address isn't memory.
	 *
	 * I hope this would be no harm....
	 */
	baseismem = base < (caddr_t)basemax;

	/* save original value (base must be saved first) */
	if (baseismem)
		save_b = *b;
	save_m = *m;

asm("begin_check_mem:");
	/*
	 * stack and other data segment variables are unusable
	 * til end_check_mem, because they may be clobbered.
	 */

	/*
	 * check memory by writing/reading
	 */
	if (baseismem)
		*b = 0x55;
	*m = 0xAA;
	if ((baseismem && *b != 0x55) || *m != 0xAA)
		goto out;

	*m = 0x55;
	if (baseismem)
		*b = 0xAA;
	if (*m != 0x55 || (baseismem && *b != 0xAA))
		goto out;

	exists = 1;
out:
	*m = save_m;
	if (baseismem)
		*b = save_b;

asm("end_check_mem:");

	nofault = (int *)0;
	pmap_remove(pmap_kernel(), mem_v, mem_v+NBPG);
	pmap_remove(pmap_kernel(), base_v, base_v+NBPG);
	pmap_update(pmap_kernel());

	DPRINTF ((" End.\n"));

	DPRINTF (("Returning from mem_exists. result = %d\n", exists));

	return exists;
}
1225 
1226 static void
1227 setmemrange(void)
1228 {
1229 	int i;
1230 	psize_t s, min, max;
1231 	struct memlist *mlist = memlist;
1232 	u_long h;
1233 	int basemax = ctob(physmem);
1234 
1235 	/*
1236 	 * VM system is not started yet.  Use the first and second avalable
1237 	 * pages to map the (possible) target memory and its shadow.
1238 	 */
1239 	mem_v = virtual_avail;	/* target */
1240 	base_v = mem_v + NBPG;	/* shadow */
1241 
1242 	{	/* Turn off the processor cache. */
1243 		register int cacr;
1244 		PCIA();		/* cpusha dc */
1245 		switch (cputype) {
1246 		case CPU_68030:
1247 			cacr = CACHE_OFF;
1248 			break;
1249 		case CPU_68040:
1250 			cacr = CACHE40_OFF;
1251 			break;
1252 		case CPU_68060:
1253 			cacr = CACHE60_OFF;
1254 			break;
1255 		}
1256 		asm volatile ("movc %0,%%cacr"::"d"(cacr));
1257 	}
1258 
1259 	/* discover extended memory */
1260 	for (i = 0; i < sizeof(memlist) / sizeof(memlist[0]); i++) {
1261 		min = mlist[i].min;
1262 		max = mlist[i].max;
1263 		/*
1264 		 * Normally, x68k hardware is NOT 32bit-clean.
1265 		 * But some type of extended memory is in 32bit address space.
1266 		 * Check whether.
1267 		 */
1268 		if (!mem_exists(mlist[i].base, basemax))
1269 			continue;
1270 		h = 0;
1271 		/* range check */
1272 		for (s = min; s <= max; s += 0x00100000) {
1273 			if (!mem_exists(mlist[i].base + s - 4, basemax))
1274 				break;
1275 			h = (u_long)(mlist[i].base + s);
1276 		}
1277 		if ((u_long)mlist[i].base < h) {
1278 			uvm_page_physload(atop(mlist[i].base), atop(h),
1279 					  atop(mlist[i].base), atop(h),
1280 					  VM_FREELIST_DEFAULT);
1281 			mem_size += h - (u_long) mlist[i].base;
1282 		}
1283 
1284 	}
1285 
1286 	{	/* Re-enable the processor cache. */
1287 		register int cacr;
1288 		ICIA();
1289 		switch (cputype) {
1290 		case CPU_68030:
1291 			cacr = CACHE_ON;
1292 			break;
1293 		case CPU_68040:
1294 			cacr = CACHE40_ON;
1295 			break;
1296 		case CPU_68060:
1297 			cacr = CACHE60_ON;
1298 			break;
1299 		}
1300 		asm volatile ("movc %0,%%cacr"::"d"(cacr));
1301 	}
1302 
1303 	physmem = m68k_btop(mem_size);
1304 }
1305 #endif
1306