xref: /netbsd/sys/arch/x68k/x68k/machdep.c (revision c4a72b64)
1 /*	$NetBSD: machdep.c,v 1.110 2002/10/28 00:55:18 chs Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
41  *
42  *	@(#)machdep.c	8.10 (Berkeley) 4/20/94
43  */
44 
45 #include "opt_ddb.h"
46 #include "opt_kgdb.h"
47 #include "opt_compat_netbsd.h"
48 #include "opt_m680x0.h"
49 #include "opt_fpu_emulate.h"
50 #include "opt_m060sp.h"
51 #include "opt_panicbutton.h"
52 #include "opt_extmem.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/callout.h>
57 #include <sys/signalvar.h>
58 #include <sys/kernel.h>
59 #include <sys/proc.h>
60 #include <sys/buf.h>
61 #include <sys/reboot.h>
62 #include <sys/conf.h>
63 #include <sys/file.h>
64 #include <sys/malloc.h>
65 #include <sys/mbuf.h>
66 #include <sys/msgbuf.h>
67 #include <sys/ioctl.h>
68 #include <sys/tty.h>
69 #include <sys/mount.h>
70 #include <sys/user.h>
71 #include <sys/exec.h>
72 #include <sys/vnode.h>
73 #include <sys/syscallargs.h>
74 #include <sys/core.h>
75 #include <sys/kcore.h>
76 
77 #if defined(DDB) && defined(__ELF__)
78 #include <sys/exec_elf.h>
79 #endif
80 
81 #include <net/netisr.h>
82 #undef PS	/* XXX netccitt/pk.h conflict with machine/reg.h? */
83 
84 #include <machine/db_machdep.h>
85 #include <ddb/db_sym.h>
86 #include <ddb/db_extern.h>
87 
88 #include <m68k/cacheops.h>
89 #include <machine/cpu.h>
90 #include <machine/reg.h>
91 #include <machine/psl.h>
92 #include <machine/pte.h>
93 #include <machine/kcore.h>
94 
95 #include <dev/cons.h>
96 
97 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
98 #include <uvm/uvm_extern.h>
99 
100 #include <sys/sysctl.h>
101 
102 #include <sys/device.h>
103 
104 #include <machine/bus.h>
105 #include <arch/x68k/dev/intiovar.h>
106 
107 void initcpu __P((void));
108 void identifycpu __P((void));
109 void doboot __P((void))
110     __attribute__((__noreturn__));
111 int badaddr __P((caddr_t));
112 int badbaddr __P((caddr_t));
113 
114 /* the following is used externally (sysctl_hw) */
115 char	machine[] = MACHINE;	/* from <machine/param.h> */
116 
117 /* Our exported CPU info; we can have only one. */
118 struct cpu_info cpu_info_store;
119 
120 struct vm_map *exec_map = NULL;
121 struct vm_map *mb_map = NULL;
122 struct vm_map *phys_map = NULL;
123 
124 extern paddr_t avail_start, avail_end;
125 extern vaddr_t virtual_avail;
126 extern u_int lowram;
127 extern int end, *esym;
128 
129 caddr_t	msgbufaddr;
130 int	maxmem;			/* max memory per process */
131 int	physmem = MAXMEM;	/* max supported memory, changes to actual */
132 
133 /*
134  * safepri is a safe priority for sleep to set for a spin-wait
135  * during autoconfiguration or after a panic.
136  */
137 int	safepri = PSL_LOWIPL;
138 
139 /* prototypes for local functions */
140 void    identifycpu __P((void));
141 void    initcpu __P((void));
142 int	cpu_dumpsize __P((void));
143 int	cpu_dump __P((int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t *));
144 void	cpu_init_kcore_hdr __P((void));
145 #ifdef EXTENDED_MEMORY
146 static int mem_exists __P((caddr_t, u_long));
147 static void setmemrange __P((void));
148 #endif
149 
150 /* functions called from locore.s */
151 void	dumpsys __P((void));
152 void    straytrap __P((int, u_short));
153 void	nmihand __P((struct frame));
154 void	intrhand __P((int));
155 
156 /*
157  * On the 68020/68030, the value of delay_divisor is roughly
158  * 2048 / cpuspeed (where cpuspeed is in MHz).
159  *
160  * On the 68040, the value of delay_divisor is roughly
161  * 759 / cpuspeed (where cpuspeed is in MHz).
162  *
163  * On the 68060, the value of delay_divisor is reported to be
164  * 128 / cpuspeed (where cpuspeed is in MHz).
165  */
166 int	delay_divisor = 140;	/* assume some reasonable value to start */
167 static int cpuspeed;		/* MPU clock (in MHz) */
168 
169 /*
170  * Machine-dependent crash dump header info.
171  */
172 cpu_kcore_hdr_t cpu_kcore_hdr;
173 
174 /*
175  * Console initialization: called early on from main,
176  * before vm init or startup.  Do enough configuration
177  * to choose and initialize a console.
178  */
void
consinit()
{
	/*
	 * bring graphics layer up.
	 */
	config_console();

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#ifdef KGDB
	zs_kgdb_init();			/* XXX */
#endif
#ifdef DDB
	/*
	 * Pass the symbol table bounds to DDB.  The a.out and ELF
	 * layouts differ: a.out stores the symbol size at &end, while
	 * for ELF the size is derived from the esym/end difference.
	 */
#ifndef __ELF__
	ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
#else
	ddb_init((int)esym - (int)&end - sizeof(Elf32_Ehdr),
		 (void *)&end, esym);
#endif
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/*
	 * Tell the VM system about available physical memory.
	 * (Extended memory boards, if configured, are probed and
	 * registered separately by setmemrange() below.)
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
			atop(avail_start), atop(avail_end),
			VM_FREELIST_DEFAULT);
#ifdef EXTENDED_MEMORY
	setmemrange();
#endif
}
216 
217 /*
218  * cpu_startup: allocate memory for variable-sized tables,
219  * initialize cpu, and do autoconfiguration.
220  */
void
cpu_startup()
{
	caddr_t v;
	u_int i, base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char pbuf[9];		/* scratch for format_bytes() output */
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	/* quiet pmap tracing during the noisy startup mappings */
	pmapdebug = 0;
#endif
#if 0
	rtclockinit(); /* XXX */
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
		    avail_end + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));

	/*
	 * Initialize the kernel crash dump header.
	 */
	cpu_init_kcore_hdr();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 * allocsys(NULL) only sizes the tables; the second call
	 * actually carves them out of the zeroed allocation.
	 */
	size = (vm_size_t)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != size)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
#if 0
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}
#endif
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vsize_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
				       VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 FALSE, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
357 
358 /*
359  * Set registers on exec.
360  */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	struct frame *frame = (struct frame *)p->p_md.md_regs;

	/* User-mode PSL; entry point with the low bit masked off
	 * (m68k instructions must be word-aligned). */
	frame->f_sr = PSL_USERSET;
	frame->f_pc = pack->ep_entry & ~1;
	frame->f_regs[D0] = 0;
	frame->f_regs[D1] = 0;
	frame->f_regs[D2] = 0;
	frame->f_regs[D3] = 0;
	frame->f_regs[D4] = 0;
	frame->f_regs[D5] = 0;
	frame->f_regs[D6] = 0;
	frame->f_regs[D7] = 0;
	frame->f_regs[A0] = 0;
	frame->f_regs[A1] = 0;
	/* A2 carries the ps_strings pointer to the new image */
	frame->f_regs[A2] = (int)p->p_psstr;
	frame->f_regs[A3] = 0;
	frame->f_regs[A4] = 0;
	frame->f_regs[A5] = 0;
	frame->f_regs[A6] = 0;
	frame->f_regs[SP] = stack;

	/* restore a null state frame */
	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
	if (fputype)
		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
}
393 
394 /*
395  * Info for CTL_HW
396  */
/* Model string reported via sysctl CTL_HW; formatted in identifycpu(). */
char	cpu_model[96];		/* max 85 chars */
/* FPU description strings, indexed by fputype; see identifycpu(). */
static char *fpu_descr[] = {
#ifdef	FPU_EMULATE
	", emulator FPU", 	/* 0 */
#else
	", no math support",	/* 0 */
#endif
	", m68881 FPU",		/* 1 */
	", m68882 FPU",		/* 2 */
	"/FPU",			/* 3 */
	"/FPU",			/* 4 */
	};
409 
/*
 * Determine and print the machine model, CPU/MMU/FPU types and the
 * clock speed; the result is stored in cpu_model for sysctl.
 */
void
identifycpu()
{
        /* there's alot of XXX in here... */
	char *cpu_type, *mach, *mmu, *fpu;
	char clock[16];

	/*
	 * check machine type constant
	 */
	switch (intio_get_sysport_mpustat()) {
	case 0xdc:
		/*
		 * CPU Type == 68030, Clock == 25MHz
		 */
		mach = "030";
		break;
	case 0xfe:
		/*
		 * CPU Type == 68000, Clock == 16MHz
		 */
		mach = "000XVI";
		break;
	case 0xff:
		/*
		 * CPU Type == 68000, Clock == 10MHz
		 */
		mach = "000/ACE/PRO/EXPERT/SUPER";
		break;
	default:
		/*
		 * unknown type
		 */
		mach = "000?(unknown model)";
		break;
	}

	/*
	 * The clock rate is inferred from the calibrated delay loop
	 * divisor; the constants match the comment above delay_divisor.
	 * The 040/060 run the bus at half the CPU clock, hence the
	 * "cpu/bus" two-value clock string.
	 */
	cpuspeed = 2048 / delay_divisor;
	sprintf(clock, "%dMHz", cpuspeed);
	switch (cputype) {
	case CPU_68060:
		cpu_type = "m68060";
		mmu = "/MMU";
		cpuspeed = 128 / delay_divisor;
		sprintf(clock, "%d/%dMHz", cpuspeed*2, cpuspeed);
		break;
	case CPU_68040:
		cpu_type = "m68040";
		mmu = "/MMU";
		cpuspeed = 759 / delay_divisor;
		sprintf(clock, "%d/%dMHz", cpuspeed*2, cpuspeed);
		break;
	case CPU_68030:
		cpu_type = "m68030";
		mmu = "/MMU";
		break;
	case CPU_68020:
		cpu_type = "m68020";
		mmu = ", m68851 MMU";
		break;
	default:
		cpu_type = "unknown";
		mmu = ", unknown MMU";
		break;
	}
	if (fputype >= 0 && fputype < sizeof(fpu_descr)/sizeof(fpu_descr[0]))
		fpu = fpu_descr[fputype];
	else
		fpu = ", unknown FPU";
	/* NOTE(review): cpu_model is 96 bytes ("max 85 chars") -- the
	 * pieces above are believed to fit, but sprintf is unbounded. */
	sprintf(cpu_model, "X68%s (%s CPU%s%s, %s clock)",
		mach, cpu_type, mmu, fpu, clock);
	printf("%s\n", cpu_model);
}
483 
484 /*
485  * machine dependent system variables.
486  */
/*
 * Machine-dependent sysctl handler (CTL_MACHDEP); only CPU_CONSDEV
 * (the console device, read-only) is supported.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	dev_t consdev;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
516 
517 int	waittime = -1;
518 int	power_switch_is_off = 0;
519 
/*
 * Halt or reboot the machine after syncing/dumping according to howto.
 * Never returns; ends up in doboot() (back to ROM) one way or another.
 */
void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{
	/* take a snap shot before clobbering any registers */
	if (curproc && curproc->p_addr)
		savectx(&curproc->p_addr->u_pcb);

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		/*resettodr();*/
	}

	/* Disable interrupts. */
	splhigh();

	if (howto & RB_DUMP)
		dumpsys();

	/* Run any shutdown hooks. */
	doshutdownhooks();

#if defined(PANICWAIT) && !defined(DDB)
	if ((howto & RB_HALT) == 0 && panicstr) {
		printf("hit any key to reboot...\n");
		(void)cngetc();
		printf("\n");
	}
#endif

	/* Finally, halt/reboot the system. */
	/* a) RB_POWERDOWN
	 *  a1: the power switch is still on
	 *	Power cannot be removed; simply halt the system (b)
	 *	Power switch state is checked in shutdown hook
	 *  a2: the power switch is off
	 *	Remove the power; the simplest way is go back to ROM eg. reboot
	 * b) RB_HALT
	 *      call cngetc
         * c) otherwise
	 *	Reboot
	*/
	if (((howto & RB_POWERDOWN) == RB_POWERDOWN) && power_switch_is_off)
		doboot();
	else if (/*((howto & RB_POWERDOWN) == RB_POWERDOWN) ||*/
		 ((howto & RB_HALT) == RB_HALT)) {
		printf("System halted.  Hit any key to reboot.\n\n");
		(void)cngetc();
	}

	printf("rebooting...\n");
	DELAY(1000000);
	doboot();
	/*NOTREACHED*/
}
582 
583 /*
584  * Initialize the kernel crash dump header.
585  */
/*
 * Initialize the kernel crash dump header (cpu_kcore_hdr), describing
 * the MMU layout and the physical RAM segments for savecore/libkvm.
 */
void
cpu_init_kcore_hdr()
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	int i;

	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = NBPG;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int32_t)(pmap_kernel()->pm_stpa);

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = lowram;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)&end;

	/*
	 * X68k has multiple RAM segments on some models.
	 * Segment 0 is main memory; the rest are copied from the VM
	 * system's physical segment list.
	 * NOTE(review): assumes vm_nphysseg <= M68K_NPHYS_RAM_SEGS --
	 * the loop does not bound-check ram_segs[]; verify.
	 */
	m->ram_segs[0].start = lowram;
	m->ram_segs[0].size = mem_size - lowram;
	for (i = 1; i < vm_nphysseg; i++) {
		m->ram_segs[i].start = ctob(vm_physmem[i].start);
		m->ram_segs[i].size  = ctob(vm_physmem[i].end
					    - vm_physmem[i].start);
	}
}
648 
649 /*
650  * Compute the size of the machine-dependent crash dump header.
651  * Returns size in disk blocks.
652  */
653 int
654 cpu_dumpsize()
655 {
656 	int size;
657 
658 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
659 	return (btodb(roundup(size, dbtob(1))));
660 }
661 
662 /*
663  * Called by dumpsys() to dump the machine-dependent header.
664  */
/*
 * Called by dumpsys() to dump the machine-dependent header.
 * Writes one disk block containing the kcore segment header followed
 * by cpu_kcore_hdr, then advances *blknop past it.
 * Returns 0 or an errno from the device dump routine.
 */
int
cpu_dump(dump, blknop)
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	daddr_t *blknop;
{
	/* one disk block worth of scratch, int-aligned */
	int buf[dbtob(1) / sizeof(int)];
	cpu_kcore_hdr_t *chdr;
	kcore_seg_t *kseg;
	int error;

	kseg = (kcore_seg_t *)buf;
	chdr = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(kcore_seg_t)) /
	    sizeof(int)];

	/* Create the segment header. */
	CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg->c_size = dbtob(1) - ALIGN(sizeof(kcore_seg_t));

	memcpy(chdr, &cpu_kcore_hdr, sizeof(cpu_kcore_hdr_t));
	error = (*dump)(dumpdev, *blknop, (caddr_t)buf, sizeof(buf));
	*blknop += btodb(sizeof(buf));
	return (error);
}
688 
689 /*
690  * These variables are needed by /sbin/savecore
691  */
692 u_int32_t dumpmag = 0x8fca0101;	/* magic number */
693 int	dumpsize = 0;		/* pages */
694 long	dumplo = 0;		/* blocks */
695 
696 /*
697  * This is called by main to set dumplo and dumpsize.
698  * Dumps always skip the first NBPG of disk space in
699  * case there might be a disk label stored there.  If there
700  * is extra space, put dump at the end to reduce the chance
701  * that swapping trashes it.
702  */
703 void
704 cpu_dumpconf()
705 {
706 	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
707 	struct m68k_kcore_hdr *m = &h->un._m68k;
708 	const struct bdevsw *bdev;
709 	int chdrsize;	/* size of dump header */
710 	int nblks;	/* size of dump area */
711 	int i;
712 
713 	if (dumpdev == NODEV)
714 		return;
715 	bdev = bdevsw_lookup(dumpdev);
716 	if (bdev == NULL)
717 		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
718 	if (bdev->d_psize == NULL)
719 		return;
720 	nblks = (*bdev->d_psize)(dumpdev);
721 	chdrsize = cpu_dumpsize();
722 
723 	dumpsize = 0;
724 	for (i = 0; m->ram_segs[i].size && i < M68K_NPHYS_RAM_SEGS; i++)
725 		dumpsize += btoc(m->ram_segs[i].size);
726 	/*
727 	 * Check to see if we will fit.  Note we always skip the
728 	 * first NBPG in case there is a disk label there.
729 	 */
730 	if (nblks < (ctod(dumpsize) + chdrsize + ctod(1))) {
731 		dumpsize = 0;
732 		dumplo = -1;
733 		return;
734 	}
735 
736 	/*
737 	 * Put dump at the end of the partition.
738 	 */
739 	dumplo = (nblks - 1) - ctod(dumpsize) - chdrsize;
740 }
741 
/*
 * Write a crash dump (header plus every RAM page) to the dump device.
 * Walks the ram_segs[] list built by cpu_init_kcore_hdr(), mapping
 * each physical page at vmmap before handing it to the device.
 */
void
dumpsys()
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	const struct bdevsw *bdev;
	daddr_t blkno;		/* current block to write */
				/* dump routine */
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int pg;			/* page being dumped */
	paddr_t maddr;		/* PA being dumped */
	int seg;		/* RAM segment being dumped */
	int error;		/* error code from (*dump)() */

	/* XXX initialized here because of gcc lossage */
	seg = 0;
	maddr = m->ram_segs[seg].start;
	pg = 0;

	/* Make sure dump device is valid. */
	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		return;
	if (dumpsize == 0) {
		cpu_dumpconf();
		if (dumpsize == 0)
			return;
	}
	if (dumplo <= 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	dump = bdev->d_dump;
	blkno = dumplo;

	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	printf("dump ");

	/*
	 * Write the dump header.  On failure the goto jumps INTO the
	 * loop body's switch below, which doubles as the error reporter.
	 */
	error = cpu_dump(dump, &blkno);
	if (error)
		goto bad;

	for (pg = 0; pg < dumpsize; pg++) {
#define NPGMB	(1024*1024/NBPG)
		/* print out how many MBs we have dumped */
		if (pg && (pg % NPGMB) == 0)
			printf("%d ", pg / NPGMB);
#undef NPGMB
		if (maddr == 0) {
			/* Skip first page */
			maddr += NBPG;
			blkno += btodb(NBPG);
			continue;
		}
		/* advance to the segment containing maddr */
		while (maddr >=
		    (m->ram_segs[seg].start + m->ram_segs[seg].size)) {
			if (++seg >= M68K_NPHYS_RAM_SEGS ||
			    m->ram_segs[seg].size == 0) {
				error = EINVAL;		/* XXX ?? */
				goto bad;
			}
			maddr = m->ram_segs[seg].start;
		}
		pmap_enter(pmap_kernel(), (vaddr_t)vmmap, maddr,
		    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
		pmap_update(pmap_kernel());

		error = (*dump)(dumpdev, blkno, vmmap, NBPG);
 bad:
		switch (error) {
		case 0:
			maddr += NBPG;
			blkno += btodb(NBPG);
			break;

		case ENXIO:
			printf("device bad\n");
			return;

		case EFAULT:
			printf("device not ready\n");
			return;

		case EINVAL:
			printf("area improper\n");
			return;

		case EIO:
			printf("i/o error\n");
			return;

		case EINTR:
			printf("aborted from console\n");
			return;

		default:
			printf("error %d\n", error);
			return;
		}
	}
	printf("succeeded\n");
}
850 
/*
 * CPU-specific initialization: on the 68060, hook the software
 * support package (060SP) entry points into the vector table so
 * unimplemented integer/FP instructions are emulated.
 */
void
initcpu()
{
	/* XXX should init '40 vecs here, too */
#if defined(M68060)
	extern caddr_t vectab[256];
#if defined(M060SP)
	extern u_int8_t I_CALL_TOP[];
	extern u_int8_t FP_CALL_TOP[];
#else
	extern u_int8_t illinst;
#endif
	extern u_int8_t fpfault;
#endif

#ifdef MAPPEDCOPY

	/*
	 * Initialize lower bound for doing copyin/copyout using
	 * page mapping (if not already set).  We don't do this on
	 * VAC machines as it loses big time.
	 */
	if ((int) mappedcopysize == -1) {
		mappedcopysize = NBPG;
	}
#endif

#if defined(M68060)
	if (cputype == CPU_68060) {
		/*
		 * Vector numbers and 060SP table offsets below follow
		 * the M68060 software package conventions (vector 61 =
		 * unimplemented integer instruction, 11/55/60 and
		 * 48-54 = FP exceptions).
		 */
#if defined(M060SP)
		/* integer support */
		vectab[61] = &I_CALL_TOP[128 + 0x00];

		/* floating point support */
		vectab[11] = &FP_CALL_TOP[128 + 0x30];
		vectab[55] = &FP_CALL_TOP[128 + 0x38];
		vectab[60] = &FP_CALL_TOP[128 + 0x40];

		vectab[54] = &FP_CALL_TOP[128 + 0x00];
		vectab[52] = &FP_CALL_TOP[128 + 0x08];
		vectab[53] = &FP_CALL_TOP[128 + 0x10];
		vectab[51] = &FP_CALL_TOP[128 + 0x18];
		vectab[50] = &FP_CALL_TOP[128 + 0x20];
		vectab[49] = &FP_CALL_TOP[128 + 0x28];
#else
		vectab[61] = &illinst;
#endif
		vectab[48] = &fpfault;
	}
	/* flush the instruction cache so the new vectors take effect */
	DCIS();
#endif
}
903 
/*
 * Handler for traps with no registered handler; called from locore.
 * Logs the vector offset (low 12 bits of the exception frame's format
 * word) and the faulting PC, then drops into DDB if configured.
 */
void
straytrap(pc, evec)
	int pc;
	u_short evec;
{
	printf("unexpected trap (vector offset %x) from %x\n",
	       evec & 0xFFF, pc);
#if defined(DDB)
	Debugger();
#endif
}
915 
916 int	*nofault;
917 
/*
 * Probe a word (16-bit) read at addr.  Returns 1 if the access
 * faulted (bad address), 0 if it succeeded.
 * The fault handler longjmps back here via the global `nofault'
 * hook (presumably checked by the trap code in locore -- the
 * set/clear ordering around the probe must not be changed).
 */
int
badaddr(addr)
	caddr_t addr;
{
	int i;
	label_t	faultbuf;

	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		/* fault taken during the probe */
		nofault = (int *) 0;
		return(1);
	}
	i = *(volatile short *)addr;
	nofault = (int *) 0;
	return(0);
}
934 
/*
 * Same as badaddr(), but probes a byte (8-bit) read -- needed for
 * devices that only decode byte accesses.  Returns 1 on fault,
 * 0 on success.
 */
int
badbaddr(addr)
	caddr_t addr;
{
	int i;
	label_t	faultbuf;

	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		/* fault taken during the probe */
		nofault = (int *) 0;
		return(1);
	}
	i = *(volatile char *)addr;
	nofault = (int *) 0;
	return(0);
}
951 
952 void netintr __P((void));
953 
/*
 * Network software interrupt: run every protocol handler whose bit
 * is set in `netisr'.  The DONETISR macro is expanded once per
 * protocol by including <net/netisr_dispatch.h>.
 */
void
netintr()
{

#define DONETISR(bit, fn) do {		\
	if (netisr & (1 << bit)) {	\
		netisr &= ~(1 << bit);	\
		fn();			\
	}				\
} while (0)

#include <net/netisr_dispatch.h>

#undef DONETISR
}
969 
/*
 * Fallback interrupt handler called from locore for interrupts that
 * have no dispatcher; just log the unexpected status register value.
 */
void
intrhand(sr)
	int sr;
{
	printf("intrhand: unexpected sr 0x%x\n", sr);
}
976 
977 #if (defined(DDB) || defined(DEBUG)) && !defined(PANICBUTTON)
978 #define PANICBUTTON
979 #endif
980 
981 #ifdef PANICBUTTON
982 int panicbutton = 1;	/* non-zero if panic buttons are enabled */
983 int crashandburn = 0;
984 int candbdelay = 50;	/* give em half a second */
985 void candbtimer __P((void *));
986 
987 #ifndef DDB
988 static struct callout candbtimer_ch = CALLOUT_INITIALIZER;
989 #endif
990 
/*
 * Callout handler: the panic-button window (candbdelay ticks) has
 * expired without a second press, so disarm crashandburn.
 */
void
candbtimer(arg)
	void *arg;
{

	crashandburn = 0;
}
998 #endif
999 
1000 /*
1001  * Level 7 interrupts can be caused by the keyboard or parity errors.
1002  */
/*
 * Level 7 (non-maskable) interrupt handler: keyboard NMI or parity
 * error.  With PANICBUTTON, a second NMI within candbdelay ticks
 * forces a panic (or enters DDB directly when configured).
 */
void
nmihand(frame)
	struct frame frame;
{
	/* acknowledge/clear the NMI via the keyboard control port
	 * (bit 0x04 -- presumably the NMI status bit; see intiovar.h) */
	intio_set_sysport_keyctrl(intio_get_sysport_keyctrl() | 0x04);

	/* NOTE(review): the `if (1)' makes the trailing "unexpected
	 * level 7" message unreachable; it looks like a placeholder
	 * for a real keyboard-vs-parity discriminator. */
	if (1) {
#ifdef PANICBUTTON
		static int innmihand = 0;

		/*
		 * Attempt to reduce the window of vulnerability for recursive
		 * NMIs (e.g. someone holding down the keyboard reset button).
		 */
		if (innmihand == 0) {
			innmihand = 1;
			printf("Got a keyboard NMI\n");
			innmihand = 0;
		}
#ifdef DDB
		Debugger();
#else
		if (panicbutton) {
			if (crashandburn) {
				crashandburn = 0;
				panic(panicstr ?
				      "forced crash, nosync" : "forced crash");
			}
			crashandburn++;
			callout_reset(&candbtimer_ch, candbdelay,
			    candbtimer, NULL);
		}
#endif /* DDB */
#endif /* PANICBUTTON */
		return;
	}
	/* panic?? */
	printf("unexpected level 7 interrupt ignored\n");
}
1042 
1043 /*
1044  * cpu_exec_aout_makecmds():
1045  *	cpu-dependent a.out format hook for execve().
1046  *
1047  * Determine of the given exec package refers to something which we
1048  * understand and, if so, set up the vmcmds for it.
1049  *
1050  * XXX what are the special cases for the hp300?
1051  * XXX why is this COMPAT_NOMID?  was something generating
1052  *	hp300 binaries with an a_mid of 0?  i thought that was only
1053  *	done on little-endian machines...  -- cgd
1054  */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
#if defined(COMPAT_NOMID) || defined(COMPAT_44)
	u_long midmag, magic;
	u_short mid;
	int error;
	struct exec *execp = epp->ep_hdr;

	/* split the a.out header's machine-id/magic word */
	midmag = ntohl(execp->a_midmag);
	mid = (midmag >> 16) & 0xffff;
	magic = midmag & 0xffff;

	midmag = mid << 16 | magic;

	/* accept only old-style ZMAGIC images (no-MID or 4.4BSD hp300) */
	switch (midmag) {
#ifdef COMPAT_NOMID
	case (MID_ZERO << 16) | ZMAGIC:
		error = exec_aout_prep_oldzmagic(p, epp);
		break;
#endif
#ifdef COMPAT_44
	case (MID_HP300 << 16) | ZMAGIC:
		error = exec_aout_prep_oldzmagic(p, epp);
		break;
#endif
	default:
		error = ENOEXEC;
	}

	return error;
#else /* !(defined(COMPAT_NOMID) || defined(COMPAT_44)) */
	return ENOEXEC;
#endif
}
1092 
1093 #ifdef EXTENDED_MEMORY
1094 #ifdef EM_DEBUG
1095 static int em_debug = 0;
1096 #define DPRINTF(str) do{ if (em_debug) printf str; } while (0);
1097 #else
1098 #define DPRINTF(str)
1099 #endif
1100 
/*
 * Candidate extended-memory boards: base physical address plus the
 * minimum and maximum size to probe (scanned in 1MB steps by
 * setmemrange()).
 */
static struct memlist {
	caddr_t base;
	psize_t min;
	psize_t max;
} memlist[] = {
	/* TS-6BE16 16MB memory */
	{(caddr_t)0x01000000, 0x01000000, 0x01000000},
	/* 060turbo SIMM slot (4--128MB) */
	{(caddr_t)0x10000000, 0x00400000, 0x08000000},
};
/* scratch VAs used by mem_exists(): probe target and its 24-bit shadow */
static vaddr_t mem_v, base_v;
1112 
1113 /*
1114  * check memory existency
1115  */
/*
 * check memory existency
 *
 * Probe whether RAM really exists at physical address `mem' by
 * mapping it (and its 24-bit-aliased "shadow" address) at scratch
 * VAs and doing write/read-back tests.  Returns 1 if distinct,
 * writable memory is present, 0 otherwise.  `basemax' is the top of
 * known-good base memory; the shadow is only written when it falls
 * below that.  Faults during the probe unwind through `nofault'.
 */
static int
mem_exists(mem, basemax)
	caddr_t mem;
	u_long basemax;
{
	/* most variables must be register! */
	register volatile unsigned char *m, *b;
	register unsigned char save_m, save_b;
	register int baseismem;
	register int exists = 0;
	caddr_t base;
	caddr_t begin_check, end_check;
	label_t	faultbuf;

	DPRINTF (("Enter mem_exists(%p, %x)\n", mem, basemax));
	DPRINTF ((" pmap_enter(%p, %p) for target... ", mem_v, mem));
	pmap_enter(pmap_kernel(), mem_v, (paddr_t)mem,
		   VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|PMAP_WIRED);
	pmap_update(pmap_kernel());
	DPRINTF ((" done.\n"));

	/* only 24bits are significant on normal X680x0 systems */
	base = (caddr_t)((u_long)mem & 0x00FFFFFF);
	DPRINTF ((" pmap_enter(%p, %p) for shadow... ", base_v, base));
	pmap_enter(pmap_kernel(), base_v, (paddr_t)base,
		   VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|PMAP_WIRED);
	pmap_update(pmap_kernel());
	DPRINTF ((" done.\n"));

	m = (void*)mem_v;
	b = (void*)base_v;

	/* This is somewhat paranoid -- avoid overwriting myself */
	asm("lea %%pc@(begin_check_mem),%0" : "=a"(begin_check));
	asm("lea %%pc@(end_check_mem),%0" : "=a"(end_check));
	if (base >= begin_check && base < end_check) {
		size_t off = end_check - begin_check;

		DPRINTF ((" Adjusting the testing area.\n"));
		m -= off;
		b -= off;
	}

	nofault = (int *) &faultbuf;
	if (setjmp ((label_t *)nofault)) {
		/* fault during the probe: clean up mappings and fail */
		nofault = (int *) 0;
		pmap_remove(pmap_kernel(), mem_v, mem_v+NBPG);
		pmap_remove(pmap_kernel(), base_v, base_v+NBPG);
		pmap_update(pmap_kernel());
		DPRINTF (("Fault!!! Returning 0.\n"));
		return 0;
	}

	DPRINTF ((" Let's begin. mem=%p, base=%p, m=%p, b=%p\n",
		  mem, base, m, b));

	(void) *m;
	/*
	 * Can't check by writing if the corresponding
	 * base address isn't memory.
	 *
	 * I hope this would be no harm....
	 */
	baseismem = base < (caddr_t)basemax;

	/* save original value (base must be saved first) */
	if (baseismem)
		save_b = *b;
	save_m = *m;

asm("begin_check_mem:");
	/*
	 * stack and other data segment variables are unusable
	 * til end_check_mem, because they may be clobbered.
	 */

	/*
	 * check memory by writing/reading
	 */
	if (baseismem)
		*b = 0x55;
	*m = 0xAA;
	if ((baseismem && *b != 0x55) || *m != 0xAA)
		goto out;

	*m = 0x55;
	if (baseismem)
		*b = 0xAA;
	if (*m != 0x55 || (baseismem && *b != 0xAA))
		goto out;

	exists = 1;
out:
	/* restore the original contents in reverse order of saving */
	*m = save_m;
	if (baseismem)
		*b = save_b;

asm("end_check_mem:");

	nofault = (int *)0;
	pmap_remove(pmap_kernel(), mem_v, mem_v+NBPG);
	pmap_remove(pmap_kernel(), base_v, base_v+NBPG);
	pmap_update(pmap_kernel());

	DPRINTF ((" End.\n"));

	DPRINTF (("Returning from mem_exists. result = %d\n", exists));

	return exists;
}
1226 
1227 static void
1228 setmemrange(void)
1229 {
1230 	int i;
1231 	psize_t s, min, max;
1232 	struct memlist *mlist = memlist;
1233 	u_long h;
1234 	int basemax = ctob(physmem);
1235 
1236 	/*
1237 	 * VM system is not started yet.  Use the first and second avalable
1238 	 * pages to map the (possible) target memory and its shadow.
1239 	 */
1240 	mem_v = virtual_avail;	/* target */
1241 	base_v = mem_v + NBPG;	/* shadow */
1242 
1243 	{	/* Turn off the processor cache. */
1244 		register int cacr;
1245 		PCIA();		/* cpusha dc */
1246 		switch (cputype) {
1247 		case CPU_68030:
1248 			cacr = CACHE_OFF;
1249 			break;
1250 		case CPU_68040:
1251 			cacr = CACHE40_OFF;
1252 			break;
1253 		case CPU_68060:
1254 			cacr = CACHE60_OFF;
1255 			break;
1256 		}
1257 		asm volatile ("movc %0,%%cacr"::"d"(cacr));
1258 	}
1259 
1260 	/* discover extended memory */
1261 	for (i = 0; i < sizeof(memlist) / sizeof(memlist[0]); i++) {
1262 		min = mlist[i].min;
1263 		max = mlist[i].max;
1264 		/*
1265 		 * Normally, x68k hardware is NOT 32bit-clean.
1266 		 * But some type of extended memory is in 32bit address space.
1267 		 * Check whether.
1268 		 */
1269 		if (!mem_exists(mlist[i].base, basemax))
1270 			continue;
1271 		h = 0;
1272 		/* range check */
1273 		for (s = min; s <= max; s += 0x00100000) {
1274 			if (!mem_exists(mlist[i].base + s - 4, basemax))
1275 				break;
1276 			h = (u_long)(mlist[i].base + s);
1277 		}
1278 		if ((u_long)mlist[i].base < h) {
1279 			uvm_page_physload(atop(mlist[i].base), atop(h),
1280 					  atop(mlist[i].base), atop(h),
1281 					  VM_FREELIST_DEFAULT);
1282 			mem_size += h - (u_long) mlist[i].base;
1283 		}
1284 
1285 	}
1286 
1287 	{	/* Re-enable the processor cache. */
1288 		register int cacr;
1289 		ICIA();
1290 		switch (cputype) {
1291 		case CPU_68030:
1292 			cacr = CACHE_ON;
1293 			break;
1294 		case CPU_68040:
1295 			cacr = CACHE40_ON;
1296 			break;
1297 		case CPU_68060:
1298 			cacr = CACHE60_ON;
1299 			break;
1300 		}
1301 		asm volatile ("movc %0,%%cacr"::"d"(cacr));
1302 	}
1303 
1304 	physmem = m68k_btop(mem_size);
1305 }
1306 #endif
1307