xref: /netbsd/sys/arch/next68k/next68k/machdep.c (revision bf9ec67e)
1 /*	$NetBSD: machdep.c,v 1.49 2002/05/20 17:55:46 jdolecek Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * Copyright (c) 1998 Darrin B. Jewell
8  *
9  * This code is derived from software contributed to Berkeley by
10  * the Systems Programming Group of the University of Utah Computer
11  * Science Department.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by the University of
24  *	California, Berkeley and its contributors.
25  * 4. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
42  *
43  *	@(#)machdep.c	8.10 (Berkeley) 4/20/94
44  */
45 
46 #include "opt_ddb.h"
47 #include "opt_kgdb.h"
48 #include "opt_compat_hpux.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/signalvar.h>
53 #include <sys/kernel.h>
54 #include <sys/map.h>
55 #include <sys/proc.h>
56 #include <sys/buf.h>
57 #include <sys/reboot.h>
58 #include <sys/conf.h>
59 #include <sys/file.h>
60 #include <sys/clist.h>
61 #include <sys/device.h>
62 #include <sys/malloc.h>
63 #include <sys/mbuf.h>
64 #include <sys/msgbuf.h>
65 #include <sys/ioctl.h>
66 #include <sys/tty.h>
67 #include <sys/mount.h>
68 #include <sys/user.h>
69 #include <sys/exec.h>
70 #include <sys/core.h>
71 #include <sys/kcore.h>
72 #include <sys/vnode.h>
73 #include <sys/syscallargs.h>
74 #ifdef KGDB
75 #include <sys/kgdb.h>
76 #endif
77 #include <sys/boot_flag.h>
78 
79 #include <uvm/uvm_extern.h>
80 
81 #ifdef DDB
82 #include <machine/db_machdep.h>
83 #include <ddb/db_access.h>
84 #include <ddb/db_sym.h>
85 #include <ddb/db_extern.h>
86 #endif
87 
88 #ifdef KGDB
89 #include <sys/kgdb.h>
90 
91 /* Is zs configured in? */
92 #include "zsc.h"
93 #if (NZSC > 0)
94 #include <next68k/dev/zs_cons.h>
95 #endif
96 #endif
97 
98 #include <sys/sysctl.h>
99 
100 #include <machine/cpu.h>
101 #include <machine/bus.h>
102 #include <machine/reg.h>
103 #include <machine/psl.h>
104 #include <machine/pte.h>
105 #include <machine/vmparam.h>
106 #include <dev/cons.h>
107 
108 #include <machine/kcore.h>	/* XXX should be pulled in by sys/kcore.h */
109 
110 #include <next68k/next68k/isr.h>
111 #include <next68k/next68k/nextrom.h>
112 #include <next68k/next68k/rtc.h>
113 #include <next68k/next68k/seglist.h>
114 
115 int nsym;
116 char *ssym, *esym;
117 
118 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
119 
120 /* the following is used externally (sysctl_hw) */
121 char	machine[] = MACHINE;	/* from <machine/param.h> */
122 
123 /* Our exported CPU info; we can have only one. */
124 struct cpu_info cpu_info_store;
125 
126 struct vm_map *exec_map = NULL;
127 struct vm_map *mb_map = NULL;
128 struct vm_map *phys_map = NULL;
129 
130 caddr_t	msgbufaddr;		/* KVA of message buffer */
131 paddr_t msgbufpa;		/* PA of message buffer */
132 
133 int	maxmem;			/* max memory per process */
134 int	physmem;		/* size of physical memory */
135 
136 /*
137  * safepri is a safe priority for sleep to set for a spin-wait
138  * during autoconfiguration or after a panic.
139  */
140 int	safepri = PSL_LOWIPL;
141 
142 extern	u_int lowram;
143 extern	short exframesize[];
144 
145 #ifdef COMPAT_HPUX
146 extern struct emul emul_hpux;
147 #endif
148 
149 /* prototypes for local functions */
150 void	identifycpu __P((void));
151 void	initcpu __P((void));
152 void	dumpsys __P((void));
153 
154 int	cpu_dumpsize __P((void));
155 int	cpu_dump __P((int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t *));
156 void	cpu_init_kcore_hdr __P((void));
157 
158 /* functions called from locore.s */
159 void next68k_init __P((void));
160 void straytrap __P((int, u_short));
161 void nmihand __P((struct frame));
162 
163 /*
164  * Machine-independent crash dump header info.
165  */
166 cpu_kcore_hdr_t cpu_kcore_hdr;
167 
168 /*
169  * Memory segments initialized in locore, which are eventually loaded
170  * as managed VM pages.
171  */
172 phys_seg_list_t phys_seg_list[VM_PHYSSEG_MAX];
173 
174 /*
175  * Memory segments to dump.  This is initialized from the phys_seg_list
176  * before pages are stolen from it for VM system overhead.  I.e. this
177  * covers the entire range of physical memory.
178  */
179 phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
180 int	mem_cluster_cnt;
181 
182 /****************************************************************/
183 
184 /*
185  * Early initialization, before main() is called.
186  */
void
next68k_init(void)
{
	int i;

	/*
	 * Tell the VM system about available physical memory.
	 * The segment list was built in locore; segments that were
	 * entirely consumed for early allocations are skipped.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		if (phys_seg_list[i].ps_start == phys_seg_list[i].ps_end) {
			/*
			 * Segment has been completely gobbled up.
			 */
			continue;
		}
		/*
		 * Note the index of the mem cluster is the free
		 * list we want to put the memory on.
		 */
		uvm_page_physload(atop(phys_seg_list[i].ps_start),
				  atop(phys_seg_list[i].ps_end),
				  atop(phys_seg_list[i].ps_start),
				  atop(phys_seg_list[i].ps_end),
				  VM_FREELIST_DEFAULT);
	}

	{
		/*
		 * Parse the boot argument string handed over by the ROM:
		 * if it starts with '-', each following character is
		 * mapped to an RB_* bit in boothowto by BOOT_FLAG().
		 */
		char *p = rom_boot_arg;
		boothowto = 0;
		if (*p++ == '-') {
			for (;*p;p++)
				BOOT_FLAG(*p, boothowto);
		}
	}

	/* Initialize the interrupt handlers. */
	isrinit();

	/* Calibrate the delay loop. */
	next68k_calibrate_delay();

	/*
	 * Initialize error message buffer (at end of core).
	 * Map it page by page with PMAP_WIRED so the mapping is
	 * permanent, then hand the VA range to the MI msgbuf code.
	 */
	for (i = 0; i < btoc(round_page(MSGBUFSIZE)); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
		    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
	pmap_update(pmap_kernel());
}
238 
239 /*
240  * Console initialization: called early on from main,
241  * before vm init or startup.  Do enough configuration
242  * to choose and initialize a console.
243  */
void
consinit()
{
	static int init = 0;	/* guards one-time initialization */

	/*
	 * Generic console: sys/dev/cons.c
	 *	Initializes either ite or ser as console.
	 *	Can be called from locore.s and init_main.c.
	 */

	if (!init) {
		cninit();
#if defined(KGDB) && (NZSC > 0)
		zs_kgdb_init();
#endif
#ifdef  DDB
		/* Initialize kernel debugger, if compiled in. */
		ddb_init(nsym, ssym, esym);
#endif
		if (boothowto & RB_KDB) {
			/* Boot flags requested a debugger; enter it now. */
#if defined(KGDB)
			kgdb_connect(1);
#elif defined(DDB)
			Debugger();
#endif
		}

		init = 1;
	} else {
		/*
		 * On subsequent calls the console is already set up;
		 * only redo the delay-loop calibration (presumably
		 * more accurate later in boot -- TODO confirm).
		 */
		next68k_calibrate_delay();
	}
}
277 
278 /*
279  * cpu_startup: allocate memory for variable-sized tables,
280  * initialize cpu, and do autoconfiguration.
281  */
void
cpu_startup()
{
	extern char *kernel_text, *etext;
	unsigned i;
	caddr_t v;
	int base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char pbuf[9];
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	/* Silence pmap debug output during startup allocations. */
	pmapdebug = 0;
#endif

	/*
	 * Initialize the kernel crash dump header.
	 */
	cpu_init_kcore_hdr();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 * allocsys(NULL, ...) only sizes the tables; the second call
	 * carves them out of the zeroed allocation.
	 */
	size = (vsize_t)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
		panic("startup: no room for tables");
	if ((allocsys(v, NULL) - v) != size)
		panic("startup: table size inconsistency");


	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 * Reserve the VA range first with no access permissions.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				      "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
				       VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 * VM_MAP_INTRSAFE: mbuf clusters may be allocated at interrupt time.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 FALSE, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Tell the VM system that the area before the text segment
	 * is invalid.
	 *
	 * XXX Should just change KERNBASE and VM_MIN_KERNEL_ADDRESS,
	 * XXX but not right now.
	 */
	if (uvm_map_protect(kernel_map, 0, round_page((vaddr_t)&kernel_text),
	    UVM_PROT_NONE, TRUE) != 0)
		panic("can't mark pre-text pages off-limits");

	/*
	 * Tell the VM system that writing to the kernel text isn't allowed.
	 * If we don't, we might end up COW'ing the text segment!
	 */
	if (uvm_map_protect(kernel_map, trunc_page((vaddr_t)&kernel_text),
	    round_page((vaddr_t)&etext), UVM_PROT_READ|UVM_PROT_EXEC, TRUE)
	    != 0)
		panic("can't protect kernel text");

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
420 
421 /*
422  * Set registers on exec.
423  */
424 void
425 setregs(p, pack, stack)
426 	struct proc *p;
427 	struct exec_package *pack;
428 	u_long stack;
429 {
430 	struct frame *frame = (struct frame *)p->p_md.md_regs;
431 
432 	frame->f_sr = PSL_USERSET;
433 	frame->f_pc = pack->ep_entry & ~1;
434 	frame->f_regs[D0] = 0;
435 	frame->f_regs[D1] = 0;
436 	frame->f_regs[D2] = 0;
437 	frame->f_regs[D3] = 0;
438 	frame->f_regs[D4] = 0;
439 	frame->f_regs[D5] = 0;
440 	frame->f_regs[D6] = 0;
441 	frame->f_regs[D7] = 0;
442 	frame->f_regs[A0] = 0;
443 	frame->f_regs[A1] = 0;
444 	frame->f_regs[A2] = (int)p->p_psstr;
445 	frame->f_regs[A3] = 0;
446 	frame->f_regs[A4] = 0;
447 	frame->f_regs[A5] = 0;
448 	frame->f_regs[A6] = 0;
449 	frame->f_regs[SP] = stack;
450 
451 	/* restore a null state frame */
452 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
453 	if (fputype)
454 		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
455 }
456 
457 /*
458  * Info for CTL_HW
459  */
460 char	cpu_model[124];
461 
462 void
463 identifycpu()
464 {
465 	const char *mc;
466 	int len;
467 
468 	/*
469 	 * ...and the CPU type.
470 	 */
471 	switch (cputype) {
472 	case CPU_68040:
473 		mc = "40";
474 		break;
475 	case CPU_68030:
476 		mc = "30";
477 		break;
478 	case CPU_68020:
479 		mc = "20";
480 		break;
481 	default:
482 		printf("\nunknown cputype %d\n", cputype);
483 		goto lose;
484 	}
485 
486 	sprintf(cpu_model, "NeXT/MC680%s CPU",mc);
487 
488 	/*
489 	 * ...and the MMU type.
490 	 */
491 	switch (mmutype) {
492 	case MMU_68040:
493 	case MMU_68030:
494 		strcat(cpu_model, "+MMU");
495 		break;
496 	case MMU_68851:
497 		strcat(cpu_model, ", MC68851 MMU");
498 		break;
499 	case MMU_HP:
500 		strcat(cpu_model, ", HP MMU");
501 		break;
502 	default:
503 		printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
504 		panic("startup");
505 	}
506 
507 	len = strlen(cpu_model);
508 
509 	/*
510 	 * ...and the FPU type.
511 	 */
512 	switch (fputype) {
513 	case FPU_68040:
514 		len += sprintf(cpu_model + len, "+FPU");
515 		break;
516 	case FPU_68882:
517 		len += sprintf(cpu_model + len, ", MC68882 FPU");
518 		break;
519 	case FPU_68881:
520 		len += sprintf(cpu_model + len, ", MHz MC68881 FPU");
521 		break;
522 	default:
523 		len += sprintf(cpu_model + len, ", unknown FPU");
524 	}
525 
526 	/*
527 	 * ...and finally, the cache type.
528 	 */
529 	if (cputype == CPU_68040)
530 		sprintf(cpu_model + len, ", 4k on-chip physical I/D caches");
531 	else {
532 #if defined(ENABLE_HP_CODE)
533 		switch (ectype) {
534 		case EC_VIRT:
535 			sprintf(cpu_model + len,
536 			    ", virtual-address cache");
537 			break;
538 		case EC_PHYS:
539 			sprintf(cpu_model + len,
540 			    ", physical-address cache");
541 			break;
542 		}
543 #endif
544 	}
545 
546 	printf("%s\n", cpu_model);
547 
548 	return;
549  lose:
550 	panic("startup");
551 }
552 
553 /*
554  * machine dependent system variables.
555  */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
#if 0
	dev_t consdev;
#endif

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
#if 0
	/* Disabled: would report the console device for CPU_CONSDEV. */
	case CPU_CONSDEV:
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));
#endif
	default:
		/* No machine-dependent sysctl nodes are supported yet. */
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
589 
590 /* See: sig_machdep.c */
591 
int	waittime = -1;	/* set once filesystems have been synced */

void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{

#if __GNUC__	/* XXX work around lame compiler problem (gcc 2.7.2) */
	(void)&howto;
#endif
	/* take a snap shot before clobbering any registers */
	if (curproc && curproc->p_addr)
		savectx(&curproc->p_addr->u_pcb);

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	/* Sync filesystems unless asked not to, and only once. */
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	splhigh();

	/* If rebooting and a dump is requested, do it. */
	if (howto & RB_DUMP)
		dumpsys();

 haltsys:
	/* Run any shutdown hooks. */
	doshutdownhooks();

#if defined(PANICWAIT) && !defined(DDB)
	/* After a panic, wait for a keystroke before rebooting. */
	if ((howto & RB_HALT) == 0 && panicstr) {
		printf("hit any key to reboot...\n");
		(void)cngetc();
		printf("\n");
	}
#endif

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
		poweroff();
	}

	/* Finally, halt/reboot the system. */
	if (howto & RB_HALT) {
		/* Tell the ROM monitor to halt rather than reboot. */
		monbootflag = 0x2d680000;				/* "-h" */
	}

	printf("rebooting...\n");
	DELAY(1000000);
	doboot();
	/*NOTREACHED*/
}
657 
658 /*
659  * Initialize the kernel crash dump header.
660  */
void
cpu_init_kcore_hdr()
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	int i;
	extern char end[];

	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = NBPG;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 * These constants let savecore/libkvm walk the segment and
	 * page tables in the dump image.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int32_t)(pmap_kernel()->pm_stpa);

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = lowram;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)end;

	/*
	 * The next68k has multiple memory segments.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		m->ram_segs[i].start = mem_clusters[i].start;
		m->ram_segs[i].size  = mem_clusters[i].size;
	}
}
721 
722 /*
723  * Compute the size of the machine-dependent crash dump header.
724  * Returns size in disk blocks.
725  */
726 int
727 cpu_dumpsize()
728 {
729 	int size;
730 
731 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
732 	return (btodb(roundup(size, dbtob(1))));
733 }
734 
735 /*
736  * Called by dumpsys() to dump the machine-dependent header.
737  */
int
cpu_dump(dump, blknop)
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	daddr_t *blknop;
{
	int buf[dbtob(1) / sizeof(int)];	/* one disk block, int-aligned */
	cpu_kcore_hdr_t *chdr;
	kcore_seg_t *kseg;
	int error;

	/*
	 * The MI segment header and the MD header share this single
	 * disk block; chdr starts at the next ALIGN boundary after kseg.
	 */
	kseg = (kcore_seg_t *)buf;
	chdr = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(kcore_seg_t)) /
	    sizeof(int)];

	/* Create the segment header. */
	CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg->c_size = dbtob(1) - ALIGN(sizeof(kcore_seg_t));

	/* Copy in the pre-built MD header and write the block out. */
	bcopy(&cpu_kcore_hdr, chdr, sizeof(cpu_kcore_hdr_t));
	error = (*dump)(dumpdev, *blknop, (caddr_t)buf, sizeof(buf));
	*blknop += btodb(sizeof(buf));	/* advance caller's block cursor */
	return (error);
}
761 
762 /*
763  * These variables are needed by /sbin/savecore
764  */
765 u_int32_t dumpmag = 0x8fca0101;	/* magic number */
766 int	dumpsize = 0;		/* pages */
767 long	dumplo = 0;		/* blocks */
768 
769 /*
770  * This is called by main to set dumplo and dumpsize.
771  * Dumps always skip the first NBPG of disk space
772  * in case there might be a disk label stored there.
773  * If there is extra space, put dump at the end to
774  * reduce the chance that swapping trashes it.
775  */
776 void
777 cpu_dumpconf()
778 {
779 	int chdrsize;	/* size of dump header */
780 	int nblks;	/* size of dump area */
781 	int maj;
782 
783 	if (dumpdev == NODEV)
784 		return;
785 	maj = major(dumpdev);
786 	if (maj < 0 || maj >= nblkdev)
787 		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
788 	if (bdevsw[maj].d_psize == NULL)
789 		return;
790 	nblks = (*bdevsw[maj].d_psize)(dumpdev);
791 	chdrsize = cpu_dumpsize();
792 
793 	dumpsize = btoc(cpu_kcore_hdr.un._m68k.ram_segs[0].size);
794 
795 	/*
796 	 * Check do see if we will fit.  Note we always skip the
797 	 * first NBPG in case there is a disk label there.
798 	 */
799 	if (nblks < (ctod(dumpsize) + chdrsize + ctod(1))) {
800 		dumpsize = 0;
801 		dumplo = -1;
802 		return;
803 	}
804 
805 	/*
806 	 * Put dump at the end of the partition.
807 	 */
808 	dumplo = (nblks - 1) - ctod(dumpsize) - chdrsize;
809 }
810 
811 /*
812  * Dump physical memory onto the dump device.  Called by cpu_reboot().
813  */
void
dumpsys()
{
	daddr_t blkno;		/* current block to write */
				/* dump routine */
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int pg;			/* page being dumped */
	vm_offset_t maddr;	/* PA being dumped */
	int error;		/* error code from (*dump)() */

	/* XXX initialized here because of gcc lossage */
	maddr = lowram;
	pg = 0;

	/* Don't put dump messages in msgbuf. */
	msgbufmapped = 0;

	/* Make sure dump device is valid. */
	if (dumpdev == NODEV)
		return;
	if (dumpsize == 0) {
		cpu_dumpconf();
		if (dumpsize == 0)
			return;
	}
	if (dumplo < 0)
		return;
	dump = bdevsw[major(dumpdev)].d_dump;
	blkno = dumplo;

	printf("\ndumping to dev 0x%x, offset %ld\n", dumpdev, dumplo);

	printf("dump ");

	/* Write the dump header. */
	error = cpu_dump(dump, &blkno);
	if (error)
		goto bad;

	/*
	 * Dump one page per iteration: temporarily map the physical
	 * page at vmmap, write it out, then dispatch on the result.
	 */
	for (pg = 0; pg < dumpsize; pg++) {
#define NPGMB	(1024*1024/NBPG)
		/* print out how many MBs we have dumped */
		if (pg && (pg % NPGMB) == 0)
			printf("%d ", pg / NPGMB);
#undef NPGMB
		pmap_enter(pmap_kernel(), (vm_offset_t)vmmap, maddr,
		    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
		pmap_update(pmap_kernel());

		error = (*dump)(dumpdev, blkno, vmmap, NBPG);
		/*
		 * NB: the header-write failure above jumps here, INTO
		 * the loop, so its error is reported by the same
		 * switch that handles per-page write errors.
		 */
 bad:
		switch (error) {
		case 0:
			/* Page written; advance to the next one. */
			maddr += NBPG;
			blkno += btodb(NBPG);
			break;

		case ENXIO:
			printf("device bad\n");
			return;

		case EFAULT:
			printf("device not ready\n");
			return;

		case EINVAL:
			printf("area improper\n");
			return;

		case EIO:
			printf("i/o error\n");
			return;

		case EINTR:
			printf("aborted from console\n");
			return;

		default:
			printf("error %d\n", error);
			return;
		}
	}
	printf("succeeded\n");
}
898 
void
initcpu()
{
#ifdef MAPPEDCOPY
	/*
	 * Initialize lower bound for doing copyin/copyout using
	 * page mapping (if not already set).  We don't do this on
	 * VAC machines as it loses big time.
	 */
	if (ectype == EC_VIRT)
		mappedcopysize = -1;	/* in case it was patched */
	else
		mappedcopysize = NBPG;
#endif
}
914 
915 void
916 straytrap(pc, evec)
917 	int pc;
918 	u_short evec;
919 {
920 	printf("unexpected trap (vector offset %x) from %x\n",
921 	       evec & 0xFFF, pc);
922 
923 	/* XXX kgdb/ddb entry? */
924 }
925 
/* XXX should change the interface, and make one badaddr() function */

int	*nofault;	/* fault recovery jump buffer pointer (see trap code) */

#if 0
/*
 * badaddr:
 *
 *	Probe an address for nbytes (1, 2 or 4) and return non-zero
 *	if the access faults.  Currently compiled out.
 */
int
badaddr(addr, nbytes)
	caddr_t addr;
	int nbytes;
{
	int i;
	label_t faultbuf;

#ifdef lint
	i = *addr; if (i) return (0);
#endif

	/* Arrange for a fault during the probe to longjmp back here. */
	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		/* The access faulted: the address is bad. */
		nofault = (int *) 0;
		return(1);
	}

	switch (nbytes) {
	case 1:
		i = *(volatile char *)addr;
		break;

	case 2:
		i = *(volatile short *)addr;
		break;

	case 4:
		i = *(volatile int *)addr;
		break;

	default:
		panic("badaddr: bad request");
	}
	nofault = (int *) 0;
	return (0);
}
#endif
969 
970 /*
971  * Level 7 interrupts can be caused by the keyboard or parity errors.
972  */
973 void
974 nmihand(frame)
975 	struct frame frame;
976 {
977   static int innmihand;	/* simple mutex */
978 
979   /* Prevent unwanted recursion. */
980   if (innmihand)
981     return;
982   innmihand = 1;
983 
984   printf("Got a NMI");
985 
986 	if (!INTR_OCCURRED(NEXT_I_NMI)) {
987 		printf("But NMI isn't set in intrstat!\n");
988 	}
989 	INTR_DISABLE(NEXT_I_NMI);
990 
991 #if defined(DDB)
992   printf(": entering debugger\n");
993   Debugger();
994   printf("continuing after NMI\n");
995 #elif defined(KGDB)
996   kgdb_connect(1);
997 #else
998   printf(": ignoring\n");
999 #endif /* DDB */
1000 
1001 	INTR_ENABLE(NEXT_I_NMI);
1002 
1003   innmihand = 0;
1004 }
1005 
1006 
1007 /*
1008  * cpu_exec_aout_makecmds():
1009  *	cpu-dependent a.out format hook for execve().
1010  *
1011  * Determine of the given exec package refers to something which we
1012  * understand and, if so, set up the vmcmds for it.
1013  */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{

	/* a.out executables are not supported on this port. */
	return (ENOEXEC);
}
1021