xref: /netbsd/sys/arch/next68k/next68k/machdep.c (revision c4a72b64)
1 /*	$NetBSD: machdep.c,v 1.54 2002/09/25 22:21:16 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * Copyright (c) 1998 Darrin B. Jewell
8  *
9  * This code is derived from software contributed to Berkeley by
10  * the Systems Programming Group of the University of Utah Computer
11  * Science Department.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by the University of
24  *	California, Berkeley and its contributors.
25  * 4. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
42  *
43  *	@(#)machdep.c	8.10 (Berkeley) 4/20/94
44  */
45 
46 #include "opt_ddb.h"
47 #include "opt_kgdb.h"
48 #include "opt_compat_hpux.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/signalvar.h>
53 #include <sys/kernel.h>
54 #include <sys/proc.h>
55 #include <sys/buf.h>
56 #include <sys/reboot.h>
57 #include <sys/conf.h>
58 #include <sys/file.h>
59 #include <sys/device.h>
60 #include <sys/malloc.h>
61 #include <sys/mbuf.h>
62 #include <sys/msgbuf.h>
63 #include <sys/ioctl.h>
64 #include <sys/tty.h>
65 #include <sys/mount.h>
66 #include <sys/user.h>
67 #include <sys/exec.h>
68 #include <sys/core.h>
69 #include <sys/kcore.h>
70 #include <sys/vnode.h>
71 #include <sys/syscallargs.h>
72 #ifdef KGDB
73 #include <sys/kgdb.h>
74 #endif
75 #include <sys/boot_flag.h>
76 
77 #include <uvm/uvm_extern.h>
78 
79 #ifdef DDB
80 #include <machine/db_machdep.h>
81 #include <ddb/db_access.h>
82 #include <ddb/db_sym.h>
83 #include <ddb/db_extern.h>
84 #endif
85 
86 #ifdef KGDB
87 #include <sys/kgdb.h>
88 
89 /* Is zs configured in? */
90 #include "zsc.h"
91 #if (NZSC > 0)
92 #include <next68k/dev/zs_cons.h>
93 #endif
94 #endif
95 
96 #include <sys/sysctl.h>
97 
98 #include <machine/cpu.h>
99 #include <machine/bus.h>
100 #include <machine/reg.h>
101 #include <machine/psl.h>
102 #include <machine/pte.h>
103 #include <machine/vmparam.h>
104 #include <dev/cons.h>
105 
106 #include <machine/kcore.h>	/* XXX should be pulled in by sys/kcore.h */
107 
108 #include <next68k/next68k/isr.h>
109 #include <next68k/next68k/nextrom.h>
110 #include <next68k/next68k/rtc.h>
111 #include <next68k/next68k/seglist.h>
112 
int nsym;		/* kernel symbol table size; fed to ddb_init() */
char *ssym, *esym;	/* start/end of kernel symbol table (presumably set
			 * up by the bootstrap/locore -- TODO confirm) */

#define	MAXMEM	64*1024	/* XXX - from cmap.h */

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;	/* from <machine/param.h> */

/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;

/* Submaps carved out of kernel_map in cpu_startup(). */
struct vm_map *exec_map = NULL;
struct vm_map *mb_map = NULL;
struct vm_map *phys_map = NULL;

caddr_t	msgbufaddr;		/* KVA of message buffer */
paddr_t msgbufpa;		/* PA of message buffer */

int	maxmem;			/* max memory per process */
int	physmem;		/* size of physical memory */

/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

extern	u_int lowram;		/* base PA of RAM; used as dump start and
				 * kcore relocation value below */
extern	short exframesize[];	/* NOTE(review): presumably m68k exception
				 * frame sizes -- defined elsewhere */

#ifdef COMPAT_HPUX
extern struct emul emul_hpux;
#endif

/* prototypes for local functions */
void	identifycpu __P((void));
void	initcpu __P((void));
void	dumpsys __P((void));

int	cpu_dumpsize __P((void));
int	cpu_dump __P((int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t *));
void	cpu_init_kcore_hdr __P((void));

/* functions called from locore.s */
void next68k_init __P((void));
void straytrap __P((int, u_short));

/*
 * Machine-independent crash dump header info.
 */
cpu_kcore_hdr_t cpu_kcore_hdr;

/*
 * Memory segments initialized in locore, which are eventually loaded
 * as managed VM pages.
 */
phys_seg_list_t phys_seg_list[VM_PHYSSEG_MAX];

/*
 * Memory segments to dump.  This is initialized from the phys_seg_list
 * before pages are stolen from it for VM system overhead.  I.e. this
 * covers the entire range of physical memory.
 */
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
int	mem_cluster_cnt;
178 
179 /****************************************************************/
180 
/*
 * Early initialization, before main() is called.
 *
 * Hands the physical memory segments discovered in locore over to UVM,
 * parses the boot flags handed to us by the ROM, sets up interrupt
 * dispatch, calibrates the delay loop, and maps the kernel message
 * buffer.
 */
void
next68k_init(void)
{
	int i;

	/*
	 * Tell the VM system about available physical memory.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		if (phys_seg_list[i].ps_start == phys_seg_list[i].ps_end) {
			/*
			 * Segment has been completely gobbled up.
			 */
			continue;
		}
		/*
		 * Note the index of the mem cluster is the free
		 * list we want to put the memory on.
		 */
		uvm_page_physload(atop(phys_seg_list[i].ps_start),
				  atop(phys_seg_list[i].ps_end),
				  atop(phys_seg_list[i].ps_start),
				  atop(phys_seg_list[i].ps_end),
				  VM_FREELIST_DEFAULT);
	}

	/* Parse the boot argument string ("-<flags>") into boothowto. */
	{
		char *p = rom_boot_arg;
		boothowto = 0;
		if (*p++ == '-') {
			for (;*p;p++)
				BOOT_FLAG(*p, boothowto);
		}
	}

	/* Initialize the interrupt handlers. */
	isrinit();

	/* Calibrate the delay loop. */
	next68k_calibrate_delay();

	/*
	 * Initialize error message buffer (at end of core).
	 * Map the physical message buffer pages (msgbufpa) at
	 * msgbufaddr, wired, one page at a time.
	 */
	for (i = 0; i < btoc(round_page(MSGBUFSIZE)); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
		    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
	pmap_update(pmap_kernel());
}
235 
/*
 * Console initialization: called early on from main,
 * before vm init or startup.  Do enough configuration
 * to choose and initialize a console.
 *
 * May be called more than once; the first call does full setup
 * (console, debugger), later calls only recalibrate the delay loop.
 */
void
consinit()
{
	static int init = 0;	/* nonzero once first-time setup is done */

	/*
	 * Generic console: sys/dev/cons.c
	 *	Initializes either ite or ser as console.
	 *	Can be called from locore.s and init_main.c.
	 */

	if (!init) {
		cninit();
#if defined(KGDB) && (NZSC > 0)
		zs_kgdb_init();
#endif
#ifdef  DDB
		/* Initialize kernel debugger, if compiled in. */
		ddb_init(nsym, ssym, esym);
#endif
		/* Enter the debugger now if RB_KDB was set at boot. */
		if (boothowto & RB_KDB) {
#if defined(KGDB)
			kgdb_connect(1);
#elif defined(DDB)
			Debugger();
#endif
		}

		init = 1;
	} else {
		next68k_calibrate_delay();
	}
}
274 
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 *
 * Sets up the crash dump header, system tables, the buffer cache,
 * the exec/physio/mbuf submaps, and write-protects kernel text.
 */
void
cpu_startup()
{
	extern char *kernel_text, *etext;
	caddr_t v;
	u_int i, base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char pbuf[9];		/* scratch for format_bytes() output */
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	/* Quiet pmap tracing for the duration of startup. */
	pmapdebug = 0;
#endif

	/*
	 * Initialize the kernel crash dump header.
	 */
	cpu_init_kcore_hdr();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 * (First allocsys() call only sizes; second fills in.)
	 */
	size = (vsize_t)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
		panic("startup: no room for tables");
	if ((allocsys(v, NULL) - v) != size)
		panic("startup: table size inconsistency");


	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	/* Spread bufpages as evenly as possible over nbuf buffers. */
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				      "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
				       VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 FALSE, NULL);

#ifdef DEBUG
	/* Restore pmap tracing now that startup is done. */
	pmapdebug = opmapdebug;
#endif
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Tell the VM system that the area before the text segment
	 * is invalid.
	 *
	 * XXX Should just change KERNBASE and VM_MIN_KERNEL_ADDRESS,
	 * XXX but not right now.
	 */
	if (uvm_map_protect(kernel_map, 0, round_page((vaddr_t)&kernel_text),
	    UVM_PROT_NONE, TRUE) != 0)
		panic("can't mark pre-text pages off-limits");

	/*
	 * Tell the VM system that writing to the kernel text isn't allowed.
	 * If we don't, we might end up COW'ing the text segment!
	 */
	if (uvm_map_protect(kernel_map, trunc_page((vaddr_t)&kernel_text),
	    round_page((vaddr_t)&etext), UVM_PROT_READ|UVM_PROT_EXEC, TRUE)
	    != 0)
		panic("can't protect kernel text");

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
416 
/*
 * Set registers on exec.
 *
 * Build the initial user-mode register state for a freshly exec'd
 * process: user PSL, PC at the image entry point (forced even),
 * all data/address registers cleared except A2 (which carries
 * p_psstr to userland) and SP (top of the new stack), and the FPU
 * reset to a null state frame.
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	struct frame *frame = (struct frame *)p->p_md.md_regs;

	frame->f_sr = PSL_USERSET;
	frame->f_pc = pack->ep_entry & ~1;	/* instruction addresses are even */
	frame->f_regs[D0] = 0;
	frame->f_regs[D1] = 0;
	frame->f_regs[D2] = 0;
	frame->f_regs[D3] = 0;
	frame->f_regs[D4] = 0;
	frame->f_regs[D5] = 0;
	frame->f_regs[D6] = 0;
	frame->f_regs[D7] = 0;
	frame->f_regs[A0] = 0;
	frame->f_regs[A1] = 0;
	frame->f_regs[A2] = (int)p->p_psstr;	/* presumably ps_strings -- see exec */
	frame->f_regs[A3] = 0;
	frame->f_regs[A4] = 0;
	frame->f_regs[A5] = 0;
	frame->f_regs[A6] = 0;
	frame->f_regs[SP] = stack;

	/* restore a null state frame */
	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
	if (fputype)
		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
}
452 
453 /*
454  * Info for CTL_HW
455  */
456 char	cpu_model[124];
457 
458 void
459 identifycpu()
460 {
461 	const char *mc;
462 	int len;
463 
464 	/*
465 	 * ...and the CPU type.
466 	 */
467 	switch (cputype) {
468 	case CPU_68040:
469 		mc = "40";
470 		break;
471 	case CPU_68030:
472 		mc = "30";
473 		break;
474 	case CPU_68020:
475 		mc = "20";
476 		break;
477 	default:
478 		printf("\nunknown cputype %d\n", cputype);
479 		goto lose;
480 	}
481 
482 	sprintf(cpu_model, "NeXT/MC680%s CPU",mc);
483 
484 	/*
485 	 * ...and the MMU type.
486 	 */
487 	switch (mmutype) {
488 	case MMU_68040:
489 	case MMU_68030:
490 		strcat(cpu_model, "+MMU");
491 		break;
492 	case MMU_68851:
493 		strcat(cpu_model, ", MC68851 MMU");
494 		break;
495 	case MMU_HP:
496 		strcat(cpu_model, ", HP MMU");
497 		break;
498 	default:
499 		printf("%s\nunknown MMU type %d\n", cpu_model, mmutype);
500 		panic("startup");
501 	}
502 
503 	len = strlen(cpu_model);
504 
505 	/*
506 	 * ...and the FPU type.
507 	 */
508 	switch (fputype) {
509 	case FPU_68040:
510 		len += sprintf(cpu_model + len, "+FPU");
511 		break;
512 	case FPU_68882:
513 		len += sprintf(cpu_model + len, ", MC68882 FPU");
514 		break;
515 	case FPU_68881:
516 		len += sprintf(cpu_model + len, ", MHz MC68881 FPU");
517 		break;
518 	default:
519 		len += sprintf(cpu_model + len, ", unknown FPU");
520 	}
521 
522 	/*
523 	 * ...and finally, the cache type.
524 	 */
525 	if (cputype == CPU_68040)
526 		sprintf(cpu_model + len, ", 4k on-chip physical I/D caches");
527 	else {
528 #if defined(ENABLE_HP_CODE)
529 		switch (ectype) {
530 		case EC_VIRT:
531 			sprintf(cpu_model + len,
532 			    ", virtual-address cache");
533 			break;
534 		case EC_PHYS:
535 			sprintf(cpu_model + len,
536 			    ", physical-address cache");
537 			break;
538 		}
539 #endif
540 	}
541 
542 	printf("%s\n", cpu_model);
543 
544 	return;
545  lose:
546 	panic("startup");
547 }
548 
/*
 * machine dependent system variables.
 *
 * CTL_MACHDEP sysctl handler.  No nodes are currently supported on
 * this port (the CPU_CONSDEV case is compiled out), so every request
 * returns EOPNOTSUPP.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
#if 0
	dev_t consdev;
#endif

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
#if 0
	case CPU_CONSDEV:
		/* Report the console device, or NODEV if none chosen. */
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
585 
/* See: sig_machdep.c */

int	waittime = -1;		/* >= 0 once disks have been synced */

/*
 * Machine-dependent reboot/halt/powerdown.  "howto" is a mask of
 * RB_* flags from <sys/reboot.h>; "bootstr" is unused on this port.
 * Syncs the disks (unless RB_NOSYNC or cold), optionally dumps core
 * (RB_DUMP), runs shutdown hooks, then powers off, halts via the ROM
 * monitor, or reboots.  Does not return.
 */
void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{

#if __GNUC__	/* XXX work around lame compiler problem (gcc 2.7.2) */
	(void)&howto;
#endif
	/* take a snap shot before clobbering any registers */
	if (curproc && curproc->p_addr)
		savectx(&curproc->p_addr->u_pcb);

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	splhigh();

	/* If rebooting and a dump is requested, do it. */
	if (howto & RB_DUMP)
		dumpsys();

 haltsys:
	/* Run any shutdown hooks. */
	doshutdownhooks();

#if defined(PANICWAIT) && !defined(DDB)
	/* After a panic, wait for a keypress before rebooting. */
	if ((howto & RB_HALT) == 0 && panicstr) {
		printf("hit any key to reboot...\n");
		(void)cngetc();
		printf("\n");
	}
#endif

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
		poweroff();
	}

	/* Finally, halt/reboot the system. */
	if (howto & RB_HALT) {
		/* Ask the ROM monitor to halt instead of rebooting. */
		monbootflag = 0x2d680000;				/* "-h" */
	}

	printf("rebooting...\n");
	DELAY(1000000);
	doboot();
	/*NOTREACHED*/
}
653 
/*
 * Initialize the kernel crash dump header.
 *
 * Fills in cpu_kcore_hdr with the MMU constants, kernel segment table
 * address, relocation information and RAM segment list that crash
 * dump consumers (e.g. savecore) use to interpret a dump.
 */
void
cpu_init_kcore_hdr()
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	struct m68k_kcore_hdr *m = &h->un._m68k;
	int i;
	extern char end[];	/* end of the kernel image (linker symbol) */

	bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));

	/*
	 * Initialize the `dispatcher' portion of the header.
	 */
	strcpy(h->name, machine);
	h->page_size = NBPG;
	h->kernbase = KERNBASE;

	/*
	 * Fill in information about our MMU configuration.
	 */
	m->mmutype	= mmutype;
	m->sg_v		= SG_V;
	m->sg_frame	= SG_FRAME;
	m->sg_ishift	= SG_ISHIFT;
	m->sg_pmask	= SG_PMASK;
	m->sg40_shift1	= SG4_SHIFT1;
	m->sg40_mask2	= SG4_MASK2;
	m->sg40_shift2	= SG4_SHIFT2;
	m->sg40_mask3	= SG4_MASK3;
	m->sg40_shift3	= SG4_SHIFT3;
	m->sg40_addr1	= SG4_ADDR1;
	m->sg40_addr2	= SG4_ADDR2;
	m->pg_v		= PG_V;
	m->pg_frame	= PG_FRAME;

	/*
	 * Initialize pointer to kernel segment table.
	 */
	m->sysseg_pa = (u_int32_t)(pmap_kernel()->pm_stpa);

	/*
	 * Initialize relocation value such that:
	 *
	 *	pa = (va - KERNBASE) + reloc
	 */
	m->reloc = lowram;

	/*
	 * Define the end of the relocatable range.
	 */
	m->relocend = (u_int32_t)end;

	/*
	 * The next68k has multiple memory segments.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		m->ram_segs[i].start = mem_clusters[i].start;
		m->ram_segs[i].size  = mem_clusters[i].size;
	}
}
717 
718 /*
719  * Compute the size of the machine-dependent crash dump header.
720  * Returns size in disk blocks.
721  */
722 int
723 cpu_dumpsize()
724 {
725 	int size;
726 
727 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
728 	return (btodb(roundup(size, dbtob(1))));
729 }
730 
/*
 * Called by dumpsys() to dump the machine-dependent header.
 *
 * Writes one disk block containing the kcore segment header followed
 * by cpu_kcore_hdr via the supplied "dump" routine, and advances
 * *blknop past the blocks written.  Returns 0 or an errno from the
 * dump routine.
 */
int
cpu_dump(dump, blknop)
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	daddr_t *blknop;
{
	int buf[dbtob(1) / sizeof(int)];	/* one disk block, int-aligned */
	cpu_kcore_hdr_t *chdr;
	kcore_seg_t *kseg;
	int error;

	/* Segment header at the front, CPU header ALIGN'ed after it. */
	kseg = (kcore_seg_t *)buf;
	chdr = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(kcore_seg_t)) /
	    sizeof(int)];

	/* Create the segment header. */
	CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	kseg->c_size = dbtob(1) - ALIGN(sizeof(kcore_seg_t));

	bcopy(&cpu_kcore_hdr, chdr, sizeof(cpu_kcore_hdr_t));
	error = (*dump)(dumpdev, *blknop, (caddr_t)buf, sizeof(buf));
	*blknop += btodb(sizeof(buf));
	return (error);
}
757 
/*
 * These variables are needed by /sbin/savecore
 */
u_int32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first NBPG of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf()
{
	const struct bdevsw *bdev;
	int chdrsize;	/* size of dump header */
	int nblks;	/* size of dump area */

	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	if (bdev->d_psize == NULL)
		return;
	nblks = (*bdev->d_psize)(dumpdev);
	chdrsize = cpu_dumpsize();

	/*
	 * NOTE(review): only ram_segs[0] is sized here, although the
	 * next68k can have multiple RAM segments (see mem_clusters) --
	 * confirm whether later segments are intentionally not dumped.
	 */
	dumpsize = btoc(cpu_kcore_hdr.un._m68k.ram_segs[0].size);

	/*
	 * Check do see if we will fit.  Note we always skip the
	 * first NBPG in case there is a disk label there.
	 */
	if (nblks < (ctod(dumpsize) + chdrsize + ctod(1))) {
		/* Not enough room; disable dumping. */
		dumpsize = 0;
		dumplo = -1;
		return;
	}

	/*
	 * Put dump at the end of the partition.
	 */
	dumplo = (nblks - 1) - ctod(dumpsize) - chdrsize;
}
806 
/*
 * Dump physical memory onto the dump device.  Called by cpu_reboot().
 *
 * Writes the machine-dependent header, then copies physical memory
 * one page at a time, mapping each page at vmmap before writing it.
 * Progress (in MB) is printed to the console.
 */
void
dumpsys()
{
	const struct bdevsw *bdev;
	daddr_t blkno;		/* current block to write */
				/* dump routine */
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int pg;			/* page being dumped */
	vm_offset_t maddr;	/* PA being dumped */
	int error;		/* error code from (*dump)() */

	/* XXX initialized here because of gcc lossage */
	maddr = lowram;
	pg = 0;

	/* Don't put dump messages in msgbuf. */
	msgbufmapped = 0;

	/* Make sure dump device is valid. */
	if (dumpdev == NODEV)
		return;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		return;
	if (dumpsize == 0) {
		/* Dump geometry was never configured; try once now. */
		cpu_dumpconf();
		if (dumpsize == 0)
			return;
	}
	if (dumplo < 0)
		return;
	dump = bdev->d_dump;
	blkno = dumplo;

	printf("\ndumping to dev 0x%x, offset %ld\n", dumpdev, dumplo);

	printf("dump ");

	/* Write the dump header. */
	error = cpu_dump(dump, &blkno);
	if (error)
		goto bad;	/* jump into the error switch below */

	for (pg = 0; pg < dumpsize; pg++) {
#define NPGMB	(1024*1024/NBPG)
		/* print out how many MBs we have dumped */
		if (pg && (pg % NPGMB) == 0)
			printf("%d ", pg / NPGMB);
#undef NPGMB
		/* Map the physical page at vmmap, read-only and wired. */
		pmap_enter(pmap_kernel(), (vm_offset_t)vmmap, maddr,
		    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
		pmap_update(pmap_kernel());

		error = (*dump)(dumpdev, blkno, vmmap, NBPG);
		/*
		 * NOTE: "bad" is deliberately inside the loop; the
		 * switch both advances the dump on success (case 0)
		 * and reports/aborts on any error, including a header
		 * write failure that jumped here from above.
		 */
 bad:
		switch (error) {
		case 0:
			maddr += NBPG;
			blkno += btodb(NBPG);
			break;

		case ENXIO:
			printf("device bad\n");
			return;

		case EFAULT:
			printf("device not ready\n");
			return;

		case EINVAL:
			printf("area improper\n");
			return;

		case EIO:
			printf("i/o error\n");
			return;

		case EINTR:
			printf("aborted from console\n");
			return;

		default:
			printf("error %d\n", error);
			return;
		}
	}
	printf("succeeded\n");
}
898 
/*
 * CPU-specific setup called from cpu_startup().  Currently only
 * configures the mapped-copy threshold when MAPPEDCOPY is enabled.
 */
void
initcpu()
{
#ifdef MAPPEDCOPY
	/*
	 * Initialize lower bound for doing copyin/copyout using
	 * page mapping (if not already set).  We don't do this on
	 * VAC machines as it loses big time.
	 */
	if (ectype == EC_VIRT)
		mappedcopysize = -1;	/* in case it was patched */
	else
		mappedcopysize = NBPG;
#endif
}
914 
/*
 * Catch-all handler for unexpected traps (called from locore.s):
 * log the vector offset and the faulting PC, then carry on.
 */
void
straytrap(pc, evec)
	int pc;
	u_short evec;
{
	printf("unexpected trap (vector offset %x) from %x\n",
	       evec & 0xFFF, pc);

	/* XXX kgdb/ddb entry? */
}
925 
/* XXX should change the interface, and make one badaddr() function */

int	*nofault;	/* set while probing an address; presumably consulted
			 * by the trap handler for fault recovery -- see the
			 * trap code (not in this file) */

#if 0
/*
 * Probe "addr" for validity by reading "nbytes" (1, 2 or 4) from it
 * under a setjmp fault catcher.  Returns 1 if the access faulted,
 * 0 otherwise.  Currently compiled out (#if 0).
 */
int
badaddr(addr, nbytes)
	caddr_t addr;
	int nbytes;
{
	int i;
	label_t faultbuf;

#ifdef lint
	i = *addr; if (i) return (0);
#endif

	/* Arrange for the trap handler to longjmp back here on a fault. */
	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		nofault = (int *) 0;
		return(1);
	}

	switch (nbytes) {
	case 1:
		i = *(volatile char *)addr;
		break;

	case 2:
		i = *(volatile short *)addr;
		break;

	case 4:
		i = *(volatile int *)addr;
		break;

	default:
		panic("badaddr: bad request");
	}
	nofault = (int *) 0;
	return (0);
}
#endif
969 
970 /*
971  * Level 7 interrupts can be caused by the keyboard or parity errors.
972  */
973 int
974 nmihand(frame)
975 	void *frame;
976 {
977   static int innmihand;	/* simple mutex */
978 
979   /* Prevent unwanted recursion. */
980   if (innmihand)
981     return 0;
982   innmihand = 1;
983 
984   printf("Got a NMI");
985 
986 	if (!INTR_OCCURRED(NEXT_I_NMI)) {
987 		printf("But NMI isn't set in intrstat!\n");
988 	}
989 	INTR_DISABLE(NEXT_I_NMI);
990 
991 #if defined(DDB)
992   printf(": entering debugger\n");
993   Debugger();
994   printf("continuing after NMI\n");
995 #elif defined(KGDB)
996   kgdb_connect(1);
997 #else
998   printf(": ignoring\n");
999 #endif /* DDB */
1000 
1001 	INTR_ENABLE(NEXT_I_NMI);
1002 
1003   innmihand = 0;
1004 
1005   return 0;
1006 }
1007 
1008 
1009 /*
1010  * cpu_exec_aout_makecmds():
1011  *	cpu-dependent a.out format hook for execve().
1012  *
1013  * Determine of the given exec package refers to something which we
1014  * understand and, if so, set up the vmcmds for it.
1015  */
1016 int
1017 cpu_exec_aout_makecmds(p, epp)
1018     struct proc *p;
1019     struct exec_package *epp;
1020 {
1021     return ENOEXEC;
1022 }
1023