xref: /netbsd/sys/arch/mvme68k/mvme68k/machdep.c (revision c4a72b64)
1 /*	$NetBSD: machdep.c,v 1.95 2002/10/20 02:37:29 chs Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1982, 1986, 1990, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah $Hdr: machdep.c 1.74 92/12/20$
41  *
42  *	@(#)machdep.c	8.10 (Berkeley) 4/20/94
43  */
44 
45 #include "opt_ddb.h"
46 #include "opt_compat_hpux.h"
47 #include "opt_m060sp.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/signalvar.h>
52 #include <sys/kernel.h>
53 #include <sys/proc.h>
54 #include <sys/buf.h>
55 #include <sys/reboot.h>
56 #include <sys/conf.h>
57 #include <sys/file.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/msgbuf.h>
61 #include <sys/ioctl.h>
62 #include <sys/tty.h>
63 #include <sys/mount.h>
64 #include <sys/user.h>
65 #include <sys/exec.h>
66 #include <sys/core.h>
67 #include <sys/kcore.h>
68 #include <sys/vnode.h>
69 #include <sys/syscallargs.h>
70 
71 #if defined(DDB) && defined(__ELF__)
72 #include <sys/exec_elf.h>
73 #endif
74 
75 #include <uvm/uvm_extern.h>
76 
77 #include <sys/sysctl.h>
78 
79 #include <machine/cpu.h>
80 #define _MVME68K_BUS_DMA_PRIVATE
81 #include <machine/bus.h>
82 #undef _MVME68K_BUS_DMA_PRIVATE
83 #include <machine/reg.h>
84 #include <machine/prom.h>
85 #include <machine/psl.h>
86 #include <machine/pte.h>
87 #include <machine/vmparam.h>
88 #include <m68k/cacheops.h>
89 #include <dev/cons.h>
90 
91 #include <machine/kcore.h>	/* XXX should be pulled in by sys/kcore.h */
92 
93 #include <mvme68k/dev/mainbus.h>
94 #include <mvme68k/mvme68k/seglist.h>
95 
96 #ifdef DDB
97 #include <machine/db_machdep.h>
98 #include <ddb/db_extern.h>
99 #include <ddb/db_output.h>
100 #endif
101 
102 #define	MAXMEM	64*1024	/* XXX - from cmap.h */
103 
104 /* the following is used externally (sysctl_hw) */
105 char	machine[] = MACHINE;	/* from <machine/param.h> */
106 
107 /* Our exported CPU info; we can have only one. */
108 struct cpu_info cpu_info_store;
109 
110 struct vm_map *exec_map = NULL;
111 struct vm_map *mb_map = NULL;
112 struct vm_map *phys_map = NULL;
113 
114 /*
115  * Model information, filled in by the Bug; see locore.s
116  */
117 struct	mvmeprom_brdid  boardid;
118 
119 caddr_t	msgbufaddr;		/* KVA of message buffer */
120 paddr_t msgbufpa;		/* PA of message buffer */
121 
122 int	maxmem;			/* max memory per process */
123 int	physmem;		/* size of physical memory */
124 
125 /*
126  * safepri is a safe priority for sleep to set for a spin-wait
127  * during autoconfiguration or after a panic.
128  */
129 int	safepri = PSL_LOWIPL;
130 
131 /*
132  * The driver for the ethernet chip appropriate to the
133  * platform (lance or i82586) will use this variable
134  * to size the chip's packet buffer.
135  */
136 #ifndef ETHER_DATA_BUFF_PAGES
137 #define	ETHER_DATA_BUFF_PAGES	4
138 #endif
139 u_long	ether_data_buff_size = ETHER_DATA_BUFF_PAGES * NBPG;
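/*
 * Illustrative arithmetic (an added note; assumes the usual 4KB m68k
 * page size, adjust for the actual NBPG): the default of 4 pages gives
 * a 16KB packet buffer for the on-board lance/i82586.
 */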
140 u_char	mvme_ea[6];
141 
142 extern	u_int lowram;
143 extern	short exframesize[];
144 
145 #ifdef COMPAT_HPUX
146 extern struct emul emul_hpux;
147 #endif
148 
149 /* prototypes for local functions */
150 void	identifycpu __P((void));
151 void	initcpu __P((void));
152 void	dumpsys __P((void));
153 
154 int	cpu_dumpsize __P((void));
155 int	cpu_dump __P((int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t *));
156 void	cpu_init_kcore_hdr __P((void));
157 u_long	cpu_dump_mempagecnt __P((void));
158 int	cpu_exec_aout_makecmds __P((struct proc *, struct exec_package *));
159 void	straytrap __P((int, u_short));
160 
161 /*
162  * Machine-independent crash dump header info.
163  */
164 cpu_kcore_hdr_t cpu_kcore_hdr;
165 
166 /*
167  * Memory segments initialized in locore, which are eventually loaded
168  * as managed VM pages.
169  */
170 phys_seg_list_t phys_seg_list[VM_PHYSSEG_MAX];
171 
172 /*
173  * Memory segments to dump.  This is initialized from the phys_seg_list
174  * before pages are stolen from it for VM system overhead.  I.e. this
175  * covers the entire range of physical memory.
176  */
177 phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
178 int	mem_cluster_cnt;
179 
180 /*
181  * On the 68020/68030 (mvme14x), the value of delay_divisor is roughly
182  * 8192 / cpuspeed (where cpuspeed is in MHz).
183  *
184  * On the other boards (mvme162 and up), the cpuspeed is passed
185  * in from the firmware.
186  */
187 int	cpuspeed;		/* only used for printing later */
188 int	delay_divisor = 512;	/* assume some reasonable value to start */
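/*
 * Worked example (added note, not from the original source): by the
 * formula above, a 25MHz 68030 wants delay_divisor ~= 8192 / 25 ~= 327;
 * the initial value of 512 corresponds to a 16MHz part and is refined
 * by the calibration loops in mvme147_init()/mvme1xx_init() below.
 */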
189 
190 /*
191  * Since mvme68k boards can have anything from 4MB of onboard RAM
192  * upwards, we prefer to set PAGER_MAP_SIZE at runtime based on the
193  * amount of onboard RAM.
194  */
195 int	mvme68k_pager_map_size;
196 
197 /* Machine-dependent initialization routines. */
198 void	mvme68k_init __P((void));
199 
200 #ifdef MVME147
201 #include <mvme68k/dev/pccreg.h>
202 void	mvme147_init __P((void));
203 #endif
204 
205 #if defined(MVME162) || defined(MVME167) || defined(MVME172) || defined(MVME177)
206 #include <dev/mvme/pcctworeg.h>
207 void	mvme1xx_init __P((void));
208 #endif
209 
210 /*
211  * Early initialization, right before main is called.
212  */
213 void
214 mvme68k_init()
215 {
216 	int i;
217 
218 	/*
219 	 * Set PAGER_MAP_SIZE to half the size of onboard RAM, up to a
220 	 * maximum of 16MB.
221 	 * (Note: Just use ps_end here since onboard RAM starts at 0x0)
222 	 */
223 	mvme68k_pager_map_size = phys_seg_list[0].ps_end / 2;
224 	if (mvme68k_pager_map_size > (16 * 1024 * 1024))
225 		mvme68k_pager_map_size = 16 * 1024 * 1024;
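	/*
	 * Sizing example (added note): a board with 8MB of onboard RAM
	 * has ps_end == 0x800000, giving a 4MB pager map; boards with
	 * more than 32MB are clamped to the 16MB maximum above.
	 */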
226 
227 	/*
228 	 * Tell the VM system about available physical memory.
229 	 */
230 	for (i = 0; i < mem_cluster_cnt; i++) {
231 		if (phys_seg_list[i].ps_start == phys_seg_list[i].ps_end) {
232 			/*
233 			 * Segment has been completely gobbled up.
234 			 */
235 			continue;
236 		}
237 		/*
238 		 * Note the index of the mem cluster is the free
239 		 * list we want to put the memory on (0 == default,
240 		 * 1 == VME).  There can only be two.
241 		 */
242 		uvm_page_physload(atop(phys_seg_list[i].ps_start),
243 				 atop(phys_seg_list[i].ps_end),
244 				 atop(phys_seg_list[i].ps_start),
245 				 atop(phys_seg_list[i].ps_end), i);
246 	}
247 
248 	switch (machineid) {
249 #ifdef MVME147
250 	case MVME_147:
251 		mvme147_init();
252 		break;
253 #endif
254 #ifdef MVME167
255 	case MVME_167:
256 #endif
257 #ifdef MVME162
258 	case MVME_162:
259 #endif
260 #ifdef MVME177
261 	case MVME_177:
262 #endif
263 #ifdef MVME172
264 	case MVME_172:
265 #endif
266 #if defined(MVME162) || defined(MVME167) || defined(MVME172) || defined(MVME177)
267 		mvme1xx_init();
268 		break;
269 #endif
270 	default:
271 		panic("mvme68k_init: impossible machineid");
272 	}
273 
274 	/*
275 	 * Initialize error message buffer (at end of core).
276 	 */
277 	for (i = 0; i < btoc(round_page(MSGBUFSIZE)); i++)
278 		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
279 		    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE,
280 		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
281 	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
282 	pmap_update(pmap_kernel());
283 }
284 
285 #ifdef MVME147
286 /*
287  * MVME-147 specific initialization.
288  */
289 void
290 mvme147_init()
291 {
292 	bus_space_tag_t bt = &_mainbus_space_tag;
293 	bus_space_handle_t bh;
294 
295 	/*
296 	 * Set up a temporary mapping to the PCC's registers
297 	 */
298 	bus_space_map(bt, intiobase_phys + MAINBUS_PCC_OFFSET, PCCREG_SIZE, 0, &bh);
299 
300 	/*
301 	 * calibrate delay() using the 6.25 usec counter.
302 	 * we adjust the delay_divisor until we get the result we want.
303 	 */
304 	bus_space_write_1(bt, bh, PCCREG_TMR1_CONTROL, PCC_TIMERCLEAR);
305 	bus_space_write_2(bt, bh, PCCREG_TMR1_PRELOAD, 0);
306 	bus_space_write_1(bt, bh, PCCREG_TMR1_INTR_CTRL, 0);
307 
308 	for (delay_divisor = 512; delay_divisor > 0; delay_divisor--) {
309 		bus_space_write_1(bt, bh, PCCREG_TMR1_CONTROL, PCC_TIMERSTART);
310 		delay(10000);
311 		bus_space_write_1(bt, bh, PCCREG_TMR1_CONTROL, PCC_TIMERSTOP);
312 
313 		/* 1600 * 6.25usec == 10000usec */
314 		if (bus_space_read_2(bt, bh, PCCREG_TMR1_COUNT) > 1600)
315 			break;	/* got it! */
316 
317 		bus_space_write_1(bt, bh, PCCREG_TMR1_CONTROL, PCC_TIMERCLEAR);
318 		/* retry! */
319 	}
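	/*
	 * Explanatory note (added): the PCC timer ticks every 6.25us, so
	 * a count above 1600 means the delay(10000) call really lasted at
	 * least 10000us.  While delay_divisor is still too large for the
	 * actual CPU clock, delay() returns early and the count stays
	 * below 1600; the divisor is decremented until the measured time
	 * first reaches the full 10ms.
	 */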
320 
321 	bus_space_unmap(bt, bh, PCCREG_SIZE);
322 
323 	/* calculate cpuspeed */
324 	cpuspeed = 8192 / delay_divisor;
325 	cpuspeed *= 100;
326 }
327 #endif /* MVME147 */
328 
329 #if defined(MVME162) || defined(MVME167) || defined(MVME172) || defined(MVME177)
330 int	get_cpuspeed __P((void));
331 
332 /*
333  * MVME-1[67]x specific initialization.
334  */
335 void
336 mvme1xx_init()
337 {
338 	bus_space_tag_t bt = &_mainbus_space_tag;
339 	bus_space_handle_t bh;
340 
341 	/*
342 	 * Set up a temporary mapping to the PCCChip2's registers
343 	 */
344 	bus_space_map(bt, intiobase_phys + MAINBUS_PCCTWO_OFFSET + PCCTWO_REG_OFF,
345 	    PCC2REG_SIZE, 0, &bh);
346 
347 	bus_space_write_1(bt, bh, PCC2REG_TIMER1_ICSR, 0);
348 
349 	for (delay_divisor = (cputype == CPU_68060) ? 20 : 154;
350 	    delay_divisor > 0; delay_divisor--) {
351 		bus_space_write_4(bt, bh, PCC2REG_TIMER1_COUNTER, 0);
352 		bus_space_write_1(bt, bh, PCC2REG_TIMER1_CONTROL,
353 		    PCCTWO_TT_CTRL_CEN);
354 		delay(10000);
355 		bus_space_write_1(bt, bh, PCC2REG_TIMER1_CONTROL, 0);
356 		if (bus_space_read_4(bt, bh, PCC2REG_TIMER1_COUNTER) > 10000)
357 			break;	/* got it! */
358 	}
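	/*
	 * Explanatory note (added): the PCCchip2 tick timer is counting
	 * microseconds here, so the loop stops at the first delay_divisor
	 * for which delay(10000) is measured to take at least 10000us.
	 * The starting values (20 for a 68060, 154 otherwise) are simply
	 * conservative slow-CPU guesses that the loop then refines.
	 */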
359 
360 	bus_space_unmap(bt, bh, PCC2REG_SIZE);
361 
362 	/* calculate cpuspeed */
363 	cpuspeed = get_cpuspeed();
364 	if (cpuspeed < 1250 || cpuspeed > 6000) {
365 		printf("mvme1xx_init: Warning! Firmware has " \
366 		    "bogus CPU speed: `%s'\n", boardid.speed);
367 		cpuspeed = ((cputype == CPU_68060) ? 1000 : 3072) /
368 		    delay_divisor;
369 		cpuspeed *= 100;
370 		printf("mvme1xx_init: Approximating speed using "\
371 		    "delay_divisor\n");
372 	}
373 }
374 
375 /*
376  * Parse the `speed' field of Bug's boardid structure.
377  */
378 int
379 get_cpuspeed()
380 {
381 	int rv, i;
382 
383 	for (i = 0, rv = 0; i < sizeof(boardid.speed); i++) {
384 		if (boardid.speed[i] < '0' || boardid.speed[i] > '9')
385 			return (0);
386 		rv = (rv * 10) + (boardid.speed[i] - '0');
387 	}
388 
389 	return (rv);
390 }
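/*
 * Example (added note): a `speed' field of "2500" parses to 2500, which
 * identifycpu() below reports as 25.0MHz, i.e. the value is in units of
 * 10kHz:
 *
 *	cpuspeed = get_cpuspeed();			-> 2500
 *	printf("%d.%dMHz", cpuspeed / 100,
 *	    (cpuspeed % 100) / 10);			-> prints "25.0MHz"
 *
 * Any non-digit in the field makes the routine return 0, which then
 * trips the sanity check in mvme1xx_init() above.
 */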
391 #endif
392 
393 /*
394  * Console initialization: called early on from main,
395  * before vm init or startup.  Do enough configuration
396  * to choose and initialize a console.
397  */
398 void
399 consinit()
400 {
401 
402 	/*
403 	 * Initialize the console before we print anything out.
404 	 */
405 	cninit();
406 
407 #ifdef DDB
408 	{
409 		extern int end;
410 		extern int *esym;
411 
412 #ifndef __ELF__
413 		ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
414 #else
415 		ddb_init((int)esym - (int)&end - sizeof(Elf32_Ehdr),
416 		    (void *)&end, esym);
417 #endif
418 	}
419 	if (boothowto & RB_KDB)
420 		Debugger();
421 #endif
422 }
423 
424 /*
425  * cpu_startup: allocate memory for variable-sized tables,
426  * initialize cpu, and do autoconfiguration.
427  */
428 void
429 cpu_startup()
430 {
431 	extern char *kernel_text, *etext;
432 	caddr_t v;
433 	u_int i, base, residual;
434 	u_quad_t vmememsize;
435 	vaddr_t minaddr, maxaddr;
436 	vsize_t size;
437 	char pbuf[9];
438 #ifdef DEBUG
439 	extern int pmapdebug;
440 	int opmapdebug = pmapdebug;
441 
442 	pmapdebug = 0;
443 #endif
444 
445 	/*
446 	 * Initialize the kernel crash dump header.
447 	 */
448 	cpu_init_kcore_hdr();
449 
450 	/*
451 	 * Good {morning,afternoon,evening,night}.
452 	 */
453 	printf(version);
454 	identifycpu();
455 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
456 	printf("total memory = %s", pbuf);
457 
458 	for (vmememsize = 0, i = 1; i < mem_cluster_cnt; i++)
459 		vmememsize += mem_clusters[i].size;
460 	if (vmememsize != 0) {
461 		format_bytes(pbuf, sizeof(pbuf), mem_clusters[0].size);
462 		printf(" (%s on-board", pbuf);
463 		format_bytes(pbuf, sizeof(pbuf), vmememsize);
464 		printf(", %s VMEbus)", pbuf);
465 	}
466 
467 	printf("\n");
468 
469 	/*
470 	 * Find out how much space we need, allocate it,
471 	 * and then give everything true virtual addresses.
472 	 */
473 	size = (vsize_t)allocsys(NULL, NULL);
474 	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
475 		panic("startup: no room for tables");
476 	if ((allocsys(v, NULL) - v) != size)
477 		panic("startup: table size inconsistency");
478 
479 
480 	/*
481 	 * Now allocate buffers proper.  They differ from the above
482 	 * in that they usually occupy more virtual memory than physical.
483 	 */
484 	size = MAXBSIZE * nbuf;
485 	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
486 		    NULL, UVM_UNKNOWN_OFFSET, 0,
487 		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
488 				UVM_ADV_NORMAL, 0)) != 0)
489 		panic("startup: cannot allocate VM for buffers");
490 	minaddr = (vaddr_t)buffers;
491 	base = bufpages / nbuf;
492 	residual = bufpages % nbuf;
493 	for (i = 0; i < nbuf; i++) {
494 		vsize_t curbufsize;
495 		vaddr_t curbuf;
496 		struct vm_page *pg;
497 
498 		/*
499 		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
500 		 * that MAXBSIZE space, we allocate and map (base+1) pages
501 		 * for the first "residual" buffers, and then we allocate
502 		 * "base" pages for the rest.
503 		 */
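		/*
		 * Worked example (added note): with bufpages == 100 and
		 * nbuf == 16, base is 6 and residual is 4, so the first
		 * 4 buffers map 7 pages each and the remaining 12 map 6,
		 * accounting for all 100 pages.
		 */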
504 		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
505 		curbufsize = NBPG * ((i < residual) ? (base+1) : base);
506 
507 		while (curbufsize) {
508 			pg = uvm_pagealloc(NULL, 0, NULL, 0);
509 			if (pg == NULL)
510 				panic("cpu_startup: not enough memory for "
511 				      "buffer cache");
512 			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
513 				       VM_PROT_READ|VM_PROT_WRITE);
514 			curbuf += PAGE_SIZE;
515 			curbufsize -= PAGE_SIZE;
516 		}
517 	}
518 	pmap_update(pmap_kernel());
519 
520 	/*
521 	 * Allocate a submap for exec arguments.  This map effectively
522 	 * limits the number of processes exec'ing at any time.
523 	 */
524 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
525 				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
526 	/*
527 	 * Allocate a submap for physio
528 	 */
529 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
530 				 VM_PHYS_SIZE, 0, FALSE, NULL);
531 
532 	/*
533 	 * Finally, allocate mbuf cluster submap.
534 	 */
535 	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
536 				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
537 				 FALSE, NULL);
538 
539 #ifdef DEBUG
540 	pmapdebug = opmapdebug;
541 #endif
542 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
543 	printf("avail memory = %s\n", pbuf);
544 	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
545 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
546 
547 	/*
548 	 * Tell the VM system that the area before the text segment
549 	 * is invalid.
550 	 *
551 	 * XXX Should just change KERNBASE and VM_MIN_KERNEL_ADDRESS,
552 	 * XXX but not right now.
553 	 */
554 	if (uvm_map_protect(kernel_map, 0, round_page((vaddr_t)&kernel_text),
555 	    UVM_PROT_NONE, TRUE) != 0)
556 		panic("can't mark pre-text pages off-limits");
557 
558 	/*
559 	 * Tell the VM system that writing to the kernel text isn't allowed.
560 	 * If we don't, we might end up COW'ing the text segment!
561 	 */
562 	if (uvm_map_protect(kernel_map, trunc_page((vaddr_t)&kernel_text),
563 	    round_page((vaddr_t)&etext), UVM_PROT_READ|UVM_PROT_EXEC, TRUE)
564 	    != 0)
565 		panic("can't protect kernel text");
566 
567 	/*
568 	 * Set up CPU-specific registers, cache, etc.
569 	 */
570 	initcpu();
571 
572 	/*
573 	 * Set up buffers, so they can be used to read disk labels.
574 	 */
575 	bufinit();
576 }
577 
578 /*
579  * Set registers on exec.
580  */
581 void
582 setregs(p, pack, stack)
583 	struct proc *p;
584 	struct exec_package *pack;
585 	u_long stack;
586 {
587 	struct frame *frame = (struct frame *)p->p_md.md_regs;
588 	extern void m68881_restore __P((struct fpframe *));
589 
590 	frame->f_sr = PSL_USERSET;
591 	frame->f_pc = pack->ep_entry & ~1;
592 	frame->f_regs[D0] = 0;
593 	frame->f_regs[D1] = 0;
594 	frame->f_regs[D2] = 0;
595 	frame->f_regs[D3] = 0;
596 	frame->f_regs[D4] = 0;
597 	frame->f_regs[D5] = 0;
598 	frame->f_regs[D6] = 0;
599 	frame->f_regs[D7] = 0;
600 	frame->f_regs[A0] = 0;
601 	frame->f_regs[A1] = 0;
602 	frame->f_regs[A2] = (int)p->p_psstr;
603 	frame->f_regs[A3] = 0;
604 	frame->f_regs[A4] = 0;
605 	frame->f_regs[A5] = 0;
606 	frame->f_regs[A6] = 0;
607 	frame->f_regs[SP] = stack;
608 
609 	/* restore a null state frame */
610 	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
611 	if (fputype)
612 		m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
613 }
614 
615 /*
616  * Info for CTL_HW
617  */
618 char	cpu_model[124];
619 
620 void
621 identifycpu()
622 {
623 	char board_str[16];
624 	char cpu_str[32];
625 	char mmu_str[16];
626 	char fpu_str[16];
627 	int len = 0;
628 
629 	memset(cpu_model, 0, sizeof(cpu_model));
630 	memset(board_str, 0, sizeof(board_str));
631 	memset(cpu_str, 0, sizeof(cpu_str));
632 	memset(mmu_str, 0, sizeof(mmu_str));
633 	memset(fpu_str, 0, sizeof(fpu_str));
634 
635 	/* Fill in the CPU string. */
636 	switch (cputype) {
637 #ifdef M68020
638 	case CPU_68020:
639 		sprintf(cpu_str, "MC68020 CPU");
640 		sprintf(fpu_str, "MC68881 FPU");	/* XXX */
641 		break;
642 #endif
643 
644 #ifdef M68030
645 	case CPU_68030:
646 		sprintf(cpu_str, "MC68030 CPU+MMU");
647 		sprintf(fpu_str, "MC68882 FPU");	/* XXX */
648 		break;
649 #endif
650 
651 #ifdef M68040
652 	case CPU_68040:
653 		sprintf(cpu_str, "MC68040 CPU+MMU+FPU");
654 		break;
655 #endif
656 
657 #ifdef M68060
658 	case CPU_68060:
659 		sprintf(cpu_str, "MC68060 CPU+MMU+FPU");
660 		break;
661 #endif
662 
663 	default:
664 		printf("unknown CPU type");
665 		panic("startup");
666 	}
667 
668 	/* Fill in the MMU string; only need to handle one case. */
669 	switch (mmutype) {
670 	case MMU_68851:
671 		sprintf(mmu_str, "MC68851 MMU");
672 		break;
673 	}
674 
675 	/* XXX Find out FPU type and fill in string here. */
676 
677 	/* Fill in board model string. */
678 	switch (machineid) {
679 #ifdef MVME147
680 	case MVME_147: {
681 		char *suffix = (char *)&boardid.suffix;
682 		len = sprintf(board_str, "%x", machineid);
683 		if (suffix[0] != '\0') {
684 			board_str[len++] = suffix[0];
685 			if (suffix[1] != '\0')
686 				board_str[len++] = suffix[1];
687 		}
688 		break; }
689 #endif
690 
691 #if defined(MVME162) || defined(MVME167) || defined(MVME172) || defined(MVME177)
692 	case MVME_162:
693 	case MVME_167:
694 	case MVME_172:
695 	case MVME_177: {
696 		char *suffix = (char *)&boardid.suffix;
697 		len = sprintf(board_str, "%x", machineid);
698 		if (suffix[0] != '\0') {
699 			board_str[len++] = suffix[0];
700 			if (suffix[1] != '\0')
701 				board_str[len++] = suffix[1];
702 		}
703 		break; }
704 #endif
705 	default:
706 		printf("unknown machine type: 0x%x\n", machineid);
707 		panic("startup");
708 	}
709 
710 	len = sprintf(cpu_model, "Motorola MVME-%s: %d.%dMHz %s", board_str,
711 	    cpuspeed / 100, (cpuspeed % 100) / 10, cpu_str);
712 
713 	cpuspeed /= 100;
714 
715 	if (mmu_str[0] != '\0')
716 		len += sprintf(cpu_model + len, ", %s", mmu_str);
717 
718 	if (fpu_str[0] != '\0')
719 		len += sprintf(cpu_model + len, ", %s", fpu_str);
720 
721 #if defined(M68040) || defined(M68060)
722 	switch (cputype) {
723 #if defined(M68040)
724 	case CPU_68040:
725 		strcat(cpu_model, ", 4k+4k on-chip physical I/D caches");
726 		break;
727 #endif
728 #if defined(M68060)
729 	case CPU_68060:
730 		strcat(cpu_model, ", 8k+8k on-chip physical I/D caches");
731 		break;
732 #endif
733 	}
734 #endif
735 
736 	printf("%s\n", cpu_model);
737 }
738 
739 /*
740  * machine dependent system variables.
741  */
742 int
743 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
744 	int *name;
745 	u_int namelen;
746 	void *oldp;
747 	size_t *oldlenp;
748 	void *newp;
749 	size_t newlen;
750 	struct proc *p;
751 {
752 	dev_t consdev;
753 
754 	/* all sysctl names at this level are terminal */
755 	if (namelen != 1)
756 		return (ENOTDIR);		/* overloaded */
757 
758 	switch (name[0]) {
759 	case CPU_CONSDEV:
760 		if (cn_tab != NULL)
761 			consdev = cn_tab->cn_dev;
762 		else
763 			consdev = NODEV;
764 		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
765 		    sizeof consdev));
766 	default:
767 		return (EOPNOTSUPP);
768 	}
769 	/* NOTREACHED */
770 }
771 
772 /* See: sig_machdep.c */
773 
774 int	waittime = -1;
775 
776 void
777 cpu_reboot(howto, bootstr)
778 	int howto;
779 	char *bootstr;
780 {
781 
782 	/* take a snap shot before clobbering any registers */
783 	/* take a snapshot before clobbering any registers */
784 		savectx(&curproc->p_addr->u_pcb);
785 
786 	/* Save the RB_SBOOT flag. */
787 	howto |= (boothowto & RB_SBOOT);
788 
789 	/* If the system is still cold, just halt. */
790 	if (cold) {
791 		howto |= RB_HALT;
792 		goto haltsys;
793 	}
794 
795 	boothowto = howto;
796 	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
797 		waittime = 0;
798 		vfs_shutdown();
799 		/*
800 		 * If we've been adjusting the clock, the todr
801 		 * will be out of synch; adjust it now.
802 		 */
803 		resettodr();
804 	}
805 
806 	/* Disable interrupts. */
807 	splhigh();
808 
809 	/* If rebooting and a dump is requested, do it. */
810 	if (howto & RB_DUMP)
811 		dumpsys();
812 
813  haltsys:
814 	/* Run any shutdown hooks. */
815 	doshutdownhooks();
816 
817 #if defined(PANICWAIT) && !defined(DDB)
818 	if ((howto & RB_HALT) == 0 && panicstr) {
819 		printf("hit any key to reboot...\n");
820 		(void)cngetc();
821 		printf("\n");
822 	}
823 #endif
824 
825 	/* Finally, halt/reboot the system. */
826 	if (howto & RB_HALT) {
827 		printf("halted\n\n");
828 		doboot(RB_HALT);
829 		/* NOTREACHED */
830 	}
831 
832 	printf("rebooting...\n");
833 	delay(1000000);
834 	doboot(RB_AUTOBOOT);
835 	/*NOTREACHED*/
836 }
837 
838 /*
839  * Initialize the kernel crash dump header.
840  */
841 void
842 cpu_init_kcore_hdr()
843 {
844 	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
845 	struct m68k_kcore_hdr *m = &h->un._m68k;
846 	int i;
847 	extern char end[];
848 
849 	memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));
850 
851 	/*
852 	 * Initialize the `dispatcher' portion of the header.
853 	 */
854 	strcpy(h->name, machine);
855 	h->page_size = NBPG;
856 	h->kernbase = KERNBASE;
857 
858 	/*
859 	 * Fill in information about our MMU configuration.
860 	 */
861 	m->mmutype	= mmutype;
862 	m->sg_v		= SG_V;
863 	m->sg_frame	= SG_FRAME;
864 	m->sg_ishift	= SG_ISHIFT;
865 	m->sg_pmask	= SG_PMASK;
866 	m->sg40_shift1	= SG4_SHIFT1;
867 	m->sg40_mask2	= SG4_MASK2;
868 	m->sg40_shift2	= SG4_SHIFT2;
869 	m->sg40_mask3	= SG4_MASK3;
870 	m->sg40_shift3	= SG4_SHIFT3;
871 	m->sg40_addr1	= SG4_ADDR1;
872 	m->sg40_addr2	= SG4_ADDR2;
873 	m->pg_v		= PG_V;
874 	m->pg_frame	= PG_FRAME;
875 
876 	/*
877 	 * Initialize pointer to kernel segment table.
878 	 */
879 	m->sysseg_pa = (u_int32_t)(pmap_kernel()->pm_stpa);
880 
881 	/*
882 	 * Initialize relocation value such that:
883 	 *
884 	 *	pa = (va - KERNBASE) + reloc
885 	 *
886 	 * Since we're linked and loaded at the same place,
887 	 * and the kernel is mapped va == pa, this is 0.
888 	 */
889 	m->reloc = 0;
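	/*
	 * Added note: since the kernel is mapped va == pa and reloc is 0,
	 * KERNBASE must be 0 here, and the formula degenerates to pa == va;
	 * e.g. kernel va 0x10000 is dumped as physical address 0x10000.
	 */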
890 
891 	/*
892 	 * Define the end of the relocatable range.
893 	 */
894 	m->relocend = (u_int32_t)end;
895 
896 	/*
897 	 * The mvme68k has one or two memory segments.
898 	 */
899 	for (i = 0; i < mem_cluster_cnt; i++) {
900 		m->ram_segs[i].start = mem_clusters[i].start;
901 		m->ram_segs[i].size  = mem_clusters[i].size;
902 	}
903 }
904 
905 /*
906  * Compute the size of the machine-dependent crash dump header.
907  * Returns size in disk blocks.
908  */
909 int
910 cpu_dumpsize()
911 {
912 	int size;
913 
914 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
915 	return (btodb(roundup(size, dbtob(1))));
916 }
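/*
 * Added note: cpu_dump() below packs both the kcore_seg_t and the
 * cpu_kcore_hdr_t into a single disk block, so with the usual 512-byte
 * DEV_BSIZE this works out to 1.
 */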
917 
918 /*
919  * Calculate size of RAM (in pages) to be dumped.
920  */
921 u_long
922 cpu_dump_mempagecnt()
923 {
924 	u_long i, n;
925 
926 	n = 0;
927 	for (i = 0; i < mem_cluster_cnt; i++)
928 		n += atop(mem_clusters[i].size);
929 	return (n);
930 }
931 
932 /*
933  * Called by dumpsys() to dump the machine-dependent header.
934  */
935 int
936 cpu_dump(dump, blknop)
937 	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
938 	daddr_t *blknop;
939 {
940 	int buf[dbtob(1) / sizeof(int)];
941 	cpu_kcore_hdr_t *chdr;
942 	kcore_seg_t *kseg;
943 	int error;
944 
945 	kseg = (kcore_seg_t *)buf;
946 	chdr = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(kcore_seg_t)) /
947 	    sizeof(int)];
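	/*
	 * Added note: the single-block layout is the kcore_seg_t at offset
	 * 0 followed, at the next ALIGN() boundary, by the cpu_kcore_hdr_t;
	 * kseg->c_size below covers everything after the segment header,
	 * i.e. the MD header plus the padding out to the end of the block.
	 */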
948 
949 	/* Create the segment header. */
950 	CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
951 	kseg->c_size = dbtob(1) - ALIGN(sizeof(kcore_seg_t));
952 
953 	memcpy(chdr, &cpu_kcore_hdr, sizeof(cpu_kcore_hdr_t));
954 	error = (*dump)(dumpdev, *blknop, (caddr_t)buf, sizeof(buf));
955 	*blknop += btodb(sizeof(buf));
956 	return (error);
957 }
958 
959 /*
960  * These variables are needed by /sbin/savecore
961  */
962 u_int32_t dumpmag = 0x8fca0101;	/* magic number */
963 int	dumpsize = 0;		/* pages */
964 long	dumplo = 0;		/* blocks */
965 
966 /*
967  * This is called by main to set dumplo and dumpsize.
968  * Dumps always skip the first NBPG of disk space
969  * in case there might be a disk label stored there.
970  * If there is extra space, put dump at the end to
971  * reduce the chance that swapping trashes it.
972  */
973 void
974 cpu_dumpconf()
975 {
976 	const struct bdevsw *bdev;
977 	int nblks, dumpblks;	/* size of dump area */
978 
979 	if (dumpdev == NODEV)
980 		goto bad;
981 	bdev = bdevsw_lookup(dumpdev);
982 	if (bdev == NULL)
983 		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
984 	if (bdev->d_psize == NULL)
985 		goto bad;
986 	nblks = (*bdev->d_psize)(dumpdev);
987 	if (nblks <= ctod(1))
988 		goto bad;
989 
990 	dumpblks = cpu_dumpsize();
991 	if (dumpblks < 0)
992 		goto bad;
993 	dumpblks += ctod(cpu_dump_mempagecnt());
994 
995 	/* If dump won't fit (incl. room for possible label), punt. */
996 	if (dumpblks > (nblks - ctod(1)))
997 		goto bad;
998 
999 	/* Put dump at end of partition */
1000 	dumplo = nblks - dumpblks;
1001 
1002 	/* dumpsize is in page units, and doesn't include headers. */
1003 	dumpsize = cpu_dump_mempagecnt();
1004 	return;
1005 
1006  bad:
1007 	dumpsize = 0;
1008 }
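/*
 * Sizing example (added note; assumes 4KB pages and 512-byte disk blocks):
 * a 32MB machine has cpu_dump_mempagecnt() == 8192 pages, i.e. 65536
 * blocks of memory plus one header block, so dumplo ends up 65537 blocks
 * before the end of the dump partition, leaving the first ctod(1) == 8
 * blocks free for the disklabel.
 */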
1009 
1010 /*
1011  * Dump physical memory onto the dump device.  Called by cpu_reboot().
1012  */
1013 void
1014 dumpsys()
1015 {
1016 	const struct bdevsw *bdev;
1017 	u_long totalbytesleft, bytes, i, n, memcl;
1018 	u_long maddr;
1019 	int psize;
1020 	daddr_t blkno;
1021 	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
1022 	int error;
1023 
1024 	/* XXX Should save registers. */
1025 
1026 	if (dumpdev == NODEV)
1027 		return;
1028 	bdev = bdevsw_lookup(dumpdev);
1029 	if (bdev == NULL || bdev->d_psize == NULL)
1030 		return;
1031 
1032 	/*
1033 	 * For dumps during autoconfiguration, the dump device may not
1034 	 * have been configured yet; do that now if necessary.
1035 	 */
1036 	if (dumpsize == 0)
1037 		cpu_dumpconf();
1038 	if (dumplo <= 0) {
1039 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
1040 		    minor(dumpdev));
1041 		return;
1042 	}
1043 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
1044 	    minor(dumpdev), dumplo);
1045 
1046 	psize = (*bdev->d_psize)(dumpdev);
1047 	printf("dump ");
1048 	if (psize == -1) {
1049 		printf("area unavailable\n");
1050 		return;
1051 	}
1052 
1053 	/* XXX should purge all outstanding keystrokes. */
1054 
1055 	dump = bdev->d_dump;
1056 	blkno = dumplo;
1057 
1058 	if ((error = cpu_dump(dump, &blkno)) != 0)
1059 		goto err;
1060 
1061 	totalbytesleft = ptoa(cpu_dump_mempagecnt());
1062 
1063 	for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
1064 		maddr = mem_clusters[memcl].start;
1065 		bytes = mem_clusters[memcl].size;
1066 
1067 		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
1068 
1069 			/* Print out how many MBs we have left to go. */
1070 			if ((totalbytesleft % (1024*1024)) == 0)
1071 				printf("%ld ", totalbytesleft / (1024 * 1024));
1072 
1073 			/* Limit size for next transfer. */
1074 			n = bytes - i;
1075 			if (n > NBPG)
1076 				n = NBPG;
1077 
1078 			pmap_enter(pmap_kernel(), (vaddr_t)vmmap, maddr,
1079 			    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
1080 			pmap_update(pmap_kernel());
1081 
1082 			error = (*dump)(dumpdev, blkno, vmmap, n);
1083 			if (error)
1084 				goto err;
1085 			maddr += n;
1086 			blkno += btodb(n);
1087 		}
1088 	}
1089 
1090  err:
1091 	switch (error) {
1092 
1093 	case ENXIO:
1094 		printf("device bad\n");
1095 		break;
1096 
1097 	case EFAULT:
1098 		printf("device not ready\n");
1099 		break;
1100 
1101 	case EINVAL:
1102 		printf("area improper\n");
1103 		break;
1104 
1105 	case EIO:
1106 		printf("i/o error\n");
1107 		break;
1108 
1109 	case EINTR:
1110 		printf("aborted from console\n");
1111 		break;
1112 
1113 	case 0:
1114 		printf("succeeded\n");
1115 		break;
1116 
1117 	default:
1118 		printf("error %d\n", error);
1119 		break;
1120 	}
1121 	printf("\n\n");
1122 	delay(5000);
1123 }
1124 
1125 void
1126 initcpu()
1127 {
1128 #if defined(M68060)
1129 	extern caddr_t vectab[256];
1130 #if defined(M060SP)
1131 	extern u_int8_t I_CALL_TOP[];
1132 	extern u_int8_t FP_CALL_TOP[];
1133 #else
1134 	extern u_int8_t illinst;
1135 #endif
1136 	extern u_int8_t fpfault;
1137 #endif
1138 
1139 #ifdef MAPPEDCOPY
1140 	extern u_int mappedcopysize;
1141 
1142 	/*
1143 	 * Initialize lower bound for doing copyin/copyout using
1144 	 * page mapping (if not already set).  We don't do this on
1145 	 * VAC machines as it loses big time.
1146 	 */
1147 	if (mappedcopysize == 0) {
1148 		mappedcopysize = NBPG;
1149 	}
1150 #endif
1151 
1152 #if defined(M68060)
1153 	if (cputype == CPU_68060) {
1154 #if defined(M060SP)
1155 		/* integer support */
1156 		vectab[61] = &I_CALL_TOP[128 + 0x00];
1157 
1158 		/* floating point support */
1159 		vectab[11] = &FP_CALL_TOP[128 + 0x30];
1160 		vectab[55] = &FP_CALL_TOP[128 + 0x38];
1161 		vectab[60] = &FP_CALL_TOP[128 + 0x40];
1162 
1163 		vectab[54] = &FP_CALL_TOP[128 + 0x00];
1164 		vectab[52] = &FP_CALL_TOP[128 + 0x08];
1165 		vectab[53] = &FP_CALL_TOP[128 + 0x10];
1166 		vectab[51] = &FP_CALL_TOP[128 + 0x18];
1167 		vectab[50] = &FP_CALL_TOP[128 + 0x20];
1168 		vectab[49] = &FP_CALL_TOP[128 + 0x28];
1169 #else
1170 		vectab[61] = &illinst;
1171 #endif
1172 		vectab[48] = &fpfault;
1173 	}
1174 	DCIS();
1175 #endif
1176 }
1177 
1178 void
1179 straytrap(pc, evec)
1180 	int pc;
1181 	u_short evec;
1182 {
1183 	printf("unexpected trap (vector offset %x) from %x\n",
1184 	       evec & 0xFFF, pc);
1185 }
1186 
1187 /*
1188  * Level 7 interrupts are caused by e.g. the ABORT switch.
1189  *
1190  * If we have DDB, then break into DDB on ABORT.  In a production
1191  * environment, bumping the ABORT switch would be bad, so we enable
1192  * panic'ing on ABORT with the kernel option "PANICBUTTON".
1193  */
1194 int
1195 nmihand(arg)
1196 	void *arg;
1197 {
1198 	mvme68k_abort("ABORT SWITCH");
1199 
1200 	return 1;
1201 }
1202 
1203 /*
1204  * Common code for handling ABORT signals from buttons, switches,
1205  * serial lines, etc.
1206  */
1207 void
1208 mvme68k_abort(cp)
1209 	const char *cp;
1210 {
1211 #ifdef DDB
1212 	db_printf("%s\n", cp);
1213 	Debugger();
1214 #else
1215 #ifdef PANICBUTTON
1216 	panic(cp);
1217 #else
1218 	printf("%s ignored\n", cp);
1219 #endif /* PANICBUTTON */
1220 #endif /* DDB */
1221 }
1222 
1223 /*
1224  * cpu_exec_aout_makecmds():
1225  *	cpu-dependent a.out format hook for execve().
1226  *
1227  * Determine whether the given exec package refers to something we
1228  * understand and, if so, set up the vmcmds for it.
1229  */
1230 int
1231 cpu_exec_aout_makecmds(p, epp)
1232 	struct proc *p;
1233 	struct exec_package *epp;
1234 {
1235 	return (ENOEXEC);
1236 }
1237