1 /* $OpenBSD: machdep.c,v 1.166 2014/11/16 12:30:52 deraadt Exp $ */
2 /* $NetBSD: machdep.c,v 1.210 2000/06/01 17:12:38 thorpej Exp $ */
3 
4 /*-
5  * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10  * NASA Ames Research Center and by Chris G. Demetriou.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Author: Chris G. Demetriou
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie Mellon
58  * the rights to redistribute these changes.
59  */
60 
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/signalvar.h>
64 #include <sys/kernel.h>
65 #include <sys/proc.h>
66 #include <sys/socket.h>
67 #include <sys/sched.h>
68 #include <sys/buf.h>
69 #include <sys/reboot.h>
70 #include <sys/device.h>
71 #include <sys/conf.h>
72 #include <sys/file.h>
73 #include <sys/timeout.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/msgbuf.h>
77 #include <sys/ioctl.h>
78 #include <sys/tty.h>
79 #include <sys/user.h>
80 #include <sys/exec.h>
81 #include <sys/sysctl.h>
82 #include <sys/core.h>
83 #include <sys/kcore.h>
84 
85 #include <net/if.h>
86 #include <uvm/uvm.h>
87 
88 #include <machine/kcore.h>
89 #ifndef NO_IEEE
90 #include <machine/fpu.h>
91 #endif
92 #include <sys/timetc.h>
93 
94 #include <sys/mount.h>
95 #include <sys/syscallargs.h>
96 
97 #include <dev/cons.h>
98 
99 #include <machine/autoconf.h>
100 #include <machine/cpu.h>
101 #include <machine/reg.h>
102 #include <machine/rpb.h>
103 #include <machine/prom.h>
104 #include <machine/cpuconf.h>
105 #ifndef NO_IEEE
106 #include <machine/ieeefp.h>
107 #endif
108 
109 #include <dev/pci/pcivar.h>
110 
111 #ifdef DDB
112 #include <machine/db_machdep.h>
113 #include <ddb/db_extern.h>
114 #include <ddb/db_interface.h>
115 #endif
116 
117 #include "ioasic.h"
118 
119 #if NIOASIC > 0
120 #include <machine/tc_machdep.h>
121 #include <dev/tc/tcreg.h>
122 #include <dev/tc/ioasicvar.h>
123 #endif
124 
125 int	cpu_dump(void);
126 int	cpu_dumpsize(void);
127 u_long	cpu_dump_mempagecnt(void);
128 void	dumpsys(void);
129 void	identifycpu(void);
130 void	regdump(struct trapframe *framep);
131 void	printregs(struct reg *);
132 
133 struct uvm_constraint_range  isa_constraint = { 0x0, 0x00ffffffUL };
134 struct uvm_constraint_range  dma_constraint = { 0x0, (paddr_t)-1 };
135 struct uvm_constraint_range *uvm_md_constraints[] = {
136 	&isa_constraint,
137 	NULL
138 };
139 
140 struct vm_map *exec_map = NULL;
141 struct vm_map *phys_map = NULL;
142 
143 /*
144  * safepri is a safe priority for sleep to set for a spin-wait
145  * during autoconfiguration or after a panic.
146  */
147 int   safepri = 0;
148 
149 #ifdef APERTURE
150 #ifdef INSECURE
151 int allowaperture = 1;
152 #else
153 int allowaperture = 0;
154 #endif
155 #endif
156 
157 int	totalphysmem;		/* total amount of physical memory in system */
158 int	physmem;		/* physical mem used by OpenBSD + some rsvd */
159 int	resvmem;		/* amount of memory reserved for PROM */
160 int	unusedmem;		/* amount of memory for OS that we don't use */
161 int	unknownmem;		/* amount of memory with an unknown use */
162 
163 int	cputype;		/* system type, from the RPB */
164 
165 int	bootdev_debug = 0;	/* patchable, or from DDB */
166 
167 /* the following is used externally (sysctl_hw) */
168 char	machine[] = MACHINE;		/* from <machine/param.h> */
169 char	cpu_model[128];
170 
171 struct	user *proc0paddr;
172 
173 /* Number of machine cycles per microsecond */
174 u_int64_t	cycles_per_usec;
175 
176 struct bootinfo_kernel bootinfo;
177 
178 struct consdev *cn_tab;
179 
180 /* For built-in TCDS */
181 #if defined(DEC_3000_300) || defined(DEC_3000_500)
182 u_int8_t	dec_3000_scsiid[2], dec_3000_scsifast[2];
183 #endif
184 
185 struct platform platform;
186 
187 /* for cpu_sysctl() */
188 int	alpha_unaligned_print = 1;	/* warn about unaligned accesses */
189 int	alpha_unaligned_fix = 1;	/* fix up unaligned accesses */
190 int	alpha_unaligned_sigbus = 1;	/* SIGBUS on fixed-up accesses */
191 #ifndef NO_IEEE
192 int	alpha_fp_sync_complete = 0;	/* fp fixup if sync even without /s */
193 #endif
194 #if NIOASIC > 0
195 int	alpha_led_blink = 1;
196 #endif
197 
198 /* used by hw_sysctl */
199 extern char *hw_serial;
200 
201 /*
202  * XXX This should be dynamically sized, but we have the chicken-egg problem!
203  * XXX it should also be larger than it is, because not all of the mddt
204  * XXX clusters end up being used for VM.
205  */
206 phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];	/* low size bits overloaded */
207 int	mem_cluster_cnt;
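
/*
 * Illustrative sketch (not part of the original source): cluster sizes
 * are page-aligned, so the low PAGE_MASK bits of each size field are
 * free and alpha_init() below stashes PROT_* flags there.  These
 * hypothetical helpers show how the two values are unpacked; compare
 * the "size & ~PAGE_MASK" and "size & PAGE_MASK" expressions used
 * later in this file.
 */
static __inline psize_t
mem_cluster_bytes(const phys_ram_seg_t *cl)
{
	return (cl->size & ~PAGE_MASK);		/* true size in bytes */
}

static __inline int
mem_cluster_prot(const phys_ram_seg_t *cl)
{
	return (cl->size & PAGE_MASK);		/* stashed PROT_* bits */
}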
208 
209 void
210 alpha_init(unused, ptb, bim, bip, biv)
211 	u_long unused;
212 	u_long ptb;		/* PFN of current level 1 page table */
213 	u_long bim;		/* bootinfo magic */
214 	u_long bip;		/* bootinfo pointer */
215 	u_long biv;		/* bootinfo version */
216 {
217 	extern char kernel_text[], _end[];
218 	struct mddt *mddtp;
219 	struct mddt_cluster *memc;
220 	int i, mddtweird;
221 	struct vm_physseg *vps;
222 	vaddr_t kernstart, kernend;
223 	paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
224 	char *p;
225 	const char *bootinfo_msg;
226 	const struct cpuinit *c;
227 	extern caddr_t esym;
228 	struct cpu_info *ci;
229 	cpuid_t cpu_id;
230 
231 	/* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */
232 
233 	/*
234 	 * Turn off interrupts (not mchecks) and floating point.
235 	 * Make sure the instruction and data streams are consistent.
236 	 */
237 	(void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
238 	alpha_pal_wrfen(0);
239 	ALPHA_TBIA();
240 	alpha_pal_imb();
241 
242 	/* Initialize the SCB. */
243 	scb_init();
244 
245 	cpu_id = cpu_number();
246 
247 #if defined(MULTIPROCESSOR)
248 	/*
249 	 * Set our SysValue to the address of our cpu_info structure.
250 	 * Secondary processors do this in their spinup trampoline.
251 	 */
252 	alpha_pal_wrval((u_long)&cpu_info_primary);
253 	cpu_info[cpu_id] = &cpu_info_primary;
254 #endif
255 
256 	ci = curcpu();
257 	ci->ci_cpuid = cpu_id;
258 
259 	/*
260 	 * Get critical system information (if possible, from the
261 	 * information provided by the boot program).
262 	 */
263 	bootinfo_msg = NULL;
264 	if (bim == BOOTINFO_MAGIC) {
265 		if (biv == 0) {		/* backward compat */
266 			biv = *(u_long *)bip;
267 			bip += 8;
268 		}
269 		switch (biv) {
270 		case 1: {
271 			struct bootinfo_v1 *v1p = (struct bootinfo_v1 *)bip;
272 
273 			bootinfo.ssym = v1p->ssym;
274 			bootinfo.esym = v1p->esym;
275 			/* hwrpb may not be provided by boot block in v1 */
276 			if (v1p->hwrpb != NULL) {
277 				bootinfo.hwrpb_phys =
278 				    ((struct rpb *)v1p->hwrpb)->rpb_phys;
279 				bootinfo.hwrpb_size = v1p->hwrpbsize;
280 			} else {
281 				bootinfo.hwrpb_phys =
282 				    ((struct rpb *)HWRPB_ADDR)->rpb_phys;
283 				bootinfo.hwrpb_size =
284 				    ((struct rpb *)HWRPB_ADDR)->rpb_size;
285 			}
286 			bcopy(v1p->boot_flags, bootinfo.boot_flags,
287 			    min(sizeof v1p->boot_flags,
288 			      sizeof bootinfo.boot_flags));
289 			bcopy(v1p->booted_kernel, bootinfo.booted_kernel,
290 			    min(sizeof v1p->booted_kernel,
291 			      sizeof bootinfo.booted_kernel));
292 			/* booted dev not provided in bootinfo */
293 			init_prom_interface((struct rpb *)
294 			    ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys));
295 			prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
296 			    sizeof bootinfo.booted_dev);
297 			break;
298 		}
299 		default:
300 			bootinfo_msg = "unknown bootinfo version";
301 			goto nobootinfo;
302 		}
303 	} else {
304 		bootinfo_msg = "boot program did not pass bootinfo";
305 nobootinfo:
306 		bootinfo.ssym = (u_long)_end;
307 		bootinfo.esym = (u_long)_end;
308 		bootinfo.hwrpb_phys = ((struct rpb *)HWRPB_ADDR)->rpb_phys;
309 		bootinfo.hwrpb_size = ((struct rpb *)HWRPB_ADDR)->rpb_size;
310 		init_prom_interface((struct rpb *)HWRPB_ADDR);
311 		prom_getenv(PROM_E_BOOTED_OSFLAGS, bootinfo.boot_flags,
312 		    sizeof bootinfo.boot_flags);
313 		prom_getenv(PROM_E_BOOTED_FILE, bootinfo.booted_kernel,
314 		    sizeof bootinfo.booted_kernel);
315 		prom_getenv(PROM_E_BOOTED_DEV, bootinfo.booted_dev,
316 		    sizeof bootinfo.booted_dev);
317 	}
318 
319 	esym = (caddr_t)bootinfo.esym;
320 	/*
321 	 * Initialize the kernel's mapping of the RPB.  It's needed for
322 	 * lots of things.
323 	 */
324 	hwrpb = (struct rpb *)ALPHA_PHYS_TO_K0SEG(bootinfo.hwrpb_phys);
325 
326 #if defined(DEC_3000_300) || defined(DEC_3000_500)
327 	if (hwrpb->rpb_type == ST_DEC_3000_300 ||
328 	    hwrpb->rpb_type == ST_DEC_3000_500) {
329 		prom_getenv(PROM_E_SCSIID, dec_3000_scsiid,
330 		    sizeof(dec_3000_scsiid));
331 		prom_getenv(PROM_E_SCSIFAST, dec_3000_scsifast,
332 		    sizeof(dec_3000_scsifast));
333 	}
334 #endif
335 
336 	/*
337 	 * Remember how many cycles there are per microsecond,
338 	 * so that we can use delay().  Round up, for safety.
339 	 */
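	/*
	 * Worked example (assumed figures): a 500 MHz CPU reports
	 * rpb_cc_freq == 500000000, giving cycles_per_usec == 500.
	 * Adding 999999 before dividing rounds up, so a 333333333 Hz
	 * part yields 334 rather than 333 and delay() never waits
	 * too little.
	 */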
340 	cycles_per_usec = (hwrpb->rpb_cc_freq + 999999) / 1000000;
341 
342 	/*
343 	 * Initialize the (temporary) bootstrap console interface, so
344 	 * we can use printf until the VM system is set up.
345 	 * The real console is initialized before then.
346 	 */
347 	init_bootstrap_console();
348 
349 	/* OUTPUT NOW ALLOWED */
350 
351 	/* delayed from above */
352 	if (bootinfo_msg)
353 		printf("WARNING: %s (0x%lx, 0x%lx, 0x%lx)\n",
354 		    bootinfo_msg, bim, bip, biv);
355 
356 	/* Initialize the trap vectors on the primary processor. */
357 	trap_init();
358 
359 	/*
360 	 * Find out what hardware we're on, and do basic initialization.
361 	 */
362 	cputype = hwrpb->rpb_type;
363 	if (cputype < 0) {
364 		/*
365 		 * At least some white-box systems have SRM which
366 		 * reports a systype that's the negative of their
367 		 * blue-box counterpart.
368 		 */
369 		cputype = -cputype;
370 	}
371 	c = platform_lookup(cputype);
372 	if (c == NULL) {
373 		platform_not_supported();
374 		/* NOTREACHED */
375 	}
376 	(*c->init)();
377 	strlcpy(cpu_model, platform.model, sizeof cpu_model);
378 
379 	/*
380 	 * Initialize the real console, so that the bootstrap console is
381 	 * no longer necessary.
382 	 */
383 	(*platform.cons_init)();
384 
385 #if 0
386 	/* Paranoid sanity checking */
387 
388 	assert(hwrpb->rpb_primary_cpu_id == alpha_pal_whami());
389 
390 	/*
391 	 * On single-CPU systypes, the primary should always be CPU 0,
392 	 * except on Alpha 8200 systems where the CPU id is related
393 	 * to the VID, which is related to the Turbo Laser node id.
394 	 */
395 	if (cputype != ST_DEC_21000)
396 		assert(hwrpb->rpb_primary_cpu_id == 0);
397 #endif
398 
399 	/* NO MORE FIRMWARE ACCESS ALLOWED */
400 #ifdef _PMAP_MAY_USE_PROM_CONSOLE
401 	/*
402 	 * XXX (unless _PMAP_MAY_USE_PROM_CONSOLE is defined and
403 	 * XXX pmap_uses_prom_console() evaluates to non-zero.)
404 	 */
405 #endif
406 
407 #ifndef SMALL_KERNEL
408 	/*
409 	 * If we run on a BWX-capable processor, override cpu_switch
410 	 * with a faster version.
411 	 * We do this now because the kernel text might be mapped
412 	 * read-only eventually (although this is not the case at the moment).
413 	 */
414 	if (alpha_implver() >= ALPHA_IMPLVER_EV5) {
415 		if (~alpha_amask(ALPHA_AMASK_BWX) != 0) {
416 			extern vaddr_t __bwx_switch0, __bwx_switch1,
417 			    __bwx_switch2, __bwx_switch3;
418 			u_int32_t *dst, *src, *end;
419 
420 			src = (u_int32_t *)&__bwx_switch2;
421 			end = (u_int32_t *)&__bwx_switch3;
422 			dst = (u_int32_t *)&__bwx_switch0;
423 			while (src != end)
424 				*dst++ = *src++;
425 			src = (u_int32_t *)&__bwx_switch1;
426 			end = (u_int32_t *)&__bwx_switch2;
427 			while (src != end)
428 				*dst++ = *src++;
429 		}
430 	}
431 #endif
432 
433 	/*
434 	 * find out this system's page size
435 	 */
436 	if ((uvmexp.pagesize = hwrpb->rpb_page_size) != 8192)
437 		panic("page size %d != 8192?!", uvmexp.pagesize);
438 
439 	uvm_setpagesize();
440 
441 	/*
442 	 * Find the beginning and end of the kernel (and leave a
443 	 * bit of space before the beginning for the bootstrap
444 	 * stack).
445 	 */
446 	kernstart = trunc_page((vaddr_t)kernel_text) - 2 * PAGE_SIZE;
447 	kernend = (vaddr_t)round_page((vaddr_t)bootinfo.esym);
448 
449 	kernstartpfn = atop(ALPHA_K0SEG_TO_PHYS(kernstart));
450 	kernendpfn = atop(ALPHA_K0SEG_TO_PHYS(kernend));
451 
452 	/*
453 	 * Find out how much memory is available, by looking at
454 	 * the memory cluster descriptors.  This also tries to do
455 	 * its best to detect things that have never been seen
456 	 * before...
457 	 */
458 	mddtp = (struct mddt *)(((caddr_t)hwrpb) + hwrpb->rpb_memdat_off);
459 
460 	/* MDDT SANITY CHECKING */
461 	mddtweird = 0;
462 	if (mddtp->mddt_cluster_cnt < 2) {
463 		mddtweird = 1;
464 		printf("WARNING: weird number of mem clusters: %lu\n",
465 		    (unsigned long)mddtp->mddt_cluster_cnt);
466 	}
467 
468 #if 0
469 	printf("Memory cluster count: %d\n", mddtp->mddt_cluster_cnt);
470 #endif
471 
472 	for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
473 		memc = &mddtp->mddt_clusters[i];
474 #if 0
475 		printf("MEMC %d: pfn 0x%lx cnt 0x%lx usage 0x%lx\n", i,
476 		    memc->mddt_pfn, memc->mddt_pg_cnt, memc->mddt_usage);
477 #endif
478 		totalphysmem += memc->mddt_pg_cnt;
479 		if (mem_cluster_cnt < VM_PHYSSEG_MAX) {	/* XXX */
480 			mem_clusters[mem_cluster_cnt].start =
481 			    ptoa(memc->mddt_pfn);
482 			mem_clusters[mem_cluster_cnt].size =
483 			    ptoa(memc->mddt_pg_cnt);
484 			if (memc->mddt_usage & MDDT_mbz ||
485 			    memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */
486 			    memc->mddt_usage & MDDT_PALCODE)
487 				mem_clusters[mem_cluster_cnt].size |=
488 				    PROT_READ;
489 			else
490 				mem_clusters[mem_cluster_cnt].size |=
491 				    PROT_READ | PROT_WRITE | PROT_EXEC;
492 			mem_cluster_cnt++;
493 		} /* XXX else print something! */
494 
495 		if (memc->mddt_usage & MDDT_mbz) {
496 			mddtweird = 1;
497 			printf("WARNING: mem cluster %d has weird "
498 			    "usage 0x%lx\n", i, (long)memc->mddt_usage);
499 			unknownmem += memc->mddt_pg_cnt;
500 			continue;
501 		}
502 		if (memc->mddt_usage & MDDT_NONVOLATILE) {
503 			/* XXX should handle these... */
504 			printf("WARNING: skipping non-volatile mem "
505 			    "cluster %d\n", i);
506 			unusedmem += memc->mddt_pg_cnt;
507 			continue;
508 		}
509 		if (memc->mddt_usage & MDDT_PALCODE) {
510 			resvmem += memc->mddt_pg_cnt;
511 			continue;
512 		}
513 
514 		/*
515 		 * We have a memory cluster available for system
516 		 * software use.  We must determine if this cluster
517 		 * holds the kernel.
518 		 */
519 #ifdef _PMAP_MAY_USE_PROM_CONSOLE
520 		/*
521 		 * XXX If the kernel uses the PROM console, we only use the
522 		 * XXX memory after the kernel in the first system segment,
523 		 * XXX to avoid clobbering prom mapping, data, etc.
524 		 */
525 	    if (!pmap_uses_prom_console() || physmem == 0) {
526 #endif /* _PMAP_MAY_USE_PROM_CONSOLE */
527 		physmem += memc->mddt_pg_cnt;
528 		pfn0 = memc->mddt_pfn;
529 		pfn1 = memc->mddt_pfn + memc->mddt_pg_cnt;
530 		if (pfn0 <= kernstartpfn && kernendpfn <= pfn1) {
531 			/*
532 			 * Must compute the location of the kernel
533 			 * within the segment.
534 			 */
535 #if 0
536 			printf("Cluster %d contains kernel\n", i);
537 #endif
538 #ifdef _PMAP_MAY_USE_PROM_CONSOLE
539 		    if (!pmap_uses_prom_console()) {
540 #endif /* _PMAP_MAY_USE_PROM_CONSOLE */
541 			if (pfn0 < kernstartpfn) {
542 				/*
543 				 * There is a chunk before the kernel.
544 				 */
545 #if 0
546 				printf("Loading chunk before kernel: "
547 				    "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
548 #endif
549 				uvm_page_physload(pfn0, kernstartpfn,
550 				    pfn0, kernstartpfn, 0);
551 			}
552 #ifdef _PMAP_MAY_USE_PROM_CONSOLE
553 		    }
554 #endif /* _PMAP_MAY_USE_PROM_CONSOLE */
555 			if (kernendpfn < pfn1) {
556 				/*
557 				 * There is a chunk after the kernel.
558 				 */
559 #if 0
560 				printf("Loading chunk after kernel: "
561 				    "0x%lx / 0x%lx\n", kernendpfn, pfn1);
562 #endif
563 				uvm_page_physload(kernendpfn, pfn1,
564 				    kernendpfn, pfn1, 0);
565 			}
566 		} else {
567 			/*
568 			 * Just load this cluster as one chunk.
569 			 */
570 #if 0
571 			printf("Loading cluster %d: 0x%lx / 0x%lx\n", i,
572 			    pfn0, pfn1);
573 #endif
574 			uvm_page_physload(pfn0, pfn1, pfn0, pfn1, 0);
575 		}
576 #ifdef _PMAP_MAY_USE_PROM_CONSOLE
577 	    }
578 #endif /* _PMAP_MAY_USE_PROM_CONSOLE */
579 	}
580 
581 #ifdef DEBUG
582 	/*
583 	 * Dump out the MDDT if it looks odd...
584 	 */
585 	if (mddtweird) {
586 		printf("\n");
587 		printf("complete memory cluster information:\n");
588 		for (i = 0; i < mddtp->mddt_cluster_cnt; i++) {
589 			printf("mddt %d:\n", i);
590 			printf("\tpfn %lx\n",
591 			    mddtp->mddt_clusters[i].mddt_pfn);
592 			printf("\tcnt %lx\n",
593 			    mddtp->mddt_clusters[i].mddt_pg_cnt);
594 			printf("\ttest %lx\n",
595 			    mddtp->mddt_clusters[i].mddt_pg_test);
596 			printf("\tbva %lx\n",
597 			    mddtp->mddt_clusters[i].mddt_v_bitaddr);
598 			printf("\tbpa %lx\n",
599 			    mddtp->mddt_clusters[i].mddt_p_bitaddr);
600 			printf("\tbcksum %lx\n",
601 			    mddtp->mddt_clusters[i].mddt_bit_cksum);
602 			printf("\tusage %lx\n",
603 			    mddtp->mddt_clusters[i].mddt_usage);
604 		}
605 		printf("\n");
606 	}
607 #endif
608 
609 	if (totalphysmem == 0)
610 		panic("can't happen: system seems to have no memory!");
611 #if 0
612 	printf("totalphysmem = %u\n", totalphysmem);
613 	printf("physmem = %u\n", physmem);
614 	printf("resvmem = %d\n", resvmem);
615 	printf("unusedmem = %d\n", unusedmem);
616 	printf("unknownmem = %d\n", unknownmem);
617 #endif
618 
619 	/*
620 	 * Initialize error message buffer (at end of core).
621 	 */
622 	{
623 		vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
624 		vsize_t reqsz = sz;
625 
626 		vps = &vm_physmem[vm_nphysseg - 1];
627 
628 		/* shrink so that it'll fit in the last segment */
629 		if ((vps->avail_end - vps->avail_start) < atop(sz))
630 			sz = ptoa(vps->avail_end - vps->avail_start);
631 
632 		vps->end -= atop(sz);
633 		vps->avail_end -= atop(sz);
634 		initmsgbuf((caddr_t) ALPHA_PHYS_TO_K0SEG(ptoa(vps->end)), sz);
635 
636 		/* Remove the last segment if it now has no pages. */
637 		if (vps->start == vps->end)
638 			vm_nphysseg--;
639 
640 		/* warn if the message buffer had to be shrunk */
641 		if (sz != reqsz)
642 			printf("WARNING: %ld bytes not available for msgbuf "
643 			    "in last cluster (%ld used)\n", reqsz, sz);
644 
645 	}
646 
647 	/*
648 	 * Init mapping for u page(s) for proc 0
649 	 */
650 	proc0.p_addr = proc0paddr =
651 	    (struct user *)pmap_steal_memory(UPAGES * PAGE_SIZE, NULL, NULL);
652 
653 	/*
654 	 * Initialize the virtual memory system, and set the
655 	 * page table base register in proc 0's PCB.
656 	 */
657 	pmap_bootstrap(ALPHA_PHYS_TO_K0SEG(ptb << PGSHIFT),
658 	    hwrpb->rpb_max_asn, hwrpb->rpb_pcs_cnt);
659 
660 	/*
661 	 * Initialize the rest of proc 0's PCB, and cache its physical
662 	 * address.
663 	 */
664 	proc0.p_md.md_pcbpaddr =
665 	    (struct pcb *)ALPHA_K0SEG_TO_PHYS((vaddr_t)&proc0paddr->u_pcb);
666 
667 	/*
668 	 * Set the kernel sp, reserving space for an (empty) trapframe,
669 	 * and make proc0's trapframe pointer point to it for sanity.
670 	 */
671 	proc0paddr->u_pcb.pcb_hw.apcb_ksp =
672 	    (u_int64_t)proc0paddr + USPACE - sizeof(struct trapframe);
673 	proc0.p_md.md_tf =
674 	    (struct trapframe *)proc0paddr->u_pcb.pcb_hw.apcb_ksp;
675 
676 	/*
677 	 * Initialize the primary CPU's idle PCB to proc0's.  In a
678 	 * MULTIPROCESSOR configuration, each CPU will later get
679 	 * its own idle PCB when autoconfiguration runs.
680 	 */
681 	ci->ci_idle_pcb = &proc0paddr->u_pcb;
682 	ci->ci_idle_pcb_paddr = (u_long)proc0.p_md.md_pcbpaddr;
683 
684 	/*
685 	 * Look at arguments passed to us and compute boothowto.
686 	 */
687 
688 #ifdef KADB
689 	boothowto |= RB_KDB;
690 #endif
691 	for (p = bootinfo.boot_flags; p && *p != '\0'; p++) {
692 		/*
693 		 * Note that we'd really like to differentiate case here,
694 		 * but the Alpha AXP Architecture Reference Manual
695 		 * says that we shouldn't.
696 		 */
697 		switch (*p) {
698 		case 'a': /* Ignore */
699 		case 'A':
700 			break;
701 
702 		case 'b': /* Enter DDB as soon as the console is initialised */
703 		case 'B':
704 			boothowto |= RB_KDB;
705 			break;
706 
707 		case 'c': /* enter user kernel configuration */
708 		case 'C':
709 			boothowto |= RB_CONFIG;
710 			break;
711 
712 #ifdef DEBUG
713 		case 'd': /* crash dump immediately after autoconfig */
714 		case 'D':
715 			boothowto |= RB_DUMP;
716 			break;
717 #endif
718 
719 		case 'h': /* always halt, never reboot */
720 		case 'H':
721 			boothowto |= RB_HALT;
722 			break;
723 
724 
725 		case 'n': /* askname */
726 		case 'N':
727 			boothowto |= RB_ASKNAME;
728 			break;
729 
730 		case 's': /* single-user */
731 		case 'S':
732 			boothowto |= RB_SINGLE;
733 			break;
734 
735 		case '-':
736 			/*
737 			 * Just ignore this.  It's not required, but it's
738 			 * common for it to be passed regardless.
739 			 */
740 			break;
741 
742 		default:
743 			printf("Unrecognized boot flag '%c'.\n", *p);
744 			break;
745 		}
746 	}
747 
748 
749 	/*
750 	 * Figure out the number of cpus in the box, from RPB fields.
751 	 * Really.  We mean it.
752 	 */
753 	for (ncpusfound = 0, i = 0; i < hwrpb->rpb_pcs_cnt; i++) {
754 		struct pcs *pcsp;
755 
756 		pcsp = LOCATE_PCS(hwrpb, i);
757 		if ((pcsp->pcs_flags & PCS_PP) != 0)
758 			ncpusfound++;
759 	}
760 
761 	/*
762 	 * Initialize debuggers, and break into them if appropriate.
763 	 */
764 #ifdef DDB
765 	db_machine_init();
766 	ddb_init();
767 
768 	if (boothowto & RB_KDB)
769 		Debugger();
770 #endif
771 #ifdef KGDB
772 	if (boothowto & RB_KDB)
773 		kgdb_connect(0);
774 #endif
775 	/*
776 	 * Figure out our clock frequency, from RPB fields.
777 	 */
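	/*
	 * The HWRPB stores the interval clock interrupt frequency
	 * scaled up by 4096, so shifting right by 12 recovers
	 * interrupts per second.
	 */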
778 	hz = hwrpb->rpb_intr_freq >> 12;
779 	if (!(60 <= hz && hz <= 10240)) {
780 #ifdef DIAGNOSTIC
781 		printf("WARNING: unbelievable rpb_intr_freq: %lu (%d hz)\n",
782 			(unsigned long)hwrpb->rpb_intr_freq, hz);
783 #endif
784 		hz = 1024;
785 	}
786 }
787 
788 void
789 consinit()
790 {
791 
792 	/*
793 	 * Everything related to console initialization is done
794 	 * in alpha_init().
795 	 */
796 #if defined(DIAGNOSTIC) && defined(_PMAP_MAY_USE_PROM_CONSOLE)
797 	printf("consinit: %susing prom console\n",
798 	    pmap_uses_prom_console() ? "" : "not ");
799 #endif
800 }
801 
802 void
803 cpu_startup()
804 {
805 	vaddr_t minaddr, maxaddr;
806 #if defined(DEBUG)
807 	extern int pmapdebug;
808 	int opmapdebug = pmapdebug;
809 
810 	pmapdebug = 0;
811 #endif
812 
813 	/*
814 	 * Good {morning,afternoon,evening,night}.
815 	 */
816 	printf(version);
817 	identifycpu();
818 	printf("real mem = %lu (%luMB)\n", ptoa((psize_t)totalphysmem),
819 	    ptoa((psize_t)totalphysmem) / 1024 / 1024);
820 	printf("rsvd mem = %lu (%luMB)\n", ptoa((psize_t)resvmem),
821 	    ptoa((psize_t)resvmem) / 1024 / 1024);
822 	if (unusedmem) {
823 		printf("WARNING: unused memory = %lu (%luMB)\n",
824 		    ptoa((psize_t)unusedmem),
825 		    ptoa((psize_t)unusedmem) / 1024 / 1024);
826 	}
827 	if (unknownmem) {
828 		printf("WARNING: %lu (%luMB) of memory with unknown purpose\n",
829 		    ptoa((psize_t)unknownmem),
830 		    ptoa((psize_t)unknownmem) / 1024 / 1024);
831 	}
832 
833 	/*
834 	 * Allocate a submap for exec arguments.  This map effectively
835 	 * limits the number of processes exec'ing at any time.
836 	 */
837 	minaddr = vm_map_min(kernel_map);
838 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
839 	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
840 
841 	/*
842 	 * Allocate a submap for physio
843 	 */
844 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
845 	    VM_PHYS_SIZE, 0, FALSE, NULL);
846 
847 #if defined(DEBUG)
848 	pmapdebug = opmapdebug;
849 #endif
850 	printf("avail mem = %lu (%luMB)\n", ptoa((psize_t)uvmexp.free),
851 	    ptoa((psize_t)uvmexp.free) / 1024 / 1024);
852 #if 0
853 	{
854 		extern u_long pmap_pages_stolen;
855 
856 		printf("stolen memory for VM structures = %ld\n", pmap_pages_stolen * PAGE_SIZE);
857 	}
858 #endif
859 
860 	/*
861 	 * Set up buffers, so they can be used to read disk labels.
862 	 */
863 	bufinit();
864 
865 	/*
866 	 * Configure the system.
867 	 */
868 	if (boothowto & RB_CONFIG) {
869 #ifdef BOOT_CONFIG
870 		user_config();
871 #else
872 		printf("kernel does not support -c; continuing..\n");
873 #endif
874 	}
875 
876 	/*
877 	 * Set up the HWPCB so that it's safe to configure secondary
878 	 * CPUs.
879 	 */
880 	hwrpb_primary_init();
881 }
882 
883 /*
884  * Retrieve the platform name from the DSR.
885  */
886 const char *
887 alpha_dsr_sysname()
888 {
889 	struct dsrdb *dsr;
890 	const char *sysname;
891 
892 	/*
893 	 * DSR does not exist on early HWRPB versions.
894 	 */
895 	if (hwrpb->rpb_version < HWRPB_DSRDB_MINVERS)
896 		return (NULL);
897 
898 	dsr = (struct dsrdb *)(((caddr_t)hwrpb) + hwrpb->rpb_dsrdb_off);
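	/*
	 * The sysname offset points at a counted string: a leading
	 * u_int64_t length quadword followed by the name itself,
	 * hence the extra sizeof(u_int64_t) skipped below.
	 */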
899 	sysname = (const char *)((caddr_t)dsr + (dsr->dsr_sysname_off +
900 	    sizeof(u_int64_t)));
901 	return (sysname);
902 }
903 
904 /*
905  * Look up the specified system variation in the provided table,
906  * returning the model string on match.
907  */
908 const char *
909 alpha_variation_name(variation, avtp)
910 	u_int64_t variation;
911 	const struct alpha_variation_table *avtp;
912 {
913 	int i;
914 
915 	for (i = 0; avtp[i].avt_model != NULL; i++)
916 		if (avtp[i].avt_variation == variation)
917 			return (avtp[i].avt_model);
918 	return (NULL);
919 }
920 
921 /*
922  * Generate a default platform name for unknown system variations.
923  */
924 const char *
925 alpha_unknown_sysname()
926 {
927 	static char s[128];		/* safe size */
928 
929 	snprintf(s, sizeof s, "%s family, unknown model variation 0x%lx",
930 	    platform.family, (unsigned long)hwrpb->rpb_variation & SV_ST_MASK);
931 	return ((const char *)s);
932 }
933 
934 void
935 identifycpu()
936 {
937 	char *s;
938 	int slen;
939 
940 	/*
941 	 * print out CPU identification information.
942 	 */
943 	printf("%s", cpu_model);
944 	for (s = cpu_model; *s; ++s)
945 		if (strncasecmp(s, "MHz", 3) == 0)
946 			goto skipMHz;
947 	printf(", %luMHz", (unsigned long)hwrpb->rpb_cc_freq / 1000000);
948 skipMHz:
949 	/* fill in hw_serial if a serial number is known */
950 	slen = strlen(hwrpb->rpb_ssn) + 1;
951 	if (slen > 1) {
952 		hw_serial = malloc(slen, M_SYSCTL, M_NOWAIT);
953 		if (hw_serial)
954 			strlcpy(hw_serial, (char *)hwrpb->rpb_ssn, slen);
955 	}
956 
957 	printf("\n");
958 	printf("%lu byte page size, %d processor%s.\n",
959 	    (unsigned long)hwrpb->rpb_page_size, ncpusfound,
960 	    ncpusfound == 1 ? "" : "s");
961 #if 0
962 	/* this is not particularly useful! */
963 	printf("variation: 0x%lx, revision 0x%lx\n",
964 	    hwrpb->rpb_variation, *(long *)hwrpb->rpb_revision);
965 #endif
966 }
967 
968 int	waittime = -1;
969 struct pcb dumppcb;
970 
971 __dead void
972 boot(int howto)
973 {
974 #if defined(MULTIPROCESSOR)
975 	u_long wait_mask;
976 	int i;
977 #endif
978 
979 	if (cold) {
980 		if ((howto & RB_USERREQ) == 0)
981 			howto |= RB_HALT;
982 		goto haltsys;
983 	}
984 
985 	if ((boothowto & RB_HALT) != 0)
986 		howto |= RB_HALT;
987 
988 	boothowto = howto;
989 	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
990 		waittime = 0;
991 		vfs_shutdown();
992 
993 		if ((howto & RB_TIMEBAD) == 0) {
994 			resettodr();
995 		} else {
996 			printf("WARNING: not updating battery clock\n");
997 		}
998 	}
999 	if_downall();
1000 
1001 	uvm_shutdown();
1002 	splhigh();
1003 	cold = 1;
1004 
1005 #if defined(MULTIPROCESSOR)
1006 	/*
1007 	 * Halt all other CPUs.
1008 	 */
1009 	wait_mask = (1UL << hwrpb->rpb_primary_cpu_id);
1010 	alpha_broadcast_ipi(ALPHA_IPI_HALT);
1011 
1012 	/* Ensure any CPUs paused by DDB resume execution so they can halt */
1013 	cpus_paused = 0;
1014 
1015 	for (i = 0; i < 10000; i++) {
1016 		alpha_mb();
1017 		if (cpus_running == wait_mask)
1018 			break;
1019 		delay(1000);
1020 	}
1021 	alpha_mb();
1022 	if (cpus_running != wait_mask)
1023 		printf("WARNING: Unable to halt secondary CPUs (0x%lx)\n",
1024 		    cpus_running);
1025 #endif
1026 
1027 	if ((howto & RB_DUMP) != 0)
1028 		dumpsys();
1029 
1030 haltsys:
1031 	doshutdownhooks();
1032 	config_suspend_all(DVACT_POWERDOWN);
1033 
1034 #ifdef BOOTKEY
1035 	printf("hit any key to %s...\n",
1036 	    (howto & RB_HALT) != 0 ? "halt" : "reboot");
1037 	cnpollc(1);	/* for proper keyboard command handling */
1038 	cngetc();
1039 	cnpollc(0);
1040 	printf("\n");
1041 #endif
1042 
1043 	/* Finally, powerdown/halt/reboot the system. */
1044 	if ((howto & RB_POWERDOWN) != 0 &&
1045 	    platform.powerdown != NULL) {
1046 		(*platform.powerdown)();
1047 		printf("WARNING: powerdown failed!\n");
1048 	}
1049 	printf("%s\n\n",
1050 	    (howto & RB_HALT) != 0 ? "halted." : "rebooting...");
1051 	prom_halt((howto & RB_HALT) != 0);
1052 	for (;;) ;
1053 	/* NOTREACHED */
1054 }
1055 
1056 /*
1057  * These variables are needed by /sbin/savecore
1058  */
1059 u_long	dumpmag = 0x8fca0101;	/* magic number */
1060 int 	dumpsize = 0;		/* pages */
1061 long	dumplo = 0; 		/* blocks */
1062 
1063 /*
1064  * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1065  */
1066 int
1067 cpu_dumpsize()
1068 {
1069 	int size;
1070 
1071 	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
1072 	    ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
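	/* All MD dump headers must fit in one disk block; see cpu_dump(). */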
1073 	if (roundup(size, dbtob(1)) != dbtob(1))
1074 		return -1;
1075 
1076 	return (1);
1077 }
1078 
1079 /*
1080  * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped.
1081  */
1082 u_long
1083 cpu_dump_mempagecnt()
1084 {
1085 	u_long i, n;
1086 
1087 	n = 0;
1088 	for (i = 0; i < mem_cluster_cnt; i++)
1089 		n += atop(mem_clusters[i].size);
1090 	return (n);
1091 }
1092 
1093 /*
1094  * cpu_dump: dump machine-dependent kernel core dump headers.
1095  */
1096 int
1097 cpu_dump()
1098 {
1099 	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
1100 	char buf[dbtob(1)];
1101 	kcore_seg_t *segp;
1102 	cpu_kcore_hdr_t *cpuhdrp;
1103 	phys_ram_seg_t *memsegp;
1104 	int i;
1105 
1106 	dump = bdevsw[major(dumpdev)].d_dump;
1107 
1108 	bzero(buf, sizeof buf);
1109 	segp = (kcore_seg_t *)buf;
1110 	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
1111 	memsegp = (phys_ram_seg_t *)&buf[ALIGN(sizeof(*segp)) +
1112 	    ALIGN(sizeof(*cpuhdrp))];
1113 
1114 	/*
1115 	 * Generate a segment header.
1116 	 */
1117 	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1118 	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
1119 
1120 	/*
1121 	 * Add the machine-dependent header info.
1122 	 */
1123 	cpuhdrp->lev1map_pa = ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map);
1124 	cpuhdrp->page_size = PAGE_SIZE;
1125 	cpuhdrp->nmemsegs = mem_cluster_cnt;
1126 
1127 	/*
1128 	 * Fill in the memory segment descriptors.
1129 	 */
1130 	for (i = 0; i < mem_cluster_cnt; i++) {
1131 		memsegp[i].start = mem_clusters[i].start;
1132 		memsegp[i].size = mem_clusters[i].size & ~PAGE_MASK;
1133 	}
1134 
1135 	return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
1136 }
1137 
1138 /*
1139  * This is called by main to set dumplo and dumpsize.
1140  * Dumps always skip the first PAGE_SIZE of disk space
1141  * in case there might be a disk label stored there.
1142  * If there is extra space, put dump at the end to
1143  * reduce the chance that swapping trashes it.
1144  */
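/*
 * Worked example (assumed figures): with 8 KB pages and 512-byte disk
 * blocks, ctod(1) == 16, so a 1 GB machine (131072 pages) needs
 * 131072 * 16 == 2097152 blocks, plus the single header block from
 * cpu_dumpsize(); dumplo then lands that many blocks before the end
 * of the partition.
 */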
1145 void
1146 dumpconf(void)
1147 {
1148 	int nblks, dumpblks;	/* size of dump area */
1149 
1150 	if (dumpdev == NODEV ||
1151 	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
1152 		return;
1153 	if (nblks <= ctod(1))
1154 		return;
1155 
1156 	dumpblks = cpu_dumpsize();
1157 	if (dumpblks < 0)
1158 		return;
1159 	dumpblks += ctod(cpu_dump_mempagecnt());
1160 
1161 	/* If dump won't fit (incl. room for possible label), punt. */
1162 	if (dumpblks > (nblks - ctod(1)))
1163 		return;
1164 
1165 	/* Put dump at end of partition */
1166 	dumplo = nblks - dumpblks;
1167 
1168 	/* dumpsize is in page units, and doesn't include headers. */
1169 	dumpsize = cpu_dump_mempagecnt();
1170 }
1171 
1172 /*
1173  * Dump the kernel's image to the swap partition.
1174  */
1175 #define	BYTES_PER_DUMP	PAGE_SIZE
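/*
 * dumpsys() below caps each transfer at BYTES_PER_DUMP bytes, writing
 * memory out one page at a time.
 */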
1176 
1177 void
1178 dumpsys()
1179 {
1180 	u_long totalbytesleft, bytes, i, n, memcl;
1181 	u_long maddr;
1182 	int psize;
1183 	daddr_t blkno;
1184 	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
1185 	int error;
1186 	extern int msgbufmapped;
1187 
1188 	/* Save registers. */
1189 	savectx(&dumppcb);
1190 
1191 	msgbufmapped = 0;	/* don't record dump msgs in msgbuf */
1192 	if (dumpdev == NODEV)
1193 		return;
1194 
1195 	/*
1196 	 * For dumps during autoconfiguration,
1197 	 * if the dump device has already been configured...
1198 	 */
1199 	if (dumpsize == 0)
1200 		dumpconf();
1201 	if (dumplo <= 0) {
1202 		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
1203 		    minor(dumpdev));
1204 		return;
1205 	}
1206 	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
1207 	    minor(dumpdev), dumplo);
1208 
1209 #ifdef UVM_SWAP_ENCRYPT
1210 	uvm_swap_finicrypt_all();
1211 #endif
1212 
1213 	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
1214 	printf("dump ");
1215 	if (psize == -1) {
1216 		printf("area unavailable\n");
1217 		return;
1218 	}
1219 
1220 	/* XXX should purge all outstanding keystrokes. */
1221 
1222 	if ((error = cpu_dump()) != 0)
1223 		goto err;
1224 
1225 	totalbytesleft = ptoa(cpu_dump_mempagecnt());
1226 	blkno = dumplo + cpu_dumpsize();
1227 	dump = bdevsw[major(dumpdev)].d_dump;
1228 	error = 0;
1229 
1230 	for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
1231 		maddr = mem_clusters[memcl].start;
1232 		bytes = mem_clusters[memcl].size & ~PAGE_MASK;
1233 
1234 		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
1235 
1236 			/* Print out how many MBs we have to go. */
1237 			if ((totalbytesleft % (1024*1024)) == 0)
1238 				printf("%ld ", totalbytesleft / (1024 * 1024));
1239 
1240 			/* Limit size for next transfer. */
1241 			n = bytes - i;
1242 			if (n > BYTES_PER_DUMP)
1243 				n = BYTES_PER_DUMP;
1244 
1245 			error = (*dump)(dumpdev, blkno,
1246 			    (caddr_t)ALPHA_PHYS_TO_K0SEG(maddr), n);
1247 			if (error)
1248 				goto err;
1249 			maddr += n;
1250 			blkno += btodb(n);			/* XXX? */
1251 
1252 			/* XXX should look for keystrokes, to cancel. */
1253 		}
1254 	}
1255 
1256 err:
1257 	switch (error) {
1258 #ifdef DEBUG
1259 	case ENXIO:
1260 		printf("device bad\n");
1261 		break;
1262 
1263 	case EFAULT:
1264 		printf("device not ready\n");
1265 		break;
1266 
1267 	case EINVAL:
1268 		printf("area improper\n");
1269 		break;
1270 
1271 	case EIO:
1272 		printf("i/o error\n");
1273 		break;
1274 
1275 	case EINTR:
1276 		printf("aborted from console\n");
1277 		break;
1278 #endif /* DEBUG */
1279 	case 0:
1280 		printf("succeeded\n");
1281 		break;
1282 
1283 	default:
1284 		printf("error %d\n", error);
1285 		break;
1286 	}
1287 	printf("\n\n");
1288 	delay(1000);
1289 }
1290 
1291 void
1292 frametoreg(framep, regp)
1293 	struct trapframe *framep;
1294 	struct reg *regp;
1295 {
1296 
1297 	regp->r_regs[R_V0] = framep->tf_regs[FRAME_V0];
1298 	regp->r_regs[R_T0] = framep->tf_regs[FRAME_T0];
1299 	regp->r_regs[R_T1] = framep->tf_regs[FRAME_T1];
1300 	regp->r_regs[R_T2] = framep->tf_regs[FRAME_T2];
1301 	regp->r_regs[R_T3] = framep->tf_regs[FRAME_T3];
1302 	regp->r_regs[R_T4] = framep->tf_regs[FRAME_T4];
1303 	regp->r_regs[R_T5] = framep->tf_regs[FRAME_T5];
1304 	regp->r_regs[R_T6] = framep->tf_regs[FRAME_T6];
1305 	regp->r_regs[R_T7] = framep->tf_regs[FRAME_T7];
1306 	regp->r_regs[R_S0] = framep->tf_regs[FRAME_S0];
1307 	regp->r_regs[R_S1] = framep->tf_regs[FRAME_S1];
1308 	regp->r_regs[R_S2] = framep->tf_regs[FRAME_S2];
1309 	regp->r_regs[R_S3] = framep->tf_regs[FRAME_S3];
1310 	regp->r_regs[R_S4] = framep->tf_regs[FRAME_S4];
1311 	regp->r_regs[R_S5] = framep->tf_regs[FRAME_S5];
1312 	regp->r_regs[R_S6] = framep->tf_regs[FRAME_S6];
1313 	regp->r_regs[R_A0] = framep->tf_regs[FRAME_A0];
1314 	regp->r_regs[R_A1] = framep->tf_regs[FRAME_A1];
1315 	regp->r_regs[R_A2] = framep->tf_regs[FRAME_A2];
1316 	regp->r_regs[R_A3] = framep->tf_regs[FRAME_A3];
1317 	regp->r_regs[R_A4] = framep->tf_regs[FRAME_A4];
1318 	regp->r_regs[R_A5] = framep->tf_regs[FRAME_A5];
1319 	regp->r_regs[R_T8] = framep->tf_regs[FRAME_T8];
1320 	regp->r_regs[R_T9] = framep->tf_regs[FRAME_T9];
1321 	regp->r_regs[R_T10] = framep->tf_regs[FRAME_T10];
1322 	regp->r_regs[R_T11] = framep->tf_regs[FRAME_T11];
1323 	regp->r_regs[R_RA] = framep->tf_regs[FRAME_RA];
1324 	regp->r_regs[R_T12] = framep->tf_regs[FRAME_T12];
1325 	regp->r_regs[R_AT] = framep->tf_regs[FRAME_AT];
1326 	regp->r_regs[R_GP] = framep->tf_regs[FRAME_GP];
1327 	/* regp->r_regs[R_SP] = framep->tf_regs[FRAME_SP]; XXX */
1328 	regp->r_regs[R_ZERO] = 0;
1329 }
1330 
1331 void
1332 regtoframe(regp, framep)
1333 	struct reg *regp;
1334 	struct trapframe *framep;
1335 {
1336 
1337 	framep->tf_regs[FRAME_V0] = regp->r_regs[R_V0];
1338 	framep->tf_regs[FRAME_T0] = regp->r_regs[R_T0];
1339 	framep->tf_regs[FRAME_T1] = regp->r_regs[R_T1];
1340 	framep->tf_regs[FRAME_T2] = regp->r_regs[R_T2];
1341 	framep->tf_regs[FRAME_T3] = regp->r_regs[R_T3];
1342 	framep->tf_regs[FRAME_T4] = regp->r_regs[R_T4];
1343 	framep->tf_regs[FRAME_T5] = regp->r_regs[R_T5];
1344 	framep->tf_regs[FRAME_T6] = regp->r_regs[R_T6];
1345 	framep->tf_regs[FRAME_T7] = regp->r_regs[R_T7];
1346 	framep->tf_regs[FRAME_S0] = regp->r_regs[R_S0];
1347 	framep->tf_regs[FRAME_S1] = regp->r_regs[R_S1];
1348 	framep->tf_regs[FRAME_S2] = regp->r_regs[R_S2];
1349 	framep->tf_regs[FRAME_S3] = regp->r_regs[R_S3];
1350 	framep->tf_regs[FRAME_S4] = regp->r_regs[R_S4];
1351 	framep->tf_regs[FRAME_S5] = regp->r_regs[R_S5];
1352 	framep->tf_regs[FRAME_S6] = regp->r_regs[R_S6];
1353 	framep->tf_regs[FRAME_A0] = regp->r_regs[R_A0];
1354 	framep->tf_regs[FRAME_A1] = regp->r_regs[R_A1];
1355 	framep->tf_regs[FRAME_A2] = regp->r_regs[R_A2];
1356 	framep->tf_regs[FRAME_A3] = regp->r_regs[R_A3];
1357 	framep->tf_regs[FRAME_A4] = regp->r_regs[R_A4];
1358 	framep->tf_regs[FRAME_A5] = regp->r_regs[R_A5];
1359 	framep->tf_regs[FRAME_T8] = regp->r_regs[R_T8];
1360 	framep->tf_regs[FRAME_T9] = regp->r_regs[R_T9];
1361 	framep->tf_regs[FRAME_T10] = regp->r_regs[R_T10];
1362 	framep->tf_regs[FRAME_T11] = regp->r_regs[R_T11];
1363 	framep->tf_regs[FRAME_RA] = regp->r_regs[R_RA];
1364 	framep->tf_regs[FRAME_T12] = regp->r_regs[R_T12];
1365 	framep->tf_regs[FRAME_AT] = regp->r_regs[R_AT];
1366 	framep->tf_regs[FRAME_GP] = regp->r_regs[R_GP];
1367 	/* framep->tf_regs[FRAME_SP] = regp->r_regs[R_SP]; XXX */
1368 	/* ??? = regp->r_regs[R_ZERO]; */
1369 }
1370 
1371 void
1372 printregs(regp)
1373 	struct reg *regp;
1374 {
1375 	int i;
1376 
1377 	for (i = 0; i < 32; i++)
1378 		printf("R%d:\t0x%016lx%s", i, regp->r_regs[i],
1379 		   i & 1 ? "\n" : "\t");
1380 }
1381 
1382 void
1383 regdump(framep)
1384 	struct trapframe *framep;
1385 {
1386 	struct reg reg;
1387 
1388 	frametoreg(framep, &reg);
1389 	reg.r_regs[R_SP] = alpha_pal_rdusp();
1390 
1391 	printf("REGISTERS:\n");
1392 	printregs(&reg);
1393 }
1394 
1395 #ifdef DEBUG
1396 int sigdebug = 0;
1397 pid_t sigpid = 0;
1398 #define	SDB_FOLLOW	0x01
1399 #define	SDB_KSTACK	0x02
1400 #endif
1401 
1402 /*
1403  * Send an interrupt to a process.
1404  */
1405 void
1406 sendsig(catcher, sig, mask, code, type, val)
1407 	sig_t catcher;
1408 	int sig, mask;
1409 	u_long code;
1410 	int type;
1411 	union sigval val;
1412 {
1413 	struct proc *p = curproc;
1414 	struct sigcontext *scp, ksc;
1415 	struct fpreg *fpregs = (struct fpreg *)&ksc.sc_fpregs;
1416 	struct trapframe *frame;
1417 	struct sigacts *psp = p->p_p->ps_sigacts;
1418 	unsigned long oldsp;
1419 	int fsize, rndfsize, kscsize;
1420 	siginfo_t *sip, ksi;
1421 
1422 	oldsp = alpha_pal_rdusp();
1423 	frame = p->p_md.md_tf;
1424 	fsize = sizeof ksc;
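	/*
	 * The frame size is rounded up to a multiple of 16 below to
	 * keep the user stack octaword-aligned, as the Alpha calling
	 * convention requires.
	 */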
1425 	rndfsize = ((fsize + 15) / 16) * 16;
1426 	kscsize = rndfsize;
1427 	if (psp->ps_siginfo & sigmask(sig)) {
1428 		fsize += sizeof ksi;
1429 		rndfsize = ((fsize + 15) / 16) * 16;
1430 	}
1431 
1432 	/*
1433 	 * Allocate and validate space for the signal handler
1434 	 * context. Note that if the stack is in P0 space, the
1435 	 * call to uvm_grow() is a nop, and the useracc() check
1436 	 * will fail if the process has not already allocated
1437 	 * the space with a `brk'.
1438 	 */
1439 	if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 &&
1440 	    !sigonstack(oldsp) && (psp->ps_sigonstack & sigmask(sig)))
1441 		scp = (struct sigcontext *)(p->p_sigstk.ss_sp +
1442 		    p->p_sigstk.ss_size - rndfsize);
1443 	else
1444 		scp = (struct sigcontext *)(oldsp - rndfsize);
1445 	if ((u_long)scp <= USRSTACK - ptoa(p->p_vmspace->vm_ssize))
1446 		(void)uvm_grow(p, (u_long)scp);
1447 #ifdef DEBUG
1448 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1449 		printf("sendsig(%d): sig %d ssp %p usp %p\n", p->p_pid,
1450 		    sig, &ksc, scp);
1451 #endif
1452 
1453 	/*
1454 	 * Build the signal context to be used by sigreturn.
1455 	 */
1456 	bzero(&ksc, sizeof(ksc));
1457 	ksc.sc_mask = mask;
1458 	ksc.sc_pc = frame->tf_regs[FRAME_PC];
1459 	ksc.sc_ps = frame->tf_regs[FRAME_PS];
1460 
1461 	/* copy the registers. */
1462 	frametoreg(frame, (struct reg *)ksc.sc_regs);
1463 	ksc.sc_regs[R_ZERO] = 0xACEDBADE;		/* magic number */
1464 	ksc.sc_regs[R_SP] = oldsp;
1465 
1466 	/* save the floating-point state, if necessary, then copy it. */
1467 	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
1468 		fpusave_proc(p, 1);
1469 	ksc.sc_ownedfp = p->p_md.md_flags & MDP_FPUSED;
1470 	memcpy(/*ksc.sc_*/fpregs, &p->p_addr->u_pcb.pcb_fp,
1471 	    sizeof(struct fpreg));
1472 #ifndef NO_IEEE
1473 	ksc.sc_fp_control = alpha_read_fp_c(p);
1474 #else
1475 	ksc.sc_fp_control = 0;
1476 #endif
1477 	memset(ksc.sc_reserved, 0, sizeof ksc.sc_reserved);	/* XXX */
1478 	memset(ksc.sc_xxx, 0, sizeof ksc.sc_xxx);		/* XXX */
1479 
1480 	if (psp->ps_siginfo & sigmask(sig)) {
1481 		initsiginfo(&ksi, sig, code, type, val);
1482 		sip = (void *)scp + kscsize;
1483 		if (copyout((caddr_t)&ksi, (caddr_t)sip, fsize - kscsize) != 0)
1484 			goto trash;
1485 	} else
1486 		sip = NULL;
1487 
1488 	/*
1489 	 * copy the frame out to userland.
1490 	 */
1491 	if (copyout((caddr_t)&ksc, (caddr_t)scp, kscsize) != 0) {
1492 trash:
1493 #ifdef DEBUG
1494 		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1495 			printf("sendsig(%d): copyout failed on sig %d\n",
1496 			    p->p_pid, sig);
1497 #endif
1498 		/*
1499 		 * Process has trashed its stack; give it an illegal
1500 		 * instruction to halt it in its tracks.
1501 		 */
1502 		sigexit(p, SIGILL);
1503 		/* NOTREACHED */
1504 	}
1505 #ifdef DEBUG
1506 	if (sigdebug & SDB_FOLLOW)
1507 		printf("sendsig(%d): sig %d scp %p code %lx\n", p->p_pid, sig,
1508 		    scp, code);
1509 #endif
1510 
1511 	/*
1512 	 * Set up the registers to return to sigcode.
1513 	 */
1514 	frame->tf_regs[FRAME_PC] = p->p_p->ps_sigcode;
1515 	frame->tf_regs[FRAME_A0] = sig;
1516 	frame->tf_regs[FRAME_A1] = (u_int64_t)sip;
1517 	frame->tf_regs[FRAME_A2] = (u_int64_t)scp;
1518 	frame->tf_regs[FRAME_T12] = (u_int64_t)catcher;		/* t12 is pv */
1519 	alpha_pal_wrusp((unsigned long)scp);
1520 
1521 #ifdef DEBUG
1522 	if (sigdebug & SDB_FOLLOW)
1523 		printf("sendsig(%d): pc %lx, catcher %lx\n", p->p_pid,
1524 		    frame->tf_regs[FRAME_PC], frame->tf_regs[FRAME_T12]);
1525 	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
1526 		printf("sendsig(%d): sig %d returns\n",
1527 		    p->p_pid, sig);
1528 #endif
1529 }
1530 
1531 /*
1532  * System call to cleanup state after a signal
1533  * has been taken.  Reset signal mask and
1534  * stack state from context left by sendsig (above).
1535  * Return to previous pc and psl as specified by
1536  * context left by sendsig. Check carefully to
1537  * make sure that the user has not modified the
1538  * psl to gain improper privileges or to cause
1539  * a machine fault.
1540  */
1541 /* ARGSUSED */
1542 int
1543 sys_sigreturn(p, v, retval)
1544 	struct proc *p;
1545 	void *v;
1546 	register_t *retval;
1547 {
1548 	struct sys_sigreturn_args /* {
1549 		syscallarg(struct sigcontext *) sigcntxp;
1550 	} */ *uap = v;
1551 	struct sigcontext ksc;
1552 	struct fpreg *fpregs = (struct fpreg *)&ksc.sc_fpregs;
1553 	int error;
1554 
1555 #ifdef DEBUG
1556 	if (sigdebug & SDB_FOLLOW)
1557 		printf("sigreturn: pid %d, scp %p\n", p->p_pid,
1558 		    SCARG(uap, sigcntxp));
1559 #endif
1562 
1563 	/*
1564 	 * Test and fetch the context structure.
1565 	 * We grab it all at once for speed.
1566 	 */
1567 	if ((error = copyin(SCARG(uap, sigcntxp), &ksc, sizeof(ksc))) != 0)
1568 		return (error);
1569 
1570 	if (ksc.sc_regs[R_ZERO] != 0xACEDBADE)		/* magic number */
1571 		return (EINVAL);
1572 	/*
1573 	 * Restore the user-supplied information
1574 	 */
1575 	p->p_sigmask = ksc.sc_mask &~ sigcantmask;
1576 
1577 	p->p_md.md_tf->tf_regs[FRAME_PC] = ksc.sc_pc;
1578 	p->p_md.md_tf->tf_regs[FRAME_PS] =
1579 	    (ksc.sc_ps | ALPHA_PSL_USERSET) & ~ALPHA_PSL_USERCLR;
1580 
1581 	regtoframe((struct reg *)ksc.sc_regs, p->p_md.md_tf);
1582 	alpha_pal_wrusp(ksc.sc_regs[R_SP]);
1583 
1584 	/* XXX ksc.sc_ownedfp ? */
1585 	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
1586 		fpusave_proc(p, 0);
1587 	memcpy(&p->p_addr->u_pcb.pcb_fp, /*ksc.sc_*/fpregs,
1588 	    sizeof(struct fpreg));
1589 #ifndef NO_IEEE
1590 	p->p_addr->u_pcb.pcb_fp.fpr_cr = ksc.sc_fpcr;
1591 	p->p_md.md_flags = ksc.sc_fp_control & MDP_FP_C;
1592 #endif
1593 
1594 #ifdef DEBUG
1595 	if (sigdebug & SDB_FOLLOW)
1596 		printf("sigreturn(%d): returns\n", p->p_pid);
1597 #endif
1598 	return (EJUSTRETURN);
1599 }
1600 
1601 /*
1602  * machine dependent system variables.
1603  */
1604 int
1605 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1606 	int *name;
1607 	u_int namelen;
1608 	void *oldp;
1609 	size_t *oldlenp;
1610 	void *newp;
1611 	size_t newlen;
1612 	struct proc *p;
1613 {
1614 	dev_t consdev;
1615 #if NIOASIC > 0
1616 	int oldval, ret;
1617 #endif
1618 
1619 	if (name[0] != CPU_CHIPSET && namelen != 1)
1620 		return (ENOTDIR);		/* overloaded */
1621 
1622 	switch (name[0]) {
1623 	case CPU_CONSDEV:
1624 		if (cn_tab != NULL)
1625 			consdev = cn_tab->cn_dev;
1626 		else
1627 			consdev = NODEV;
1628 		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1629 			sizeof consdev));
1630 
1631 #ifndef SMALL_KERNEL
1632 	case CPU_UNALIGNED_PRINT:
1633 		return (sysctl_int(oldp, oldlenp, newp, newlen,
1634 		    &alpha_unaligned_print));
1635 
1636 	case CPU_UNALIGNED_FIX:
1637 		return (sysctl_int(oldp, oldlenp, newp, newlen,
1638 		    &alpha_unaligned_fix));
1639 
1640 	case CPU_UNALIGNED_SIGBUS:
1641 		return (sysctl_int(oldp, oldlenp, newp, newlen,
1642 		    &alpha_unaligned_sigbus));
1643 
1644 	case CPU_BOOTED_KERNEL:
1645 		return (sysctl_rdstring(oldp, oldlenp, newp,
1646 		    bootinfo.booted_kernel));
1647 
1648 	case CPU_CHIPSET:
1649 		return (alpha_sysctl_chipset(name + 1, namelen - 1, oldp,
1650 		    oldlenp));
1651 #endif /* SMALL_KERNEL */
1652 
1653 #ifndef NO_IEEE
1654 	case CPU_FP_SYNC_COMPLETE:
1655 		return (sysctl_int(oldp, oldlenp, newp, newlen,
1656 		    &alpha_fp_sync_complete));
1657 #endif
1658 	case CPU_ALLOWAPERTURE:
1659 #ifdef APERTURE
1660 		if (securelevel > 0)
1661 			return (sysctl_int_lower(oldp, oldlenp, newp, newlen,
1662 			    &allowaperture));
1663 		else
1664 			return (sysctl_int(oldp, oldlenp, newp, newlen,
1665 			    &allowaperture));
1666 #else
1667 		return (sysctl_rdint(oldp, oldlenp, newp, 0));
1668 #endif
1669 #if NIOASIC > 0
1670 	case CPU_LED_BLINK:
1671 		oldval = alpha_led_blink;
1672 		ret = sysctl_int(oldp, oldlenp, newp, newlen, &alpha_led_blink);
1673 		if (oldval != alpha_led_blink)
1674 			ioasic_led_blink(NULL);
1675 		return (ret);
1676 #endif
1677 	default:
1678 		return (EOPNOTSUPP);
1679 	}
1680 	/* NOTREACHED */
1681 }
1682 
1683 /*
1684  * Set registers on exec.
1685  */
1686 void
1687 setregs(p, pack, stack, retval)
1688 	register struct proc *p;
1689 	struct exec_package *pack;
1690 	u_long stack;
1691 	register_t *retval;
1692 {
1693 	struct trapframe *tfp = p->p_md.md_tf;
1694 #ifdef DEBUG
1695 	int i;
1696 #endif
1697 
1698 #ifdef DEBUG
1699 	/*
1700 	 * Crash and dump, if the user requested it.
1701 	 */
1702 	if (boothowto & RB_DUMP)
1703 		panic("crash requested by boot flags");
1704 #endif
1705 
1706 #ifdef DEBUG
1707 	for (i = 0; i < FRAME_SIZE; i++)
1708 		tfp->tf_regs[i] = 0xbabefacedeadbeef;
1709 	tfp->tf_regs[FRAME_A1] = 0;
1710 #else
1711 	bzero(tfp->tf_regs, FRAME_SIZE * sizeof tfp->tf_regs[0]);
1712 #endif
1713 	bzero(&p->p_addr->u_pcb.pcb_fp, sizeof p->p_addr->u_pcb.pcb_fp);
1714 	alpha_pal_wrusp(stack);
1715 	tfp->tf_regs[FRAME_PS] = ALPHA_PSL_USERSET;
1716 	tfp->tf_regs[FRAME_PC] = pack->ep_entry & ~3;
1717 
1718 	tfp->tf_regs[FRAME_A0] = stack;
1719 	/* a1 and a2 already zeroed */
1720 	tfp->tf_regs[FRAME_T12] = tfp->tf_regs[FRAME_PC];	/* a.k.a. PV */
1721 
1722 	p->p_md.md_flags &= ~MDP_FPUSED;
1723 #ifndef NO_IEEE
1724 	if (__predict_true((p->p_md.md_flags & IEEE_INHERIT) == 0)) {
1725 		p->p_md.md_flags &= ~MDP_FP_C;
1726 		p->p_addr->u_pcb.pcb_fp.fpr_cr = FPCR_DYN(FP_RN);
1727 	}
1728 #endif
1729 	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
1730 		fpusave_proc(p, 0);
1731 
1732 	retval[1] = 0;
1733 }
1734 
1735 /*
1736  * Release the FPU.
1737  */
1738 void
1739 fpusave_cpu(struct cpu_info *ci, int save)
1740 {
1741 	struct proc *p;
1742 #if defined(MULTIPROCESSOR)
1743 	int s;
1744 #endif
1745 
1746 	KDASSERT(ci == curcpu());
1747 
1748 #if defined(MULTIPROCESSOR)
1749 	/* Need to block IPIs */
1750 	s = splipi();
1751 	atomic_setbits_ulong(&ci->ci_flags, CPUF_FPUSAVE);
1752 #endif
1753 
1754 	p = ci->ci_fpcurproc;
1755 	if (p == NULL)
1756 		goto out;
1757 
1758 	if (save) {
1759 		alpha_pal_wrfen(1);
1760 		savefpstate(&p->p_addr->u_pcb.pcb_fp);
1761 	}
1762 
1763 	alpha_pal_wrfen(0);
1764 
1765 	p->p_addr->u_pcb.pcb_fpcpu = NULL;
1766 	ci->ci_fpcurproc = NULL;
1767 
1768 out:
1769 #if defined(MULTIPROCESSOR)
1770 	atomic_clearbits_ulong(&ci->ci_flags, CPUF_FPUSAVE);
1771 	alpha_pal_swpipl(s);
1772 #endif
1773 	return;
1774 }
1775 
1776 /*
1777  * Synchronize FP state for this process.
1778  */
1779 void
1780 fpusave_proc(struct proc *p, int save)
1781 {
1782 	struct cpu_info *ci = curcpu();
1783 	struct cpu_info *oci;
1784 #if defined(MULTIPROCESSOR)
1785 	u_long ipi = save ? ALPHA_IPI_SYNCH_FPU : ALPHA_IPI_DISCARD_FPU;
1786 	int s;
1787 #endif
1788 
1789 	KDASSERT(p->p_addr != NULL);
1790 
1791 	for (;;) {
1792 #if defined(MULTIPROCESSOR)
1793 		/* Need to block IPIs */
1794 		s = splipi();
1795 #endif
1796 
1797 		oci = p->p_addr->u_pcb.pcb_fpcpu;
1798 		if (oci == NULL) {
1799 #if defined(MULTIPROCESSOR)
1800 			alpha_pal_swpipl(s);
1801 #endif
1802 			return;
1803 		}
1804 
1805 #if defined(MULTIPROCESSOR)
1806 		if (oci == ci) {
1807 			KASSERT(ci->ci_fpcurproc == p);
1808 			alpha_pal_swpipl(s);
1809 			fpusave_cpu(ci, save);
1810 			return;
1811 		}
1812 
1813 		/*
1814 		 * The other cpu may still be running and could have
1815 		 * discarded the fpu context on its own.
1816 		 */
1817 		if (oci->ci_fpcurproc != p)
1818 			continue;
1819 
1820 		alpha_send_ipi(oci->ci_cpuid, ipi);
1821 		alpha_pal_swpipl(s);
1822 
1823 		while (p->p_addr->u_pcb.pcb_fpcpu != NULL)
1824 			SPINLOCK_SPIN_HOOK;
1825 #else
1826 		KASSERT(ci->ci_fpcurproc == p);
1827 		fpusave_cpu(ci, save);
1828 #endif /* MULTIPROCESSOR */
1829 
1830 		break;
1831 	}
1832 }
1833 
1834 int
1835 spl0()
1836 {
1837 
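	/*
	 * If a soft interrupt is pending, drop to the soft IPL just
	 * long enough to dispatch it before lowering to IPL 0.
	 */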
1838 	if (ssir) {
1839 		(void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT);
1840 		softintr_dispatch();
1841 	}
1842 
1843 	return (alpha_pal_swpipl(ALPHA_PSL_IPL_0));
1844 }
1845 
1846 /*
1847  * Wait "n" microseconds.
1848  */
1849 void
1850 delay(n)
1851 	unsigned long n;
1852 {
1853 	unsigned long pcc0, pcc1, curcycle, cycles, usec;
1854 
1855 	if (n == 0)
1856 		return;
1857 
1858 	pcc0 = alpha_rpcc() & 0xffffffffUL;
1859 	cycles = 0;
1860 	usec = 0;
1861 
1862 	while (usec <= n) {
1863 		/*
1864 		 * Get the next CPU cycle count - assumes that we can not
1865 		 * have had more than one 32 bit overflow.
1866 		 */
1867 		pcc1 = alpha_rpcc() & 0xffffffffUL;
1868 		if (pcc1 < pcc0)
1869 			curcycle = (pcc1 + 0x100000000UL) - pcc0;
1870 		else
1871 			curcycle = pcc1 - pcc0;
1872 
1873 		/*
1874 		 * We now have the number of processor cycles since we
1875 		 * last checked. Add the current cycle count to the
1876 		 * running total. If it's over cycles_per_usec, increment
1877 		 * the usec counter.
1878 		 */
1879 		cycles += curcycle;
1880 		while (cycles >= cycles_per_usec) {
1881 			usec++;
1882 			cycles -= cycles_per_usec;
1883 		}
1884 		pcc0 = pcc1;
1885 	}
1886 }
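
/*
 * Example: the secondary-CPU shutdown loop in boot() above calls
 * delay(1000) to busy-wait one millisecond per iteration, independent
 * of the CPU clock rate thanks to cycles_per_usec.
 */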
1887 
1888 int
1889 alpha_pa_access(pa)
1890 	u_long pa;
1891 {
1892 	int i;
1893 
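	/*
	 * The low PAGE_MASK bits of each cluster's size field hold the
	 * PROT_* flags that alpha_init() stashed there at boot.
	 */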
1894 	for (i = 0; i < mem_cluster_cnt; i++) {
1895 		if (pa < mem_clusters[i].start)
1896 			continue;
1897 		if ((pa - mem_clusters[i].start) >=
1898 		    (mem_clusters[i].size & ~PAGE_MASK))
1899 			continue;
1900 		return (mem_clusters[i].size & PAGE_MASK);	/* prot */
1901 	}
1902 
1903 	/*
1904 	 * Address is not a memory address.  If we're secure, disallow
1905 	 * access.  Otherwise, grant read/write.
1906 	 */
1907 	if (securelevel > 0)
1908 		return (PROT_NONE);
1909 	else
1910 		return (PROT_READ | PROT_WRITE);
1911 }
1912 
1913 /* XXX XXX BEGIN XXX XXX */
1914 paddr_t alpha_XXX_dmamap_or;					/* XXX */
1915 								/* XXX */
1916 paddr_t								/* XXX */
1917 alpha_XXX_dmamap(v)						/* XXX */
1918 	vaddr_t v;						/* XXX */
1919 {								/* XXX */
1920 								/* XXX */
1921 	return (vtophys(v) | alpha_XXX_dmamap_or);		/* XXX */
1922 }								/* XXX */
1923 /* XXX XXX END XXX XXX */
1924