/*	$NetBSD: cpu.c,v 1.126 2002/01/25 17:40:45 pk Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.c	8.5 (Berkeley) 11/23/93
 *
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"
#include "opt_sparc_arch.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/lock.h>

#include <uvm/uvm.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/ctlreg.h>
#include <machine/trap.h>
#include <machine/pcb.h>
#include <machine/pmap.h>

#include <machine/oldmon.h>
#include <machine/idprom.h>

#include <sparc/sparc/cache.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/memreg.h>

struct cpu_softc {
	struct device	sc_dv;		/* generic device info */
	struct cpu_info	*sc_cpuinfo;
};

/* The following are used externally (sysctl_hw). */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
char	cpu_model[100];

int	ncpu;				/* # of CPUs detected by PROM */
struct	cpu_info **cpus;
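/*
 * Convert an MBus module ID (the "mid" PROM property) to a CPU index.
 * On sun4m machines the processor modules occupy MIDs starting at 8,
 * so e.g. CPU_MID2CPUNO(9) == 1, i.e. mid 9 is the second CPU.
 */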
#define CPU_MID2CPUNO(mid) ((mid) - 8)
static	int cpu_instance;		/* current # of CPUs wired by us */


/* The CPU configuration driver. */
static void cpu_attach __P((struct device *, struct device *, void *));
int  cpu_match __P((struct device *, struct cfdata *, void *));

struct cfattach cpu_ca = {
	sizeof(struct cpu_softc), cpu_match, cpu_attach
};

static char *fsrtoname __P((int, int, int));
void cache_print __P((struct cpu_softc *));
void cpu_setup __P((struct cpu_softc *));
void fpu_init __P((struct cpu_info *));

#define	IU_IMPL(psr)	((u_int)(psr) >> 28)
#define	IU_VERS(psr)	(((psr) >> 24) & 0xf)

#define SRMMU_IMPL(mmusr)	((u_int)(mmusr) >> 28)
#define SRMMU_VERS(mmusr)	(((mmusr) >> 24) & 0xf)
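/*
 * A sketch of the decoding: a SuperSPARC reads back PSR = 0x40xxxxxx,
 * so IU_IMPL() yields 4 and IU_VERS() yields 0, which the cpu_conf[]
 * table below maps to "TMS390Z50 v0 or TMS390Z55".
 */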

#if defined(MULTIPROCESSOR)
void cpu_spinup __P((struct cpu_softc *));
struct cpu_info *alloc_cpuinfo_global_va __P((int, vsize_t *));
struct cpu_info	*alloc_cpuinfo __P((void));

int go_smp_cpus = 0;	/* non-primary CPUs wait for this to go */

/* lock this to send IPIs */
struct simplelock xpmsg_lock = SIMPLELOCK_INITIALIZER;

struct cpu_info *
alloc_cpuinfo_global_va(ismaster, sizep)
	int ismaster;
	vsize_t *sizep;
{
	int align;
	vaddr_t sva, va;
	vsize_t sz, esz;

	/*
	 * Allocate aligned KVA.  `cpuinfo' resides at a fixed virtual
	 * address. Since we need to access another CPU's cpuinfo
	 * structure occasionally, this must be done at a virtual address
	 * that's cache congruent to the fixed address CPUINFO_VA.
	 *
	 * NOTE: we're using the cache properties of the boot CPU to
	 * determine the alignment (XXX).
	 */
	align = NBPG;
	if (CACHEINFO.c_totalsize > align)
		/* Assumes `c_totalsize' is power of two */
		align = CACHEINFO.c_totalsize;

	sz = sizeof(struct cpu_info);

	if (ismaster == 0) {
		/*
		 * While we're here, allocate a per-CPU idle PCB and
		 * interrupt stack as well.
		 */
		sz += USPACE;		/* `idle' u-area for this CPU */
		sz += INT_STACK_SIZE;	/* interrupt stack for this CPU */
	}

	/* Round up to a page, then pad with one alignment chunk of slack */
	sz = (sz + NBPG - 1) & -NBPG;
	esz = sz + align - NBPG;

	if ((sva = uvm_km_valloc(kernel_map, esz)) == 0)
		panic("alloc_cpuinfo_global_va: no virtual space");

	/* Pick the subrange of [sva, sva+esz) that is congruent to CPUINFO_VA */
	va = sva + (((CPUINFO_VA & (align - 1)) + align - sva) & (align - 1));

	/* Return excess virtual memory space */
	if (va != sva)
		(void)uvm_unmap(kernel_map, sva, va);
	if (va + sz != sva + esz)
		(void)uvm_unmap(kernel_map, va + sz, sva + esz);

	if (sizep != NULL)
		*sizep = sz;

	return ((struct cpu_info *)va);
}
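/*
 * A worked example of the congruence arithmetic above (illustrative
 * numbers, not from a real machine): with a 64K virtually-indexed
 * cache, align == 0x10000.  If CPUINFO_VA lies at offset 0x2000 within
 * a cache-sized chunk and uvm_km_valloc() returned sva == 0xf8071000,
 * then
 *
 *	va = sva + ((0x2000 + 0x10000 - sva) & 0xffff)
 *	   = 0xf8071000 + 0x1000 = 0xf8072000
 *
 * i.e. the first address >= sva whose offset modulo the cache size
 * matches that of CPUINFO_VA, so both mappings index the same cache
 * lines.
 */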

struct cpu_info *
alloc_cpuinfo()
{
	vaddr_t va;
	vsize_t sz;
	vaddr_t low, high;
	struct vm_page *m;
	struct pglist mlist;
	struct cpu_info *cpi;

	/* Allocate the aligned VA and determine the size. */
	cpi = alloc_cpuinfo_global_va(0, &sz);
	va = (vaddr_t)cpi;

	/* Allocate physical pages */
	low = vm_first_phys;
	high = vm_first_phys + vm_num_phys - NBPG;
	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(sz, low, high, NBPG, 0, &mlist, 1, 0) != 0)
		panic("alloc_cpuinfo: no pages");

	/* Map the pages */
	for (m = TAILQ_FIRST(&mlist); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		paddr_t pa = VM_PAGE_TO_PHYS(m);
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		va += NBPG;
	}
	pmap_update(pmap_kernel());

	bzero((void *)cpi, sz);
	cpi->eintstack = (void *)((vaddr_t)cpi + sz);
	cpi->idle_u = (void *)((vaddr_t)cpi + sz - INT_STACK_SIZE - USPACE);

	return (cpi);
}
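/*
 * Resulting layout of a non-boot CPU's chunk, as implied by the
 * pointer arithmetic above (stacks grow downward):
 *
 *	cpi ->		struct cpu_info
 *			(page-rounding slop)
 *	cpi->idle_u ->	idle u-area	(USPACE bytes)
 *			interrupt stack	(INT_STACK_SIZE bytes)
 *	cpi + sz ->	cpi->eintstack	(end of interrupt stack)
 */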
#endif /* MULTIPROCESSOR */

#ifdef notdef
/*
 * IU implementations are parceled out to vendors (with some slight
 * glitches).  Printing these is cute but takes too much space.
 */
static char *iu_vendor[16] = {
	"Fujitsu",	/* and also LSI Logic */
	"ROSS",		/* ROSS (ex-Cypress) */
	"BIT",
	"LSIL",		/* LSI Logic finally got their own */
	"TI",		/* Texas Instruments */
	"Matsushita",
	"Philips",
	"Harvest",	/* Harvest VLSI Design Center */
	"SPEC",		/* Systems and Processes Engineering Corporation */
	"Weitek",
	"vendor#10",
	"vendor#11",
	"vendor#12",
	"vendor#13",
	"vendor#14",
	"vendor#15"
};
#endif

/*
 * 4/110 comment: the 4/110 chops off the top 4 bits of an OBIO address.
 *	this confuses autoconf.  for example, if you try to map
 *	0xfe000000 in obio space on a 4/110 it actually maps 0x0e000000.
 *	this is easy to verify with the PROM.   this causes problems
 *	with devices like "esp0 at obio0 addr 0xfa000000" because the
 *	4/110 treats it as "esp0 at obio0 addr 0x0a000000", which is the
 *	address of the 4/110's "sw0" scsi chip.   the same thing happens
 *	between zs1 and zs2.    since the sun4 line is "closed" and
 *	we know all the "obio" devices that will ever be on it, we just
 *	put in some special case "if"'s in the match routines of esp,
 *	dma, and zs.
 */

int
cpu_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
}
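/*
 * Sketch of how we get here (assuming the stock GENERIC config): a
 * kernel configuration line such as
 *
 *	cpu0 at mainbus0
 *
 * causes autoconf to offer each mainbus child to this driver, and we
 * accept whenever the node name handed to us matches our driver
 * name, "cpu".
 */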

/*
 * Attach the CPU.
 * Discover interesting goop about the virtual address cache
 * (slightly funny place to do it, but this is where it is to be found).
 */
static void
cpu_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
static	struct cpu_softc *bootcpu;
	struct mainbus_attach_args *ma = aux;
	struct cpu_softc *sc = (struct cpu_softc *)self;
	struct cpu_info *cpi;
	int node, mid;

	node = ma->ma_node;

#if defined(MULTIPROCESSOR)
	mid = (node != 0) ? PROM_getpropint(node, "mid", 0) : 0;
#else
	mid = 0;
#endif

	/*
	 * First, find out if we're attaching the boot CPU.
	 */
	if (bootcpu == NULL) {
		extern struct pcb idle_u[];

		bootcpu = sc;
		cpus = malloc(ncpu * sizeof(cpi), M_DEVBUF, M_NOWAIT);
		bzero(cpus, ncpu * sizeof(cpi));

		getcpuinfo(&cpuinfo, node);

#if defined(MULTIPROCESSOR)
		/*
		 * Allocate a suitable global VA for the boot CPU's
		 * cpu_info (which is already statically allocated),
		 * and double map it to that global VA.  Then fixup
		 * the self-reference to use the globalized address.
		 */
		cpi = sc->sc_cpuinfo = alloc_cpuinfo_global_va(1, NULL);
		pmap_globalize_boot_cpuinfo(cpi);
		cpuinfo.ci_self = cpi;

		/* XXX - fixup proc0.p_cpu */
		proc0.p_cpu = cpi;
#else
		/* The `local' VA is global for uniprocessor. */
		cpi = sc->sc_cpuinfo = (struct cpu_info *)CPUINFO_VA;
#endif
		cpi->master = 1;
		cpi->eintstack = eintstack;
		cpi->idle_u = idle_u;
		/* Note: `curpcb' is set to `proc0' in locore */
	} else {
#if defined(MULTIPROCESSOR)
		cpi = sc->sc_cpuinfo = alloc_cpuinfo();
		cpi->ci_self = cpi;
		cpi->curpcb = cpi->idle_u;
		cpi->curpcb->pcb_wim = 1;
		/*
		 * Note: `idle_u' and `eintstack' are set in alloc_cpuinfo().
		 * The %wim register will be initialized in cpu_hatch().
		 */
		getcpuinfo(cpi, node);
#else
		printf(": no SMP support in kernel\n");
		return;
#endif
	}

#ifdef DEBUG
	cpi->redzone = (void *)((long)cpi->idle_u + REDSIZE);
#endif

	cpus[cpu_instance] = cpi;
	cpi->ci_cpuid = cpu_instance++;
	cpi->mid = mid;
	cpi->node = node;
	simple_lock_init(&cpi->msg.lock);

	if (ncpu > 1)
		printf(": mid %d", mid);

	if (cpi->master) {
		cpu_setup(sc);
		sprintf(cpu_model, "%s @ %s MHz, %s FPU",
			cpi->cpu_name, clockfreq(cpi->hz), cpi->fpu_name);
		printf(": %s\n", cpu_model);
		cache_print(sc);
		return;
	}

#if defined(MULTIPROCESSOR)
	/* for now use the fixed virtual addresses set up in autoconf.c */
	cpi->intreg_4m = (struct icr_pi *)
		(PI_INTR_VA + (_MAXNBPG * CPU_MID2CPUNO(mid)));

	/* Now start this CPU */
	cpu_spinup(sc);
	printf(": %s @ %s MHz, %s FPU\n", cpi->cpu_name,
		clockfreq(cpi->hz), cpi->fpu_name);

	cache_print(sc);

	if (ncpu > 1 && cpu_instance == ncpu) {
		int n;
		/*
		 * Install MP cache flush functions, unless the
		 * single-processor versions are no-ops.
		 */
		for (n = 0; n < ncpu; n++) {
			struct cpu_info *cpi = cpus[n];
			if (cpi == NULL)
				continue;
#define SET_CACHE_FUNC(x) \
	if (cpi->x != __CONCAT(noop_,x)) cpi->x = __CONCAT(smp_,x)
			SET_CACHE_FUNC(cache_flush);
			SET_CACHE_FUNC(vcache_flush_page);
			SET_CACHE_FUNC(vcache_flush_segment);
			SET_CACHE_FUNC(vcache_flush_region);
			SET_CACHE_FUNC(vcache_flush_context);
		}
	}
#endif /* MULTIPROCESSOR */
}

#if defined(MULTIPROCESSOR)
/*
 * Start secondary processors in motion.
 */
void
cpu_boot_secondary_processors()
{
	int n;

	if (cpu_instance != ncpu) {
		printf("NOTICE: only %d out of %d CPUs were configured\n",
			cpu_instance, ncpu);
		return;
	}

	if (cpus == NULL)
		return;

	printf("cpu0: booting secondary processors:");
	for (n = 0; n < ncpu; n++) {
		struct cpu_info *cpi = cpus[n];

		if (cpi == NULL || cpuinfo.mid == cpi->mid)
			continue;

		printf(" cpu%d", cpi->ci_cpuid);
		cpi->flags |= CPUFLG_READY;
	}

	/* Tell the other CPUs to start up.  */
	go_smp_cpus = 1;

	/* OK, we're done. */
	cpuinfo.flags |= CPUFLG_READY;
	printf("\n");
}
#endif /* MULTIPROCESSOR */

/*
 * Hand-off area for bootstrapping non-boot CPUs: cpu_spinup() below
 * fills these in before asking the PROM to start a secondary CPU at
 * cpu_hatch (locore.s); the new CPU signals back through `cpu_hatched'.
 */
void *cpu_hatchstack = 0;
void *cpu_hatch_sc = 0;
volatile int cpu_hatched = 0;

/*
 * Finish CPU attach.
 * Must be run by the CPU which is being attached.
 */
void
cpu_setup(sc)
	struct cpu_softc *sc;
{

	if (cpuinfo.hotfix)
		(*cpuinfo.hotfix)(&cpuinfo);

	/* Initialize FPU */
	fpu_init(&cpuinfo);

	/* Enable the cache */
	cpuinfo.cache_enable();

	cpu_hatched = 1;
#if 0
	/* Flush cache line */
	cpuinfo.cache_flush((caddr_t)&cpu_hatched, sizeof(cpu_hatched));
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * Allocate per-CPU data, then start up this CPU using PROM.
 */
void
cpu_spinup(sc)
	struct cpu_softc *sc;
{
	struct cpu_info *cpi = sc->sc_cpuinfo;
	int n;
extern void cpu_hatch __P((void));	/* in locore.s */
	caddr_t pc = (caddr_t)cpu_hatch;
	struct openprom_addr oa;

	/* Set up CPU-specific MMU tables */
	pmap_alloc_cpu(cpi);

	cpu_hatched = 0;
	cpu_hatchstack = cpi->idle_u;
	cpu_hatch_sc = sc;

	/*
	 * The physical address of the context table is passed to
	 * the PROM in a "physical address descriptor".
	 */
	oa.oa_space = 0;
	oa.oa_base = (u_int32_t)cpi->ctx_tbl_pa;
	oa.oa_size = cpi->mmu_ncontext * sizeof(cpi->ctx_tbl[0]); /*???*/

	/*
	 * Flush entire cache here, since the CPU may start with
	 * caches off, hence no cache-coherency may be assumed.
	 */
	cpuinfo.cache_flush_all();
	prom_cpustart(cpi->node, &oa, 0, pc);

	/*
	 * Wait for this CPU to spin up.
	 */
	for (n = 10000; n != 0; n--) {
		cpuinfo.cache_flush((caddr_t)&cpu_hatched, sizeof(cpu_hatched));
		if (cpu_hatched != 0) {
			return;
		}
		delay(100);
	}
	printf("CPU did not spin up\n");
}

/*
 * Calls raise_ipi(), waits for the remote CPU to notice the message, and
 * unlocks this CPU's message lock, which we expect was locked at entry.
 */
void
raise_ipi_wait_and_unlock(cpi)
	struct cpu_info *cpi;
{
	int i;

	raise_ipi(cpi);
	i = 0;
	while ((cpi->flags & CPUFLG_GOTMSG) == 0) {
		if (i++ > 500000) {
			printf("raise_ipi_wait_and_unlock(cpu%d): couldn't ping cpu%d\n",
			    cpuinfo.ci_cpuid, cpi->ci_cpuid);
			break;
		}
	}
	simple_unlock(&cpi->msg.lock);
}

/*
 * Call a function on every CPU.  One must hold xpmsg_lock around
 * this function.
 */
void
cross_call(func, arg0, arg1, arg2, arg3, cpuset)
	int	(*func)(int, int, int, int);
	int	arg0, arg1, arg2, arg3;
	int	cpuset;	/* XXX unused; cpus to send to: we do all */
{
	int n, i, not_done;
	struct xpmsg_func *p;

	/*
	 * If no cpus are configured yet, just call ourselves.
	 */
	if (cpus == NULL) {
		p = &cpuinfo.msg.u.xpmsg_func;
		p->func = func;
		p->arg0 = arg0;
		p->arg1 = arg1;
		p->arg2 = arg2;
		p->arg3 = arg3;
		p->retval = (*p->func)(p->arg0, p->arg1, p->arg2, p->arg3);
		return;
	}

	/*
	 * Firstly, call each CPU.  We do this so that they might have
	 * finished by the time we start looking.
	 */
	for (n = 0; n < ncpu; n++) {
		struct cpu_info *cpi = cpus[n];

		if (CPU_READY(cpi))
			continue;

		simple_lock(&cpi->msg.lock);
		cpi->msg.tag = XPMSG_FUNC;
		p = &cpi->msg.u.xpmsg_func;
		p->func = func;
		p->arg0 = arg0;
		p->arg1 = arg1;
		p->arg2 = arg2;
		p->arg3 = arg3;
		cpi->flags &= ~CPUFLG_GOTMSG;
		raise_ipi(cpi);
	}

	/*
	 * Second, call ourselves.
	 */

	p = &cpuinfo.msg.u.xpmsg_func;

	/* Call this on me first. */
	p->func = func;
	p->arg0 = arg0;
	p->arg1 = arg1;
	p->arg2 = arg2;
	p->arg3 = arg3;

	p->retval = (*p->func)(p->arg0, p->arg1, p->arg2, p->arg3);

	/*
	 * Lastly, start looping, waiting for all CPUs to register that they
	 * have completed (bailing if it takes "too long", being loud about
	 * this in the process).
	 */
	i = 0;
	not_done = 1;	/* assume at least one CPU is still busy */
	while (not_done) {
		not_done = 0;
		for (n = 0; n < ncpu; n++) {
			struct cpu_info *cpi = cpus[n];

			if (CPU_READY(cpi))
				continue;

			if ((cpi->flags & CPUFLG_GOTMSG) != 0)
				not_done = 1;
		}
		if (not_done && i++ > 100000) {
			printf("cross_call(cpu%d): couldn't ping cpus:",
			    cpuinfo.ci_cpuid);
			break;
		}
		if (not_done == 0)
			break;
	}
	for (n = 0; n < ncpu; n++) {
		struct cpu_info *cpi = cpus[n];

		if (CPU_READY(cpi))
			continue;
		simple_unlock(&cpi->msg.lock);
		if ((cpi->flags & CPUFLG_GOTMSG) != 0)
			printf(" cpu%d", cpi->ci_cpuid);
	}
	if (not_done)
		printf("\n");
}
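/*
 * Sketch of typical usage, following the locking rule stated above
 * (`the_function' is a hypothetical callback, not part of this file):
 *
 *	LOCK_XPMSG();
 *	cross_call(the_function, arg0, arg1, 0, 0, 0);
 *	UNLOCK_XPMSG();
 *
 * The final argument is the (currently unused) cpuset.
 */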

void
mp_pause_cpus()
{
	int n;

	if (cpus == NULL)
		return;

	LOCK_XPMSG();
	for (n = 0; n < ncpu; n++) {
		struct cpu_info *cpi = cpus[n];

		if (CPU_READY(cpi))
			continue;

		simple_lock(&cpi->msg.lock);
		cpi->msg.tag = XPMSG_PAUSECPU;
		cpi->flags &= ~CPUFLG_GOTMSG;
		raise_ipi_wait_and_unlock(cpi);
	}
	UNLOCK_XPMSG();
}

void
mp_resume_cpus()
{
	int n;

	if (cpus == NULL)
		return;

	for (n = 0; n < ncpu; n++) {
		struct cpu_info *cpi = cpus[n];

		if (cpi == NULL || cpuinfo.mid == cpi->mid)
			continue;

		/* tell it to continue */
		cpi->flags &= ~CPUFLG_PAUSED;
	}
}
#endif /* MULTIPROCESSOR */

/*
 * fpu_init() must be run on associated CPU.
 */
void
fpu_init(sc)
	struct cpu_info *sc;
{
	struct fpstate fpstate;
	int fpuvers;

	/*
	 * Get the FSR and clear any exceptions.  If we do not unload
	 * the queue here and it is left over from a previous crash, we
	 * will panic in the first loadfpstate(), due to a sequence
	 * error, so we need to dump the whole state anyway.
	 *
	 * If there is no FPU, trap.c will advance over all the stores,
	 * so we initialize fs_fsr here.
	 */

	/* 7 is reserved for "none" */
	fpstate.fs_fsr = 7 << FSR_VER_SHIFT;
	savefpstate(&fpstate);
	sc->fpuvers = fpuvers =
		(fpstate.fs_fsr >> FSR_VER_SHIFT) & (FSR_VER >> FSR_VER_SHIFT);

	if (fpuvers == 7) {
		sc->fpu_name = "no";
		return;
	}

	sc->fpupresent = 1;
	sc->fpu_name = fsrtoname(sc->cpu_impl, sc->cpu_vers, fpuvers);
	if (sc->fpu_name == NULL) {
		sprintf(sc->fpu_namebuf, "version 0x%x", fpuvers);
		sc->fpu_name = sc->fpu_namebuf;
	}
}
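/*
 * Example of the naming path above: a SuperSPARC (IU impl 4) reports
 * FPU version 0 in the FSR, which matches the { 1, 0x4, ANY, 0,
 * "on-chip" } entry in fpu_types[] below, so the attach message
 * reads "... on-chip FPU".
 */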

void
cache_print(sc)
	struct cpu_softc *sc;
{
	struct cacheinfo *ci = &sc->sc_cpuinfo->cacheinfo;

	printf("%s: ", sc->sc_dv.dv_xname);

	if (ci->c_totalsize == 0) {
		printf("no cache\n");
		return;
	}

	if (ci->c_split) {
		char *sep = "";

		printf("%s", (ci->c_physical ? "physical " : ""));
		if (ci->ic_totalsize > 0) {
			printf("%s%dK instruction (%d b/l)", sep,
			    ci->ic_totalsize/1024, ci->ic_linesize);
			sep = ", ";
		}
		if (ci->dc_totalsize > 0) {
			printf("%s%dK data (%d b/l)", sep,
			    ci->dc_totalsize/1024, ci->dc_linesize);
		}
	} else if (ci->c_physical) {
		/* combined, physical */
		printf("physical %dK combined cache (%d bytes/line)",
		    ci->c_totalsize/1024, ci->c_linesize);
	} else {
		/* combined, virtual */
		printf("%dK byte write-%s, %d bytes/line, %cw flush",
		    ci->c_totalsize/1024,
		    (ci->c_vactype == VAC_WRITETHROUGH) ? "through" : "back",
		    ci->c_linesize,
		    ci->c_hwflush ? 'h' : 's');
	}

	if (ci->ec_totalsize > 0) {
		printf(", %dK external (%d b/l)",
		    ci->ec_totalsize/1024, ci->ec_linesize);
	}
	printf(": ");
	if (ci->c_enabled)
		printf("cache enabled");
	printf("\n");
}


/*------------*/


void cpumatch_unknown __P((struct cpu_info *, struct module_info *, int));
void cpumatch_sun4 __P((struct cpu_info *, struct module_info *, int));
void cpumatch_sun4c __P((struct cpu_info *, struct module_info *, int));
void cpumatch_ms1 __P((struct cpu_info *, struct module_info *, int));
void cpumatch_viking __P((struct cpu_info *, struct module_info *, int));
void cpumatch_hypersparc __P((struct cpu_info *, struct module_info *, int));
void cpumatch_turbosparc __P((struct cpu_info *, struct module_info *, int));

void getcacheinfo_sun4 __P((struct cpu_info *, int node));
void getcacheinfo_sun4c __P((struct cpu_info *, int node));
void getcacheinfo_obp __P((struct cpu_info *, int node));

void sun4_hotfix __P((struct cpu_info *));
void viking_hotfix __P((struct cpu_info *));
void turbosparc_hotfix __P((struct cpu_info *));
void swift_hotfix __P((struct cpu_info *));

void ms1_mmu_enable __P((void));
void viking_mmu_enable __P((void));
void swift_mmu_enable __P((void));
void hypersparc_mmu_enable __P((void));

void srmmu_get_syncflt __P((void));
void ms1_get_syncflt __P((void));
void viking_get_syncflt __P((void));
void swift_get_syncflt __P((void));
void turbosparc_get_syncflt __P((void));
void hypersparc_get_syncflt __P((void));
void cypress_get_syncflt __P((void));

int srmmu_get_asyncflt __P((u_int *, u_int *));
int hypersparc_get_asyncflt __P((u_int *, u_int *));
int cypress_get_asyncflt __P((u_int *, u_int *));
int no_asyncflt_regs __P((u_int *, u_int *));

struct module_info module_unknown = {
	CPUTYP_UNKNOWN,
	VAC_UNKNOWN,
	cpumatch_unknown
};


void
cpumatch_unknown(sc, mp, node)
	struct cpu_info *sc;
	struct module_info *mp;
	int	node;
{
	panic("Unknown CPU type: "
	      "cpu: impl %d, vers %d; mmu: impl %d, vers %d",
		sc->cpu_impl, sc->cpu_vers,
		sc->mmu_impl, sc->mmu_vers);
}

#if defined(SUN4)
struct module_info module_sun4 = {
	CPUTYP_UNKNOWN,
	VAC_WRITETHROUGH,
	cpumatch_sun4,
	getcacheinfo_sun4,
	sun4_hotfix,
	0,
	sun4_cache_enable,
	0,			/* ncontext set in `match' function */
	0,			/* get_syncflt(); unused in sun4 */
	0,			/* get_asyncflt(); unused in sun4 */
	sun4_cache_flush,
	sun4_vcache_flush_page,
	sun4_vcache_flush_segment,
	sun4_vcache_flush_region,
	sun4_vcache_flush_context,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	noop_cache_flush_all,
	0,
	pmap_zero_page4_4c,
	pmap_copy_page4_4c
};

void
getcacheinfo_sun4(sc, node)
	struct cpu_info *sc;
	int	node;
{
	struct cacheinfo *ci = &sc->cacheinfo;

	switch (sc->cpu_type) {
	case CPUTYP_4_100:
		ci->c_vactype = VAC_NONE;
		ci->c_totalsize = 0;
		ci->c_hwflush = 0;
		ci->c_linesize = 0;
		ci->c_l2linesize = 0;
		ci->c_split = 0;
		ci->c_nlines = 0;

		/* Override cache flush functions */
		sc->sp_cache_flush = noop_cache_flush;
		sc->sp_vcache_flush_page = noop_vcache_flush_page;
		sc->sp_vcache_flush_segment = noop_vcache_flush_segment;
		sc->sp_vcache_flush_region = noop_vcache_flush_region;
		sc->sp_vcache_flush_context = noop_vcache_flush_context;
		break;
	case CPUTYP_4_200:
		ci->c_vactype = VAC_WRITEBACK;
		ci->c_totalsize = 128*1024;
		ci->c_hwflush = 0;
		ci->c_linesize = 16;
		ci->c_l2linesize = 4;
		ci->c_split = 0;
		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
		break;
	case CPUTYP_4_300:
		ci->c_vactype = VAC_WRITEBACK;
		ci->c_totalsize = 128*1024;
		ci->c_hwflush = 0;
		ci->c_linesize = 16;
		ci->c_l2linesize = 4;
		ci->c_split = 0;
		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
		sc->flags |= CPUFLG_SUN4CACHEBUG;
		break;
	case CPUTYP_4_400:
		ci->c_vactype = VAC_WRITEBACK;
		ci->c_totalsize = 128 * 1024;
		ci->c_hwflush = 0;
		ci->c_linesize = 32;
		ci->c_l2linesize = 5;
		ci->c_split = 0;
		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
		break;
	}
}
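/*
 * Sanity check on the geometry above, e.g. for the 4/300: a 128K
 * write-back cache with 16-byte lines has 128*1024 >> 4 == 8192
 * lines; the line count is the total size divided by the line size,
 * hence the right shift by c_l2linesize.
 */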

struct	idprom sun4_idprom_store;
void	getidprom __P((struct idprom *, int size));

void
cpumatch_sun4(sc, mp, node)
	struct cpu_info *sc;
	struct module_info *mp;
	int	node;
{
	extern struct idprom *idprom;
	/*
	 * XXX - for e.g. myetheraddr(), which in sun4 can be called
	 *	 before the clock attaches.
	 */
	idprom = &sun4_idprom_store;

	getidprom(&sun4_idprom_store, sizeof(struct idprom));
	switch (sun4_idprom_store.id_machine) {
	case ID_SUN4_100:
		sc->cpu_type = CPUTYP_4_100;
		sc->classlvl = 100;
		sc->mmu_ncontext = 8;
		sc->mmu_nsegment = 256;
/*XXX*/		sc->hz = 14280000;
		break;
	case ID_SUN4_200:
		sc->cpu_type = CPUTYP_4_200;
		sc->classlvl = 200;
		sc->mmu_nsegment = 512;
		sc->mmu_ncontext = 16;
/*XXX*/		sc->hz = 16670000;
		break;
	case ID_SUN4_300:
		sc->cpu_type = CPUTYP_4_300;
		sc->classlvl = 300;
		sc->mmu_nsegment = 256;
		sc->mmu_ncontext = 16;
/*XXX*/		sc->hz = 25000000;
		break;
	case ID_SUN4_400:
		sc->cpu_type = CPUTYP_4_400;
		sc->classlvl = 400;
		sc->mmu_nsegment = 1024;
		sc->mmu_ncontext = 64;
		sc->mmu_nregion = 256;
/*XXX*/		sc->hz = 33000000;
		sc->sun4_mmu3l = 1;
		break;
	}
}
#endif /* SUN4 */

#if defined(SUN4C)
struct module_info module_sun4c = {
	CPUTYP_UNKNOWN,
	VAC_WRITETHROUGH,
	cpumatch_sun4c,
	getcacheinfo_sun4c,
	sun4_hotfix,
	0,
	sun4_cache_enable,
	0,			/* ncontext set in `match' function */
	0,			/* get_syncflt(); unused in sun4c */
	0,			/* get_asyncflt(); unused in sun4c */
	sun4_cache_flush,
	sun4_vcache_flush_page,
	sun4_vcache_flush_segment,
	sun4_vcache_flush_region,
	sun4_vcache_flush_context,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	noop_cache_flush_all,
	0,
	pmap_zero_page4_4c,
	pmap_copy_page4_4c
};

void
cpumatch_sun4c(sc, mp, node)
	struct cpu_info *sc;
	struct module_info *mp;
	int	node;
{
	int	rnode;

	rnode = findroot();
	sc->mmu_npmeg = sc->mmu_nsegment =
		PROM_getpropint(rnode, "mmu-npmg", 128);
	sc->mmu_ncontext = PROM_getpropint(rnode, "mmu-nctx", 8);

	/* Get clock frequency */
	sc->hz = PROM_getpropint(rnode, "clock-frequency", 0);
}

void
getcacheinfo_sun4c(sc, node)
	struct cpu_info *sc;
	int node;
{
	struct cacheinfo *ci = &sc->cacheinfo;
	int i, l;

	if (node == 0)
		/* Bootstrapping */
		return;

	/* Sun4c's have only virtually-addressed caches */
	ci->c_physical = 0;
	ci->c_totalsize = PROM_getpropint(node, "vac-size", 65536);
	/*
	 * Note: vac-hwflush is spelled with an underscore
	 * on the 4/75s.
	 */
	ci->c_hwflush =
		PROM_getpropint(node, "vac_hwflush", 0) |
		PROM_getpropint(node, "vac-hwflush", 0);

	/* Compute log2(linesize); the line size must be a power of two */
	ci->c_linesize = l = PROM_getpropint(node, "vac-linesize", 16);
	for (i = 0; (1 << i) < l; i++)
		/* void */;
	if ((1 << i) != l)
		panic("bad cache line size %d", l);
	ci->c_l2linesize = i;
	ci->c_associativity = 1;
	ci->c_nlines = ci->c_totalsize >> i;

	ci->c_vactype = VAC_WRITETHROUGH;

	/*
	 * Machines with "buserr-type" 1 have a bug in the cache
	 * chip that affects traps.  (I wish I knew more about this
	 * mysterious buserr-type variable....)
	 */
	if (PROM_getpropint(node, "buserr-type", 0) == 1)
		sc->flags |= CPUFLG_SUN4CACHEBUG;
}
#endif /* SUN4C */

void
sun4_hotfix(sc)
	struct cpu_info *sc;
{

	if ((sc->flags & CPUFLG_SUN4CACHEBUG) != 0) {
		kvm_uncache((caddr_t)trapbase, 1);
		printf(": cache chip bug; trap page uncached");
	}

	/* Use the hardware-assisted page flush routine, if present */
	if (sc->cacheinfo.c_hwflush)
		sc->vcache_flush_page = sun4_vcache_flush_page_hw;
}

#if defined(SUN4M)
void
getcacheinfo_obp(sc, node)
	struct	cpu_info *sc;
	int	node;
{
	struct cacheinfo *ci = &sc->cacheinfo;
	int i, l;

	if (node == 0)
		/* Bootstrapping */
		return;

	/*
	 * Determine the Sun4m cache organization.
	 */
	ci->c_physical = node_has_property(node, "cache-physical?");

	if (PROM_getpropint(node, "ncaches", 1) == 2)
		ci->c_split = 1;
	else
		ci->c_split = 0;

	/* hwflush is used only by sun4/4c code */
	ci->c_hwflush = 0;

	if (node_has_property(node, "icache-nlines") &&
	    node_has_property(node, "dcache-nlines") &&
	    ci->c_split) {
		/* Harvard architecture: get I and D cache sizes */
		ci->ic_nlines = PROM_getpropint(node, "icache-nlines", 0);
		ci->ic_linesize = l =
			PROM_getpropint(node, "icache-line-size", 0);
		for (i = 0; (1 << i) < l && l; i++)
			/* void */;
		if ((1 << i) != l && l)
			panic("bad icache line size %d", l);
		ci->ic_l2linesize = i;
		ci->ic_associativity =
			PROM_getpropint(node, "icache-associativity", 1);
		ci->ic_totalsize = l * ci->ic_nlines * ci->ic_associativity;

		ci->dc_nlines = PROM_getpropint(node, "dcache-nlines", 0);
		ci->dc_linesize = l =
			PROM_getpropint(node, "dcache-line-size", 0);
		for (i = 0; (1 << i) < l && l; i++)
			/* void */;
		if ((1 << i) != l && l)
			panic("bad dcache line size %d", l);
		ci->dc_l2linesize = i;
		ci->dc_associativity =
			PROM_getpropint(node, "dcache-associativity", 1);
		ci->dc_totalsize = l * ci->dc_nlines * ci->dc_associativity;

		ci->c_l2linesize = min(ci->ic_l2linesize, ci->dc_l2linesize);
		ci->c_linesize = min(ci->ic_linesize, ci->dc_linesize);
		ci->c_totalsize = ci->ic_totalsize + ci->dc_totalsize;
	} else {
		/* unified I/D cache */
		ci->c_nlines = PROM_getpropint(node, "cache-nlines", 128);
		ci->c_linesize = l =
			PROM_getpropint(node, "cache-line-size", 0);
		for (i = 0; (1 << i) < l && l; i++)
			/* void */;
		if ((1 << i) != l && l)
			panic("bad cache line size %d", l);
		ci->c_l2linesize = i;
		ci->c_totalsize = l *
			ci->c_nlines *
			PROM_getpropint(node, "cache-associativity", 1);
	}

	if (node_has_property(node, "ecache-nlines")) {
		/* we have an L2 "e"xternal cache */
		ci->ec_nlines = PROM_getpropint(node, "ecache-nlines", 32768);
		ci->ec_linesize = l = PROM_getpropint(node, "ecache-line-size", 0);
		for (i = 0; (1 << i) < l && l; i++)
			/* void */;
		if ((1 << i) != l && l)
			panic("bad ecache line size %d", l);
		ci->ec_l2linesize = i;
		ci->ec_associativity =
			PROM_getpropint(node, "ecache-associativity", 1);
		ci->ec_totalsize = l * ci->ec_nlines * ci->ec_associativity;
	}
	if (ci->c_totalsize == 0)
		printf("warning: couldn't identify cache\n");
}
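/*
 * For instance (hypothetical property values): a split-cache module
 * reporting icache-nlines=512, icache-line-size=32 and
 * icache-associativity=1 yields ic_l2linesize = 5 from the log2 loop
 * and ic_totalsize = 32 * 512 * 1 = 16K.
 */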

/*
 * We use the max. number of contexts on the micro and
 * hyper SPARCs. The SuperSPARC would let us use up to 65536
 * contexts (by powers of 2), but we keep it at 4096 since
 * the table must be aligned to #context*4. With 4K contexts,
 * we waste at most 16K of memory. Note that the context
 * table is *always* page-aligned, so there can always be
 * 1024 contexts without sacrificing memory space (given
 * that the chip supports 1024 contexts).
 *
 * Currently known limits: MS1=64, MS2=256, HS=4096, SS=65536
 * 	some old SS's=4096
 */

/* TI Microsparc I */
struct module_info module_ms1 = {
	CPUTYP_MS1,
	VAC_NONE,
	cpumatch_ms1,
	getcacheinfo_obp,
	0,
	ms1_mmu_enable,
	ms1_cache_enable,
	64,
	ms1_get_syncflt,
	no_asyncflt_regs,
	ms1_cache_flush,
	noop_vcache_flush_page,
	noop_vcache_flush_segment,
	noop_vcache_flush_region,
	noop_vcache_flush_context,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	ms1_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};

void
cpumatch_ms1(sc, mp, node)
	struct cpu_info *sc;
	struct module_info *mp;
	int	node;
{

	/*
	 * Turn off page zeroing in the idle loop; an unidentified
	 * bug causes (very sporadic) user process corruption.
	 */
	vm_page_zero_enable = 0;
}

void
ms1_mmu_enable()
{
	/* nothing to do */
}

/* TI Microsparc II */
struct module_info module_ms2 = {		/* UNTESTED */
	CPUTYP_MS2,
	VAC_WRITETHROUGH,
	0,
	getcacheinfo_obp,
	0,
	0,
	swift_cache_enable,
	256,
	srmmu_get_syncflt,
	srmmu_get_asyncflt,
	srmmu_cache_flush,
	srmmu_vcache_flush_page,
	srmmu_vcache_flush_segment,
	srmmu_vcache_flush_region,
	srmmu_vcache_flush_context,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	srmmu_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};


struct module_info module_swift = {
	CPUTYP_MS2,
	VAC_WRITETHROUGH,
	0,
	getcacheinfo_obp,
	swift_hotfix,
	0,
	swift_cache_enable,
	256,
	swift_get_syncflt,
	no_asyncflt_regs,
	srmmu_cache_flush,
	srmmu_vcache_flush_page,
	srmmu_vcache_flush_segment,
	srmmu_vcache_flush_region,
	srmmu_vcache_flush_context,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	srmmu_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};

void
swift_hotfix(sc)
	struct cpu_info *sc;
{
	int pcr = lda(SRMMU_PCR, ASI_SRMMU);

	/* Turn off branch prediction */
	pcr &= ~SWIFT_PCR_BF;
	sta(SRMMU_PCR, ASI_SRMMU, pcr);
}

void
swift_mmu_enable()
{
	/* nothing to do */
}

struct module_info module_viking = {
	CPUTYP_UNKNOWN,		/* set in cpumatch() */
	VAC_NONE,
	cpumatch_viking,
	getcacheinfo_obp,
	viking_hotfix,
	viking_mmu_enable,
	viking_cache_enable,
	4096,
	viking_get_syncflt,
	no_asyncflt_regs,
	/* supersparcs use cached DVMA, no need to flush */
	noop_cache_flush,
	noop_vcache_flush_page,
	noop_vcache_flush_segment,
	noop_vcache_flush_region,
	noop_vcache_flush_context,
	viking_pcache_flush_page,
	noop_pure_vcache_flush,
	noop_cache_flush_all,
	viking_memerr,
	pmap_zero_page4m,
	pmap_copy_page4m
};

void
cpumatch_viking(sc, mp, node)
	struct cpu_info *sc;
	struct module_info *mp;
	int	node;
{
	if (node == 0)
		viking_hotfix(sc);
}

void
viking_hotfix(sc)
	struct cpu_info *sc;
{
	int pcr = lda(SRMMU_PCR, ASI_SRMMU);

	/* Test if we're directly on the MBus */
	if ((pcr & VIKING_PCR_MB) == 0) {
		sc->mxcc = 1;
		sc->flags |= CPUFLG_CACHE_MANDATORY;
		sc->zero_page = pmap_zero_page_viking_mxcc;
		sc->copy_page = pmap_copy_page_viking_mxcc;
		/*
		 * Ok to cache PTEs; set the flag here, so we don't
		 * uncache in pmap_bootstrap().
		 */
		if ((pcr & VIKING_PCR_TC) == 0)
			printf("[viking: PCR_TC is off]");
		else
			sc->flags |= CPUFLG_CACHEPAGETABLES;
	} else {
		sc->cache_flush = viking_cache_flush;
	}

	/* XXX! */
	if (sc->mxcc)
		sc->cpu_type = CPUTYP_SS1_MBUS_MXCC;
	else
		sc->cpu_type = CPUTYP_SS1_MBUS_NOMXCC;
}

void
viking_mmu_enable()
{
	int pcr;

	pcr = lda(SRMMU_PCR, ASI_SRMMU);

	if (cpuinfo.mxcc) {
		if ((pcr & VIKING_PCR_TC) == 0) {
			printf("[viking: turn on PCR_TC]");
		}
		pcr |= VIKING_PCR_TC;
		cpuinfo.flags |= CPUFLG_CACHEPAGETABLES;
	} else
		pcr &= ~VIKING_PCR_TC;
	sta(SRMMU_PCR, ASI_SRMMU, pcr);
}


/* ROSS Hypersparc */
struct module_info module_hypersparc = {
	CPUTYP_UNKNOWN,
	VAC_WRITEBACK,
	cpumatch_hypersparc,
	getcacheinfo_obp,
	0,
	hypersparc_mmu_enable,
	hypersparc_cache_enable,
	4096,
	hypersparc_get_syncflt,
	hypersparc_get_asyncflt,
	srmmu_cache_flush,
	srmmu_vcache_flush_page,
	srmmu_vcache_flush_segment,
	srmmu_vcache_flush_region,
	srmmu_vcache_flush_context,
	noop_pcache_flush_page,
	hypersparc_pure_vcache_flush,
	hypersparc_cache_flush_all,
	hypersparc_memerr,
	pmap_zero_page4m,
	pmap_copy_page4m
};

void
cpumatch_hypersparc(sc, mp, node)
	struct cpu_info *sc;
	struct module_info *mp;
	int	node;
{
	sc->cpu_type = CPUTYP_HS_MBUS;/*XXX*/
	if (node == 0)
		sta(0, ASI_HICACHECLR, 0);
}

void
hypersparc_mmu_enable()
{
#if 0
	int pcr;

	pcr = lda(SRMMU_PCR, ASI_SRMMU);
	pcr |= HYPERSPARC_PCR_C;
	pcr &= ~HYPERSPARC_PCR_CE;

	sta(SRMMU_PCR, ASI_SRMMU, pcr);
#endif
}

/* Cypress 605 */
struct module_info module_cypress = {
	CPUTYP_CYPRESS,
	VAC_WRITEBACK,
	0,
	getcacheinfo_obp,
	0,
	0,
	cypress_cache_enable,
	4096,
	cypress_get_syncflt,
	cypress_get_asyncflt,
	srmmu_cache_flush,
	srmmu_vcache_flush_page,
	srmmu_vcache_flush_segment,
	srmmu_vcache_flush_region,
	srmmu_vcache_flush_context,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	cypress_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};

/* Fujitsu Turbosparc */
struct module_info module_turbosparc = {
	CPUTYP_MS2,
	VAC_WRITEBACK,
	cpumatch_turbosparc,
	getcacheinfo_obp,
	turbosparc_hotfix,
	0,
	turbosparc_cache_enable,
	256,
	turbosparc_get_syncflt,
	no_asyncflt_regs,
	srmmu_cache_flush,
	srmmu_vcache_flush_page,
	srmmu_vcache_flush_segment,
	srmmu_vcache_flush_region,
	srmmu_vcache_flush_context,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	srmmu_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};

void
cpumatch_turbosparc(sc, mp, node)
	struct cpu_info *sc;
	struct module_info *mp;
	int	node;
{
	int i;

	if (node == 0 || sc->master == 0)
		return;

	i = getpsr();
	if (sc->cpu_vers == IU_VERS(i))
		return;

	/*
	 * A cloaked Turbosparc: clear any items in cpuinfo that
	 * might have been set to uS2 versions during bootstrap.
	 */
	sc->cpu_name = 0;
	sc->mmu_ncontext = 0;
	sc->cpu_type = 0;
	sc->cacheinfo.c_vactype = 0;
	sc->hotfix = 0;
	sc->mmu_enable = 0;
	sc->cache_enable = 0;
	sc->get_syncflt = 0;
	sc->sp_cache_flush = 0;
	sc->sp_vcache_flush_page = 0;
	sc->sp_vcache_flush_segment = 0;
	sc->sp_vcache_flush_region = 0;
	sc->sp_vcache_flush_context = 0;
	sc->pcache_flush_page = 0;
}

void
turbosparc_hotfix(sc)
	struct cpu_info *sc;
{
	int pcf;

	pcf = lda(SRMMU_PCFG, ASI_SRMMU);
	if (pcf & TURBOSPARC_PCFG_US2) {
		/* Turn off uS2 emulation bit */
		pcf &= ~TURBOSPARC_PCFG_US2;
		sta(SRMMU_PCFG, ASI_SRMMU, pcf);
	}
}
#endif /* SUN4M */


#define	ANY	-1	/* match any version */

struct cpu_conf {
	int	arch;
	int	cpu_impl;
	int	cpu_vers;
	int	mmu_impl;
	int	mmu_vers;
	char	*name;
	struct	module_info *minfo;
} cpu_conf[] = {
#if defined(SUN4)
	{ CPU_SUN4, 0, 0, ANY, ANY, "MB86900/1A or L64801", &module_sun4 },
	{ CPU_SUN4, 1, 0, ANY, ANY, "L64811", &module_sun4 },
	{ CPU_SUN4, 1, 1, ANY, ANY, "CY7C601", &module_sun4 },
#endif

#if defined(SUN4C)
	{ CPU_SUN4C, 0, 0, ANY, ANY, "MB86900/1A or L64801", &module_sun4c },
	{ CPU_SUN4C, 1, 0, ANY, ANY, "L64811", &module_sun4c },
	{ CPU_SUN4C, 1, 1, ANY, ANY, "CY7C601", &module_sun4c },
	{ CPU_SUN4C, 9, 0, ANY, ANY, "W8601/8701 or MB86903", &module_sun4c },
#endif

#if defined(SUN4M)
	{ CPU_SUN4M, 0, 4, 0, 4, "MB86904", &module_swift },
	{ CPU_SUN4M, 0, 5, 0, 5, "MB86907", &module_turbosparc },
	{ CPU_SUN4M, 1, 1, 1, 0, "CY7C601/604", &module_cypress },
	{ CPU_SUN4M, 1, 1, 1, 0xb, "CY7C601/605 (v.b)", &module_cypress },
	{ CPU_SUN4M, 1, 1, 1, 0xc, "CY7C601/605 (v.c)", &module_cypress },
	{ CPU_SUN4M, 1, 1, 1, 0xf, "CY7C601/605 (v.f)", &module_cypress },
	{ CPU_SUN4M, 1, 3, 1, ANY, "CY7C611", &module_cypress },
	{ CPU_SUN4M, 1, 0xe, 1, 7, "RT620/625", &module_hypersparc },
	{ CPU_SUN4M, 1, 0xf, 1, 7, "RT620/625", &module_hypersparc },
	{ CPU_SUN4M, 4, 0, 0, ANY, "TMS390Z50 v0 or TMS390Z55", &module_viking },
	{ CPU_SUN4M, 4, 1, 0, ANY, "TMS390Z50 v1", &module_viking },
	{ CPU_SUN4M, 4, 1, 4, ANY, "TMS390S10", &module_ms1 },
	{ CPU_SUN4M, 4, 2, 0, ANY, "TI_MS2", &module_ms2 },
	{ CPU_SUN4M, 4, 3, ANY, ANY, "TI_4_3", &module_viking },
	{ CPU_SUN4M, 4, 4, ANY, ANY, "TI_4_4", &module_viking },
#endif

	{ ANY, ANY, ANY, ANY, ANY, "Unknown", &module_unknown }
};
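/*
 * NB: as with fpu_types[] below, table order matters: getcpuinfo()
 * scans cpu_conf[] top to bottom and takes the first matching entry,
 * so specific impl/vers values must appear before ANY wildcards and
 * the catch-all "Unknown" entry must remain last.
 */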

void
getcpuinfo(sc, node)
	struct cpu_info *sc;
	int	node;
{
	struct cpu_conf *mp;
	int i;
	int cpu_impl, cpu_vers;
	int mmu_impl, mmu_vers;

	/*
	 * Set up main criteria for selection from the CPU configuration
	 * table: the CPU implementation/version fields from the PSR
	 * register, and -- on sun4m machines -- the MMU
	 * implementation/version from the SCR register.
	 */
	if (sc->master) {
		i = getpsr();
		if (node == 0 ||
		    (cpu_impl =
		     PROM_getpropint(node, "psr-implementation", -1)) == -1)
			cpu_impl = IU_IMPL(i);

		if (node == 0 ||
		    (cpu_vers = PROM_getpropint(node, "psr-version", -1)) == -1)
			cpu_vers = IU_VERS(i);

		if (CPU_ISSUN4M) {
			i = lda(SRMMU_PCR, ASI_SRMMU);
			if (node == 0 ||
			    (mmu_impl =
			     PROM_getpropint(node, "implementation", -1)) == -1)
				mmu_impl = SRMMU_IMPL(i);

			if (node == 0 ||
			    (mmu_vers = PROM_getpropint(node, "version", -1)) == -1)
				mmu_vers = SRMMU_VERS(i);
		} else {
			mmu_impl = ANY;
			mmu_vers = ANY;
		}
	} else {
		/*
		 * Get CPU version/implementation from ROM. If not
		 * available, assume same as boot CPU.
		 */
		cpu_impl = PROM_getpropint(node, "psr-implementation", -1);
		if (cpu_impl == -1)
			cpu_impl = cpuinfo.cpu_impl;
		cpu_vers = PROM_getpropint(node, "psr-version", -1);
		if (cpu_vers == -1)
			cpu_vers = cpuinfo.cpu_vers;

		/* Get MMU version/implementation from ROM always */
		mmu_impl = PROM_getpropint(node, "implementation", -1);
		mmu_vers = PROM_getpropint(node, "version", -1);
	}

	for (mp = cpu_conf; ; mp++) {
		if (mp->arch != cputyp && mp->arch != ANY)
			continue;

#define MATCH(x)	(mp->x == x || mp->x == ANY)
		if (!MATCH(cpu_impl) ||
		    !MATCH(cpu_vers) ||
		    !MATCH(mmu_impl) ||
		    !MATCH(mmu_vers))
			continue;
#undef MATCH

		/*
		 * Got CPU type.
		 */
		sc->cpu_impl = cpu_impl;
		sc->cpu_vers = cpu_vers;
		sc->mmu_impl = mmu_impl;
		sc->mmu_vers = mmu_vers;

		if (mp->minfo->cpu_match) {
			/* Additional fixups */
			mp->minfo->cpu_match(sc, mp->minfo, node);
		}
		if (sc->cpu_name == 0)
			sc->cpu_name = mp->name;

		if (sc->mmu_ncontext == 0)
			sc->mmu_ncontext = mp->minfo->ncontext;

		if (sc->cpu_type == 0)
			sc->cpu_type = mp->minfo->cpu_type;

		if (sc->cacheinfo.c_vactype == VAC_UNKNOWN)
			sc->cacheinfo.c_vactype = mp->minfo->vactype;

		mp->minfo->getcacheinfo(sc, node);

		if (node && sc->hz == 0 && !CPU_ISSUN4/*XXX*/) {
			sc->hz = PROM_getpropint(node, "clock-frequency", 0);
			if (sc->hz == 0) {
				/*
				 * Try to find it in the OpenPROM root...
				 */
				sc->hz = PROM_getpropint(findroot(),
						    "clock-frequency", 0);
			}
		}

		/*
		 * Copy CPU/MMU/Cache specific routines into cpu_info.
		 */
#define MPCOPY(x)	if (sc->x == 0) sc->x = mp->minfo->x;
		MPCOPY(hotfix);
		MPCOPY(mmu_enable);
		MPCOPY(cache_enable);
		MPCOPY(get_syncflt);
		MPCOPY(get_asyncflt);
		MPCOPY(sp_cache_flush);
		MPCOPY(sp_vcache_flush_page);
		MPCOPY(sp_vcache_flush_segment);
		MPCOPY(sp_vcache_flush_region);
		MPCOPY(sp_vcache_flush_context);
		MPCOPY(pcache_flush_page);
		MPCOPY(pure_vcache_flush);
		MPCOPY(cache_flush_all);
		MPCOPY(memerr);
		MPCOPY(zero_page);
		MPCOPY(copy_page);
#undef MPCOPY
		/*
		 * Use the single-processor cache flush functions until
		 * all CPUs are initialized.
		 */
		sc->cache_flush = sc->sp_cache_flush;
		sc->vcache_flush_page = sc->sp_vcache_flush_page;
		sc->vcache_flush_segment = sc->sp_vcache_flush_segment;
		sc->vcache_flush_region = sc->sp_vcache_flush_region;
		sc->vcache_flush_context = sc->sp_vcache_flush_context;
		return;
	}
	panic("Out of CPUs");
}

/*
 * The following tables convert <IU impl, IU version, FPU version> triples
 * into names for the CPU and FPU chip.  In most cases we do not need to
 * inspect the FPU version to name the IU chip, but there is one exception
 * (for Tsunami), and this makes the tables the same.
 *
 * The table contents (and much of the structure here) are from Guy Harris.
 *
 */
struct info {
	int	valid;
	int	iu_impl;
	int	iu_vers;
	int	fpu_vers;
	char	*name;
};

/* NB: table order matters here; specific numbers must appear before ANY. */
static struct info fpu_types[] = {
	/*
	 * Vendor 0, IU Fujitsu0.
	 */
	{ 1, 0x0, ANY, 0, "MB86910 or WTL1164/5" },
	{ 1, 0x0, ANY, 1, "MB86911 or WTL1164/5" },
	{ 1, 0x0, ANY, 2, "L64802 or ACT8847" },
	{ 1, 0x0, ANY, 3, "WTL3170/2" },
	{ 1, 0x0, 4,   4, "on-chip" },		/* Swift */
	{ 1, 0x0, 5,   5, "on-chip" },		/* TurboSparc */
	{ 1, 0x0, ANY, 4, "L64804" },

	/*
	 * Vendor 1, IU ROSS0/1 or Pinnacle.
	 */
	{ 1, 0x1, 0xf, 0, "on-chip" },		/* Pinnacle */
	{ 1, 0x1, 0xe, 0, "on-chip" },		/* Hypersparc RT 625/626 */
	{ 1, 0x1, ANY, 0, "L64812 or ACT8847" },
	{ 1, 0x1, ANY, 1, "L64814" },
	{ 1, 0x1, ANY, 2, "TMS390C602A" },
	{ 1, 0x1, ANY, 3, "RT602 or WTL3171" },

	/*
	 * Vendor 2, IU BIT0.
	 */
	{ 1, 0x2, ANY, 0, "B5010 or B5110/20 or B5210" },

	/*
	 * Vendor 4, Texas Instruments.
	 */
	{ 1, 0x4, ANY, 0, "on-chip" },		/* Viking */
	{ 1, 0x4, ANY, 4, "on-chip" },		/* Tsunami */

	/*
	 * Vendor 5, IU Matsushita0.
	 */
	{ 1, 0x5, ANY, 0, "on-chip" },

	/*
	 * Vendor 9, Weitek.
	 */
	{ 1, 0x9, ANY, 3, "on-chip" },

	{ 0 }
};

static char *
fsrtoname(impl, vers, fver)
	int impl, vers, fver;
{
	struct info *p;

	for (p = fpu_types; p->valid; p++) {
		if (p->iu_impl == impl &&
		    (p->iu_vers == vers || p->iu_vers == ANY) &&
		    (p->fpu_vers == fver))
			return (p->name);
	}
	return (NULL);
}

#ifdef DDB

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

#include "ioconf.h"

void cpu_debug_dump(void);

/*
 * Dump cpu information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("addr		cpuid	flags	curproc		fpproc\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p	%d	%x	%10p	%10p\n",
		    ci,
		    ci->ci_cpuid,
		    ci->flags,
		    ci->ci_curproc,
		    ci->fpproc);
	}
}
#endif
1808