/*	$NetBSD: cpu.c,v 1.248 2014/07/25 17:21:32 nakayama Exp $ */

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.c	8.5 (Berkeley) 11/23/93
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.248 2014/07/25 17:21:32 nakayama Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"
#include "opt_sparc_arch.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/evcnt.h>
#include <sys/xcall.h>
#include <sys/ipi.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <machine/promlib.h>
#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/ctlreg.h>
#include <machine/trap.h>
#include <machine/pcb.h>
#include <machine/pmap.h>

#if defined(MULTIPROCESSOR) && defined(DDB)
#include <ddb/db_output.h>
#include <machine/db_machdep.h>
#endif

#include <sparc/sparc/cache.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/memreg.h>
#if defined(SUN4D)
#include <sparc/sparc/cpuunitvar.h>
#endif

#ifdef DEBUG
#ifndef DEBUG_XCALL
#define DEBUG_XCALL 0
#endif
int	debug_xcall = DEBUG_XCALL;
#else
#define debug_xcall 0
#endif
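
/*
 * Usage note: in a kernel built with DEBUG, the xcall diagnostics below
 * can be enabled by defining DEBUG_XCALL to a non-zero value at build
 * time; debug_xcall is then an ordinary variable, so it can also be
 * toggled at run time (e.g. from ddb).
 */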

struct cpu_softc {
	device_t sc_dev;
	struct cpu_info	*sc_cpuinfo;
};

/* The following are used externally (sysctl_hw). */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
int	cpu_arch;			/* sparc architecture version */
extern char machine_model[];

int	sparc_ncpus;			/* # of CPUs detected by PROM */
struct cpu_info *cpus[_MAXNCPU+1];	/* we only support 4 CPUs. */

/* The CPU configuration driver. */
static void cpu_mainbus_attach(device_t, device_t, void *);
int  cpu_mainbus_match(device_t, cfdata_t, void *);

CFATTACH_DECL_NEW(cpu_mainbus, sizeof(struct cpu_softc),
    cpu_mainbus_match, cpu_mainbus_attach, NULL, NULL);

#if defined(SUN4D)
static int cpu_cpuunit_match(device_t, cfdata_t, void *);
static void cpu_cpuunit_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cpu_cpuunit, sizeof(struct cpu_softc),
    cpu_cpuunit_match, cpu_cpuunit_attach, NULL, NULL);
#endif /* SUN4D */

static void cpu_init_evcnt(struct cpu_info *cpi);
static void cpu_attach(struct cpu_softc *, int, int);

static const char *fsrtoname(int, int, int);
void cache_print(struct cpu_softc *);
void cpu_setup(void);
void fpu_init(struct cpu_info *);

#define	IU_IMPL(psr)	((u_int)(psr) >> 28)
#define	IU_VERS(psr)	(((psr) >> 24) & 0xf)

#define SRMMU_IMPL(mmusr)	((u_int)(mmusr) >> 28)
#define SRMMU_VERS(mmusr)	(((mmusr) >> 24) & 0xf)
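
/*
 * Illustrative only (hypothetical register value): a %psr of 0x44000083
 * would decode as IU_IMPL(psr) == 4 and IU_VERS(psr) == 4, which the
 * cpu_conf[] table at the end of this file lists as "TI_4_4".  The
 * SRMMU_IMPL()/SRMMU_VERS() macros decode the MMU control register the
 * same way.
 */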

int bootmid;		/* Module ID of boot CPU */

#ifdef notdef
/*
 * IU implementations are parceled out to vendors (with some slight
 * glitches).  Printing these is cute but takes too much space.
 */
static char *iu_vendor[16] = {
	"Fujitsu",	/* and also LSI Logic */
	"ROSS",		/* ROSS (ex-Cypress) */
	"BIT",
	"LSIL",		/* LSI Logic finally got their own */
	"TI",		/* Texas Instruments */
	"Matsushita",
	"Philips",
	"Harvest",	/* Harvest VLSI Design Center */
	"SPEC",		/* Systems and Processes Engineering Corporation */
	"Weitek",
	"vendor#10",
	"vendor#11",
	"vendor#12",
	"vendor#13",
	"vendor#14",
	"vendor#15"
};
#endif

#if defined(MULTIPROCESSOR)
u_int	cpu_ready_mask;			/* the set of CPUs marked as READY */
void cpu_spinup(struct cpu_info *);
static void cpu_attach_non_boot(struct cpu_softc *, struct cpu_info *, int);

int go_smp_cpus = 0;	/* non-primary CPUs wait for this to go */

/*
 * This must be locked around all message transactions to ensure only
 * one CPU is generating them.
 */
static kmutex_t xpmsg_mutex;

#endif /* MULTIPROCESSOR */
/*
 * 4/110 comment: the 4/110 chops off the top 4 bits of an OBIO address.
 *	this confuses autoconf.  for example, if you try and map
 *	0xfe000000 in obio space on a 4/110 it actually maps 0x0e000000.
 *	this is easy to verify with the PROM.   this causes problems
 *	with devices like "esp0 at obio0 addr 0xfa000000" because the
 *	4/110 treats it as "esp0 at obio0 addr 0x0a000000" which is the
 *	address of the 4/110's "sw0" scsi chip.   the same thing happens
 *	between zs1 and zs2.    since the sun4 line is "closed" and
 *	we know all the "obio" devices that will ever be on it we just
 *	put in some special case "if"'s in the match routines of esp,
 *	dma, and zs.
 */

int
cpu_mainbus_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mainbus_attach_args *ma = aux;

	return (strcmp(cf->cf_name, ma->ma_name) == 0);
}

static void
cpu_mainbus_attach(device_t parent, device_t self, void *aux)
{
	struct mainbus_attach_args *ma = aux;
	struct { uint32_t va; uint32_t size; } *mbprop = NULL;
	struct openprom_addr *rrp = NULL;
	struct cpu_info *cpi;
	struct cpu_softc *sc;
	int mid, node;
	int error, n;

	node = ma->ma_node;
	mid = (node != 0) ? prom_getpropint(node, "mid", 0) : 0;
	sc = device_private(self);
	sc->sc_dev = self;
	cpu_attach(sc, node, mid);

	cpi = sc->sc_cpuinfo;
	if (cpi == NULL)
		return;

	/*
	 * Map CPU mailbox if available
	 */
	if (node != 0 && (error = prom_getprop(node, "mailbox-virtual",
					sizeof(*mbprop),
					&n, &mbprop)) == 0) {
		cpi->mailbox = mbprop->va;
		free(mbprop, M_DEVBUF);
	} else if (node != 0 && (error = prom_getprop(node, "mailbox",
					sizeof(struct openprom_addr),
					&n, &rrp)) == 0) {
		/* XXX - map cached/uncached? If cached, deal with
		 *	 cache congruency!
		 */
		if (rrp[0].oa_space == 0)
			printf("%s: mailbox in mem space\n", device_xname(self));

		if (bus_space_map(ma->ma_bustag,
				BUS_ADDR(rrp[0].oa_space, rrp[0].oa_base),
				rrp[0].oa_size,
				BUS_SPACE_MAP_LINEAR,
				&cpi->mailbox) != 0)
			panic("%s: can't map CPU mailbox", device_xname(self));
		free(rrp, M_DEVBUF);
	}

	/*
	 * Map Module Control Space if available
	 */
	if (cpi->mxcc == 0)
		/* We only know what it means on MXCCs */
		return;

	rrp = NULL;
	if (node == 0 || (error = prom_getprop(node, "reg",
					sizeof(struct openprom_addr),
					&n, &rrp)) != 0)
		return;

	/* register set #0 is the MBus port register */
	if (bus_space_map(ma->ma_bustag,
			BUS_ADDR(rrp[0].oa_space, rrp[0].oa_base),
			rrp[0].oa_size,
			BUS_SPACE_MAP_LINEAR,
			&cpi->ci_mbusport) != 0) {
		panic("%s: can't map CPU regs", device_xname(self));
	}
	/* register set #1: MXCC control */
	if (bus_space_map(ma->ma_bustag,
			BUS_ADDR(rrp[1].oa_space, rrp[1].oa_base),
			rrp[1].oa_size,
			BUS_SPACE_MAP_LINEAR,
			&cpi->ci_mxccregs) != 0) {
		panic("%s: can't map CPU regs", device_xname(self));
	}
	/* register sets #3 and #4 are E$ cache data and tags */

	free(rrp, M_DEVBUF);
}

#if defined(SUN4D)
static int
cpu_cpuunit_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpuunit_attach_args *cpua = aux;

	return (strcmp(cf->cf_name, cpua->cpua_type) == 0);
}

static void
cpu_cpuunit_attach(device_t parent, device_t self, void *aux)
{
	struct cpuunit_attach_args *cpua = aux;
	struct cpu_softc *sc = device_private(self);

	sc->sc_dev = self;
	cpu_attach(sc, cpua->cpua_node, cpua->cpua_device_id);
}
#endif /* SUN4D */

static const char * const hard_intr_names[] = {
	"spur hard",
	"lev1 hard",
	"lev2 hard",
	"lev3 hard",
	"lev4 hard",
	"lev5 hard",
	"lev6 hard",
	"lev7 hard",
	"lev8 hard",
	"lev9 hard",
	"clock hard",
	"lev11 hard",
	"lev12 hard",
	"lev13 hard",
	"prof hard",
	"nmi hard",
};

static const char * const soft_intr_names[] = {
	"spur soft",
	"lev1 soft",
	"lev2 soft",
	"lev3 soft",
	"lev4 soft",
	"lev5 soft",
	"lev6 soft",
	"lev7 soft",
	"lev8 soft",
	"lev9 soft",
	"lev10 soft",
	"lev11 soft",
	"lev12 soft",
	"xcall std",
	"xcall fast",
	"nmi soft",
};

static void
cpu_init_evcnt(struct cpu_info *cpi)
{
	int i;

	/*
	 * Setup the per-cpu counters.
	 *
	 * The "savefp null" counter should go away when the NULL
	 * struct fpstate * bug is fixed.
	 */
	evcnt_attach_dynamic(&cpi->ci_savefpstate, EVCNT_TYPE_MISC,
			     NULL, cpu_name(cpi), "savefp ipi");
	evcnt_attach_dynamic(&cpi->ci_savefpstate_null, EVCNT_TYPE_MISC,
			     NULL, cpu_name(cpi), "savefp null ipi");
	evcnt_attach_dynamic(&cpi->ci_xpmsg_mutex_fail, EVCNT_TYPE_MISC,
			     NULL, cpu_name(cpi), "IPI mutex_trylock fail");
	evcnt_attach_dynamic(&cpi->ci_xpmsg_mutex_fail_call, EVCNT_TYPE_MISC,
			     NULL, cpu_name(cpi), "IPI mutex_trylock fail/call");

	/*
	 * These are the per-cpu per-IPL hard & soft interrupt counters.
	 */
	for (i = 0; i < 16; i++) {
		evcnt_attach_dynamic(&cpi->ci_intrcnt[i], EVCNT_TYPE_INTR,
				     NULL, cpu_name(cpi), hard_intr_names[i]);
		evcnt_attach_dynamic(&cpi->ci_sintrcnt[i], EVCNT_TYPE_INTR,
				     NULL, cpu_name(cpi), soft_intr_names[i]);
	}
}
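
/*
 * Usage note: the counters attached above are standard evcnt(9) event
 * counters, so their values can be inspected from userland with
 * "vmstat -e".
 */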

/*
 * Attach the CPU.
 * Discover interesting goop about the virtual address cache
 * (slightly funny place to do it, but this is where it is to be found).
 */
static void
cpu_attach(struct cpu_softc *sc, int node, int mid)
{
	char buf[100];
	struct cpu_info *cpi;
	int idx;
	static int cpu_attach_count = 0;

	/*
	 * The first CPU we're attaching must be the boot CPU.
	 * (see autoconf.c and cpuunit.c)
	 */
	idx = cpu_attach_count++;

#if !defined(MULTIPROCESSOR)
	if (cpu_attach_count > 1) {
		printf(": no SMP support in kernel\n");
		return;
	}
#endif

	/*
	 * Initialise this cpu's cpu_info.
	 */
	cpi = sc->sc_cpuinfo = cpus[idx];
	getcpuinfo(cpi, node);

	cpi->ci_cpuid = idx;
	cpi->mid = mid;
	cpi->node = node;
#ifdef DEBUG
	cpi->redzone = (void *)((long)cpi->eintstack + REDSIZE);
#endif

	if (sparc_ncpus > 1) {
		printf(": mid %d", mid);
		if (mid == 0 && !CPU_ISSUN4D)
			printf(" [WARNING: mid should not be 0]");
	}

#if defined(MULTIPROCESSOR)
	if (cpu_attach_count > 1) {
		cpu_attach_non_boot(sc, cpi, node);
		cpu_init_evcnt(cpi);
		return;
	}
#endif /* MULTIPROCESSOR */

	cpu_init_evcnt(cpi);

	/* Stuff to only run on the boot CPU */
	cpu_setup();
	snprintf(buf, sizeof buf, "%s @ %s MHz, %s FPU",
		cpi->cpu_longname, clockfreq(cpi->hz), cpi->fpu_name);
	cpu_setmodel("%s (%s)", machine_model, buf);
	printf(": %s\n", buf);
	cache_print(sc);

	cpi->master = 1;
	cpi->eintstack = eintstack;

	/*
	 * If we haven't been able to determine the Id of the
	 * boot CPU, set it now. In this case we can only boot
	 * from CPU #0 (see also the CPU attach code in autoconf.c)
	 */
	if (bootmid == 0)
		bootmid = mid;
}

/*
 * Finish CPU attach.
 * Must be run by the CPU which is being attached.
 */
void
cpu_setup(void)
{
	if (cpuinfo.hotfix)
		(*cpuinfo.hotfix)(&cpuinfo);

	/* Initialize FPU */
	fpu_init(&cpuinfo);

	/* Enable the cache */
	cpuinfo.cache_enable();

	cpuinfo.flags |= CPUFLG_HATCHED;
}

#if defined(MULTIPROCESSOR)
/*
 * Perform most of the tasks needed for a non-boot CPU.
 */
static void
cpu_attach_non_boot(struct cpu_softc *sc, struct cpu_info *cpi, int node)
{
	vaddr_t intstack, va;
	int error;

	/*
	 * Arrange interrupt stack.  This cpu will also abuse the bottom
	 * half of the interrupt stack before it gets to run its idle LWP.
	 */
	intstack = uvm_km_alloc(kernel_map, INT_STACK_SIZE, 0, UVM_KMF_WIRED);
	if (intstack == 0)
		panic("%s: no uspace/intstack", __func__);
	cpi->eintstack = (void *)(intstack + INT_STACK_SIZE);

	/* Allocate virtual space for pmap page_copy/page_zero */
	va = uvm_km_alloc(kernel_map, 2*PAGE_SIZE, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("%s: no virtual space", __func__);

	cpi->vpage[0] = (void *)(va + 0);
	cpi->vpage[1] = (void *)(va + PAGE_SIZE);

	/*
	 * Call the MI attach which creates an idle LWP for us.
	 */
	error = mi_cpu_attach(cpi);
	if (error != 0) {
		aprint_normal("\n");
		aprint_error("%s: mi_cpu_attach failed with %d\n",
		    device_xname(sc->sc_dev), error);
		return;
	}

	/*
	 * Note: `eintstack' was set earlier in this function.
	 * The %wim register will be initialized in cpu_hatch().
	 */
	cpi->ci_curlwp = cpi->ci_data.cpu_idlelwp;
	cpi->curpcb = lwp_getpcb(cpi->ci_curlwp);
	cpi->curpcb->pcb_wim = 1;

	/* For now, use the fixed virtual addresses setup in autoconf.c */
	cpi->intreg_4m = (struct icr_pi *)
		(PI_INTR_VA + (_MAXNBPG * CPU_MID2CPUNO(cpi->mid)));

	/* Now start this CPU */
	cpu_spinup(cpi);
	printf(": %s @ %s MHz, %s FPU\n", cpi->cpu_longname,
		clockfreq(cpi->hz), cpi->fpu_name);

	cache_print(sc);

	/*
	 * Now we're on the last CPU to be attaching.
	 */
	if (sparc_ncpus > 1 && cpi->ci_cpuid == sparc_ncpus - 1) {
		CPU_INFO_ITERATOR n;
		/*
		 * Install MP cache flush functions, unless the
		 * single-processor versions are no-ops.
		 */
		for (CPU_INFO_FOREACH(n, cpi)) {
#define SET_CACHE_FUNC(x) \
	if (cpi->x != __CONCAT(noop_,x)) cpi->x = __CONCAT(smp_,x)
			SET_CACHE_FUNC(vcache_flush_page);
			SET_CACHE_FUNC(vcache_flush_segment);
			SET_CACHE_FUNC(vcache_flush_region);
			SET_CACHE_FUNC(vcache_flush_context);
		}
	}
#undef SET_CACHE_FUNC
}

/*
 * Start secondary processors in motion.
 */
void
cpu_boot_secondary_processors(void)
{
	CPU_INFO_ITERATOR n;
	struct cpu_info *cpi;

	printf("cpu0: booting secondary processors:");
	for (CPU_INFO_FOREACH(n, cpi)) {
		if (cpuinfo.mid == cpi->mid ||
		    (cpi->flags & CPUFLG_HATCHED) == 0)
			continue;

		printf(" cpu%d", cpi->ci_cpuid);
		cpu_ready_mask |= (1 << n);
	}

	/* Mark the boot CPU as ready */
	cpu_ready_mask |= (1 << 0);

	/* Tell the other CPUs to start up. */
	go_smp_cpus = 1;

	printf("\n");
}

/*
 * Early initialisation, before main().
 */
void
cpu_init_system(void)
{

	mutex_init(&xpmsg_mutex, MUTEX_SPIN, IPL_SCHED);
}

/*
 * Allocate per-CPU data, then start up this CPU using PROM.
 */
void
cpu_spinup(struct cpu_info *cpi)
{
	extern void cpu_hatch(void); /* in locore.s */
	struct openprom_addr oa;
	void *pc;
	int n;

	pc = (void *)cpu_hatch;

	/* Setup CPU-specific MMU tables */
	pmap_alloc_cpu(cpi);

	cpi->flags &= ~CPUFLG_HATCHED;

	/*
	 * The physical address of the context table is passed to
	 * the PROM in a "physical address descriptor".
	 */
	oa.oa_space = 0;
	oa.oa_base = (uint32_t)cpi->ctx_tbl_pa;
	oa.oa_size = cpi->mmu_ncontext * sizeof(cpi->ctx_tbl[0]); /*???*/

	/*
	 * Flush entire cache here, since the CPU may start with
	 * caches off, hence no cache-coherency may be assumed.
	 */
	cpuinfo.cache_flush_all();
	prom_cpustart(cpi->node, &oa, 0, pc);

	/*
	 * Wait for this CPU to spin up.
	 */
	for (n = 10000; n != 0; n--) {
		cache_flush((void *) __UNVOLATILE(&cpi->flags),
			    sizeof(cpi->flags));
		if (cpi->flags & CPUFLG_HATCHED)
			return;
		delay(100);
	}
	printf("CPU did not spin up\n");
}

/*
 * Call a function on some CPUs.  `cpuset' can be set to CPUSET_ALL
 * to call every CPU, or `1 << cpi->ci_cpuid' for each CPU to call.
 */
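/*
 * Illustrative only (hypothetical arguments): a call such as
 *
 *	xcall(func, NULL, arg0, arg1, arg2, (1 << 1) | (1 << 2));
 *
 * runs `func' on cpu1 and cpu2 (without a fast trap, since `trap' is
 * NULL); the XCALL0() macro used in xc_send_ipi() below wraps this
 * pattern for the no-argument case.
 */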
void
xcall(xcall_func_t func, xcall_trap_t trap, int arg0, int arg1, int arg2,
      u_int cpuset)
{
	struct cpu_info *cpi;
	int n, i, done, callself, mybit;
	volatile struct xpmsg_func *p;
	u_int pil;
	int fasttrap;
	int is_noop = func == (xcall_func_t)sparc_noop;
	static char errbuf[160];
	char *bufp = errbuf;
	size_t bufsz = sizeof errbuf, wrsz;

	mybit = (1 << cpuinfo.ci_cpuid);
	callself = func && (cpuset & mybit) != 0;
	cpuset &= ~mybit;

	/* Mask any CPUs that are not ready */
	cpuset &= cpu_ready_mask;

#if 0
	mutex_spin_enter(&xpmsg_mutex);
#else
	/*
	 * There's a deadlock potential between multiple CPUs trying
	 * to xcall() at the same time, and the thread that loses the
	 * race to get xpmsg_lock is at an IPL above the incoming IPI
	 * IPL level, so it sits around waiting to take the lock while
	 * the other CPU is waiting for this CPU to handle the IPI and
	 * mark it as completed.
	 *
	 * If we fail to get the mutex, and we're at high enough IPL,
	 * call xcallintr() if there is a valid msg.tag.
	 */
	pil = (getpsr() & PSR_PIL) >> 8;

	if (cold || pil <= IPL_SCHED)
		mutex_spin_enter(&xpmsg_mutex);
	else {
		/*
		 * Warn about xcall at high IPL.
		 *
		 * XXX This is probably bogus (logging at high IPL),
		 * XXX so we don't do it by default.
		 */
		if (debug_xcall && (void *)func != sparc_noop) {
			u_int pc;

			__asm("mov %%i7, %0" : "=r" (pc) : );
			printf_nolog("%d: xcall %p at lvl %u from 0x%x\n",
			    cpu_number(), func, pil, pc);
		}

		while (mutex_tryenter(&xpmsg_mutex) == 0) {
			cpuinfo.ci_xpmsg_mutex_fail.ev_count++;
			if (cpuinfo.msg.tag) {
				cpuinfo.ci_xpmsg_mutex_fail_call.ev_count++;
				xcallintr(xcallintr);
			}
		}
	}
#endif

	/*
	 * First, call each CPU.  We do this so that they might have
	 * finished by the time we start looking.
	 */
	fasttrap = trap != NULL ? 1 : 0;
	for (CPU_INFO_FOREACH(n, cpi)) {

		/* Note: n == cpi->ci_cpuid */
		if ((cpuset & (1 << n)) == 0)
			continue;

		cpi->msg.tag = XPMSG_FUNC;
		cpi->msg.complete = 0;
		p = &cpi->msg.u.xpmsg_func;
		p->func = func;
		p->trap = trap;
		p->arg0 = arg0;
		p->arg1 = arg1;
		p->arg2 = arg2;
		/* Fast cross calls use interrupt level 14 */
		raise_ipi(cpi, 13 + fasttrap);	/* xcall_cookie->pil */
	}

	/*
	 * Second, call ourselves.
	 */
	if (callself)
		(*func)(arg0, arg1, arg2);

	/*
	 * Lastly, start looping, waiting for all CPUs to register that they
	 * have completed (bailing if it takes "too long", being loud about
	 * this in the process).
	 */
	done = is_noop;
	i = 1000000;	/* time-out, not too long, but still an _AGE_ */
	while (!done) {
		if (--i < 0) {
			wrsz = snprintf(bufp, bufsz,
			    "xcall(cpu%d,%p) from %p: couldn't ping cpus:",
			    cpu_number(), fasttrap ? trap : func,
			    __builtin_return_address(0));
			if (wrsz > bufsz)
				break;
			bufsz -= wrsz;
			bufp += wrsz;
		}

		done = 1;
		for (CPU_INFO_FOREACH(n, cpi)) {
			if ((cpuset & (1 << n)) == 0)
				continue;

			if (cpi->msg.complete == 0) {
				if (i < 0) {
					wrsz = snprintf(bufp, bufsz,
							" cpu%d", cpi->ci_cpuid);
					if (wrsz > bufsz)
						break;
					bufsz -= wrsz;
					bufp += wrsz;
				} else {
					done = 0;
					break;
				}
			}
		}
	}

	if (i >= 0 || debug_xcall == 0) {
		if (i < 0)
			printf_nolog("%s\n", errbuf);
		mutex_spin_exit(&xpmsg_mutex);
		return;
	}

	/*
	 * Let's make this a hard panic for now, and figure out why it
	 * happens.
	 *
	 * We call mp_pause_cpus() so we can capture their state *now*
	 * as opposed to after we've written all the below to the console.
	 */
#ifdef DDB
	mp_pause_cpus_ddb();
#else
	mp_pause_cpus();
#endif
	printf_nolog("%s\n", errbuf);
	mutex_spin_exit(&xpmsg_mutex);

	panic("failed to ping cpus");
}

/*
 * MD support for MI xcall(9) interface.
 */
void
xc_send_ipi(struct cpu_info *target)
{
	u_int cpuset;

	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != target);

	if (target)
		cpuset = 1 << target->ci_cpuid;
	else
		cpuset = CPUSET_ALL & ~(1 << cpuinfo.ci_cpuid);
	XCALL0(xc_ipi_handler, cpuset);
}

void
cpu_ipi(struct cpu_info *target)
{
	u_int cpuset;

	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != target);

	if (target)
		cpuset = 1 << target->ci_cpuid;
	else
		cpuset = CPUSET_ALL & ~(1 << cpuinfo.ci_cpuid);
	XCALL0(ipi_cpu_handler, cpuset);
}

/*
 * Tell all CPUs other than the current one to enter the PROM idle loop.
 */
void
mp_pause_cpus(void)
{
	CPU_INFO_ITERATOR n;
	struct cpu_info *cpi;

	for (CPU_INFO_FOREACH(n, cpi)) {
		if (cpuinfo.mid == cpi->mid ||
		    (cpi->flags & CPUFLG_HATCHED) == 0)
			continue;

		/*
		 * This PROM utility will put the OPENPROM_MBX_ABORT
		 * message (0xfc) in the target CPU's mailbox and then
		 * send it a level 15 soft interrupt.
		 */
		if (prom_cpuidle(cpi->node) != 0)
			printf("cpu%d could not be paused\n", cpi->ci_cpuid);
	}
}

/*
 * Resume all idling CPUs.
 */
void
mp_resume_cpus(void)
{
	CPU_INFO_ITERATOR n;
	struct cpu_info *cpi;

	for (CPU_INFO_FOREACH(n, cpi)) {
		if (cpuinfo.mid == cpi->mid ||
		    (cpi->flags & CPUFLG_HATCHED) == 0)
			continue;

		/*
		 * This PROM utility makes the target CPU return
		 * from its prom_cpuidle(0) call (see intr.c:nmi_soft()).
		 */
		if (prom_cpuresume(cpi->node) != 0)
			printf("cpu%d could not be resumed\n", cpi->ci_cpuid);
	}
}

/*
 * Tell all CPUs except the current one to hurry back into the PROM.
 */
void
mp_halt_cpus(void)
{
	CPU_INFO_ITERATOR n;
	struct cpu_info *cpi;

	for (CPU_INFO_FOREACH(n, cpi)) {
		int r;

		if (cpuinfo.mid == cpi->mid)
			continue;

		/*
		 * This PROM utility will put the OPENPROM_MBX_STOP
		 * message (0xfb) in the target CPU's mailbox and then
		 * send it a level 15 soft interrupt.
		 */
		r = prom_cpustop(cpi->node);
		printf("cpu%d %shalted\n", cpi->ci_cpuid,
			r == 0 ? "" : "(boot CPU?) can not be ");
	}
}

#if defined(DDB)
void
mp_pause_cpus_ddb(void)
{
	CPU_INFO_ITERATOR n;
	struct cpu_info *cpi;

	for (CPU_INFO_FOREACH(n, cpi)) {
		if (cpi == NULL || cpi->mid == cpuinfo.mid ||
		    (cpi->flags & CPUFLG_HATCHED) == 0)
			continue;

		cpi->msg_lev15.tag = XPMSG15_PAUSECPU;
		raise_ipi(cpi, 15);	/* high priority intr */
	}
}

void
mp_resume_cpus_ddb(void)
{
	CPU_INFO_ITERATOR n;
	struct cpu_info *cpi;

	for (CPU_INFO_FOREACH(n, cpi)) {
		if (cpi == NULL || cpuinfo.mid == cpi->mid ||
		    (cpi->flags & CPUFLG_PAUSED) == 0)
			continue;

		/* tell it to continue */
		cpi->flags &= ~CPUFLG_PAUSED;
	}
}
#endif /* DDB */
#endif /* MULTIPROCESSOR */

/*
 * fpu_init() must be run on the associated CPU.
 */
void
fpu_init(struct cpu_info *sc)
{
	struct fpstate fpstate;
	int fpuvers;

	/*
	 * Get the FSR and clear any exceptions.  If we do not unload
	 * the queue here and it is left over from a previous crash, we
	 * will panic in the first loadfpstate(), due to a sequence
	 * error, so we need to dump the whole state anyway.
	 *
	 * If there is no FPU, trap.c will advance over all the stores,
	 * so we initialize fs_fsr here.
	 */

	/* 7 is reserved for "none" */
	fpstate.fs_fsr = 7 << FSR_VER_SHIFT;
	savefpstate(&fpstate);
	sc->fpuvers = fpuvers =
		(fpstate.fs_fsr >> FSR_VER_SHIFT) & (FSR_VER >> FSR_VER_SHIFT);

	if (fpuvers == 7) {
		sc->fpu_name = "no";
		return;
	}

	sc->fpupresent = 1;
	sc->fpu_name = fsrtoname(sc->cpu_impl, sc->cpu_vers, fpuvers);
	if (sc->fpu_name == NULL) {
		snprintf(sc->fpu_namebuf, sizeof(sc->fpu_namebuf),
		    "version 0x%x", fpuvers);
		sc->fpu_name = sc->fpu_namebuf;
	}
}

void
cache_print(struct cpu_softc *sc)
{
	struct cacheinfo *ci = &sc->sc_cpuinfo->cacheinfo;

	if (sc->sc_cpuinfo->flags & CPUFLG_SUN4CACHEBUG)
		printf("%s: cache chip bug; trap page uncached\n",
		    device_xname(sc->sc_dev));

	printf("%s: ", device_xname(sc->sc_dev));

	if (ci->c_totalsize == 0) {
		printf("no cache\n");
		return;
	}

	if (ci->c_split) {
		const char *sep = "";

		printf("%s", (ci->c_physical ? "physical " : ""));
		if (ci->ic_totalsize > 0) {
			printf("%s%dK instruction (%d b/l)", sep,
			    ci->ic_totalsize/1024, ci->ic_linesize);
			sep = ", ";
		}
		if (ci->dc_totalsize > 0) {
			printf("%s%dK data (%d b/l)", sep,
			    ci->dc_totalsize/1024, ci->dc_linesize);
		}
	} else if (ci->c_physical) {
		/* combined, physical */
		printf("physical %dK combined cache (%d bytes/line)",
		    ci->c_totalsize/1024, ci->c_linesize);
	} else {
		/* combined, virtual */
		printf("%dK byte write-%s, %d bytes/line, %cw flush",
		    ci->c_totalsize/1024,
		    (ci->c_vactype == VAC_WRITETHROUGH) ? "through" : "back",
		    ci->c_linesize,
		    ci->c_hwflush ? 'h' : 's');
	}

	if (ci->ec_totalsize > 0) {
		printf(", %dK external (%d b/l)",
		    ci->ec_totalsize/1024, ci->ec_linesize);
	}
	printf(": ");
	if (ci->c_enabled)
		printf("cache enabled");
	printf("\n");
}


/*------------*/


void cpumatch_unknown(struct cpu_info *, struct module_info *, int);
void cpumatch_sun4(struct cpu_info *, struct module_info *, int);
void cpumatch_sun4c(struct cpu_info *, struct module_info *, int);
void cpumatch_ms1(struct cpu_info *, struct module_info *, int);
void cpumatch_viking(struct cpu_info *, struct module_info *, int);
void cpumatch_hypersparc(struct cpu_info *, struct module_info *, int);
void cpumatch_turbosparc(struct cpu_info *, struct module_info *, int);

void getcacheinfo_sun4(struct cpu_info *, int node);
void getcacheinfo_sun4c(struct cpu_info *, int node);
void getcacheinfo_obp(struct cpu_info *, int node);
void getcacheinfo_sun4d(struct cpu_info *, int node);

void sun4_hotfix(struct cpu_info *);
void viking_hotfix(struct cpu_info *);
void turbosparc_hotfix(struct cpu_info *);
void swift_hotfix(struct cpu_info *);

void ms1_mmu_enable(void);
void viking_mmu_enable(void);
void swift_mmu_enable(void);
void hypersparc_mmu_enable(void);

void srmmu_get_syncflt(void);
void ms1_get_syncflt(void);
void viking_get_syncflt(void);
void swift_get_syncflt(void);
void turbosparc_get_syncflt(void);
void hypersparc_get_syncflt(void);
void cypress_get_syncflt(void);

int srmmu_get_asyncflt(u_int *, u_int *);
int hypersparc_get_asyncflt(u_int *, u_int *);
int cypress_get_asyncflt(u_int *, u_int *);
int no_asyncflt_regs(u_int *, u_int *);

int hypersparc_getmid(void);
/* cypress and hypersparc can share this function, see ctlreg.h */
#define cypress_getmid	hypersparc_getmid
int viking_getmid(void);

#if (defined(SUN4M) && !defined(MSIIEP)) || defined(SUN4D)
extern int (*moduleerr_handler)(void);
int viking_module_error(void);
#endif

struct module_info module_unknown = {
	CPUTYP_UNKNOWN,
	VAC_UNKNOWN,
	cpumatch_unknown
};


void
cpumatch_unknown(struct cpu_info *sc, struct module_info *mp, int node)
{

	panic("Unknown CPU type: "
	      "cpu: impl %d, vers %d; mmu: impl %d, vers %d",
		sc->cpu_impl, sc->cpu_vers,
		sc->mmu_impl, sc->mmu_vers);
}

#if defined(SUN4)
struct module_info module_sun4 = {
	CPUTYP_UNKNOWN,
	VAC_WRITETHROUGH,
	cpumatch_sun4,
	getcacheinfo_sun4,
	sun4_hotfix,
	0,
	sun4_cache_enable,
	0,
	0,			/* ncontext set in `match' function */
	0,			/* get_syncflt(); unused in sun4 */
	0,			/* get_asyncflt(); unused in sun4 */
	sun4_cache_flush,
	sun4_vcache_flush_page, NULL,
	sun4_vcache_flush_segment, NULL,
	sun4_vcache_flush_region, NULL,
	sun4_vcache_flush_context, NULL,
	NULL, NULL,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	noop_cache_flush_all,
	0,
	pmap_zero_page4_4c,
	pmap_copy_page4_4c
};

void
getcacheinfo_sun4(struct cpu_info *sc, int node)
{
	struct cacheinfo *ci = &sc->cacheinfo;

	switch (sc->cpu_type) {
	case CPUTYP_4_100:
		ci->c_vactype = VAC_NONE;
		ci->c_totalsize = 0;
		ci->c_hwflush = 0;
		ci->c_linesize = 0;
		ci->c_l2linesize = 0;
		ci->c_split = 0;
		ci->c_nlines = 0;

		/* Override cache flush functions */
		sc->cache_flush = noop_cache_flush;
		sc->sp_vcache_flush_page = noop_vcache_flush_page;
		sc->sp_vcache_flush_segment = noop_vcache_flush_segment;
		sc->sp_vcache_flush_region = noop_vcache_flush_region;
		sc->sp_vcache_flush_context = noop_vcache_flush_context;
		break;
	case CPUTYP_4_200:
		ci->c_vactype = VAC_WRITEBACK;
		ci->c_totalsize = 128*1024;
		ci->c_hwflush = 0;
		ci->c_linesize = 16;
		ci->c_l2linesize = 4;
		ci->c_split = 0;
		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
		break;
	case CPUTYP_4_300:
		ci->c_vactype = VAC_WRITEBACK;
		ci->c_totalsize = 128*1024;
		ci->c_hwflush = 0;
		ci->c_linesize = 16;
		ci->c_l2linesize = 4;
		ci->c_split = 0;
		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
		sc->flags |= CPUFLG_SUN4CACHEBUG;
		break;
	case CPUTYP_4_400:
		ci->c_vactype = VAC_WRITEBACK;
		ci->c_totalsize = 128 * 1024;
		ci->c_hwflush = 0;
		ci->c_linesize = 32;
		ci->c_l2linesize = 5;
		ci->c_split = 0;
		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
		break;
	}
}

void
cpumatch_sun4(struct cpu_info *sc, struct module_info *mp, int node)
{
	struct idprom *idp = prom_getidprom();

	switch (idp->idp_machtype) {
	case ID_SUN4_100:
		sc->cpu_type = CPUTYP_4_100;
		sc->classlvl = 100;
		sc->mmu_ncontext = 8;
		sc->mmu_nsegment = 256;
/*XXX*/		sc->hz = 14280000;
		break;
	case ID_SUN4_200:
		sc->cpu_type = CPUTYP_4_200;
		sc->classlvl = 200;
		sc->mmu_nsegment = 512;
		sc->mmu_ncontext = 16;
/*XXX*/		sc->hz = 16670000;
		break;
	case ID_SUN4_300:
		sc->cpu_type = CPUTYP_4_300;
		sc->classlvl = 300;
		sc->mmu_nsegment = 256;
		sc->mmu_ncontext = 16;
/*XXX*/		sc->hz = 25000000;
		break;
	case ID_SUN4_400:
		sc->cpu_type = CPUTYP_4_400;
		sc->classlvl = 400;
		sc->mmu_nsegment = 1024;
		sc->mmu_ncontext = 64;
		sc->mmu_nregion = 256;
/*XXX*/		sc->hz = 33000000;
		sc->sun4_mmu3l = 1;
		break;
	}
}
#endif /* SUN4 */

#if defined(SUN4C)
struct module_info module_sun4c = {
	CPUTYP_UNKNOWN,
	VAC_WRITETHROUGH,
	cpumatch_sun4c,
	getcacheinfo_sun4c,
	sun4_hotfix,
	0,
	sun4_cache_enable,
	0,
	0,			/* ncontext set in `match' function */
	0,			/* get_syncflt(); unused in sun4c */
	0,			/* get_asyncflt(); unused in sun4c */
	sun4_cache_flush,
	sun4_vcache_flush_page, NULL,
	sun4_vcache_flush_segment, NULL,
	sun4_vcache_flush_region, NULL,
	sun4_vcache_flush_context, NULL,
	NULL, NULL,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	noop_cache_flush_all,
	0,
	pmap_zero_page4_4c,
	pmap_copy_page4_4c
};

void
cpumatch_sun4c(struct cpu_info *sc, struct module_info *mp, int node)
{
	int	rnode;

	rnode = findroot();
	sc->mmu_npmeg = sc->mmu_nsegment =
		prom_getpropint(rnode, "mmu-npmg", 128);
	sc->mmu_ncontext = prom_getpropint(rnode, "mmu-nctx", 8);

	/* Get clock frequency */
	sc->hz = prom_getpropint(rnode, "clock-frequency", 0);
}

void
getcacheinfo_sun4c(struct cpu_info *sc, int node)
{
	struct cacheinfo *ci = &sc->cacheinfo;
	int i, l;

	if (node == 0)
		/* Bootstrapping */
		return;

	/* Sun4c's have only virtually-addressed caches */
	ci->c_physical = 0;
	ci->c_totalsize = prom_getpropint(node, "vac-size", 65536);
	/*
	 * Note: vac-hwflush is spelled with an underscore
	 * on the 4/75s.
	 */
	ci->c_hwflush =
		prom_getpropint(node, "vac_hwflush", 0) |
		prom_getpropint(node, "vac-hwflush", 0);

	ci->c_linesize = l = prom_getpropint(node, "vac-linesize", 16);
	for (i = 0; (1 << i) < l; i++)
		/* void */;
	if ((1 << i) != l)
		panic("bad cache line size %d", l);
	ci->c_l2linesize = i;
	ci->c_associativity = 1;
	ci->c_nlines = ci->c_totalsize >> i;

	ci->c_vactype = VAC_WRITETHROUGH;

	/*
	 * Machines with "buserr-type" 1 have a bug in the cache
	 * chip that affects traps.  (I wish I knew more about this
	 * mysterious buserr-type variable....)
	 */
	if (prom_getpropint(node, "buserr-type", 0) == 1)
		sc->flags |= CPUFLG_SUN4CACHEBUG;
}
#endif /* SUN4C */

void
sun4_hotfix(struct cpu_info *sc)
{

	if ((sc->flags & CPUFLG_SUN4CACHEBUG) != 0)
		kvm_uncache((char *)trapbase, 1);

	/* Use the hardware-assisted page flush routine, if present */
	if (sc->cacheinfo.c_hwflush)
		sc->vcache_flush_page = sun4_vcache_flush_page_hw;
}

#if defined(SUN4M)
void
getcacheinfo_obp(struct cpu_info *sc, int node)
{
	struct cacheinfo *ci = &sc->cacheinfo;
	int i, l;

#if defined(MULTIPROCESSOR)
	/*
	 * We really really want the cache info early for MP systems,
	 * so figure out the boot node, if we can.
	 *
	 * XXX this loop stolen from mainbus_attach()
	 */
	if (node == 0 && CPU_ISSUN4M && bootmid != 0) {
		const char *cp;
		char namebuf[32];
		int mid, node2;

		for (node2 = firstchild(findroot());
		     node2;
		     node2 = nextsibling(node2)) {
			cp = prom_getpropstringA(node2, "device_type",
					    namebuf, sizeof namebuf);
			if (strcmp(cp, "cpu") != 0)
				continue;

			mid = prom_getpropint(node2, "mid", -1);
			if (mid == bootmid) {
				node = node2;
				break;
			}
		}
	}
#endif

	if (node == 0)
		/* Bootstrapping */
		return;

	/*
	 * Determine the Sun4m cache organization.
	 */
	ci->c_physical = node_has_property(node, "cache-physical?");

	if (prom_getpropint(node, "ncaches", 1) == 2)
		ci->c_split = 1;
	else
		ci->c_split = 0;

	/* hwflush is used only by sun4/4c code */
	ci->c_hwflush = 0;

	if (node_has_property(node, "icache-nlines") &&
	    node_has_property(node, "dcache-nlines") &&
	    ci->c_split) {
		/* Harvard architecture: get I and D cache sizes */
		ci->ic_nlines = prom_getpropint(node, "icache-nlines", 0);
		ci->ic_linesize = l =
			prom_getpropint(node, "icache-line-size", 0);
		for (i = 0; (1 << i) < l && l; i++)
			/* void */;
		if ((1 << i) != l && l)
			panic("bad icache line size %d", l);
		ci->ic_l2linesize = i;
		ci->ic_associativity =
			prom_getpropint(node, "icache-associativity", 1);
		ci->ic_totalsize = l * ci->ic_nlines * ci->ic_associativity;

		ci->dc_nlines = prom_getpropint(node, "dcache-nlines", 0);
		ci->dc_linesize = l =
			prom_getpropint(node, "dcache-line-size", 0);
		for (i = 0; (1 << i) < l && l; i++)
			/* void */;
		if ((1 << i) != l && l)
			panic("bad dcache line size %d", l);
		ci->dc_l2linesize = i;
		ci->dc_associativity =
			prom_getpropint(node, "dcache-associativity", 1);
		ci->dc_totalsize = l * ci->dc_nlines * ci->dc_associativity;

		ci->c_l2linesize = min(ci->ic_l2linesize, ci->dc_l2linesize);
		ci->c_linesize = min(ci->ic_linesize, ci->dc_linesize);
		ci->c_totalsize = max(ci->ic_totalsize, ci->dc_totalsize);
		ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
	} else {
		/* unified I/D cache */
		ci->c_nlines = prom_getpropint(node, "cache-nlines", 128);
		ci->c_linesize = l =
			prom_getpropint(node, "cache-line-size", 0);
		for (i = 0; (1 << i) < l && l; i++)
			/* void */;
		if ((1 << i) != l && l)
			panic("bad cache line size %d", l);
		ci->c_l2linesize = i;
		ci->c_associativity =
			prom_getpropint(node, "cache-associativity", 1);
		ci->dc_associativity = ci->ic_associativity =
			ci->c_associativity;
		ci->c_totalsize = l * ci->c_nlines * ci->c_associativity;
	}

	if (node_has_property(node, "ecache-nlines")) {
		/* we have a L2 "e"xternal cache */
		ci->ec_nlines = prom_getpropint(node, "ecache-nlines", 32768);
		ci->ec_linesize = l = prom_getpropint(node, "ecache-line-size", 0);
		for (i = 0; (1 << i) < l && l; i++)
			/* void */;
		if ((1 << i) != l && l)
			panic("bad ecache line size %d", l);
		ci->ec_l2linesize = i;
		ci->ec_associativity =
			prom_getpropint(node, "ecache-associativity", 1);
		ci->ec_totalsize = l * ci->ec_nlines * ci->ec_associativity;
	}
	if (ci->c_totalsize == 0)
		printf("warning: couldn't identify cache\n");
}

/*
 * We use the max. number of contexts on the micro and
 * hyper SPARCs. The SuperSPARC would let us use up to 65536
 * contexts (by powers of 2), but we keep it at 4096 since
 * the table must be aligned to #context*4. With 4K contexts,
 * we waste at most 16K of memory. Note that the context
 * table is *always* page-aligned, so there can always be
 * 1024 contexts without sacrificing memory space (given
 * that the chip supports 1024 contexts).
 *
 * Currently known limits: MS1=64, MS2=256, HS=4096, SS=65536
 *	some old SS's=4096
 */
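
/*
 * Worked example of the alignment rule above: with 4096 contexts and
 * 4-byte context table entries, the table occupies 4096 * 4 = 16K
 * bytes and must be aligned to that same 16K boundary.
 */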

/* TI Microsparc I */
struct module_info module_ms1 = {
	CPUTYP_MS1,
	VAC_NONE,
	cpumatch_ms1,
	getcacheinfo_obp,
	0,
	ms1_mmu_enable,
	ms1_cache_enable,
	0,
	64,
	ms1_get_syncflt,
	no_asyncflt_regs,
	ms1_cache_flush,
	noop_vcache_flush_page, NULL,
	noop_vcache_flush_segment, NULL,
	noop_vcache_flush_region, NULL,
	noop_vcache_flush_context, NULL,
	noop_vcache_flush_range, NULL,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	ms1_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};

void
cpumatch_ms1(struct cpu_info *sc, struct module_info *mp, int node)
{

	/*
	 * Turn off page zeroing in the idle loop; an unidentified
	 * bug causes (very sporadic) user process corruption.
	 */
	vm_page_zero_enable = 0;
}

void
ms1_mmu_enable(void)
{
}

/* TI Microsparc II */
struct module_info module_ms2 = {		/* UNTESTED */
	CPUTYP_MS2,
	VAC_WRITETHROUGH,
	0,
	getcacheinfo_obp,
	0,
	0,
	swift_cache_enable,
	0,
	256,
	srmmu_get_syncflt,
	srmmu_get_asyncflt,
	srmmu_cache_flush,
	srmmu_vcache_flush_page, NULL,
	srmmu_vcache_flush_segment, NULL,
	srmmu_vcache_flush_region, NULL,
	srmmu_vcache_flush_context, NULL,
	srmmu_vcache_flush_range, NULL,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	srmmu_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};


struct module_info module_swift = {
	CPUTYP_MS2,
	VAC_WRITETHROUGH,
	0,
	getcacheinfo_obp,
	swift_hotfix,
	0,
	swift_cache_enable,
	0,
	256,
	swift_get_syncflt,
	no_asyncflt_regs,
	srmmu_cache_flush,
	srmmu_vcache_flush_page, NULL,
	srmmu_vcache_flush_segment, NULL,
	srmmu_vcache_flush_region, NULL,
	srmmu_vcache_flush_context, NULL,
	srmmu_vcache_flush_range, NULL,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	srmmu_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};

void
swift_hotfix(struct cpu_info *sc)
{
	int pcr = lda(SRMMU_PCR, ASI_SRMMU);

	/* Turn off branch prediction */
	pcr &= ~SWIFT_PCR_BF;
	sta(SRMMU_PCR, ASI_SRMMU, pcr);
}

void
swift_mmu_enable(void)
{
}


/* ROSS Hypersparc */
struct module_info module_hypersparc = {
	CPUTYP_UNKNOWN,
	VAC_WRITEBACK,
	cpumatch_hypersparc,
	getcacheinfo_obp,
	0,
	hypersparc_mmu_enable,
	hypersparc_cache_enable,
	hypersparc_getmid,
	4096,
	hypersparc_get_syncflt,
	hypersparc_get_asyncflt,
	srmmu_cache_flush,
	srmmu_vcache_flush_page, ft_srmmu_vcache_flush_page,
	srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment,
	srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region,
	srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context,
	srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range,
	noop_pcache_flush_page,
	hypersparc_pure_vcache_flush,
	hypersparc_cache_flush_all,
	hypersparc_memerr,
	pmap_zero_page4m,
	pmap_copy_page4m
};

void
cpumatch_hypersparc(struct cpu_info *sc, struct module_info *mp, int node)
{

	sc->cpu_type = CPUTYP_HS_MBUS;	/* XXX */

	if (node == 0) {
		/* Flush I-cache */
		sta(0, ASI_HICACHECLR, 0);

		/* Disable `unimplemented flush' traps during boot-up */
		wrasr(rdasr(HYPERSPARC_ASRNUM_ICCR) | HYPERSPARC_ICCR_FTD,
			HYPERSPARC_ASRNUM_ICCR);
	}
}

void
hypersparc_mmu_enable(void)
{
#if 0
	int pcr;

	pcr = lda(SRMMU_PCR, ASI_SRMMU);
	pcr |= HYPERSPARC_PCR_C;
	pcr &= ~HYPERSPARC_PCR_CE;

	sta(SRMMU_PCR, ASI_SRMMU, pcr);
#endif
}

int
hypersparc_getmid(void)
{
	u_int pcr = lda(SRMMU_PCR, ASI_SRMMU);

	return ((pcr & HYPERSPARC_PCR_MID) >> 15);
}


/* Cypress 605 */
struct module_info module_cypress = {
	CPUTYP_CYPRESS,
	VAC_WRITEBACK,
	0,
	getcacheinfo_obp,
	0,
	0,
	cypress_cache_enable,
	cypress_getmid,
	4096,
	cypress_get_syncflt,
	cypress_get_asyncflt,
	srmmu_cache_flush,
	srmmu_vcache_flush_page, ft_srmmu_vcache_flush_page,
	srmmu_vcache_flush_segment, ft_srmmu_vcache_flush_segment,
	srmmu_vcache_flush_region, ft_srmmu_vcache_flush_region,
	srmmu_vcache_flush_context, ft_srmmu_vcache_flush_context,
	srmmu_vcache_flush_range, ft_srmmu_vcache_flush_range,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	cypress_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};


/* Fujitsu Turbosparc */
struct module_info module_turbosparc = {
	CPUTYP_MS2,
	VAC_WRITEBACK,
	cpumatch_turbosparc,
	getcacheinfo_obp,
	turbosparc_hotfix,
	0,
	turbosparc_cache_enable,
	0,
	256,
	turbosparc_get_syncflt,
	no_asyncflt_regs,
	srmmu_cache_flush,
	srmmu_vcache_flush_page, NULL,
	srmmu_vcache_flush_segment, NULL,
	srmmu_vcache_flush_region, NULL,
	srmmu_vcache_flush_context, NULL,
	srmmu_vcache_flush_range, NULL,
	noop_pcache_flush_page,
	noop_pure_vcache_flush,
	srmmu_cache_flush_all,
	memerr4m,
	pmap_zero_page4m,
	pmap_copy_page4m
};

void
cpumatch_turbosparc(struct cpu_info *sc, struct module_info *mp, int node)
{
	int i;

	if (node == 0 || sc->master == 0)
		return;

	i = getpsr();
	if (sc->cpu_vers == IU_VERS(i))
		return;

	/*
	 * A cloaked Turbosparc: clear any items in cpuinfo that
	 * might have been set to uS2 versions during bootstrap.
	 */
	sc->cpu_longname = 0;
	sc->mmu_ncontext = 0;
	sc->cpu_type = 0;
	sc->cacheinfo.c_vactype = 0;
	sc->hotfix = 0;
	sc->mmu_enable = 0;
	sc->cache_enable = 0;
	sc->get_syncflt = 0;
	sc->cache_flush = 0;
	sc->sp_vcache_flush_page = 0;
	sc->sp_vcache_flush_segment = 0;
	sc->sp_vcache_flush_region = 0;
	sc->sp_vcache_flush_context = 0;
	sc->pcache_flush_page = 0;
}

void
turbosparc_hotfix(struct cpu_info *sc)
{
	int pcf;

	pcf = lda(SRMMU_PCFG, ASI_SRMMU);
	if (pcf & TURBOSPARC_PCFG_US2) {
		/* Turn off uS2 emulation bit */
		pcf &= ~TURBOSPARC_PCFG_US2;
		sta(SRMMU_PCFG, ASI_SRMMU, pcf);
	}
}
#endif /* SUN4M */

#if defined(SUN4M)
struct module_info module_viking = {
	CPUTYP_UNKNOWN,		/* set in cpumatch() */
	VAC_NONE,
	cpumatch_viking,
	getcacheinfo_obp,
	viking_hotfix,
	viking_mmu_enable,
	viking_cache_enable,
	viking_getmid,
	4096,
	viking_get_syncflt,
	no_asyncflt_regs,
	/* supersparcs use cached DVMA, no need to flush */
	noop_cache_flush,
	noop_vcache_flush_page, NULL,
	noop_vcache_flush_segment, NULL,
	noop_vcache_flush_region, NULL,
	noop_vcache_flush_context, NULL,
	noop_vcache_flush_range, NULL,
	viking_pcache_flush_page,
	noop_pure_vcache_flush,
	noop_cache_flush_all,
	viking_memerr,
	pmap_zero_page4m,
	pmap_copy_page4m
};
#endif /* SUN4M */

#if defined(SUN4M) || defined(SUN4D)
void
cpumatch_viking(struct cpu_info *sc, struct module_info *mp, int node)
{

	if (node == 0)
		viking_hotfix(sc);
}

void
viking_hotfix(struct cpu_info *sc)
{
	static int mxcc = -1;
	int pcr = lda(SRMMU_PCR, ASI_SRMMU);

	/* Test if we're directly on the MBus */
	if ((pcr & VIKING_PCR_MB) == 0) {
		sc->mxcc = 1;
		sc->flags |= CPUFLG_CACHE_MANDATORY;
		sc->zero_page = pmap_zero_page_viking_mxcc;
		sc->copy_page = pmap_copy_page_viking_mxcc;
#if !defined(MSIIEP)
		moduleerr_handler = viking_module_error;
#endif

		/*
		 * Ok to cache PTEs; set the flag here, so we don't
		 * uncache in pmap_bootstrap().
		 */
		if ((pcr & VIKING_PCR_TC) == 0)
			printf("[viking: PCR_TC is off]");
		else
			sc->flags |= CPUFLG_CACHEPAGETABLES;
	} else {
#ifdef MULTIPROCESSOR
		if (sparc_ncpus > 1 && sc->cacheinfo.ec_totalsize == 0)
			sc->cache_flush = srmmu_cache_flush;
#endif
	}
	/* Check that all modules have the same MXCC configuration */
	if (mxcc != -1 && sc->mxcc != mxcc)
		panic("MXCC module mismatch");

	mxcc = sc->mxcc;

	/* XXX! */
	if (sc->mxcc)
		sc->cpu_type = CPUTYP_SS1_MBUS_MXCC;
	else
		sc->cpu_type = CPUTYP_SS1_MBUS_NOMXCC;
}

void
viking_mmu_enable(void)
{
	int pcr;

	pcr = lda(SRMMU_PCR, ASI_SRMMU);

	if (cpuinfo.mxcc) {
		if ((pcr & VIKING_PCR_TC) == 0) {
			printf("[viking: turn on PCR_TC]");
		}
		pcr |= VIKING_PCR_TC;
		cpuinfo.flags |= CPUFLG_CACHEPAGETABLES;
	} else
		pcr &= ~VIKING_PCR_TC;
	sta(SRMMU_PCR, ASI_SRMMU, pcr);
}

int
viking_getmid(void)
{

	if (cpuinfo.mxcc) {
		u_int v = ldda(MXCC_MBUSPORT, ASI_CONTROL) & 0xffffffff;
		return ((v >> 24) & 0xf);
	}
	return (0);
}

#if !defined(MSIIEP)
int
viking_module_error(void)
{
	uint64_t v;
	int fatal = 0;
	CPU_INFO_ITERATOR n;
	struct cpu_info *cpi;

	/* Report on MXCC error registers in each module */
	for (CPU_INFO_FOREACH(n, cpi)) {
		if (cpi->ci_mxccregs == 0) {
			printf("\tMXCC registers not mapped\n");
			continue;
		}

		printf("module%d:\n", cpi->ci_cpuid);
		v = *((uint64_t *)(cpi->ci_mxccregs + 0xe00));
		printf("\tmxcc error 0x%llx\n", v);
		v = *((uint64_t *)(cpi->ci_mxccregs + 0xb00));
		printf("\tmxcc status 0x%llx\n", v);
		v = *((uint64_t *)(cpi->ci_mxccregs + 0xc00));
		printf("\tmxcc reset 0x%llx", v);
		if (v & MXCC_MRST_WD)
			printf(" (WATCHDOG RESET)"), fatal = 1;
		if (v & MXCC_MRST_SI)
			printf(" (SOFTWARE RESET)"), fatal = 1;
		printf("\n");
	}
	return (fatal);
}
#endif /* !MSIIEP */
#endif /* SUN4M || SUN4D */
1870 
1871 #if defined(SUN4D)
1872 void
getcacheinfo_sun4d(struct cpu_info * sc,int node)1873 getcacheinfo_sun4d(struct cpu_info *sc, int node)
1874 {
1875 	struct cacheinfo *ci = &sc->cacheinfo;
1876 	int i, l;
1877 
1878 	if (node == 0)
1879 		/* Bootstrapping */
1880 		return;
1881 
1882 	/*
1883 	 * The Sun4d always has TI TMS390Z55 Viking CPUs; we hard-code
1884 	 * much of the cache information here.
1885 	 */
1886 
1887 	ci->c_physical = 1;
1888 	ci->c_split = 1;
1889 
1890 	/* hwflush is used only by sun4/4c code */
1891 	ci->c_hwflush = 0;
1892 
1893 	ci->ic_nlines = 0x00000040;
1894 	ci->ic_linesize = 0x00000040;
1895 	ci->ic_l2linesize = 6;
1896 	ci->ic_associativity = 0x00000005;
1897 	ci->ic_totalsize = ci->ic_linesize * ci->ic_nlines *
1898 	    ci->ic_associativity;
1899 
1900 	ci->dc_nlines = 0x00000080;
1901 	ci->dc_linesize = 0x00000020;
1902 	ci->dc_l2linesize = 5;
1903 	ci->dc_associativity = 0x00000004;
1904 	ci->dc_totalsize = ci->dc_linesize * ci->dc_nlines *
1905 	    ci->dc_associativity;
1906 
1907 	ci->c_l2linesize = min(ci->ic_l2linesize, ci->dc_l2linesize);
1908 	ci->c_linesize = min(ci->ic_linesize, ci->dc_linesize);
1909 	ci->c_totalsize = max(ci->ic_totalsize, ci->dc_totalsize);
1910 	ci->c_nlines = ci->c_totalsize >> ci->c_l2linesize;
1911 
1912 	if (node_has_property(node, "ecache-nlines")) {
1913 		/* we have a L2 "e"xternal cache */
1914 		ci->ec_nlines = prom_getpropint(node, "ecache-nlines", 32768);
1915 		ci->ec_linesize = l = prom_getpropint(node, "ecache-line-size", 0);
		for (i = 0; (1 << i) < l && l; i++)
			/* void */;
		if ((1 << i) != l && l)
			panic("bad ecache line size %d", l);
		ci->ec_l2linesize = i;
		ci->ec_associativity =
			prom_getpropint(node, "ecache-associativity", 1);
		ci->ec_totalsize = l * ci->ec_nlines * ci->ec_associativity;
	}
}

struct module_info module_viking_sun4d = {
	CPUTYP_UNKNOWN,		/* set in cpumatch() */
	VAC_NONE,
	cpumatch_viking,
	getcacheinfo_sun4d,
	viking_hotfix,
	viking_mmu_enable,
	viking_cache_enable,
	viking_getmid,
	4096,
	viking_get_syncflt,
	no_asyncflt_regs,
	/* supersparcs use cached DVMA, no need to flush */
	noop_cache_flush,
	noop_vcache_flush_page, NULL,
	noop_vcache_flush_segment, NULL,
	noop_vcache_flush_region, NULL,
	noop_vcache_flush_context, NULL,
	noop_vcache_flush_range, NULL,
	viking_pcache_flush_page,
	noop_pure_vcache_flush,
	noop_cache_flush_all,
	viking_memerr,
	pmap_zero_page4m,
	pmap_copy_page4m
};
#endif /* SUN4D */

#define	ANY	-1	/* match any version */

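/*
 * CPU configuration table.  getcpuinfo() below scans it in order and
 * takes the first entry whose fields all match (ANY is a wildcard);
 * the catch-all entry at the end guarantees the scan terminates.
 */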
struct cpu_conf {
	int	arch;
	int	cpu_impl;
	int	cpu_vers;
	int	mmu_impl;
	int	mmu_vers;
	const char	*name;
	struct	module_info *minfo;
} cpu_conf[] = {
#if defined(SUN4)
	{ CPU_SUN4, 0, 0, ANY, ANY, "MB86900/1A or L64801", &module_sun4 },
	{ CPU_SUN4, 1, 0, ANY, ANY, "L64811", &module_sun4 },
	{ CPU_SUN4, 1, 1, ANY, ANY, "CY7C601", &module_sun4 },
#endif

#if defined(SUN4C)
	{ CPU_SUN4C, 0, 0, ANY, ANY, "MB86900/1A or L64801", &module_sun4c },
	{ CPU_SUN4C, 1, 0, ANY, ANY, "L64811", &module_sun4c },
	{ CPU_SUN4C, 1, 1, ANY, ANY, "CY7C601", &module_sun4c },
	{ CPU_SUN4C, 9, 0, ANY, ANY, "W8601/8701 or MB86903", &module_sun4c },
#endif

#if defined(SUN4M)
	{ CPU_SUN4M, 0, 4, 0, 4, "MB86904", &module_swift },
	{ CPU_SUN4M, 0, 5, 0, 5, "MB86907", &module_turbosparc },
	{ CPU_SUN4M, 1, 1, 1, 0, "CY7C601/604", &module_cypress },
	{ CPU_SUN4M, 1, 1, 1, 0xb, "CY7C601/605 (v.b)", &module_cypress },
	{ CPU_SUN4M, 1, 1, 1, 0xc, "CY7C601/605 (v.c)", &module_cypress },
	{ CPU_SUN4M, 1, 1, 1, 0xf, "CY7C601/605 (v.f)", &module_cypress },
	{ CPU_SUN4M, 1, 3, 1, ANY, "CY7C611", &module_cypress },
	{ CPU_SUN4M, 1, 0xe, 1, 7, "RT620/625", &module_hypersparc },
	{ CPU_SUN4M, 1, 0xf, 1, 7, "RT620/625", &module_hypersparc },
	{ CPU_SUN4M, 4, 0, 0, ANY, "TMS390Z50 v0 or TMS390Z55", &module_viking },
	{ CPU_SUN4M, 4, 1, 0, ANY, "TMS390Z50 v1", &module_viking },
	{ CPU_SUN4M, 4, 1, 4, ANY, "TMS390S10", &module_ms1 },
	{ CPU_SUN4M, 4, 2, 0, ANY, "TI_MS2", &module_ms2 },
	{ CPU_SUN4M, 4, 3, ANY, ANY, "TI_4_3", &module_viking },
	{ CPU_SUN4M, 4, 4, ANY, ANY, "TI_4_4", &module_viking },
#endif

#if defined(SUN4D)
	{ CPU_SUN4D, 4, 0, 0, ANY, "TMS390Z50 v0 or TMS390Z55",
	  &module_viking_sun4d },
#endif

	{ ANY, ANY, ANY, ANY, ANY, "Unknown", &module_unknown }
};
void
getcpuinfo(struct cpu_info *sc, int node)
{
	struct cpu_conf *mp;
	int i;
	int cpu_impl, cpu_vers;
	int mmu_impl, mmu_vers;

	/*
	 * Set up main criteria for selection from the CPU configuration
	 * table: the CPU implementation/version fields from the PSR
	 * register, and -- on sun4m machines -- the MMU
	 * implementation/version from the SCR register.
	 */
	if (sc->master) {
		i = getpsr();
		if (node == 0 ||
		    (cpu_impl =
		     prom_getpropint(node, "psr-implementation", -1)) == -1)
			cpu_impl = IU_IMPL(i);

		if (node == 0 ||
		    (cpu_vers = prom_getpropint(node, "psr-version", -1)) == -1)
			cpu_vers = IU_VERS(i);

		if (CPU_HAS_SRMMU) {
			i = lda(SRMMU_PCR, ASI_SRMMU);
			if (node == 0 ||
			    (mmu_impl =
			     prom_getpropint(node, "implementation", -1)) == -1)
				mmu_impl = SRMMU_IMPL(i);

			if (node == 0 ||
			    (mmu_vers = prom_getpropint(node, "version", -1)) == -1)
				mmu_vers = SRMMU_VERS(i);
		} else {
			mmu_impl = ANY;
			mmu_vers = ANY;
		}
	} else {
		/*
		 * Get CPU version/implementation from ROM. If not
		 * available, assume same as boot CPU.
		 */
		cpu_impl = prom_getpropint(node, "psr-implementation",
					   cpuinfo.cpu_impl);
		cpu_vers = prom_getpropint(node, "psr-version",
					   cpuinfo.cpu_vers);

		/* Get MMU version/implementation from ROM always */
		mmu_impl = prom_getpropint(node, "implementation", -1);
		mmu_vers = prom_getpropint(node, "version", -1);
	}

	for (mp = cpu_conf; ; mp++) {
		if (mp->arch != cputyp && mp->arch != ANY)
			continue;

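/*
 * MATCH(x) relies on textual expansion: MATCH(cpu_impl) becomes
 * (mp->cpu_impl == cpu_impl || mp->cpu_impl == ANY), comparing the
 * table entry against the local variable of the same name.
 */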
#define MATCH(x)	(mp->x == x || mp->x == ANY)
		if (!MATCH(cpu_impl) ||
		    !MATCH(cpu_vers) ||
		    !MATCH(mmu_impl) ||
		    !MATCH(mmu_vers))
			continue;
#undef MATCH

		/*
		 * Got CPU type.
		 */
		sc->cpu_impl = cpu_impl;
		sc->cpu_vers = cpu_vers;
		sc->mmu_impl = mmu_impl;
		sc->mmu_vers = mmu_vers;

		if (mp->minfo->cpu_match) {
			/* Additional fixups */
			mp->minfo->cpu_match(sc, mp->minfo, node);
		}
		if (sc->cpu_longname == 0)
			sc->cpu_longname = mp->name;

		if (sc->mmu_ncontext == 0)
			sc->mmu_ncontext = mp->minfo->ncontext;

		if (sc->cpu_type == 0)
			sc->cpu_type = mp->minfo->cpu_type;

		if (sc->cacheinfo.c_vactype == VAC_UNKNOWN)
			sc->cacheinfo.c_vactype = mp->minfo->vactype;

		if (sc->master && mp->minfo->getmid != NULL)
			bootmid = mp->minfo->getmid();

		mp->minfo->getcacheinfo(sc, node);

		if (node && sc->hz == 0 && !CPU_ISSUN4/*XXX*/) {
			sc->hz = prom_getpropint(node, "clock-frequency", 0);
			if (sc->hz == 0) {
				/*
				 * Try to find it in the OpenPROM root...
				 */
				sc->hz = prom_getpropint(findroot(),
						    "clock-frequency", 0);
			}
		}

		/*
		 * Copy CPU/MMU/Cache specific routines into cpu_info.
		 */
#define MPCOPY(x)	if (sc->x == 0) sc->x = mp->minfo->x;
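		/*
		 * Only fields still unset are copied from the module
		 * defaults, so anything the cpu_match() fixup installed
		 * above takes precedence.
		 */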
		MPCOPY(hotfix);
		MPCOPY(mmu_enable);
		MPCOPY(cache_enable);
		MPCOPY(get_syncflt);
		MPCOPY(get_asyncflt);
		MPCOPY(cache_flush);
		MPCOPY(sp_vcache_flush_page);
		MPCOPY(sp_vcache_flush_segment);
		MPCOPY(sp_vcache_flush_region);
		MPCOPY(sp_vcache_flush_context);
		MPCOPY(sp_vcache_flush_range);
		MPCOPY(ft_vcache_flush_page);
		MPCOPY(ft_vcache_flush_segment);
		MPCOPY(ft_vcache_flush_region);
		MPCOPY(ft_vcache_flush_context);
		MPCOPY(ft_vcache_flush_range);
		MPCOPY(pcache_flush_page);
		MPCOPY(pure_vcache_flush);
		MPCOPY(cache_flush_all);
		MPCOPY(memerr);
		MPCOPY(zero_page);
		MPCOPY(copy_page);
#undef MPCOPY
		/*
		 * Use the single-processor cache flush functions until
		 * all CPUs are initialized.
		 */
		sc->vcache_flush_page = sc->sp_vcache_flush_page;
		sc->vcache_flush_segment = sc->sp_vcache_flush_segment;
		sc->vcache_flush_region = sc->sp_vcache_flush_region;
		sc->vcache_flush_context = sc->sp_vcache_flush_context;
		(*sc->cache_flush_all)();
		return;
	}
	panic("Out of CPUs");
}

/*
 * The following tables convert <IU impl, IU version, FPU version> triples
 * into names for the CPU and FPU chip.  In most cases we do not need to
 * inspect the FPU version to name the IU chip, but there is one exception
 * (for Tsunami), and this makes the tables the same.
 *
 * The table contents (and much of the structure here) are from Guy Harris.
 *
 */
struct info {
	int	valid;
	int	iu_impl;
	int	iu_vers;
	int	fpu_vers;
	const char	*name;
};

/* XXX trim this table on a per-ARCH basis */
/* NB: table order matters here; specific numbers must appear before ANY. */
static struct info fpu_types[] = {
	/*
	 * Vendor 0, IU Fujitsu0.
	 */
	{ 1, 0x0, ANY, 0, "MB86910 or WTL1164/5" },
	{ 1, 0x0, ANY, 1, "MB86911 or WTL1164/5" },
	{ 1, 0x0, ANY, 2, "L64802 or ACT8847" },
	{ 1, 0x0, ANY, 3, "WTL3170/2" },
	{ 1, 0x0, 4,   4, "on-chip" },		/* Swift */
	{ 1, 0x0, 5,   5, "on-chip" },		/* TurboSparc */
	{ 1, 0x0, ANY, 4, "L64804" },

	/*
	 * Vendor 1, IU ROSS0/1 or Pinnacle.
	 */
	{ 1, 0x1, 0xf, 0, "on-chip" },		/* Pinnacle */
	{ 1, 0x1, 0xe, 0, "on-chip" },		/* Hypersparc RT 625/626 */
	{ 1, 0x1, ANY, 0, "L64812 or ACT8847" },
	{ 1, 0x1, ANY, 1, "L64814" },
	{ 1, 0x1, ANY, 2, "TMS390C602A" },
	{ 1, 0x1, ANY, 3, "RT602 or WTL3171" },

	/*
	 * Vendor 2, IU BIT0.
	 */
	{ 1, 0x2, ANY, 0, "B5010 or B5110/20 or B5210" },

	/*
	 * Vendor 4, Texas Instruments.
	 */
	{ 1, 0x4, ANY, 0, "on-chip" },		/* Viking */
	{ 1, 0x4, ANY, 4, "on-chip" },		/* Tsunami */

	/*
	 * Vendor 5, IU Matsushita0.
	 */
	{ 1, 0x5, ANY, 0, "on-chip" },

	/*
	 * Vendor 9, Weitek.
	 */
	{ 1, 0x9, ANY, 3, "on-chip" },

	{ 0 }
};

static const char *
fsrtoname(int impl, int vers, int fver)
{
	struct info *p;

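	/*
	 * Linear scan: the first entry whose IU implementation matches,
	 * whose IU version matches (or is ANY) and whose FPU version
	 * matches exactly wins.  NULL is returned for an unrecognized
	 * combination, presumably so the caller can print the raw FSR
	 * fields instead.
	 */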
	for (p = fpu_types; p->valid; p++) {
		if (p->iu_impl == impl &&
		    (p->iu_vers == vers || p->iu_vers == ANY) &&
		    (p->fpu_vers == fver))
			return (p->name);
	}
	return (NULL);
}

#ifdef DDB

#include <ddb/db_output.h>
#include <machine/db_machdep.h>

#include "ioconf.h"

/*
 * Dump CPU information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("%-4s %-10s %-8s %-10s %-10s %-10s %-10s\n",
	    "CPU#", "CPUINFO", "FLAGS", "CURLWP", "CURPROC", "FPLWP", "CPCB");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%-4d %-10p %-8x %-10p %-10p %-10p %-10p\n",
		    ci->ci_cpuid,
		    ci,
		    ci->flags,
		    ci->ci_curlwp,
		    ci->ci_curlwp == NULL ? NULL : ci->ci_curlwp->l_proc,
		    ci->fplwp,
		    ci->curpcb);
	}
}

#if defined(MULTIPROCESSOR)
/*
 * Dump each CPU's cross-call (xcall) message state from ddb.
 */
void
cpu_xcall_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

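	/*
	 * Our reading of the fields (see the sparc cross-call code for
	 * the authoritative semantics): ci->msg holds the last cross-call
	 * message posted to the CPU; "tag" identifies the message type,
	 * and "received"/"complete" track the delivery handshake.
	 */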
	db_printf("%-4s %-10s %-10s %-10s %-10s %-10s "
		    "%-4s %-4s %-4s\n",
	          "CPU#", "FUNC", "TRAP", "ARG0", "ARG1", "ARG2",
	            "TAG", "RECV", "COMPL");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%-4d %-10p %-10p 0x%-8x 0x%-8x 0x%-8x "
			    "%-4d %-4d %-4d\n",
		    ci->ci_cpuid,
		    ci->msg.u.xpmsg_func.func,
		    ci->msg.u.xpmsg_func.trap,
		    ci->msg.u.xpmsg_func.arg0,
		    ci->msg.u.xpmsg_func.arg1,
		    ci->msg.u.xpmsg_func.arg2,
		    ci->msg.tag,
		    ci->msg.received,
		    ci->msg.complete);
	}
}
#endif

#endif