xref: /illumos-gate/usr/src/uts/sun4u/cpu/us3_cheetah.c (revision f808c858)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/machthread.h>
#include <sys/cpu.h>
#include <sys/cmp.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <sys/cpuvar.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/prom_debug.h>
#include <sys/prom_plat.h>
#include <sys/cpu_module.h>
#include <sys/intreg.h>
#include <sys/clock.h>
#include <sys/platform_module.h>
#include <sys/machtrap.h>
#include <sys/ontrap.h>
#include <sys/panic.h>
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/ivintr.h>
#include <sys/atomic.h>
#include <sys/fm/protocol.h>
#include <sys/fm/cpu/UltraSPARC-III.h>
#include <vm/vm_dep.h>

#ifdef	CHEETAHPLUS_ERRATUM_25
#include <sys/cyclic.h>
#endif	/* CHEETAHPLUS_ERRATUM_25 */

/*
 * Set up trap handlers.
 */
void
cpu_init_trap(void)
{
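	/*
	 * Install the Cheetah-specific handlers: the PIL 15 cross-call
	 * handler, and the fast ECC error handlers for TL=0 and TL>0.
	 */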
	CH_SET_TRAP(tt_pil15, ch_pil15_interrupt_instr);

	CH_SET_TRAP(tt0_fecc, fecc_err_instr);
	CH_SET_TRAP(tt1_fecc, fecc_err_tl1_instr);
	CH_SET_TRAP(tt1_swtrap0, fecc_err_tl1_cont_instr);
}

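/*
 * Return the integer value of the named PROM property on the given node,
 * or deflt if the property is missing or is not int-sized.
 */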
static int
getintprop(pnode_t node, char *name, int deflt)
{
	int	value;

	switch (prom_getproplen(node, name)) {
	case sizeof (int):
		(void) prom_getprop(node, name, (caddr_t)&value);
		break;

	default:
		value = deflt;
		break;
	}

	return (value);
}

/*
 * Set the magic constants of the implementation.
 */
/*ARGSUSED*/
void
cpu_fiximp(pnode_t dnode)
{
	int i, a;

	static struct {
		char	*name;
		int	*var;
		int	defval;
	} prop[] = {
		"dcache-size", &dcache_size, CH_DCACHE_SIZE,
		"dcache-line-size", &dcache_linesize, CH_DCACHE_LSIZE,
		"icache-size", &icache_size, CH_ICACHE_SIZE,
		"icache-line-size", &icache_linesize, CH_ICACHE_LSIZE,
		"ecache-size", &ecache_size, CH_ECACHE_MAX_SIZE,
		"ecache-line-size", &ecache_alignsize, CH_ECACHE_MAX_LSIZE,
		"ecache-associativity", &ecache_associativity, CH_ECACHE_NWAY
	};

	extern int exec_lpg_disable, use_brk_lpg, use_stk_lpg, use_zmap_lpg;
	extern size_t max_shm_lpsize;

	for (i = 0; i < sizeof (prop) / sizeof (prop[0]); i++)
		*prop[i].var = getintprop(dnode, prop[i].name, prop[i].defval);

	ecache_setsize = ecache_size / ecache_associativity;

	vac_size = CH_VAC_SIZE;
	vac_mask = MMU_PAGEMASK & (vac_size - 1);
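	/*
	 * Compute vac_shift = log2(vac_size) by counting how many times
	 * vac_size can be halved before reaching zero.
	 */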
	i = 0; a = vac_size;
	while (a >>= 1)
		++i;
	vac_shift = i;
	shm_alignment = vac_size;
	vac = 1;

	/*
	 * Cheetah's large page support has problems with large numbers of
	 * large pages, so just disable large pages out-of-the-box.
	 */
	exec_lpg_disable = 1;
	use_brk_lpg = 0;
	use_stk_lpg = 0;
	use_zmap_lpg = 0;
	max_shm_lpsize = MMU_PAGESIZE;
}

void
send_mondo_set(cpuset_t set)
{
	int lo, busy, nack, shipped = 0;
	uint16_t i, cpuids[IDSR_BN_SETS];
	uint64_t idsr, nackmask = 0, busymask, curnack, curbusy;
	uint64_t starttick, endtick, tick, lasttick;
#if (NCPU > IDSR_BN_SETS)
	int index = 0;
	int ncpuids = 0;
#endif
#ifdef	CHEETAHPLUS_ERRATUM_25
	int recovered = 0;
	int cpuid;
#endif

	ASSERT(!CPUSET_ISNULL(set));
	starttick = lasttick = gettick();

#if (NCPU <= IDSR_BN_SETS)
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			shipit(i, shipped);
			nackmask |= IDSR_NACK_BIT(shipped);
			cpuids[shipped++] = i;
			CPUSET_DEL(set, i);
			if (CPUSET_ISNULL(set))
				break;
		}
	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);
#else
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			ncpuids++;

			/*
			 * Ship only to the first IDSR_BN_SETS CPUs.  If we
			 * find we have shipped to more than IDSR_BN_SETS
			 * CPUs, set "index" to the highest numbered CPU in
			 * the set so we can ship to other CPUs a bit later on.
			 */
			if (shipped < IDSR_BN_SETS) {
				shipit(i, shipped);
				nackmask |= IDSR_NACK_BIT(shipped);
				cpuids[shipped++] = i;
				CPUSET_DEL(set, i);
				if (CPUSET_ISNULL(set))
					break;
			} else
				index = (int)i;
		}

	CPU_STATS_ADDQ(CPU, sys, xcalls, ncpuids);
#endif

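	/*
	 * Poll the IDSR until every shipped mondo has been accepted,
	 * re-shipping any that were NACKed and panicking if the whole
	 * operation exceeds the xc_tick_limit timeout.
	 */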
	busymask = IDSR_NACK_TO_BUSY(nackmask);
	busy = nack = 0;
	endtick = starttick + xc_tick_limit;
	for (;;) {
		idsr = getidsr();
#if (NCPU <= IDSR_BN_SETS)
		if (idsr == 0)
			break;
#else
		if (idsr == 0 && shipped == ncpuids)
			break;
#endif
		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a
		 * breakpoint.  Adjust endtick accordingly to avoid a
		 * panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
#ifdef	CHEETAHPLUS_ERRATUM_25
			cpuid = -1;
			for (i = 0; i < IDSR_BN_SETS; i++) {
				if (idsr & (IDSR_NACK_BIT(i) |
				    IDSR_BUSY_BIT(i))) {
					cpuid = cpuids[i];
					break;
				}
			}
			if (cheetah_sendmondo_recover && cpuid != -1 &&
			    recovered == 0) {
				if (mondo_recover(cpuid, i)) {
					/*
					 * We either claimed the whole memory
					 * or the full scan is disabled.
					 */
					recovered++;
				}
				tick = gettick();
				endtick = tick + xc_tick_limit;
				lasttick = tick;
				/*
				 * Recheck idsr.
				 */
				continue;
			} else
#endif	/* CHEETAHPLUS_ERRATUM_25 */
			{
				cmn_err(CE_CONT, "send mondo timeout "
				    "[%d NACK %d BUSY]\nIDSR 0x%" PRIx64
				    "  cpuids:", nack, busy, idsr);
				for (i = 0; i < IDSR_BN_SETS; i++) {
					if (idsr & (IDSR_NACK_BIT(i) |
					    IDSR_BUSY_BIT(i))) {
						cmn_err(CE_CONT, " 0x%x",
						    cpuids[i]);
					}
				}
				cmn_err(CE_CONT, "\n");
				cmn_err(CE_PANIC, "send_mondo_set: timeout");
			}
		}
		curnack = idsr & nackmask;
		curbusy = idsr & busymask;
#if (NCPU > IDSR_BN_SETS)
		if (shipped < ncpuids) {
			uint64_t cpus_left;
			uint16_t next = (uint16_t)index;

			cpus_left = ~(IDSR_NACK_TO_BUSY(curnack) | curbusy) &
			    busymask;

			if (cpus_left) {
				do {
					/*
					 * Sequence through and ship to the
					 * remainder of the CPUs in the system
					 * (i.e., other than the first
					 * IDSR_BN_SETS) in reverse order.
					 */
					lo = lowbit(cpus_left) - 1;
					i = IDSR_BUSY_IDX(lo);
					shipit(next, i);
					shipped++;
					cpuids[i] = next;

					/*
					 * If we've processed all the CPUs,
					 * exit the loop now and save
					 * instructions.
					 */
					if (shipped == ncpuids)
						break;

					for ((index = ((int)next - 1));
					    index >= 0; index--)
						if (CPU_IN_SET(set, index)) {
							next = (uint16_t)index;
							break;
						}

					cpus_left &= ~(1ull << lo);
				} while (cpus_left);
#ifdef	CHEETAHPLUS_ERRATUM_25
				/*
				 * Clear recovered because we are sending to
				 * a new set of targets.
				 */
				recovered = 0;
#endif
				continue;
			}
		}
#endif
		if (curbusy) {
			busy++;
			continue;
		}

#ifdef SEND_MONDO_STATS
		{
			int n = gettick() - starttick;
			if (n < 8192)
				x_nack_stimes[n >> 7]++;
		}
#endif
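		/*
		 * Delay roughly one microsecond (sys_clock_mhz ticks)
		 * before re-shipping mondos to the CPUs that NACKed us.
		 */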
		while (gettick() < (tick + sys_clock_mhz))
			;
		do {
			lo = lowbit(curnack) - 1;
			i = IDSR_NACK_IDX(lo);
			shipit(cpuids[i], i);
			curnack &= ~(1ull << lo);
		} while (curnack);
		nack++;
		busy = 0;
	}
#ifdef SEND_MONDO_STATS
	{
		int n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else
			x_set_ltimes[(n >> 13) & 0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}

/*
 * Handles error logging for implementation-specific error types.
 */
/*ARGSUSED*/
int
cpu_impl_async_log_err(void *flt, errorq_elem_t *eqep)
{
	/* There are no error types specific to Cheetah only. */
	return (CH_ASYNC_LOG_UNKNOWN);
}

/*
 * Figure out if the Ecache is direct-mapped (Cheetah, or Cheetah+ with the
 * Ecache control ECCR_ASSOC bit off) or 2-way (Cheetah+ with ECCR_ASSOC on).
 * We need to do this on the fly because we may have mixed Cheetah+'s with
 * both direct-mapped and 2-way Ecaches.
 */
int
cpu_ecache_nway(void)
{
	return (CH_ECACHE_NWAY);
}

/*
 * Note the order in which entries appear in this table: Fatal Errors (PERR,
 * IERR, ISAP, EMU) first, then orphaned UCU/UCC, then the AFAR overwrite
 * policy, and finally IVU, IVC.  The AFAR overwrite policy is:
 *   UCU,UCC > UE,EDU,WDU,CPU > CE,EDC,EMC,WDC,CPC > TO,BERR
 */
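/*
 * Each entry lists, in order: the AFSR error bit, a short name, the trap
 * type(s) on which the error is expected, the fault type, a description,
 * the ereport payload, and the ereport class.
 */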
ecc_type_to_info_t ecc_type_to_info[] = {

	/* Fatal Errors */
	C_AFSR_PERR,	"PERR ",	ECC_ALL_TRAPS,	CPU_FATAL,
		"PERR Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM2,
		FM_EREPORT_CPU_USIII_PERR,
	C_AFSR_IERR,	"IERR ",	ECC_ALL_TRAPS,	CPU_FATAL,
		"IERR Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM2,
		FM_EREPORT_CPU_USIII_IERR,
	C_AFSR_ISAP,	"ISAP ",	ECC_ALL_TRAPS,	CPU_FATAL,
		"ISAP Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_ISAP,
	C_AFSR_EMU,	"EMU ",		ECC_ASYNC_TRAPS, CPU_FATAL,
		"EMU Fatal",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_EMU,

	/* Orphaned UCC/UCU Errors */
	C_AFSR_UCU,	"OUCU ",	ECC_ORPH_TRAPS, CPU_ORPH,
		"Orphaned UCU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCU,
	C_AFSR_UCC,	"OUCC ",	ECC_ORPH_TRAPS, CPU_ORPH,
		"Orphaned UCC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCC,

	/* UCU, UCC */
	C_AFSR_UCU,	"UCU ",		ECC_F_TRAP,	CPU_UE_ECACHE,
		"UCU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCU,
	C_AFSR_UCC,	"UCC ",		ECC_F_TRAP,	CPU_CE_ECACHE,
		"UCC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCC,

	/* UE, EDU:ST, EDU:BLD, WDU, CPU */
	C_AFSR_UE,	"UE ",		ECC_ASYNC_TRAPS, CPU_UE,
		"Uncorrectable system bus (UE)",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_UE,
	C_AFSR_EDU,	"EDU ",		ECC_C_TRAP,	CPU_UE_ECACHE_RETIRE,
		"EDU:ST",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDUST,
	C_AFSR_EDU,	"EDU ",		ECC_D_TRAP,	CPU_UE_ECACHE_RETIRE,
		"EDU:BLD",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDUBL,
	C_AFSR_WDU,	"WDU ",		ECC_C_TRAP,	CPU_UE_ECACHE_RETIRE,
		"WDU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_WDU,
	C_AFSR_CPU,	"CPU ",		ECC_C_TRAP,	CPU_UE_ECACHE,
		"CPU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_CPU,

	/* CE, EDC, EMC, WDC, CPC */
	C_AFSR_CE,	"CE ",		ECC_C_TRAP,	CPU_CE,
		"Corrected system bus (CE)",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_CE,
	C_AFSR_EDC,	"EDC ",		ECC_C_TRAP,	CPU_CE_ECACHE,
		"EDC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDC,
	C_AFSR_EMC,	"EMC ",		ECC_C_TRAP,	CPU_EMC,
		"EMC",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_EMC,
	C_AFSR_WDC,	"WDC ",		ECC_C_TRAP,	CPU_CE_ECACHE,
		"WDC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_WDC,
	C_AFSR_CPC,	"CPC ",		ECC_C_TRAP,	CPU_CE_ECACHE,
		"CPC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_CPC,

	/* TO, BERR */
	C_AFSR_TO,	"TO ",		ECC_ASYNC_TRAPS, CPU_TO,
		"Timeout (TO)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_TO,
	C_AFSR_BERR,	"BERR ",	ECC_ASYNC_TRAPS, CPU_BERR,
		"Bus Error (BERR)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_BERR,

	/* IVU, IVC */
	C_AFSR_IVU,	"IVU ",		ECC_C_TRAP,	CPU_IV,
		"IVU",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IVU,
	C_AFSR_IVC,	"IVC ",		ECC_C_TRAP,	CPU_IV,
		"IVC",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IVC,

	0,		NULL,		0,		0,
		NULL,
		FM_EREPORT_PAYLOAD_UNKNOWN,
		FM_EREPORT_CPU_USIII_UNKNOWN,
};

/*
 * Prioritized list of Error bits for AFAR overwrite.
 * See Cheetah PRM P.6.1
 *   Class 4:  UCC, UCU
 *   Class 3:  UE, EDU, EMU, WDU, CPU
 *   Class 2:  CE, EDC, EMC, WDC, CPC
 *   Class 1:  TO, BERR
 */
uint64_t afar_overwrite[] = {
	C_AFSR_UCC | C_AFSR_UCU,
	C_AFSR_UE | C_AFSR_EDU | C_AFSR_EMU | C_AFSR_WDU | C_AFSR_CPU,
	C_AFSR_CE | C_AFSR_EDC | C_AFSR_EMC | C_AFSR_WDC | C_AFSR_CPC,
	C_AFSR_TO | C_AFSR_BERR,
	0
};

/*
 * Prioritized list of Error bits for ESYND overwrite.
 * See Cheetah PRM P.6.2
 *   Class 2:  UE, IVU, EDU, WDU, UCU, CPU
 *   Class 1:  CE, IVC, EDC, WDC, UCC, CPC
 */
uint64_t esynd_overwrite[] = {
	C_AFSR_UE | C_AFSR_IVU | C_AFSR_EDU | C_AFSR_WDU | C_AFSR_UCU |
	    C_AFSR_CPU,
	C_AFSR_CE | C_AFSR_IVC | C_AFSR_EDC | C_AFSR_WDC | C_AFSR_UCC |
	    C_AFSR_CPC,
	0
};

/*
 * Prioritized list of Error bits for MSYND overwrite.
 * See Cheetah PRM P.6.3
 *   Class 2:  EMU
 *   Class 1:  EMC
 */
uint64_t msynd_overwrite[] = {
	C_AFSR_EMU,
	C_AFSR_EMC,
	0
};

/*
 * Change cpu speed bits -- the new speed will be normal-speed/divisor.
 *
 * The Jalapeno memory controllers are required to drain outstanding
 * memory transactions within 32 JBus clocks in order to be ready
 * to enter Estar mode.  In some corner cases, however, that time
 * fell short.
 *
 * A safe software solution is to force the MCU to act as if in Estar mode,
 * then delay 1us (in ppm code) prior to asserting the J_CHNG_L signal.
 * To reverse the effect, upon exiting Estar, software restores the
 * MCU to its original state.
 */
/* ARGSUSED1 */
void
cpu_change_speed(uint64_t divisor, uint64_t arg2)
{
	bus_config_eclk_t *bceclk;
	uint64_t	reg;

	for (bceclk = bus_config_eclk; bceclk->divisor; bceclk++) {
		if (bceclk->divisor != divisor)
			continue;
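		/*
		 * Program the matching clock-ratio bits into the Safari
		 * configuration register's Estar clock field.
		 */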
		reg = get_safari_config();
		reg &= ~SAFARI_CONFIG_ECLK_MASK;
		reg |= bceclk->mask;
		set_safari_config(reg);
		CPU->cpu_m.divisor = (uchar_t)divisor;
		return;
	}
	/*
	 * We will reach here only if OBP and the kernel don't agree on
	 * the speeds supported by the CPU.
	 */
	cmn_err(CE_WARN, "cpu_change_speed: bad divisor %" PRIu64, divisor);
}

/*
 * CPU private initialization.  This includes allocating the cpu_private
 * data structure, initializing it, and initializing the scrubber for this
 * cpu.  This function calls cpu_init_ecache_scrub_dr to init the scrubber.
 * We use kmem_cache_create for the Cheetah private data structure because
 * it needs to be allocated on a PAGESIZE (8192) byte boundary.
 */
void
cpu_init_private(struct cpu *cp)
{
	cheetah_private_t *chprp;
	int i;

	ASSERT(CPU_PRIVATE(cp) == NULL);

	/* LINTED: E_TRUE_LOGICAL_EXPR */
	ASSERT((offsetof(cheetah_private_t, chpr_tl1_err_data) +
	    sizeof (ch_err_tl1_data_t) * CH_ERR_TL1_TLMAX) <= PAGESIZE);

	/*
	 * Running with a Cheetah+, Jaguar, or Panther on a Cheetah CPU
	 * machine is not a supported configuration.  Attempting to do so
	 * may result in unpredictable failures (e.g. running Cheetah+
	 * CPUs with Cheetah E$ disp flush) so don't allow it.
	 *
	 * This is just defensive code since this configuration mismatch
	 * should have been caught prior to OS execution.
	 */
	if (!IS_CHEETAH(cpunodes[cp->cpu_id].implementation)) {
		cmn_err(CE_PANIC, "CPU%d: UltraSPARC-III+/IV/IV+ not"
		    " supported on UltraSPARC-III code\n", cp->cpu_id);
	}

	/*
	 * If the ch_private_cache has not been created, create it.
	 */
	if (ch_private_cache == NULL) {
		ch_private_cache = kmem_cache_create("ch_private_cache",
		    sizeof (cheetah_private_t), PAGESIZE, NULL, NULL,
		    NULL, NULL, static_arena, 0);
	}

	chprp = CPU_PRIVATE(cp) = kmem_cache_alloc(ch_private_cache, KM_SLEEP);

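	/*
	 * Mark each logout area's AFAR with the LOGOUT_INVALID sentinel so
	 * the error handlers can tell that no valid logout data is present.
	 */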
	bzero(chprp, sizeof (cheetah_private_t));
	chprp->chpr_fecctl0_logout.clo_data.chd_afar = LOGOUT_INVALID;
	chprp->chpr_cecc_logout.clo_data.chd_afar = LOGOUT_INVALID;
	chprp->chpr_async_logout.clo_data.chd_afar = LOGOUT_INVALID;
	for (i = 0; i < CH_ERR_TL1_TLMAX; i++)
		chprp->chpr_tl1_err_data[i].ch_err_tl1_logout.clo_data.chd_afar
		    = LOGOUT_INVALID;

	chprp->chpr_icache_size = CH_ICACHE_SIZE;
	chprp->chpr_icache_linesize = CH_ICACHE_LSIZE;

	cpu_init_ecache_scrub_dr(cp);

	chprp->chpr_ec_set_size = cpunodes[cp->cpu_id].ecache_size /
	    cpu_ecache_nway();

	adjust_hw_copy_limits(cpunodes[cp->cpu_id].ecache_size);
	ch_err_tl1_paddrs[cp->cpu_id] = va_to_pa(chprp);
	ASSERT(ch_err_tl1_paddrs[cp->cpu_id] != -1);
}

/*
 * Clear the error state registers for this CPU.
 * For Cheetah, just clear the AFSR.
 */
void
set_cpu_error_state(ch_cpu_errors_t *cpu_error_regs)
{
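	/*
	 * AFSR bits are write-one-to-clear; masking out the fatal error
	 * bits leaves them set in the register for later diagnosis.
	 */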
	set_asyncflt(cpu_error_regs->afsr & ~C_AFSR_FATAL_ERRS);
}

/*
 * For Cheetah, the error recovery code uses an alternate flush area in the
 * TL>0 fast ECC handler.  ecache_tl1_flushaddr is the physical address of
 * this exclusive displacement flush area.
 */
uint64_t ecache_tl1_flushaddr = (uint64_t)-1; /* physaddr for E$ flushing */

/*
 * Allocate and initialize the exclusive displacement flush area.
 * Must be called before startup_bop_gone().
 */
caddr_t
ecache_init_scrub_flush_area(caddr_t alloc_base)
{
	unsigned size = 2 * CH_ECACHE_8M_SIZE;
	caddr_t tmp_alloc_base = alloc_base;
	caddr_t flush_alloc_base =
	    (caddr_t)roundup((uintptr_t)alloc_base, size);
	caddr_t ecache_tl1_virtaddr;

	/*
	 * Allocate the physical memory for the exclusive flush area.
	 *
	 * We need to allocate an exclusive flush area that is twice the
	 * largest supported E$ size, physically contiguous, and
	 * aligned on twice the largest E$ size boundary.
	 *
	 * Memory allocated via BOP_ALLOC is included in the "cage"
	 * from the DR perspective and, because of this, its physical
	 * address will never change and the memory will not be
	 * removed.
	 *
	 * BOP_ALLOC takes 4 arguments: bootops, virtual address hint,
	 * size of the area to allocate, and alignment of the area to
	 * allocate.  It returns zero if the allocation fails, or the
	 * virtual address for a successful allocation.  Memory BOP_ALLOC'd
	 * is physically contiguous.
	 */
	if ((ecache_tl1_virtaddr = (caddr_t)BOP_ALLOC(bootops,
	    flush_alloc_base, size, size)) != NULL) {

		tmp_alloc_base =
		    (caddr_t)roundup((uintptr_t)(ecache_tl1_virtaddr + size),
		    ecache_alignsize);

		/*
		 * Get the physical address of the exclusive flush area.
		 */
		ecache_tl1_flushaddr = va_to_pa(ecache_tl1_virtaddr);

	} else {
		ecache_tl1_virtaddr = (caddr_t)-1;
		cmn_err(CE_NOTE, "!ecache_init_scrub_flush_area failed\n");
	}

	return (tmp_alloc_base);
}

/*
 * Update cpu_offline_set so the scrubber knows which cpus are offline.
 */
/*ARGSUSED*/
int
cpu_scrub_cpu_setup(cpu_setup_t what, int cpuid, void *arg)
{
	switch (what) {
	case CPU_ON:
	case CPU_INIT:
		CPUSET_DEL(cpu_offline_set, cpuid);
		break;
	case CPU_OFF:
		CPUSET_ADD(cpu_offline_set, cpuid);
		break;
	default:
		break;
	}
	return (0);
}
733