xref: /linux/arch/sparc/kernel/traps_64.c (revision 44f57d78)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* arch/sparc64/kernel/traps.c
3  *
4  * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
5  * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6  */
7 
8 /*
9  * I like traps on v9, :))))
10  */
11 
12 #include <linux/extable.h>
13 #include <linux/sched/mm.h>
14 #include <linux/sched/debug.h>
15 #include <linux/linkage.h>
16 #include <linux/kernel.h>
17 #include <linux/signal.h>
18 #include <linux/smp.h>
19 #include <linux/mm.h>
20 #include <linux/init.h>
21 #include <linux/kdebug.h>
22 #include <linux/ftrace.h>
23 #include <linux/reboot.h>
24 #include <linux/gfp.h>
25 #include <linux/context_tracking.h>
26 
27 #include <asm/smp.h>
28 #include <asm/delay.h>
29 #include <asm/ptrace.h>
30 #include <asm/oplib.h>
31 #include <asm/page.h>
32 #include <asm/pgtable.h>
33 #include <asm/unistd.h>
34 #include <linux/uaccess.h>
35 #include <asm/fpumacro.h>
36 #include <asm/lsu.h>
37 #include <asm/dcu.h>
38 #include <asm/estate.h>
39 #include <asm/chafsr.h>
40 #include <asm/sfafsr.h>
41 #include <asm/psrcompat.h>
42 #include <asm/processor.h>
43 #include <asm/timer.h>
44 #include <asm/head.h>
45 #include <asm/prom.h>
46 #include <asm/memctrl.h>
47 #include <asm/cacheflush.h>
48 #include <asm/setup.h>
49 
50 #include "entry.h"
51 #include "kernel.h"
52 #include "kstack.h"
53 
54 /* When an irrecoverable trap occurs at tl > 0, the trap entry
55  * code logs the trap state registers at every level in the trap
56  * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
57  * is as follows:
58  */
/* In-memory layout of the trap-state log saved by the trap entry code
 * (see comment above).  One entry per trap level, up to 4 levels, with
 * the number of valid entries recorded in 'tl'.
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* %tstate at this trap level */
		unsigned long tpc;	/* trap PC */
		unsigned long tnpc;	/* trap next-PC */
		unsigned long tt;	/* trap type register */
	} trapstack[4];
	unsigned long tl;		/* trap level at which the error hit */
};
68 
69 static void dump_tl1_traplog(struct tl1_traplog *p)
70 {
71 	int i, limit;
72 
73 	printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
74 	       "dumping track stack.\n", p->tl);
75 
76 	limit = (tlb_type == hypervisor) ? 2 : 4;
77 	for (i = 0; i < limit; i++) {
78 		printk(KERN_EMERG
79 		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
80 		       "TNPC[%016lx] TT[%lx]\n",
81 		       i + 1,
82 		       p->trapstack[i].tstate, p->trapstack[i].tpc,
83 		       p->trapstack[i].tnpc, p->trapstack[i].tt);
84 		printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
85 	}
86 }
87 
88 void bad_trap(struct pt_regs *regs, long lvl)
89 {
90 	char buffer[36];
91 
92 	if (notify_die(DIE_TRAP, "bad trap", regs,
93 		       0, lvl, SIGTRAP) == NOTIFY_STOP)
94 		return;
95 
96 	if (lvl < 0x100) {
97 		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
98 		die_if_kernel(buffer, regs);
99 	}
100 
101 	lvl -= 0x100;
102 	if (regs->tstate & TSTATE_PRIV) {
103 		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
104 		die_if_kernel(buffer, regs);
105 	}
106 	if (test_thread_flag(TIF_32BIT)) {
107 		regs->tpc &= 0xffffffff;
108 		regs->tnpc &= 0xffffffff;
109 	}
110 	force_sig_fault(SIGILL, ILL_ILLTRP,
111 			(void __user *)regs->tpc, lvl, current);
112 }
113 
114 void bad_trap_tl1(struct pt_regs *regs, long lvl)
115 {
116 	char buffer[36];
117 
118 	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
119 		       0, lvl, SIGTRAP) == NOTIFY_STOP)
120 		return;
121 
122 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
123 
124 	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
125 	die_if_kernel (buffer, regs);
126 }
127 
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Report the source location of a BUG() before the trap it raises is
 * taken; only built when verbose BUG reporting is configured.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);	/* make sure the report reaches the console */
	printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif
136 
/* Optional platform callback that translates an ECC syndrome code plus
 * physical address into a DIMM label.  Registration and use are
 * serialized by dimm_handler_lock.
 */
static DEFINE_SPINLOCK(dimm_handler_lock);
static dimm_printer_t dimm_handler;
139 
140 static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
141 {
142 	unsigned long flags;
143 	int ret = -ENODEV;
144 
145 	spin_lock_irqsave(&dimm_handler_lock, flags);
146 	if (dimm_handler) {
147 		ret = dimm_handler(synd_code, paddr, buf, buflen);
148 	} else if (tlb_type == spitfire) {
149 		if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
150 			ret = -EINVAL;
151 		else
152 			ret = 0;
153 	} else
154 		ret = -ENODEV;
155 	spin_unlock_irqrestore(&dimm_handler_lock, flags);
156 
157 	return ret;
158 }
159 
160 int register_dimm_printer(dimm_printer_t func)
161 {
162 	unsigned long flags;
163 	int ret = 0;
164 
165 	spin_lock_irqsave(&dimm_handler_lock, flags);
166 	if (!dimm_handler)
167 		dimm_handler = func;
168 	else
169 		ret = -EEXIST;
170 	spin_unlock_irqrestore(&dimm_handler_lock, flags);
171 
172 	return ret;
173 }
174 EXPORT_SYMBOL_GPL(register_dimm_printer);
175 
176 void unregister_dimm_printer(dimm_printer_t func)
177 {
178 	unsigned long flags;
179 
180 	spin_lock_irqsave(&dimm_handler_lock, flags);
181 	if (dimm_handler == func)
182 		dimm_handler = NULL;
183 	spin_unlock_irqrestore(&dimm_handler_lock, flags);
184 }
185 EXPORT_SYMBOL_GPL(unregister_dimm_printer);
186 
/* Spitfire instruction access exception at TL=0: fatal if taken from
 * privileged mode, otherwise the task gets SIGSEGV/SEGV_MAPERR at the
 * faulting PC.  sfsr/sfar are the synchronous fault status/address
 * registers, logged before a kernel death.
 */
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		/* Compat tasks: keep PC/NPC inside the 32-bit space. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGSEGV, SEGV_MAPERR,
			(void __user *)regs->tpc, 0, current);
out:
	exception_exit(prev_state);
}
209 
210 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
211 {
212 	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
213 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
214 		return;
215 
216 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
217 	spitfire_insn_access_exception(regs, sfsr, sfar);
218 }
219 
/* Sun4v (hypervisor) instruction access exception at TL=0.  type_ctx
 * packs the HV fault type in the upper 16 bits and the MMU context in
 * the lower 16.  Fatal in privileged mode, otherwise SIGSEGV at the
 * faulting address.
 */
void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		/* Compat tasks: keep PC/NPC inside the 32-bit space. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *) addr, 0, current);
}
242 
243 void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
244 {
245 	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
246 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
247 		return;
248 
249 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
250 	sun4v_insn_access_exception(regs, addr, type_ctx);
251 }
252 
/* Decode the instruction at regs->tpc and, if it is a load through a
 * non-faulting ASI (the ASI_PNF family), emulate it and return true so
 * the caller suppresses the fault.  Returns false for everything else,
 * including stores through a non-faulting ASI (which are not valid)
 * and when the instruction cannot be read from user space.
 */
bool is_no_fault_exception(struct pt_regs *regs)
{
	unsigned char asi;
	u32 insn;

	if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT)
		return false;

	/*
	 * Must do a little instruction decoding here in order to
	 * decide on a course of action. The bits of interest are:
	 *  insn[31:30] = op, where 3 indicates the load/store group
	 *  insn[24:19] = op3, which identifies individual opcodes
	 *  insn[13] indicates an immediate offset
	 *  op3[4]=1 identifies alternate space instructions
	 *  op3[5:4]=3 identifies floating point instructions
	 *  op3[2]=1 identifies stores
	 * See "Opcode Maps" in the appendix of any Sparc V9
	 * architecture spec for full details.
	 */
	if ((insn & 0xc0800000) == 0xc0800000) {    /* op=3, op3[4]=1   */
		if (insn & 0x2000)		    /* immediate offset */
			asi = (regs->tstate >> 24); /* saved %asi       */
		else
			asi = (insn >> 5);	    /* immediate asi    */
		/* Masking with 0xf2 matches all four ASI_*NF* variants
		 * (primary/secondary, little-endian or not).
		 */
		if ((asi & 0xf2) == ASI_PNF) {
			if (insn & 0x1000000) {     /* op3[5:4]=3       */
				handle_ldf_stq(insn, regs);
				return true;
			} else if (insn & 0x200000) { /* op3[2], stores */
				return false;
			}
			handle_ld_nf(insn, regs);
			return true;
		}
	}
	return false;
}
291 
292 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
293 {
294 	enum ctx_state prev_state = exception_enter();
295 
296 	if (notify_die(DIE_TRAP, "data access exception", regs,
297 		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
298 		goto out;
299 
300 	if (regs->tstate & TSTATE_PRIV) {
301 		/* Test if this comes from uaccess places. */
302 		const struct exception_table_entry *entry;
303 
304 		entry = search_exception_tables(regs->tpc);
305 		if (entry) {
306 			/* Ouch, somebody is trying VM hole tricks on us... */
307 #ifdef DEBUG_EXCEPTIONS
308 			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
309 			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
310 			       regs->tpc, entry->fixup);
311 #endif
312 			regs->tpc = entry->fixup;
313 			regs->tnpc = regs->tpc + 4;
314 			goto out;
315 		}
316 		/* Shit... */
317 		printk("spitfire_data_access_exception: SFSR[%016lx] "
318 		       "SFAR[%016lx], going.\n", sfsr, sfar);
319 		die_if_kernel("Dax", regs);
320 	}
321 
322 	if (is_no_fault_exception(regs))
323 		return;
324 
325 	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)sfar, 0, current);
326 out:
327 	exception_exit(prev_state);
328 }
329 
330 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
331 {
332 	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
333 		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
334 		return;
335 
336 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
337 	spitfire_data_access_exception(regs, sfsr, sfar);
338 }
339 
/* Sun4v (hypervisor) data access exception at TL=0.  type_ctx packs
 * the HV fault type in the upper 16 bits and the MMU context in the
 * lower 16.  Kernel faults may be fixed up via the exception tables;
 * user faults are translated into a signal keyed off the fault type.
 */
void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		/* Compat tasks: keep PC/NPC inside the 32-bit space. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	if (is_no_fault_exception(regs))
		return;

	/* MCD (Memory Corruption Detection) disabled trap (TT=0x19) in HV
	 * is vectored through data access exception trap with fault type
	 * set to HV_FAULT_TYPE_MCD_DIS. Check for MCD disabled trap.
	 * Accessing an address with invalid ASI for the address, for
	 * example setting an ADI tag on an address with ASI_MCD_PRIMARY
	 * when TTE.mcd is not set for the VA, is also vectored into
	 * kernel by HV as data access exception with fault type set to
	 * HV_FAULT_TYPE_INV_ASI.
	 */
	switch (type) {
	case HV_FAULT_TYPE_INV_ASI:
		force_sig_fault(SIGILL, ILL_ILLADR, (void __user *)addr, 0,
				current);
		break;
	case HV_FAULT_TYPE_MCD_DIS:
		force_sig_fault(SIGSEGV, SEGV_ACCADI, (void __user *)addr, 0,
				current);
		break;
	default:
		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)addr, 0,
				current);
		break;
	}
}
402 
403 void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
404 {
405 	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
406 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
407 		return;
408 
409 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
410 	sun4v_data_access_exception(regs, addr, type_ctx);
411 }
412 
413 #ifdef CONFIG_PCI
414 #include "pci_impl.h"
415 #endif
416 
/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	/* Only spitfire has these diagnostic tag ASIs. */
	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em: zero every I-cache and D-cache tag.  The tag
	 * arrays span two pages with 32-byte lines.
	 */
	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable I/D caches and their parity checking in the LSU
	 * control register.
	 */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}
442 
/* Re-arm all E-state error reporting (the error trap entry code
 * disables it) by writing ESTATE_ERR_ALL to the E-state error enable
 * register.
 */
static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}
451 
/* Map an 8-bit UDB ECC syndrome to the failing bit, indexed by the low
 * byte of the UDB error register (see spitfire_log_udb_syndrome()).
 * NOTE(review): values >= 0x40 appear to mark special cases (no error /
 * multi-bit / check-bit failures) rather than a single data bit --
 * confirm against the UltraSPARC ECC syndrome documentation.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

/* Fallback "DIMM name" used when sprintf_dimm() cannot resolve one. */
static char *syndrome_unknown = "<Unknown>";
488 
/* For each UDB (high/low) whose error register has 'bit' set, look up
 * the ECC syndrome and print which memory module (DIMM) was involved,
 * falling back to "<Unknown>" if no translation is available.
 */
static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

}
517 
/* Log a correctable ECC error (CEE): print the raw registers, name the
 * DIMM(s), notify listeners, and re-arm E-state error reporting.
 * Correctable errors are non-fatal, so no signal/die handling here.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{

	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
538 
539 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
540 {
541 	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
542 	       "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
543 	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
544 
545 	/* XXX add more human friendly logging of the error status
546 	 * XXX as is implemented for cheetah
547 	 */
548 
549 	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
550 
551 	/* We always log it, even if someone is listening for this
552 	 * trap.
553 	 */
554 	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
555 		   0, tt, SIGTRAP);
556 
557 	if (regs->tstate & TSTATE_PRIV) {
558 		if (tl1)
559 			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
560 		die_if_kernel("UE", regs);
561 	}
562 
563 	/* XXX need more intelligent processing here, such as is implemented
564 	 * XXX for cheetah errors, in fact if the E-cache still holds the
565 	 * XXX line with bad parity this will loop
566 	 */
567 
568 	spitfire_clean_and_reenable_l1_caches();
569 	spitfire_enable_estate_errors();
570 
571 	if (test_thread_flag(TIF_32BIT)) {
572 		regs->tpc &= 0xffffffff;
573 		regs->tnpc &= 0xffffffff;
574 	}
575 	force_sig_fault(SIGBUS, BUS_OBJERR, (void *)0, 0, current);
576 }
577 
/* Top-level Spitfire access-error dispatcher.  status_encoded packs
 * the AFSR, trap type, TL>1 flag and both UDB error registers (see the
 * SFSTAT_* masks); afar is the fault address.  Routes the error to the
 * UE and/or CEE loggers, with a short-circuit for faults induced by an
 * in-progress PCI config-space poke.
 */
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	/* Unpack the fields the trap entry code merged into one word. */
	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	/* A data access error during a PCI poke is expected: flag it
	 * for the poking cpu and skip the faulting instruction.
	 */
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
630 
631 int cheetah_pcache_forced_on;
632 
/* Turn on the Cheetah prefetch cache on this cpu by setting the
 * P-cache enable bits in the D-cache unit control register
 * (read-modify-write of ASI_DCU_CONTROL_REG).
 */
void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
649 
/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;	/* base of the displacement-flush span */
static unsigned long ecache_flush_linesize;	/* smallest E-cache line size found */
static unsigned long ecache_flush_size;		/* 2x the largest E-cache size found */
654 
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

/* One (AFSR bit mask, human-readable description) pair; tables below
 * are terminated by a { 0, NULL } sentinel.
 */
struct afsr_error_table {
	unsigned long mask;
	const char *name;
};
663 
/* Human-readable descriptions for each AFSR error bit, shared by the
 * per-chip tables below.
 */
static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
/* Plain Cheetah (US-III) AFSR decode table, highest priority first. */
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
/* Cheetah+ AFSR decode table: the Cheetah set plus the CHPAFSR_* bits. */
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
/* Jalapeno/Serrano AFSR decode table (JBUS-based chips). */
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Selected at boot by cheetah_ecache_flush_init() based on cpu type. */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;	/* union of valid AFSR error bits */

/* Per-cpu error scoreboard: two cheetah_err_info slots per cpu
 * (TL=0 and TL>0), allocated in cheetah_ecache_flush_init().
 */
struct cheetah_err_info *cheetah_error_log;
829 
830 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
831 {
832 	struct cheetah_err_info *p;
833 	int cpu = smp_processor_id();
834 
835 	if (!cheetah_error_log)
836 		return NULL;
837 
838 	p = cheetah_error_log + (cpu * 2);
839 	if ((afsr & CHAFSR_TL1) != 0UL)
840 		p++;
841 
842 	return p;
843 }
844 
845 extern unsigned int tl0_icpe[], tl1_icpe[];
846 extern unsigned int tl0_dcpe[], tl1_dcpe[];
847 extern unsigned int tl0_fecc[], tl1_fecc[];
848 extern unsigned int tl0_cee[], tl1_cee[];
849 extern unsigned int tl0_iae[], tl1_iae[];
850 extern unsigned int tl0_dae[], tl1_dae[];
851 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
852 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
853 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
854 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
855 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
856 
/* Boot-time setup for Cheetah error handling: size the E-cache
 * displacement-flush window, allocate the per-cpu error scoreboard,
 * select the AFSR decode table for this cpu type, and patch the error
 * trap vectors into the trap table.
 */
void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int i, sz;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long val;

		val = cpu_data(i).ecache_size;
		if (!val)
			continue;

		if (val > largest_size)
			largest_size = val;

		val = cpu_data(i).ecache_line_size;
		if (val < smallest_linesize)
			smallest_linesize = val;

	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	/* Twice the cache size so a displacement flush evicts every line. */
	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= sz)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", sz);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	/* Select the decode table from the cpu implementation field
	 * of %ver (upper 32 bits).
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		/* NOTE(review): 0x003e0015 is presumably the Cheetah+
		 * implementation id -- confirm against asm/head.h.
		 */
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables.  Each vector slot is 8 instructions
	 * (8 * 4 bytes).
	 */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}
954 
/* Flush the entire local E-cache by displacement: read
 * ecache_flush_size bytes (2 * the largest probed E-cache size, see
 * cheetah_ecache_flush_init) of the contiguous flush area through
 * ASI_PHYS_USE_EC, one load per E-cache line.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	/* Count flush_size down to zero, issuing one physical load per
	 * line; the ldxa sits in the branch delay slot.
	 */
	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
968 
/* Displacement-flush the single E-cache line covering PHYSADDR.
 * PHYSADDR is folded into the flush area at the offset it indexes in
 * the E-cache, and both that location and its alias half a flush-size
 * away are loaded so any cached line at that index is evicted.
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	/* 8-byte align, then map into the low half of the flush region. */
	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
984 
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		/* The (2 << 3) address bits presumably select the valid/tag
		 * field of the ASI_IC_TAG diagnostic access -- TODO(review):
		 * confirm against the UltraSPARC-III manual.
		 */
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}
1007 
/* Flush the I-cache, adjusting the DCU control register around the
 * diagnostic tag clears in __cheetah_flush_icache() and restoring the
 * saved DCU value afterwards.
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache.
	 * NOTE(review): the asm ORs DCU_IC into the register, which looks
	 * like it would *enable* rather than disable the I-cache,
	 * contradicting this comment -- verify against asm/dcu.h and the
	 * processor manual before touching this code.
	 */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
1029 
/* Invalidate every local D-cache line by zeroing its tag through the
 * ASI_DCACHE_TAG diagnostic address space.
 */
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}
1045 
/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		/* utag value derived from the line address -- TODO(review):
		 * confirm the >> 14 encoding against the Cheetah+ manual.
		 */
		unsigned long tag = (addr >> 14);
		unsigned long line;

		/* Write the utag for this line... */
		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		/* ...then zero every 8-byte word of its data. */
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}
1078 
/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
/* 512-entry table indexed by the 9-bit AFSR E_SYND field; entries are
 * either a data/check bit number or one of the M/MT/C codes above.
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* 16-entry table indexed by the 4-bit AFSR M_SYND field. */
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
1148 
1149 /* Return the highest priority error conditon mentioned. */
1150 static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1151 {
1152 	unsigned long tmp = 0;
1153 	int i;
1154 
1155 	for (i = 0; cheetah_error_table[i].mask; i++) {
1156 		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1157 			return tmp;
1158 	}
1159 	return tmp;
1160 }
1161 
1162 static const char *cheetah_get_string(unsigned long bit)
1163 {
1164 	int i;
1165 
1166 	for (i = 0; cheetah_error_table[i].mask; i++) {
1167 		if ((bit & cheetah_error_table[i].mask) != 0UL)
1168 			return cheetah_error_table[i].name;
1169 	}
1170 	return "???";
1171 }
1172 
/* Pretty-print one logged Cheetah error: the AFSR/AFAR values, the
 * decoded M/E syndromes (including a DIMM name lookup where the error
 * type carries a valid syndrome), the D/I/E-cache snapshots captured
 * in *INFO, and every additional error bit beyond the highest
 * priority one.  RECOVERABLE selects KERN_WARNING vs KERN_CRIT.
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU  | \
			 CHAFSR_UCC | CHAFSR_UCU  | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		/* Decode the E-cache syndrome into a DIMM name. */
		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		/* Decode the M-tag syndrome into a DIMM name. */
		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
	       "u[%016llx] l[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report every remaining error bit beyond the highest priority
	 * one already printed above.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1290 
/* Re-read the AFSR looking for newly latched errors.  If any of the
 * known error bits are set, optionally capture AFSR/AFAR into *LOGP
 * (LOGP may be NULL).  The observed AFSR bits are then written back,
 * clearing them.  Returns 1 if new errors were found, else 0.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Write the bits we saw back to the AFSR to clear them. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1315 
/* Handle a Fast-ECC error trap.  Flushes the E-cache, snapshots the
 * error state the trap vector logged for this cpu, flushes and
 * re-enables the I/D caches and error reporting, then decides whether
 * the condition is recoverable; panics if not.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	/* Log slot may be absent very early in boot; nothing sane to
	 * do but report via the firmware and halt.
	 */
	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1401 
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 if no new error was observed (intermittent problem),
 * 1 if a second displacement cleared it, 2 if errors persist.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
			     "andn	%0, %1, %%g1\n\t"
			     "stxa	%%g1, [%%g0] %2\n\t"
			     "membar	#Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "casxa	[%2] %3, %%g0, %%g0\n\t"
			     "ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1465 
1466 /* Return non-zero if PADDR is a valid physical memory address. */
1467 static int cheetah_check_main_memory(unsigned long paddr)
1468 {
1469 	unsigned long vaddr = PAGE_OFFSET + paddr;
1470 
1471 	if (vaddr > (unsigned long) high_memory)
1472 		return 0;
1473 
1474 	return kern_addr_valid(vaddr);
1475 }
1476 
/* Handle a correctable ECC error trap.  Snapshots the logged error
 * state, attempts to scrub a main-memory CE by displacing the line
 * (cheetah_fix_ce), flushes whatever caches the error type requires,
 * re-enables the I-cache and CE reporting, and logs the result.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	/* Log slot may be absent very early in boot; report via the
	 * firmware and halt.
	 */
	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* If the only error is EDC/CPC a single line flush
		 * suffices, otherwise flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1576 
/* Handle a deferred asynchronous error trap.  Recognizes the special
 * PCI config-space "poke" sequence first, otherwise snapshots the
 * logged error, flushes/re-enables caches and error reporting, logs
 * the error, and for faults on main memory taken from user mode or
 * from kernel code covered by an exception table entry attempts to
 * recover by pinning the bad page and branching to the fixup.
 * Panics otherwise.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		/* Flag the fault for the poking code and skip the
		 * faulting instruction.
		 */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	/* Log slot may be absent very early in boot; report via the
	 * firmware and halt.
	 */
	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* If the only error is EDU/BERR a single line flush
		 * suffices, otherwise flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Leak a reference to the bad page so it
				 * is never handed out again.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1763 
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:	0=dcache,1=icache
 * Bit1:	0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* Scrub the affected cache while both caches are still off. */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Unrecoverable: report and panic. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}
1805 
/* Layout of one sun4v error report entry; offsets are noted in the
 * field comments below.
 */
struct sun4v_error_entry {
	/* Unique error handle */
/*0x00*/u64		err_handle;

	/* %stick value at the time of the error */
/*0x08*/u64		err_stick;

/*0x10*/u8		reserved_1[3];

	/* Error type */
/*0x13*/u8		err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_SHUTDOWN_RQST	4
#define SUN4V_ERR_TYPE_DUMP_CORE	5
#define SUN4V_ERR_TYPE_SP_STATE_CHANGE	6
#define SUN4V_ERR_TYPE_NUM		7

	/* Error attributes */
/*0x14*/u32		err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_SHUTDOWN_RQST	0x00000020
#define SUN4V_ERR_ATTRS_ASR		0x00000040
#define SUN4V_ERR_ATTRS_ASI		0x00000080
#define SUN4V_ERR_ATTRS_PRIV_REG	0x00000100
#define SUN4V_ERR_ATTRS_SPSTATE_MSK	0x00000600
#define SUN4V_ERR_ATTRS_MCD		0x00000800
#define SUN4V_ERR_ATTRS_SPSTATE_SHFT	9
#define SUN4V_ERR_ATTRS_MODE_MSK	0x03000000
#define SUN4V_ERR_ATTRS_MODE_SHFT	24
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

#define SUN4V_ERR_SPSTATE_FAULTED	0
#define SUN4V_ERR_SPSTATE_AVAILABLE	1
#define SUN4V_ERR_SPSTATE_NOT_PRESENT	2

#define SUN4V_ERR_MODE_USER		1
#define SUN4V_ERR_MODE_PRIV		2

	/* Real address of the memory region or PIO transaction */
/*0x18*/u64		err_raddr;

	/* Size of the operation triggering the error, in bytes */
/*0x20*/u32		err_size;

	/* ID of the CPU */
/*0x24*/u16		err_cpu;

	/* Grace period for shutdown, in seconds */
/*0x26*/u16		err_secs;

	/* Value of the %asi register */
/*0x28*/u8		err_asi;

/*0x29*/u8		reserved_2;

	/* ASR register number */
/*0x2a*/u16		err_asr;
#define SUN4V_ERR_ASR_VALID		0x8000

/*0x2c*/u32		reserved_3;
/*0x30*/u64		reserved_4;
/*0x38*/u64		reserved_5;
};
1876 
/* Counts of resumable/non-resumable error queue overflows.  Bumped
 * from the overflow trap handlers below (where printk is unsafe) and
 * reported, then reset, by sun4v_log_error().
 */
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1879 
1880 static const char *sun4v_err_type_to_str(u8 type)
1881 {
1882 	static const char *types[SUN4V_ERR_TYPE_NUM] = {
1883 		"undefined",
1884 		"uncorrected resumable",
1885 		"precise nonresumable",
1886 		"deferred nonresumable",
1887 		"shutdown request",
1888 		"dump core",
1889 		"SP state change",
1890 	};
1891 
1892 	if (type < SUN4V_ERR_TYPE_NUM)
1893 		return types[type];
1894 
1895 	return "unknown";
1896 }
1897 
/* Decode the err_attrs bitmask of a sun4v error report into human
 * readable tokens, emitted with pr_cont() onto the current log line
 * (the caller opens and closes the bracketed list).
 */
static void sun4v_emit_err_attr_strings(u32 attrs)
{
	/* Index i here corresponds to attribute bit (1U << i); keep
	 * this table in step with the SUN4V_ERR_ATTRS_* definitions.
	 */
	static const char *attr_names[] = {
		"processor",
		"memory",
		"PIO",
		"int-registers",
		"fpu-registers",
		"shutdown-request",
		"ASR",
		"ASI",
		"priv-reg",
	};
	/* Indexed by the 2-bit SPSTATE field. */
	static const char *sp_states[] = {
		"sp-faulted",
		"sp-available",
		"sp-not-present",
		"sp-state-reserved",
	};
	/* Indexed by the 2-bit MODE field. */
	static const char *modes[] = {
		"mode-reserved0",
		"user",
		"priv",
		"mode-reserved1",
	};
	u32 sp_state, mode;
	int i;

	for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
		if (attrs & (1U << i)) {
			const char *s = attr_names[i];

			pr_cont("%s ", s);
		}
	}

	sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
		    SUN4V_ERR_ATTRS_SPSTATE_SHFT);
	pr_cont("%s ", sp_states[sp_state]);

	mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
		SUN4V_ERR_ATTRS_MODE_SHFT);
	pr_cont("%s ", modes[mode]);

	if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
		pr_cont("res-queue-full ");
}
1945 
1946 /* When the report contains a real-address of "-1" it means that the
1947  * hardware did not provide the address.  So we compute the effective
1948  * address of the load or store instruction at regs->tpc and report
1949  * that.  Usually when this happens it's a PIO and in such a case we
1950  * are using physical addresses with bypass ASIs anyways, so what we
1951  * report here is exactly what we want.
1952  */
1953 static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
1954 {
1955 	unsigned int insn;
1956 	u64 addr;
1957 
1958 	if (!(regs->tstate & TSTATE_PRIV))
1959 		return;
1960 
1961 	insn = *(unsigned int *) regs->tpc;
1962 
1963 	addr = compute_effective_address(regs, insn, 0);
1964 
1965 	printk("%s: insn effective address [0x%016llx]\n",
1966 	       pfx, addr);
1967 }
1968 
/* Pretty-print one sun4v error report entry.  @pfx carries both the
 * printk level and a tag (e.g. KERN_ERR "RESUMABLE ERROR"); @ocnt is
 * the matching queue-overflow counter, reported and reset here.
 */
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
			    int cpu, const char *pfx, atomic_t *ocnt)
{
	u64 *raw_ptr = (u64 *) ent;
	u32 attrs;
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: TPC [0x%016lx] <%pS>\n",
	       pfx, regs->tpc, (void *) regs->tpc);

	/* Dump the raw 64-byte entry as eight 64-bit words. */
	printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
	       pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
	printk("%s:      %016llx:%016llx:%016llx:%016llx]\n",
	       pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);

	printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
	       pfx, ent->err_handle, ent->err_stick);

	printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));

	attrs = ent->err_attrs;
	printk("%s: attrs [0x%08x] < ", pfx, attrs);
	sun4v_emit_err_attr_strings(attrs);
	pr_cont(">\n");

	/* Various fields in the error report are only valid if
	 * certain attribute bits are set.
	 */
	if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
		     SUN4V_ERR_ATTRS_PIO |
		     SUN4V_ERR_ATTRS_ASI)) {
		printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);

		/* An all-ones raddr means the hardware did not supply
		 * one; derive it from the faulting instruction instead.
		 */
		if (ent->err_raddr == ~(u64)0)
			sun4v_report_real_raddr(pfx, regs);
	}

	if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
		printk("%s: size [0x%x]\n", pfx, ent->err_size);

	if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
		     SUN4V_ERR_ATTRS_INT_REGISTERS |
		     SUN4V_ERR_ATTRS_FPU_REGISTERS |
		     SUN4V_ERR_ATTRS_PRIV_REG))
		printk("%s: cpu[%u]\n", pfx, ent->err_cpu);

	if (attrs & SUN4V_ERR_ATTRS_ASI)
		printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);

	/* The register number is only meaningful when its valid bit
	 * is set; mask that bit out when printing.
	 */
	if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
		      SUN4V_ERR_ATTRS_FPU_REGISTERS |
		      SUN4V_ERR_ATTRS_PRIV_REG)) &&
	    (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
		printk("%s: reg [0x%04x]\n",
		       pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);

	show_regs(regs);

	/* Report and clear any overflow count accumulated by the
	 * overflow trap handlers.
	 */
	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}
2035 
/* Handle memory corruption detected error which is vectored in
 * through resumable error trap.
 *
 * For kernel-mode faults an exception-table fixup is attempted (the
 * typical cause being a bad pointer passed to a syscall); otherwise
 * the task is sent SIGSEGV/SEGV_ADIDERR with the faulting real
 * address from the error report.
 */
void do_mcd_err(struct pt_regs *regs, struct sun4v_error_entry ent)
{
	if (notify_die(DIE_TRAP, "MCD error", regs, 0, 0x34,
		       SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* MCD exception could happen because the task was
		 * running a system call with MCD enabled and passed a
		 * non-versioned pointer or pointer with bad version
		 * tag to the system call. In such cases, hypervisor
		 * places the address of offending instruction in the
		 * resumable error report. This is a deferred error,
		 * so the read/write that caused the trap was potentially
		 * retired long time back and we may have no choice
		 * but to send SIGSEGV to the process.
		 */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Looks like a bad syscall parameter */
#ifdef DEBUG_EXCEPTIONS
			pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
				 regs->tpc);
			pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
				 ent.err_raddr, entry->fixup);
#endif
			/* Resume at the fixup stub instead. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
	}

	/* Send SIGSEGV to the userspace process with the right signal
	 * code
	 */
	force_sig_fault(SIGSEGV, SEGV_ADIDERR, (void __user *)ent.err_raddr,
			0, current);
}
2079 
2080 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
2081  * Log the event and clear the first word of the entry.
2082  */
2083 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
2084 {
2085 	enum ctx_state prev_state = exception_enter();
2086 	struct sun4v_error_entry *ent, local_copy;
2087 	struct trap_per_cpu *tb;
2088 	unsigned long paddr;
2089 	int cpu;
2090 
2091 	cpu = get_cpu();
2092 
2093 	tb = &trap_block[cpu];
2094 	paddr = tb->resum_kernel_buf_pa + offset;
2095 	ent = __va(paddr);
2096 
2097 	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
2098 
2099 	/* We have a local copy now, so release the entry.  */
2100 	ent->err_handle = 0;
2101 	wmb();
2102 
2103 	put_cpu();
2104 
2105 	if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
2106 		/* We should really take the seconds field of
2107 		 * the error report and use it for the shutdown
2108 		 * invocation, but for now do the same thing we
2109 		 * do for a DS shutdown request.
2110 		 */
2111 		pr_info("Shutdown request, %u seconds...\n",
2112 			local_copy.err_secs);
2113 		orderly_poweroff(true);
2114 		goto out;
2115 	}
2116 
2117 	/* If this is a memory corruption detected error vectored in
2118 	 * by HV through resumable error trap, call the handler
2119 	 */
2120 	if (local_copy.err_attrs & SUN4V_ERR_ATTRS_MCD) {
2121 		do_mcd_err(regs, local_copy);
2122 		return;
2123 	}
2124 
2125 	sun4v_log_error(regs, &local_copy, cpu,
2126 			KERN_ERR "RESUMABLE ERROR",
2127 			&sun4v_resum_oflow_cnt);
2128 out:
2129 	exception_exit(prev_state);
2130 }
2131 
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	atomic_inc(&sun4v_resum_oflow_cnt);
}
2140 
/* Given a set of registers, get the virtual address that was being
 * accessed by the faulting instruction at tpc.
 */
static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
{
	unsigned int insn;

	/* Read the faulting user instruction; if that fails (mapping
	 * gone), report address zero rather than guessing.
	 */
	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
		return compute_effective_address(regs, insn,
						 (insn >> 25) & 0x1f);
	}
	return 0;
}
2154 
2155 /* Attempt to handle non-resumable errors generated from userspace.
2156  * Returns true if the signal was handled, false otherwise.
2157  */
2158 bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
2159 				  struct sun4v_error_entry *ent) {
2160 
2161 	unsigned int attrs = ent->err_attrs;
2162 
2163 	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
2164 		unsigned long addr = ent->err_raddr;
2165 
2166 		if (addr == ~(u64)0) {
2167 			/* This seems highly unlikely to ever occur */
2168 			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
2169 		} else {
2170 			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
2171 							      PAGE_SIZE);
2172 
2173 			/* Break the unfortunate news. */
2174 			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
2175 				 addr);
2176 			pr_emerg("SUN4V NON-RECOVERABLE ERROR:   Claiming %lu ages.\n",
2177 				 page_cnt);
2178 
2179 			while (page_cnt-- > 0) {
2180 				if (pfn_valid(addr >> PAGE_SHIFT))
2181 					get_page(pfn_to_page(addr >> PAGE_SHIFT));
2182 				addr += PAGE_SIZE;
2183 			}
2184 		}
2185 		force_sig(SIGKILL, current);
2186 
2187 		return true;
2188 	}
2189 	if (attrs & SUN4V_ERR_ATTRS_PIO) {
2190 		force_sig_fault(SIGBUS, BUS_ADRERR,
2191 				(void __user *)sun4v_get_vaddr(regs), 0, current);
2192 		return true;
2193 	}
2194 
2195 	/* Default to doing nothing */
2196 	return false;
2197 }
2198 
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	/* @offset selects the entry within this cpu's non-resumable
	 * error kernel buffer.
	 */
	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry.  */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	if (!(regs->tstate & TSTATE_PRIV) &&
	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
		/* DON'T PANIC: This userspace error was handled. */
		return;
	}

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		/* The poke probe expected this fault; flag it and
		 * step past the faulting instruction.
		 */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	/* Anything else taken in privileged mode is fatal. */
	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}
2245 
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this can make not that much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}
2257 
/* Common fatal path for the ITLB/DTLB error reporters below. */
static void sun4v_tlb_error(struct pt_regs *regs)
{
	die_if_kernel("TLB/TSB error", regs);
}
2262 
/* ITLB error state.  NOTE(review): these appear to be filled in by
 * the low-level trap entry code before this reporter runs — confirm
 * against the sun4v trap table assembler.
 */
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

/* Report a fatal sun4v instruction-TLB error taken at trap level
 * @tl, then die via sun4v_tlb_error().
 */
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	sun4v_tlb_error(regs);
}
2285 
/* DTLB error state.  NOTE(review): these appear to be filled in by
 * the low-level trap entry code before this reporter runs — confirm
 * against the sun4v trap table assembler.
 */
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

/* Report a fatal sun4v data-TLB error taken at trap level @tl, then
 * die via sun4v_tlb_error().
 */
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	sun4v_tlb_error(regs);
}
2308 
/* Log a failed hypervisor TLB operation (error code @err, op @op). */
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}
2314 
/* As hypervisor_tlbop_error(), but for cross-call initiated TLB ops. */
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}
2320 
/* Common completion for FP exception traps: kernel-mode faults just
 * skip the trapping instruction; user-mode faults become a SIGFPE
 * whose si_code is decoded from the thread's saved %fsr.
 */
static void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		/* Advance past the faulting instruction. */
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		int code;

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		code = FPE_FLTUNK;
		/* 0x1c000 is the %fsr trap-type (ftt) field; value 1
		 * (1 << 14) is an IEEE 754 exception, whose low cexc
		 * bits are decoded below.  NOTE(review): bit meanings
		 * inferred from the FPE_* mapping — confirm against
		 * the SPARC V9 FSR definition.
		 */
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				code = FPE_FLTINV;	/* invalid operation */
			else if (fsr & 0x08)
				code = FPE_FLTOVF;	/* overflow */
			else if (fsr & 0x04)
				code = FPE_FLTUND;	/* underflow */
			else if (fsr & 0x02)
				code = FPE_FLTDIV;	/* divide by zero */
			else if (fsr & 0x01)
				code = FPE_FLTRES;	/* inexact result */
		}
		force_sig_fault(SIGFPE, code,
				(void __user *)regs->tpc, 0, current);
	}
}
2351 
/* Trap handler for FP IEEE 754 exceptions (trap type 0x24). */
void do_fpieee(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		goto out;

	do_fpe_common(regs);
out:
	exception_exit(prev_state);
}
2364 
/* Trap handler for non-IEEE FP exceptions (trap type 0x25).  For
 * unfinished/unimplemented FPops the math emulator is tried first;
 * only if that fails do we fall through to the generic FPE path.
 */
void do_fpother(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* 0x1c000 is the %fsr trap-type (ftt) field. */
	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f, false);
		break;
	}
	if (ret)
		goto out;
	do_fpe_common(regs);
out:
	exception_exit(prev_state);
}
2387 
/* Trap handler for tagged arithmetic overflow (trap type 0x26).
 * Fatal in kernel mode; user mode gets SIGEMT/EMT_TAGOVF.
 */
void do_tof(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGEMT, EMT_TAGOVF,
			(void __user *)regs->tpc, 0, current);
out:
	exception_exit(prev_state);
}
2407 
/* Trap handler for integer divide-by-zero (trap type 0x28).  Fatal
 * in kernel mode; user mode gets SIGFPE/FPE_INTDIV.
 */
void do_div0(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGFPE, FPE_INTDIV,
			(void __user *)regs->tpc, 0, current);
out:
	exception_exit(prev_state);
}
2427 
/* Dump the three instructions before and five after the (kernel)
 * faulting PC, bracketing the faulting instruction with '<' and '>'.
 */
static void instruction_dump(unsigned int *pc)
{
	int offset;

	/* Only a 4-byte aligned PC can be dereferenced as insns. */
	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (offset = -3; offset < 6; offset++)
		printk("%c%08x%c",
		       offset ? ' ' : '<', pc[offset], offset ? ' ' : '>');
	printk("\n");
}
2440 
2441 static void user_instruction_dump(unsigned int __user *pc)
2442 {
2443 	int i;
2444 	unsigned int buf[9];
2445 
2446 	if ((((unsigned long) pc) & 3))
2447 		return;
2448 
2449 	if (copy_from_user(buf, pc - 3, sizeof(buf)))
2450 		return;
2451 
2452 	printk("Instruction DUMP:");
2453 	for (i = 0; i < 9; i++)
2454 		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2455 	printk("\n");
2456 }
2457 
/* Print a kernel backtrace for @tsk starting at stack pointer @_ksp.
 * A NULL @tsk means "current"; a zero ksp means use the live %fp (for
 * current) or the task's saved ksp.  The walk follows saved frame
 * pointers, switching over trap frames found on the stack, and stops
 * after 16 entries or at the first invalid kernel stack address.
 */
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long fp, ksp;
	struct thread_info *tp;
	int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	ksp = (unsigned long) _ksp;
	if (!tsk)
		tsk = current;
	tp = task_thread_info(tsk);
	if (ksp == 0UL) {
		if (tsk == current)
			asm("mov %%fp, %0" : "=r" (ksp));
		else
			ksp = tp->ksp;
	}
	/* Flush register windows to the stack so the walk sees
	 * up-to-date frames.
	 */
	if (tp == current_thread_info())
		flushw_all();

	fp = ksp + STACK_BIAS;

	printk("Call Trace:\n");
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(tp, fp))
			break;
		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			/* Stop at the user/kernel boundary. */
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		printk(" [%016lx] %pS\n", pc, (void *) pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/* Translate the ftrace return trampoline back into the
		 * real return address it replaced.
		 */
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			struct ftrace_ret_stack *ret_stack;
			ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
			if (ret_stack) {
				pc = ret_stack->ret;
				printk(" [%016lx] %pS\n", pc, (void *) pc);
				graph++;
			}
		}
#endif
	} while (++count < 16);
}
2517 
2518 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2519 {
2520 	unsigned long fp = rw->ins[6];
2521 
2522 	if (!fp)
2523 		return NULL;
2524 
2525 	return (struct reg_window *) (fp + STACK_BIAS);
2526 }
2527 
/* Oops path: dump registers, a caller backtrace (kernel) or the
 * faulting instructions (user), then exit the task — despite the
 * name, this function never returns regardless of mode.
 */
void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Spill register windows so the backtrace below is accurate. */
	__asm__ __volatile__("flushw");
	show_regs(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	if (regs->tstate & TSTATE_PRIV) {
		struct thread_info *tp = current_thread_info();
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       kstack_valid(tp, (unsigned long) rw)) {
			printk("Caller[%016lx]: %pS\n", rw->ins[7],
			       (void *) rw->ins[7]);

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
	if (panic_on_oops)
		panic("Fatal exception");
	/* Kernel-mode oops kills unconditionally; user mode gets a
	 * catchable SIGSEGV exit.
	 */
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);
2576 
/* Mask/value pair matching the opcode space handled by vis_emul(). */
#define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
#define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))

/* Trap handler for illegal instructions (trap type 0x10).  Several
 * instruction classes are emulated (POPC, LDQ/STQ, VIS ops and — on
 * hypervisor systems — unimplemented FPU ops); anything else gets
 * SIGILL, or is fatal in kernel mode.
 */
void do_illegal_instruction(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		goto out;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				goto out;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				goto out;
		} else if (tlb_type == hypervisor) {
			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
				if (!vis_emul(regs, insn))
					goto out;
			} else {
				struct fpustate *f = FPUSTATE;

				/* On UltraSPARC T2 and later, FPU insns which
				 * are not implemented in HW signal an illegal
				 * instruction trap and do not set the FP Trap
				 * Trap in the %fsr to unimplemented_FPop.
				 */
				if (do_mathemu(regs, f, true))
					goto out;
			}
		}
	}
	force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, 0, current);
out:
	exception_exit(prev_state);
}
2623 
2624 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2625 {
2626 	enum ctx_state prev_state = exception_enter();
2627 
2628 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2629 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2630 		goto out;
2631 
2632 	if (regs->tstate & TSTATE_PRIV) {
2633 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2634 		goto out;
2635 	}
2636 	if (is_no_fault_exception(regs))
2637 		return;
2638 
2639 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar, 0, current);
2640 out:
2641 	exception_exit(prev_state);
2642 }
2643 
/* sun4v variant of the unaligned-access trap handler: the fault
 * address arrives directly from the hypervisor in @addr rather than
 * via %sfar.  (No exception_enter() here, so plain returns are fine.)
 */
void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	if (is_no_fault_exception(regs))
		return;

	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) addr, 0, current);
}
2659 
/* sun4v_mem_corrupt_detect_precise() - Handle precise exception on an ADI
 * tag mismatch.
 *
 * ADI version tag mismatch on a load from memory always results in a
 * precise exception. Tag mismatch on a store to memory will result in
 * precise exception if MCDPER or PMCDPER is set to 1.
 *
 * Kernel faults try an exception-table fixup first and are otherwise
 * fatal; user faults get SIGSEGV/SEGV_ADIPERR at @addr.
 */
void sun4v_mem_corrupt_detect_precise(struct pt_regs *regs, unsigned long addr,
				      unsigned long context)
{
	if (notify_die(DIE_TRAP, "memory corruption precise exception", regs,
		       0, 0x8, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* MCD exception could happen because the task was running
		 * a system call with MCD enabled and passed a non-versioned
		 * pointer or pointer with bad version tag to  the system
		 * call.
		 */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Looks like a bad syscall parameter */
#ifdef DEBUG_EXCEPTIONS
			pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
				 regs->tpc);
			pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
				 regs->tpc, entry->fixup);
#endif
			/* Resume at the fixup stub instead. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_emerg("%s: ADDR[%016lx] CTX[%lx], going.\n",
			 __func__, addr, context);
		die_if_kernel("MCD precise", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGSEGV, SEGV_ADIPERR, (void __user *)addr, 0, current);
}
2706 
/* Trap handler for privileged-opcode traps (trap type 0x11): the
 * offending user task gets SIGILL/ILL_PRVOPC.
 */
void do_privop(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		goto out;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	force_sig_fault(SIGILL, ILL_PRVOPC,
			(void __user *)regs->tpc, 0, current);
out:
	exception_exit(prev_state);
}
2724 
/* Privileged-action traps are handled the same as privileged opcodes. */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
2729 
/* Trap level 1 stuff or other traps we should never see... */

/* TL0 cache error exception: always fatal. */
void do_cee(struct pt_regs *regs)
{
	exception_enter();
	die_if_kernel("TL0: Cache Error Exception", regs);
}
2736 
/* Divide-by-zero at TL1: dump the trap-level log, then die. */
void do_div0_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}
2743 
/* FPU IEEE exception at TL1: dump the trap-level log, then die. */
void do_fpieee_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}
2750 
/* Non-IEEE FPU exception at TL1: dump the trap-level log, then die. */
void do_fpother_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}
2757 
/* Illegal instruction at TL1: dump the trap-level log, then die. */
void do_ill_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}
2764 
/* Interrupt taken at TL1: dump the trap-level log, then die. */
void do_irq_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}
2771 
/* Misaligned LDDF at TL1: dump the trap-level log, then die. */
void do_lddfmna_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}
2778 
/* Misaligned STDF at TL1: dump the trap-level log, then die. */
void do_stdfmna_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}
2785 
/* Physical-address watchpoint hit at TL0: always fatal. */
void do_paw(struct pt_regs *regs)
{
	exception_enter();
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
2791 
/* Physical-address watchpoint at TL1: dump the trap log, then die. */
void do_paw_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}
2798 
/* Virtual-address watchpoint hit at TL0: always fatal. */
void do_vaw(struct pt_regs *regs)
{
	exception_enter();
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
2804 
/* Virtual-address watchpoint at TL1: dump the trap log, then die. */
void do_vaw_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}
2811 
/* Tagged-arithmetic overflow at TL1: dump the trap log, then die. */
void do_tof_tl1(struct pt_regs *regs)
{
	exception_enter();
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2818 
/* Return a 32-bit %psr image (derived from %tstate) in %o0 and step
 * past the trap instruction.
 */
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc   = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
2829 
/* Per-cpu mondo counter and trap state.  trap_block's field layout is
 * referenced from assembler; the TRAP_PER_CPU_* offsets are sanity
 * checked in trap_init() below.
 */
u64 cpu_mondo_counter[NR_CPUS] = {0};
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);
2833 
/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 *
 * Record @t as this cpu's thread_info in its trap_block entry and
 * clear the saved pgd physical address.
 */
void notrace init_cur_cpu_trap(struct thread_info *t)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = t;
	p->pgd_paddr = 0;
}
2845 
2846 extern void thread_info_offsets_are_bolixed_dave(void);
2847 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2848 extern void tsb_config_offsets_are_bolixed_dave(void);
2849 
2850 /* Only invoked on boot processor. */
/* Only invoked on boot processor.  The BUILD_BUG_ON() chains verify,
 * at compile time, that the assembler-visible TI_*, TRAP_PER_CPU_*
 * and TSB_CONFIG_* offset constants match the actual C structure
 * layouts the trap entry assembly pokes at.  A mismatch fails the
 * build instead of corrupting state at runtime.
 */
void __init trap_init(void)
{
	/* thread_info offsets used by the trap/window assembly.  The
	 * final term also requires fpregs to be 64-byte aligned (block
	 * load/store instructions need that alignment).
	 */
	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
		     TI_FLAGS != offsetof(struct thread_info, flags) ||
		     TI_CPU != offsetof(struct thread_info, cpu) ||
		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
		     TI_KSP != offsetof(struct thread_info, ksp) ||
		     TI_FAULT_ADDR != offsetof(struct thread_info,
					       fault_address) ||
		     TI_KREGS != offsetof(struct thread_info, kregs) ||
		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
		     TI_REG_WINDOW != offsetof(struct thread_info,
					       reg_window) ||
		     TI_RWIN_SPTRS != offsetof(struct thread_info,
					       rwbuf_stkptrs) ||
		     TI_GSR != offsetof(struct thread_info, gsr) ||
		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
		     TI_PRE_COUNT != offsetof(struct thread_info,
					      preempt_count) ||
		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
		     TI_CURRENT_DS != offsetof(struct thread_info,
						current_ds) ||
		     TI_KUNA_REGS != offsetof(struct thread_info,
					      kern_una_regs) ||
		     TI_KUNA_INSN != offsetof(struct thread_info,
					      kern_una_insn) ||
		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
		     (TI_FPREGS & (64 - 1)));

	/* trap_per_cpu offsets used by the mondo/trap dispatch code. */
	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
						     thread) ||
		     (TRAP_PER_CPU_PGD_PADDR !=
		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
		     (TRAP_PER_CPU_CPU_MONDO_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
		     (TRAP_PER_CPU_DEV_MONDO_PA !=
		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
		     (TRAP_PER_CPU_FAULT_INFO !=
		      offsetof(struct trap_per_cpu, fault_info)) ||
		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
		     (TRAP_PER_CPU_CPU_LIST_PA !=
		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
		     (TRAP_PER_CPU_TSB_HUGE !=
		      offsetof(struct trap_per_cpu, tsb_huge)) ||
		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
		     (TRAP_PER_CPU_RESUM_QMASK !=
		      offsetof(struct trap_per_cpu, resum_qmask)) ||
		     (TRAP_PER_CPU_NONRESUM_QMASK !=
		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
		     (TRAP_PER_CPU_PER_CPU_BASE !=
		      offsetof(struct trap_per_cpu, __per_cpu_base)));

	/* tsb_config offsets used by the TSB miss handlers. */
	BUILD_BUG_ON((TSB_CONFIG_TSB !=
		      offsetof(struct tsb_config, tsb)) ||
		     (TSB_CONFIG_RSS_LIMIT !=
		      offsetof(struct tsb_config, tsb_rss_limit)) ||
		     (TSB_CONFIG_NENTRIES !=
		      offsetof(struct tsb_config, tsb_nentries)) ||
		     (TSB_CONFIG_REG_VAL !=
		      offsetof(struct tsb_config, tsb_reg_val)) ||
		     (TSB_CONFIG_MAP_VADDR !=
		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
		     (TSB_CONFIG_MAP_PTE !=
		      offsetof(struct tsb_config, tsb_map_pte)));

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
}
2939