xref: /netbsd/sys/arch/mips/mips/mipsX_subr.S (revision c4a72b64)
1/*	$NetBSD: mipsX_subr.S,v 1.10 2002/11/12 14:00:41 nisimura Exp $	*/
2
3/*
4 * Copyright 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed for the NetBSD Project by
20 *	Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38/*
39 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 *    notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 *    notice, this list of conditions and the following disclaimer in the
49 *    documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 *    must display the following acknowledgement:
52 *      This product includes software developed by Jonathan R. Stone for
53 *      the NetBSD Project.
54 * 4. The name of the author may not be used to endorse or promote products
55 *    derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
67 * SUCH DAMAGE.
68 */
69
70/*
71 * Copyright (c) 1992, 1993
72 *	The Regents of the University of California.  All rights reserved.
73 *
74 * This code is derived from software contributed to Berkeley by
75 * Digital Equipment Corporation and Ralph Campbell.
76 *
77 * Redistribution and use in source and binary forms, with or without
78 * modification, are permitted provided that the following conditions
79 * are met:
80 * 1. Redistributions of source code must retain the above copyright
81 *    notice, this list of conditions and the following disclaimer.
82 * 2. Redistributions in binary form must reproduce the above copyright
83 *    notice, this list of conditions and the following disclaimer in the
84 *    documentation and/or other materials provided with the distribution.
85 * 3. All advertising materials mentioning features or use of this software
86 *    must display the following acknowledgement:
87 *	This product includes software developed by the University of
88 *	California, Berkeley and its contributors.
89 * 4. Neither the name of the University nor the names of its contributors
90 *    may be used to endorse or promote products derived from this software
91 *    without specific prior written permission.
92 *
93 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
94 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
95 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
96 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
97 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
98 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
99 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
100 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
101 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
102 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
103 * SUCH DAMAGE.
104 *
105 * Copyright (C) 1989 Digital Equipment Corporation.
106 * Permission to use, copy, modify, and distribute this software and
107 * its documentation for any purpose and without fee is hereby granted,
108 * provided that the above copyright notice appears in all copies.
109 * Digital Equipment Corporation makes no representations about the
110 * suitability of this software for any purpose.  It is provided "as is"
111 * without express or implied warranty.
112 *
113 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
114 *	v 1.1 89/07/11 17:55:04 nelson Exp  SPRITE (DECWRL)
115 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
116 *	v 9.2 90/01/29 18:00:39 shirriff Exp  SPRITE (DECWRL)
117 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
118 *	v 1.1 89/07/10 14:27:41 nelson Exp  SPRITE (DECWRL)
119 *
120 *	@(#)locore.s	8.5 (Berkeley) 1/4/94
121 */
122#include "opt_cputype.h"
123#include "opt_ddb.h"
124#include "opt_kgdb.h"
125
126#include <sys/cdefs.h>
127
128#include <mips/asm.h>
129#include <mips/cpuregs.h>
130#if defined(MIPS3) && !defined(MIPS3_5900)
131#include <mips/cache_r4k.h>
132#endif
133
134#include <machine/param.h>
135#include <machine/endian.h>
136
137#include "assym.h"
138
139/*
140 * XXX MIPS3_5900 is still "special" for much of this code.
141 * XXX MIPS3_4100 is still "special" in tlb update code
142 */
143
144#if MIPS1
145#error This file can not be compiled with MIPS1 defined
146#endif
147
148#if MIPS3 + MIPS32 + MIPS64 != 1
149# error  Exactly one of MIPS{3,32,64} must be defined
150#endif
151
152/*
153 * Use 64bit cp0 instructions?
154 */
155#if defined(MIPS3)
156#define	USE_64BIT_INSTRUCTIONS
157#if defined(MIPS3_5900)		/* the 5900 has mips32-like mmu registers */
158#undef	USE_64BIT_CP0_FUNCTIONS
159#else
160#define	USE_64BIT_CP0_FUNCTIONS
161#endif
162#endif
163
164#if defined(MIPS32)
165#undef	USE_64BIT_INSTRUCTIONS
166#undef	USE_64BIT_CP0_FUNCTIONS
167#endif
168
169#if defined(MIPS64)
170#define	USE_64BIT_INSTRUCTIONS
171#define	USE_64BIT_CP0_FUNCTIONS
172#endif
173
174#if defined(USE_64BIT_CP0_FUNCTIONS)
175#define	_SLL		dsll
176#define	_SRL		dsrl
177#define	WIRED_SHIFT	34
178#else
179#define	_SLL		sll
180#define	_SRL		srl
181#define	WIRED_SHIFT	2
182#endif
183
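/*
 * Note: in the handlers below an _SLL/_SRL pair by WIRED_SHIFT is used to
 * strip the software-only PTE bits (e.g. the "wired" bit) that live above
 * the hardware EntryLo fields before a PTE is written into the TLB; with
 * 64-bit cp0 accesses this clears the top 34 bits, with 32-bit accesses
 * the top 2 bits.
 */
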
184/*
185 * Use correct-sized m?c0/dm?c0 opcodes.
186 */
187#if defined(USE_64BIT_CP0_FUNCTIONS)
188#define	_MFC0	dmfc0
189#define	_MTC0	dmtc0
190#else
191#define	_MFC0	mfc0
192#define	_MTC0	mtc0
193#endif
194
195
196/*
197 * Set ISA level for the assembler.
198 */
199#if defined(MIPS3)
200	.set	mips3
201#endif
202
203#if defined(MIPS32)
204	.set	mips32
205#endif
206
207#if defined(MIPS64)
208	.set	mips64
209#endif
210
211
212/*
213 * CPP function renaming macros.
214 */
215
216#if defined(MIPS3)
217#ifdef __STDC__
218#define	MIPSX(name)	mips3_ ## name
219#else
220#define	MIPSX(name)	mips3_/**/name
221#endif
222#endif
223
224#if defined(MIPS3_5900)
225#undef MIPSX
226#ifdef __STDC__
227#define	MIPSX(name)	mips5900_ ## name
228#else
229#define	MIPSX(name)	mips5900_/**/name
230#endif
231#endif
232
233#if defined(MIPS32)
234#ifdef __STDC__
235#define	MIPSX(name)	mips32_ ## name
236#else
237#define	MIPSX(name)	mips32_/**/name
238#endif
239#endif
240
241#if defined(MIPS64)
242#ifdef __STDC__
243#define	MIPSX(name)	mips64_ ## name
244#else
245#define	MIPSX(name)	mips64_/**/name
246#endif
247#endif
248
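/*
 * For example, with MIPS3 defined MIPSX(TLBMiss) assembles as
 * mips3_TLBMiss; with MIPS3_5900, MIPS32 or MIPS64 it becomes
 * mips5900_TLBMiss, mips32_TLBMiss or mips64_TLBMiss respectively,
 * so a single copy of this source yields one handler set per CPU family.
 */
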
249#define	_VECTOR_END(name)	VECTOR_END(name)
250
251/*
252 * XXX We need a cleaner way of handling the instruction hazards of
253 * the various processors.  Here are the relevant rules for the QED 52XX:
254 *	tlbw[ri]	-- two integer ops beforehand
255 *	tlbr		-- two integer ops beforehand
256 *	tlbp		-- two integer ops beforehand
257 *	mtc0	[PageMask,EntryHi,Cp0] -- two integer ops afterwards
258 *	changing JTLB	-- two integer ops afterwards
259 *	mtc0	[EPC,ErrorEPC,Status] -- two int ops afterwards before eret
260 *	config.k0	-- five int ops before kseg0, ckseg0 memref
261 *
262 * For the IDT R4000, some hazards are:
263 *	mtc0/mfc0	one integer op before and after
264 *	tlbp		-- one integer op afterwards
265 * The obvious solution is to take the least common denominator.
266 *
267 * For the Toshiba R5900, TX79:
268 *	mtc0		following sync.p
269 *	tlbw[ri], tlbp	following sync.p or eret
270 * For these CPUs, define COP0_SYNC as sync.p.
271 */
272
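/*
 * COP0_SYNC, used after cp0 writes throughout this file, is the per-CPU
 * cop0 hazard barrier implied by the rules above: on the R5900 it is
 * expected to expand to sync.p, and to nothing (or nops) on CPUs that do
 * not need an explicit barrier.
 */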
273
274/*
275 *============================================================================
276 *
277 *  MIPS III ISA support, part 1: locore exception vectors.
278 *  The following code is copied to the vector locations to which
279 *  the CPU jumps in response to an exception or a TLB miss.
280 *
281 *============================================================================
282 */
283	.set	noreorder
284
285/*
286 * TLB handling data.   'segbase' points to the base of the segment
287 * table.   This is read and written by C code in mips_machdep.c.
288 *
289 * XXX: use linear mapped PTs at fixed VA in kseg2 in the future?
290 */
291	.text
292
293
294/*
295 *----------------------------------------------------------------------------
296 *
297 * mips3_TLBMiss --
298 *
299 *	Vector code for the TLB-miss exception vector 0x80000000
300 *	on an r4000.
301 *
302 * This code is copied to the TLB exception vector address to
303 * handle TLB translation misses.
304 * NOTE: This code should be relocatable and max 32 instructions!!!
305 *
306 * Don't check for invalid PTEs here. We load them as well and
307 * let the processor trap to load the correct value after service.
308 *----------------------------------------------------------------------------
309 */
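/*
 * In outline, for a user virtual address va (assuming 4 KB pages and the
 * two-level table rooted at 'segbase'), the lookup below is roughly:
 *
 *	pde = segbase[(va >> 22) & 0x3ff];		1st level entry
 *	pte = &pde[((va >> 12) & 0x3ff) & ~1];		even/odd PTE pair
 *	EntryLo0/EntryLo1 <- pte[0]/pte[1];  tlbwr
 */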
310VECTOR(MIPSX(TLBMiss), unknown)
311	.set	noat
312	mfc0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
313	lui	k1, %hi(segbase)		#01: k1=hi of segbase
314	bltz	k0, 4f				#02: k0<0 -> 4f (kernel fault)
315	srl	k0, 20				#03: k0=seg offset (almost)
316	lw	k1, %lo(segbase)(k1)		#04: k1=segment tab base
317	andi	k0, k0, 0xffc			#05: k0=seg offset (mask 0x3)
318	addu	k1, k0, k1			#06: k1=seg entry address
319	lw	k1, 0(k1)			#07: k1=seg entry
320	mfc0	k0, MIPS_COP_0_BAD_VADDR	#08: k0=bad address (again)
321	beq	k1, zero, 5f			#09: ==0 -- no page table
322	srl	k0, 10				#0a: k0=VPN (aka va>>10)
323	andi	k0, k0, 0xff8			#0b: k0=page tab offset
324	addu	k1, k1, k0			#0c: k1=pte address
325	lw	k0, 0(k1)			#0d: k0=lo0 pte
326	lw	k1, 4(k1)			#0e: k1=lo1 pte
327	sll	k0, 2				#0f: chop top 2 bits (part 1a)
328	srl	k0, 2				#10: chop top 2 bits (part 1b)
329#ifdef MIPS3_5900
330	mtc0	k0, MIPS_COP_0_TLB_LO0		#11: lo0 is loaded
331	sync.p					#12: R5900 cop0 hazard
332	sll	k1, 2				#13: chop top 2 bits (part 2a)
333	srl	k1, 2				#14: chop top 2 bits (part 2b)
334	mtc0	k1, MIPS_COP_0_TLB_LO1		#15: lo1 is loaded
335	sync.p					#16: R5900 cop0 hazard
336#else /* MIPS3_5900 */
337	mtc0	k0, MIPS_COP_0_TLB_LO0		#11: lo0 is loaded
338	sll	k1, 2				#12: chop top 2 bits (part 2a)
339	srl	k1, 2				#13: chop top 2 bits (part 2b)
340	mtc0	k1, MIPS_COP_0_TLB_LO1		#14: lo1 is loaded
341	nop					#15: standard nop
342	nop					#16: extra nop for QED5230
343#endif /* MIPS3_5900 */
344	tlbwr					#17: write to tlb
345	nop					#18: standard nop
346	nop					#19: needed by R4000/4400
347	nop					#1a: needed by R4000/4400
348	eret					#1b: return from exception
3494:	j _C_LABEL(MIPSX(TLBMissException))	#1c: kernel exception
350	nop					#1d: branch delay slot
3515:	j	slowfault			#1e: no page table present
352	nop					#1f: branch delay slot
353	.set	at
354_VECTOR_END(MIPSX(TLBMiss))
355
356#if defined(USE_64BIT_CP0_FUNCTIONS)
357/*
358 * mips3_XTLBMiss routine
359 *
360 *	Vector code for the XTLB-miss exception vector 0x80000080 on an r4000.
361 *
362 * This code is copied to the XTLB exception vector address to
363 * handle TLB translation misses while in 64-bit mode.
364 * NOTE: This code should be relocatable and max 32 instructions!!!
365 *
366 * Note that we do not support the full size of the PTEs, relying
367 * on appropriate truncation/sign extension.
368 *
369 * Don't check for invalid PTEs here. We load them as well and
370 * let the processor trap to load the correct value after service.
371 */
372VECTOR(MIPSX(XTLBMiss), unknown)
373	.set	noat
374	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
375	lui	k1, %hi(segbase)		#01: k1=hi of segbase
376	bltz	k0, 4f				#02: k0<0 -> 4f (kernel fault)
377	srl	k0, 20				#03: k0=seg offset (almost)
378	lw	k1, %lo(segbase)(k1)		#04: k1=segment tab base
379	andi	k0, k0, 0xffc			#05: k0=seg offset (mask 0x3)
380	addu	k1, k0, k1			#06: k1=seg entry address
381	lw	k1, 0(k1)			#07: k1=seg entry
382	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#08: k0=bad address (again)
383	beq	k1, zero, 5f			#09: ==0 -- no page table
384	srl	k0, 10				#0a: k0=VPN (aka va>>10)
385	andi	k0, k0, 0xff8			#0b: k0=page tab offset
386	addu	k1, k1, k0			#0c: k1=pte address
387	lw	k0, 0(k1)			#0d: k0=lo0 pte
388	lw	k1, 4(k1)			#0e: k1=lo1 pte
389	sll	k0, 2				#0f: chop top 2 bits (part 1a)
390	srl	k0, 2				#10: chop top 2 bits (part 1b)
391	mtc0	k0, MIPS_COP_0_TLB_LO0		#11: lo0 is loaded
392	sll	k1, 2				#12: chop top 2 bits (part 2a)
393	srl	k1, 2				#13: chop top 2 bits (part 2b)
394	mtc0	k1, MIPS_COP_0_TLB_LO1		#14: lo1 is loaded
395	nop					#15: standard nop
396	nop					#16: extra nop for QED5230
397	tlbwr					#17: write to tlb
398	nop					#18: standard nop
399	nop					#19: needed by R4000/4400
400	nop					#1a: needed by R4000/4400
401	eret					#1b: return from exception
4024:	j _C_LABEL(MIPSX(TLBMissException))	#1c: kernel exception
403	nop					#1d: branch delay slot
4045:	j	slowfault			#1e: no page table present
405	nop					#1f: branch delay slot
406	.set	at
407_VECTOR_END(MIPSX(XTLBMiss))
408#endif /* USE_64BIT_CP0_FUNCTIONS */
409
410/*
411 * Vector to real handler in KSEG1.
412 */
413VECTOR(MIPSX(cache), unknown)
414	la	k0, _C_LABEL(MIPSX(cacheException))
415	li	k1, MIPS_PHYS_MASK
416	and	k0, k1
417	li	k1, MIPS_KSEG1_START
418	or	k0, k1
419	j	k0
420	nop
421_VECTOR_END(MIPSX(cache))
422
423/*
424 * Handle MIPS32/MIPS64 style interrupt exception vector.
425 */
426VECTOR(MIPSX(intr), unknown)
427	la	k0, MIPSX(KernIntr)
428	j	k0
429	nop
430_VECTOR_END(MIPSX(intr))
431
432/*
433 *----------------------------------------------------------------------------
434 *
435 * mipsN_exception --
436 *
437 *	Vector code for the general exception vector 0x80000180
438 *	on an r4000 or r4400.
439 *
440 * This code is copied to the general exception vector address to
441 * handle most exceptions.
442 * NOTE: This code should be relocatable and max 32 instructions!!!
443 *----------------------------------------------------------------------------
444 */
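/*
 * MIPSX(excpt_sw) is assumed to be a table of 32 kernel-mode handler
 * pointers followed by 32 user-mode handler pointers.  CAUSE.ExcCode
 * (bits 6..2) is already a word offset into the table, and the KSU user
 * bit of STATUS shifted left by 3 adds 0x80 (32 words), selecting the
 * user half.
 */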
445VECTOR(MIPSX(exception), unknown)
446/*
447 * Find out what mode we came from and jump to the proper handler.
448 */
449	.set	noat
450	mfc0	k0, MIPS_COP_0_STATUS		#00: get the status register
451	mfc0	k1, MIPS_COP_0_CAUSE		#01: get the cause register
452	and	k0, k0, MIPS3_SR_KSU_USER	#02: test for user mode
453						#    sneaky but the bits are
454						#    with us........
455	sll	k0, k0, 3			#03: shift user bit for cause index
456	and	k1, k1, MIPS3_CR_EXC_CODE	#04: mask out the cause bits.
457	or	k1, k1, k0			#05: change index to user table
4581:
459	la	k0, MIPSX(excpt_sw)		#06: get base of the jump table
460	addu	k0, k0, k1			#07: get the address of the
461						#     function entry.  Note that
462						#     the cause is already
463						#     shifted left by 2 bits so
464						#     we don't have to shift.
465	lw	k0, 0(k0)			#08: get the function address
466	#nop					#    -slip-
467
468	j	k0				#09: jump to the function
469	nop					#0a: branch delay slot
470	.set	at
471_VECTOR_END(MIPSX(exception))
472
473/*----------------------------------------------------------------------------
474 *
475 * slowfault --
476 *
477 * Alternate entry point into mips3_UserGenException or
478 * mips3_KernGenException, when the UTLB miss handler couldn't
479 * find a TLB entry.
480 *
481 * Find out what mode we came from and call the appropriate handler.
482 *
483 *----------------------------------------------------------------------------
484 */
485
486/*
487 * We couldn't find a TLB entry.
488 * Find out what mode we came from and call the appropriate handler.
489 */
490slowfault:
491	.set	noat
492	mfc0	k0, MIPS_COP_0_STATUS
493	nop
494	and	k0, k0, MIPS3_SR_KSU_USER
495	bne	k0, zero, _C_LABEL(MIPSX(UserGenException))
496	nop
497	.set	at
498/*
499 * Fall through ...
500 */
501
502/*
503 * mips3_KernGenException
504 *
505 * Handle an exception from kernel mode.
506 * Build trapframe on stack to hold interrupted kernel context, then
507 * call trap() to process the condition.
508 *
509 * trapframe is pointed to by the 5th arg
510 * and a dummy sixth argument is used to avoid alignment problems
511 *	{
512 *	register_t cf_args[4 + 1];
513 *	register_t cf_pad;		(for 8 word alignment)
514 *	register_t cf_sp;
515 *	register_t cf_ra;
516 *	mips_reg_t kf_regs[17];		- trapframe begins here
517 * 	mips_reg_t kf_sr;		-
518 * 	mips_reg_t kf_mullo;		-
519 * 	mips_reg_t kf_mulhi;		-
520 * 	mips_reg_t kf_epc;		- may be changed by trap() call
521 * };
522 */
523NESTED_NOPROFILE(MIPSX(KernGenException), KERNFRAME_SIZ, ra)
524	.set	noat
525	.mask	0x80000000, -4
526#if defined(DDB) || defined(KGDB)
527	la	k0, _C_LABEL(kdbaux)
528	REG_S	s0, SF_REG_S0(k0)
529	REG_S	s1, SF_REG_S1(k0)
530	REG_S	s2, SF_REG_S2(k0)
531	REG_S	s3, SF_REG_S3(k0)
532	REG_S	s4, SF_REG_S4(k0)
533	REG_S	s5, SF_REG_S5(k0)
534	REG_S	s6, SF_REG_S6(k0)
535	REG_S	s7, SF_REG_S7(k0)
536	REG_S	sp, SF_REG_SP(k0)
537	REG_S	s8, SF_REG_S8(k0)
538	REG_S	gp, SF_REG_RA(k0)
539#endif
540/*
541 * Save the relevant kernel registers onto the stack.
542 * We don't need to save s0 - s8, sp and gp because
543 * the compiler does it for us.
544 */
545	subu	sp, sp, KERNFRAME_SIZ
546	REG_S	AT, TF_BASE+TF_REG_AST(sp)
547	REG_S	v0, TF_BASE+TF_REG_V0(sp)
548	REG_S	v1, TF_BASE+TF_REG_V1(sp)
549	mflo	v0
550	mfhi	v1
551	REG_S	a0, TF_BASE+TF_REG_A0(sp)
552	REG_S	a1, TF_BASE+TF_REG_A1(sp)
553	REG_S	a2, TF_BASE+TF_REG_A2(sp)
554	REG_S	a3, TF_BASE+TF_REG_A3(sp)
555	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
556	REG_S	t0, TF_BASE+TF_REG_T0(sp)
557	REG_S	t1, TF_BASE+TF_REG_T1(sp)
558	REG_S	t2, TF_BASE+TF_REG_T2(sp)
559	REG_S	t3, TF_BASE+TF_REG_T3(sp)
560	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
561	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
562	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
563	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
564	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
565	mfc0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
566	REG_S	t8, TF_BASE+TF_REG_T8(sp)
567	REG_S	t9, TF_BASE+TF_REG_T9(sp)
568	REG_S	ra, TF_BASE+TF_REG_RA(sp)
569	REG_S	a0, TF_BASE+TF_REG_SR(sp)
570	mfc0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
571	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
572	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
573	REG_S	a3, TF_BASE+TF_REG_EPC(sp)
574	addu	v0, sp, TF_BASE
575	sw	v0, KERNFRAME_ARG5(sp)		# 5th arg is ptr to trapframe
576#ifdef IPL_ICU_MASK
577	.set at
578	lw	v0, _C_LABEL(md_imask)
579	sw	v0, TF_BASE+TF_PPL(sp)
580	nop
581	.set noat
582#endif
583/*
584 * Call the trap handler.
585 */
586#if defined(DDB) || defined(DEBUG) || defined(KGDB)
587	addu	v0, sp, KERNFRAME_SIZ
588	sw	v0, KERNFRAME_SP(sp)
589#endif
590	mtc0	zero, MIPS_COP_0_STATUS		# Set kernel no error level
591	COP0_SYNC
592	nop
593	nop
594	nop
595	jal	_C_LABEL(trap)			#
596	sw	a3, KERNFRAME_RA(sp)		# for debugging
597
598/*
599 * Restore registers and return from the exception.
600 */
601	mtc0	zero, MIPS_COP_0_STATUS		# Make sure int disabled
602	COP0_SYNC
603	nop					# 3 nop delay
604	nop
605	nop
606#ifdef IPL_ICU_MASK
607	.set at
608	lw	a0, TF_BASE+TF_PPL(sp)
609	sw	a0, _C_LABEL(md_imask)
610	jal	_C_LABEL(md_imask_update)
611	nop
612	.set noat
613#endif
614	REG_L	a0, TF_BASE+TF_REG_SR(sp)	# ??? why differs ???
615	REG_L	t0, TF_BASE+TF_REG_MULLO(sp)
616	REG_L	t1, TF_BASE+TF_REG_MULHI(sp)
617	REG_L	k0, TF_BASE+TF_REG_EPC(sp)	# might be changed inside trap
618	mtc0	a0, MIPS_COP_0_STATUS		# restore the SR, disable intrs
619	COP0_SYNC
620	mtlo	t0
621	mthi	t1
622	_MTC0	k0, MIPS_COP_0_EXC_PC		# set return address
623	COP0_SYNC
624	REG_L	AT, TF_BASE+TF_REG_AST(sp)
625	REG_L	v0, TF_BASE+TF_REG_V0(sp)
626	REG_L	v1, TF_BASE+TF_REG_V1(sp)
627	REG_L	a0, TF_BASE+TF_REG_A0(sp)
628	REG_L	a1, TF_BASE+TF_REG_A1(sp)
629	REG_L	a2, TF_BASE+TF_REG_A2(sp)
630	REG_L	a3, TF_BASE+TF_REG_A3(sp)
631	REG_L	t0, TF_BASE+TF_REG_T0(sp)
632	REG_L	t1, TF_BASE+TF_REG_T1(sp)
633	REG_L	t2, TF_BASE+TF_REG_T2(sp)
634	REG_L	t3, TF_BASE+TF_REG_T3(sp)
635	REG_L	ta0, TF_BASE+TF_REG_TA0(sp)
636	REG_L	ta1, TF_BASE+TF_REG_TA1(sp)
637	REG_L	ta2, TF_BASE+TF_REG_TA2(sp)
638	REG_L	ta3, TF_BASE+TF_REG_TA3(sp)
639	REG_L	t8, TF_BASE+TF_REG_T8(sp)
640	REG_L	t9, TF_BASE+TF_REG_T9(sp)
641	REG_L	ra, TF_BASE+TF_REG_RA(sp)
642	addu	sp, sp, KERNFRAME_SIZ
643#ifdef DDBnotyet
644	la	k0, _C_LABEL(kdbaux)
645	REG_L	s0, SF_REG_S0(k0)
646	REG_L	s1, SF_REG_S1(k0)
647	REG_L	s2, SF_REG_S2(k0)
648	REG_L	s3, SF_REG_S3(k0)
649	REG_L	s4, SF_REG_S4(k0)
650	REG_L	s5, SF_REG_S5(k0)
651	REG_L	s6, SF_REG_S6(k0)
652	REG_L	s7, SF_REG_S7(k0)
653	REG_L	sp, SF_REG_SP(k0)
654	REG_L	s8, SF_REG_S8(k0)
655	REG_L	gp, SF_REG_RA(k0)
656#endif
657	eret					# return to interrupted point
658	.set	at
659END(MIPSX(KernGenException))
660
661/*
662 * mipsN_UserGenException
663 *
664 * Handle an exception from user mode.
665 * Save user context atop the kernel stack, then call trap() to process
666 * the condition.  The context can alternatively be manipulated via
667 * curproc->p_md.md_regs.
668 */
669NESTED_NOPROFILE(MIPSX(UserGenException), CALLFRAME_SIZ, ra)
670	.set	noat
671	.mask	0x80000000, -4
672/*
673 * Save all of the registers except for the kernel temporaries in u_pcb.
674 */
675	lw	k1, _C_LABEL(curpcb)
676	#nop					# -slip-
677	addu	k1, k1, USPACE - FRAME_SIZ
678	REG_S	AT, FRAME_AST(k1)
679	REG_S	v0, FRAME_V0(k1)
680	REG_S	v1, FRAME_V1(k1)
681	mflo	v0
682	REG_S	a0, FRAME_A0(k1)
683	REG_S	a1, FRAME_A1(k1)
684	REG_S	a2, FRAME_A2(k1)
685	REG_S	a3, FRAME_A3(k1)
686	mfhi	v1
687	REG_S	t0, FRAME_T0(k1)
688	REG_S	t1, FRAME_T1(k1)
689	REG_S	t2, FRAME_T2(k1)
690	REG_S	t3, FRAME_T3(k1)
691	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
692	REG_S	ta0, FRAME_TA0(k1)
693	REG_S	ta1, FRAME_TA1(k1)
694	REG_S	ta2, FRAME_TA2(k1)
695	REG_S	ta3, FRAME_TA3(k1)
696	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
697	REG_S	s0, FRAME_S0(k1)
698	REG_S	s1, FRAME_S1(k1)
699	REG_S	s2, FRAME_S2(k1)
700	REG_S	s3, FRAME_S3(k1)
701	_MFC0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
702	REG_S	s4, FRAME_S4(k1)
703	REG_S	s5, FRAME_S5(k1)
704	REG_S	s6, FRAME_S6(k1)
705	REG_S	s7, FRAME_S7(k1)
706	_MFC0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
707	REG_S	t8, FRAME_T8(k1)
708	REG_S	t9, FRAME_T9(k1)
709	REG_S	gp, FRAME_GP(k1)
710	REG_S	sp, FRAME_SP(k1)
711	REG_S	s8, FRAME_S8(k1)
712	REG_S	ra, FRAME_RA(k1)
713	REG_S	a0, FRAME_SR(k1)
714	REG_S	v0, FRAME_MULLO(k1)
715	REG_S	v1, FRAME_MULHI(k1)
716	REG_S	a3, FRAME_EPC(k1)
717#ifdef IPL_ICU_MASK
718	.set at
719	lw	t0, _C_LABEL(md_imask)
720	sw	t0, FRAME_PPL(k1)
721	.set noat
722#endif
723	addu	sp, k1, -CALLFRAME_SIZ	# switch to kernel SP
724#ifdef __GP_SUPPORT__
725	la	gp, _C_LABEL(_gp)		# switch to kernel GP
726#endif
727/*
728 * Turn off fpu and enter kernel mode
729 */
730	.set	at
731	and	t0, a0, ~(MIPS_SR_COP_1_BIT | MIPS_SR_EXL | MIPS_SR_KSU_MASK | MIPS_SR_INT_IE)
732	.set	noat
733/*
734 * Call the trap handler.
735 */
736	mtc0	t0, MIPS_COP_0_STATUS
737	COP0_SYNC
738	jal	_C_LABEL(trap)
739	sw	a3, CALLFRAME_SIZ-4(sp)		# for debugging
740/*
741 * Check pending asynchronous traps.
742 */
743	lw	t0, _C_LABEL(curproc)
744	lw	t0, P_MD_ASTPENDING(t0)
745	beq	t0, zero, 1f
746	nop
747/*
748 * We have pending asynchronous traps; all the state is already saved.
749 */
750	jal	_C_LABEL(ast)
751	lw	a0, CALLFRAME_SIZ + FRAME_EPC(sp)
7521:
753/*
754 * Restore user registers and return.
755 * First disable interrupts and set exception level.
756 */
757	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupt
758	COP0_SYNC
759	nop					# 3 clock delay before
760	nop					# exceptions blocked
761	nop					# for R4X
762	li	v0, MIPS_SR_EXL
763	mtc0	v0, MIPS_COP_0_STATUS		# set exception level
764	COP0_SYNC
765	nop					# 3 nop delay
766	nop
767	nop
768	addu	a1, sp, CALLFRAME_SIZ
769#ifdef IPL_ICU_MASK
770	.set at
771	lw	t0, FRAME_PPL(a1)
772	sw	t0, _C_LABEL(md_imask)
773	jal	_C_LABEL(md_imask_update)
774	nop
775	addu	a1, sp, CALLFRAME_SIZ
776	.set noat
777#endif
778 #	REG_L	a0, FRAME_SR(a1)
779	REG_L	t0, FRAME_MULLO(a1)
780	REG_L	t1, FRAME_MULHI(a1)
781	REG_L	v0, FRAME_EPC(a1)
782 #	mtc0	a0, MIPS_COP_0_STATUS		# still exception level
783	mtlo	t0
784	mthi	t1
785	_MTC0	v0, MIPS_COP_0_EXC_PC		# set return address
786	COP0_SYNC
787	move	k1, a1
788	REG_L	AT, FRAME_AST(k1)
789	REG_L	v0, FRAME_V0(k1)
790	REG_L	v1, FRAME_V1(k1)
791	REG_L	a0, FRAME_A0(k1)
792	REG_L	a1, FRAME_A1(k1)
793	REG_L	a2, FRAME_A2(k1)
794	REG_L	a3, FRAME_A3(k1)
795	REG_L	t0, FRAME_T0(k1)
796	REG_L	t1, FRAME_T1(k1)
797	REG_L	t2, FRAME_T2(k1)
798	REG_L	t3, FRAME_T3(k1)
799	REG_L	ta0, FRAME_TA0(k1)
800	REG_L	ta1, FRAME_TA1(k1)
801	REG_L	ta2, FRAME_TA2(k1)
802	REG_L	ta3, FRAME_TA3(k1)
803	REG_L	s0, FRAME_S0(k1)
804	REG_L	s1, FRAME_S1(k1)
805	REG_L	s2, FRAME_S2(k1)
806	REG_L	s3, FRAME_S3(k1)
807	REG_L	s4, FRAME_S4(k1)
808	REG_L	s5, FRAME_S5(k1)
809	REG_L	s6, FRAME_S6(k1)
810	REG_L	s7, FRAME_S7(k1)
811	REG_L	t8, FRAME_T8(k1)
812	REG_L	t9, FRAME_T9(k1)
813	REG_L	k0, FRAME_SR(k1)
814	DYNAMIC_STATUS_MASK_TOUSER(k0, ra)	# machine dependent masking
815	REG_L	gp, FRAME_GP(k1)
816	REG_L	sp, FRAME_SP(k1)
817	REG_L	s8, FRAME_S8(k1)
818	REG_L	ra, FRAME_RA(k1)
819	mtc0	k0, MIPS_COP_0_STATUS		# restore status
820	COP0_SYNC
821	nop
822	nop
823	eret					# return to interrupted point
824	.set	at
825END(MIPSX(UserGenException))
826
827/*
828 * mipsN_SystemCall
829 *
830 * Save user context in u_pcb, then call syscall() to process a system call.
831 * The context can alternatively be manipulated via curproc->p_md.md_regs.
832 */
833NESTED_NOPROFILE(MIPSX(SystemCall), CALLFRAME_SIZ, ra)
834	.set	noat
835	.mask	0x80000000, -4
836	lw	k1, _C_LABEL(curpcb)
837	#nop					# -slip-
838	addu	k1, k1, USPACE - FRAME_SIZ
839	#REG_S	AT, FRAME_AST(k1)
840	REG_S	v0, FRAME_V0(k1)		# syscall #
841	REG_S	v1, FRAME_V1(k1)		# used by syscall()
842	mflo	v0
843	REG_S	a0, FRAME_A0(k1)
844	REG_S	a1, FRAME_A1(k1)
845	REG_S	a2, FRAME_A2(k1)
846	REG_S	a3, FRAME_A3(k1)
847	lw	a0, _C_LABEL(curproc)		# 1st arg is curproc
848	mfhi	v1
849	#REG_S	t0, FRAME_T0(k1)		# no need to save temp regs
850	#REG_S	t1, FRAME_T1(k1)
851	#REG_S	t2, FRAME_T2(k1)
852	#REG_S	t3, FRAME_T3(k1)
853	mfc0	a1, MIPS_COP_0_STATUS		# 2nd arg is STATUS
854	#REG_S	ta0, FRAME_TA0(k1)
855	#REG_S	ta1, FRAME_TA1(k1)
856	#REG_S	ta2, FRAME_TA2(k1)
857	#REG_S	ta3, FRAME_TA3(k1)
858	mfc0	a2, MIPS_COP_0_CAUSE		# 3rd arg is CAUSE
859	REG_S	s0, FRAME_S0(k1)
860	REG_S	s1, FRAME_S1(k1)
861	REG_S	s2, FRAME_S2(k1)
862	REG_S	s3, FRAME_S3(k1)
863	mfc0	a3, MIPS_COP_0_EXC_PC		# 4th arg is PC
864	REG_S	s4, FRAME_S4(k1)
865	REG_S	s5, FRAME_S5(k1)
866	REG_S	s6, FRAME_S6(k1)
867	REG_S	s7, FRAME_S7(k1)
868	#REG_S	t8, FRAME_T8(k1)
869	#REG_S	t9, FRAME_T9(k1)
870	REG_S	gp, FRAME_GP(k1)
871	REG_S	sp, FRAME_SP(k1)
872	REG_S	s8, FRAME_S8(k1)
873	REG_S	ra, FRAME_RA(k1)
874	REG_S	a1, FRAME_SR(k1)
875	REG_S	v0, FRAME_MULLO(k1)
876	REG_S	v1, FRAME_MULHI(k1)
877	REG_S	a3, FRAME_EPC(k1)
878#ifdef IPL_ICU_MASK
879	.set at
880	lw	t0, _C_LABEL(md_imask)
881	sw	t0, FRAME_PPL(k1)
882	.set noat
883#endif
884	addu	sp, k1, -CALLFRAME_SIZ
885#ifdef __GP_SUPPORT__
886	la	gp, _C_LABEL(_gp)		# switch to kernel GP
887#endif
888/*
889 * Turn off fpu and enter kernel mode
890 */
891	.set	at
892	lw	t1, P_MD_SYSCALL(a0)		# t1 = syscall
893	and	t0, a1, ~(MIPS_SR_COP_1_BIT | MIPS_SR_EXL | MIPS_SR_KSU_MASK)
894	.set	noat
895#if defined(DDB) || defined(DEBUG) || defined(KGDB)
896	move	ra, a3
897	sw	ra, CALLFRAME_RA(sp)
898#endif
899/*
900 * Call the system call handler.
901 */
902	mtc0	t0, MIPS_COP_0_STATUS		# re-enable interrupts
903	COP0_SYNC
904	jal	t1
905	nop
906/*
907 * Check pending asynchronous traps.
908 */
909	lw	t0, _C_LABEL(curproc)
910	lw	t0, P_MD_ASTPENDING(t0)
911	beq	t0, zero, 1f
912	nop
913/*
914 * We have pending asynchronous traps; all the state is already saved.
915 */
916	jal	_C_LABEL(ast)
917	lw	a0, CALLFRAME_SIZ + FRAME_EPC(sp)
9181:
919/*
920 * Restore user registers and return.
921 * First disable interrupts and set exception level.
922 */
923	mtc0	zero, MIPS_COP_0_STATUS		# disable int
924	COP0_SYNC
925	nop					# 3 op delay
926	nop
927	nop
928
929	li	v0, MIPS_SR_EXL
930	mtc0	v0, MIPS_COP_0_STATUS		# set exception level
931	COP0_SYNC
932	nop					# 3 op delay
933	nop
934	nop
935/*
936 * Restore user registers and return.
937 */
938	addu	a1, sp, CALLFRAME_SIZ
939#ifdef IPL_ICU_MASK
940	.set at
941	lw	t0, FRAME_PPL(a1)
942	sw	t0, _C_LABEL(md_imask)
943	jal	_C_LABEL(md_imask_update)
944	nop
945	addu	a1, sp, CALLFRAME_SIZ
946	.set noat
947#endif
948 #	REG_L	a0, FRAME_SR(a1)
949	REG_L	t0, FRAME_MULLO(a1)
950	REG_L	t1, FRAME_MULHI(a1)
951	REG_L	v0, FRAME_EPC(a1)		# might be changed in syscall
952 #	mtc0	a0, MIPS_COP_0_STATUS		# this should disable interrupts
953	mtlo	t0
954	mthi	t1
955	_MTC0	v0, MIPS_COP_0_EXC_PC		# set return address
956	COP0_SYNC
957	move	k1, a1
958	REG_L	AT, FRAME_AST(k1)
959	REG_L	v0, FRAME_V0(k1)
960	REG_L	v1, FRAME_V1(k1)
961	REG_L	a0, FRAME_A0(k1)
962	REG_L	a1, FRAME_A1(k1)
963	REG_L	a2, FRAME_A2(k1)
964	REG_L	a3, FRAME_A3(k1)
965	REG_L	t0, FRAME_T0(k1)
966	REG_L	t1, FRAME_T1(k1)
967	REG_L	t2, FRAME_T2(k1)
968	REG_L	t3, FRAME_T3(k1)
969	REG_L	ta0, FRAME_TA0(k1)
970	REG_L	ta1, FRAME_TA1(k1)
971	REG_L	ta2, FRAME_TA2(k1)
972	REG_L	ta3, FRAME_TA3(k1)
973	REG_L	s0, FRAME_S0(k1)
974	REG_L	s1, FRAME_S1(k1)
975	REG_L	s2, FRAME_S2(k1)
976	REG_L	s3, FRAME_S3(k1)
977	REG_L	s4, FRAME_S4(k1)
978	REG_L	s5, FRAME_S5(k1)
979	REG_L	s6, FRAME_S6(k1)
980	REG_L	s7, FRAME_S7(k1)
981	REG_L	t8, FRAME_T8(k1)
982	REG_L	t9, FRAME_T9(k1)
983	REG_L	k0, FRAME_SR(k1)
984	DYNAMIC_STATUS_MASK_TOUSER(k0, ra)	# machine dependent masking
985	REG_L	gp, FRAME_GP(k1)
986	REG_L	sp, FRAME_SP(k1)
987	REG_L	s8, FRAME_S8(k1)
988	REG_L	ra, FRAME_RA(k1)
989	mtc0	k0, MIPS_COP_0_STATUS
990	COP0_SYNC
991	nop
992	nop
993	nop
994
995	eret					# return to syscall point
996	.set	at
997END(MIPSX(SystemCall))
998
999/*
1000 * Panic on cache errors.  A lot more could be done to recover
1001 * from some types of errors but it is tricky.
1002 */
1003NESTED_NOPROFILE(MIPSX(cacheException), KERNFRAME_SIZ, ra)
1004	.set	noat
1005	.mask	0x80000000, -4
1006#ifdef sbmips	/* XXX!  SB-1 needs a real cache error handler */
1007	eret
1008	nop
1009#endif
1010	la	k0, panic			# return to panic
1011	la	a0, 9f				# panicstr
1012	_MFC0	a1, MIPS_COP_0_ERROR_PC
1013	mfc0	a2, MIPS_COP_0_ECC
1014	mfc0	a3, MIPS_COP_0_CACHE_ERR
1015
1016	_MTC0	k0, MIPS_COP_0_ERROR_PC		# set return address
1017	COP0_SYNC
1018
1019	mfc0	k0, MIPS_COP_0_STATUS		# restore status
1020	li	k1, MIPS3_SR_DIAG_PE		# ignore further errors
1021	or	k0, k1
1022	mtc0	k0, MIPS_COP_0_STATUS		# restore status
1023	COP0_SYNC
1024	nop
1025	nop
1026	nop
1027
1028	eret
1029
1030	MSG("cache error @ EPC 0x%x ErrCtl 0x%x CacheErr 0x%x");
1031	.set	at
1032END(MIPSX(cacheException))
1033
1034/*
1035 * mipsX_KernIntr
1036 *
1037 * Handle an interrupt from kernel mode.
1038 * Build intrframe on stack to hold interrupted kernel context, then
1039 * call cpu_intr() to process it.
1040 *
1041 */
1042NESTED_NOPROFILE(MIPSX(KernIntr), KERNFRAME_SIZ, ra)
1043	.set	noat
1044	.mask	0x80000000, -4
1045	subu	sp, sp, KERNFRAME_SIZ
1046/*
1047 * Save the relevant kernel registers onto the stack.
1048 * We don't need to save s0 - s8, sp and gp because
1049 * the compiler does it for us.
1050 */
1051	REG_S	AT, TF_BASE+TF_REG_AST(sp)
1052	REG_S	v0, TF_BASE+TF_REG_V0(sp)
1053	REG_S	v1, TF_BASE+TF_REG_V1(sp)
1054	mflo	v0
1055	mfhi	v1
1056	REG_S	a0, TF_BASE+TF_REG_A0(sp)
1057	REG_S	a1, TF_BASE+TF_REG_A1(sp)
1058	REG_S	a2, TF_BASE+TF_REG_A2(sp)
1059	REG_S	a3, TF_BASE+TF_REG_A3(sp)
1060	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
1061	REG_S	t0, TF_BASE+TF_REG_T0(sp)
1062	REG_S	t1, TF_BASE+TF_REG_T1(sp)
1063	REG_S	t2, TF_BASE+TF_REG_T2(sp)
1064	REG_S	t3, TF_BASE+TF_REG_T3(sp)
1065	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
1066	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
1067	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
1068	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
1069	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
1070	mfc0	a2, MIPS_COP_0_EXC_PC		# 3rd arg is exception PC
1071	REG_S	t8, TF_BASE+TF_REG_T8(sp)
1072	REG_S	t9, TF_BASE+TF_REG_T9(sp)
1073	REG_S	ra, TF_BASE+TF_REG_RA(sp)
1074	REG_S	a0, TF_BASE+TF_REG_SR(sp)
1075	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
1076	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
1077	REG_S	a2, TF_BASE+TF_REG_EPC(sp)
1078/*
1079 * Call the interrupt handler.
1080 */
1081#if defined(DDB) || defined(DEBUG) || defined(KGDB)
1082	move	ra, a2
1083	sw	ra, KERNFRAME_RA(sp)		# for debugging
1084#endif
1085#ifdef IPL_ICU_MASK
1086	.set at
1087	lw	t0, _C_LABEL(md_imask)
1088	sw	t0, TF_BASE+TF_PPL(sp)
1089	.set noat
1090#endif
1091	mtc0	zero, MIPS_COP_0_STATUS		# Reset exl, trap possible.
1092	COP0_SYNC
1093	jal	_C_LABEL(cpu_intr)
1094	and	a3, a0, a1			# 4th is STATUS & CAUSE
1095/*
1096 * Restore registers and return from the interrupt.
1097 */
1098	mtc0	zero, MIPS_COP_0_STATUS		# Disable interrupt
1099	COP0_SYNC
1100	nop
1101	nop
1102	nop
1103#ifdef IPL_ICU_MASK
1104	.set at
1105	lw	a0, TF_BASE+TF_PPL(sp)
1106	sw	a0, _C_LABEL(md_imask)
1107	jal	_C_LABEL(md_imask_update)
1108	nop
1109	.set noat
1110#endif
1111	REG_L	a0, TF_BASE+TF_REG_SR(sp)	# ??? why differs ???
1112	DYNAMIC_STATUS_MASK(a0, t0)		# machine dependent masking
1113	REG_L	t0, TF_BASE+TF_REG_MULLO(sp)
1114	REG_L	t1, TF_BASE+TF_REG_MULHI(sp)
1115	REG_L	v0, TF_BASE+TF_REG_EPC(sp)
1116	mtc0	a0, MIPS_COP_0_STATUS		# restore the SR, disable intrs
1117	COP0_SYNC
1118	mtlo	t0
1119	mthi	t1
1120	_MTC0	v0, MIPS_COP_0_EXC_PC		# set return address
1121	COP0_SYNC
1122
1123	REG_L	AT, TF_BASE+TF_REG_AST(sp)
1124	REG_L	v0, TF_BASE+TF_REG_V0(sp)
1125	REG_L	v1, TF_BASE+TF_REG_V1(sp)
1126	REG_L	a0, TF_BASE+TF_REG_A0(sp)
1127	REG_L	a1, TF_BASE+TF_REG_A1(sp)
1128	REG_L	a2, TF_BASE+TF_REG_A2(sp)
1129	REG_L	a3, TF_BASE+TF_REG_A3(sp)
1130	REG_L	t0, TF_BASE+TF_REG_T0(sp)
1131	REG_L	t1, TF_BASE+TF_REG_T1(sp)
1132	REG_L	t2, TF_BASE+TF_REG_T2(sp)
1133	REG_L	t3, TF_BASE+TF_REG_T3(sp)
1134	REG_L	ta0, TF_BASE+TF_REG_TA0(sp)
1135	REG_L	ta1, TF_BASE+TF_REG_TA1(sp)
1136	REG_L	ta2, TF_BASE+TF_REG_TA2(sp)
1137	REG_L	ta3, TF_BASE+TF_REG_TA3(sp)
1138	REG_L	t8, TF_BASE+TF_REG_T8(sp)
1139	REG_L	t9, TF_BASE+TF_REG_T9(sp)
1140	REG_L	ra, TF_BASE+TF_REG_RA(sp)
1141	addu	sp, sp, KERNFRAME_SIZ		# restore kernel SP
1142	eret					# return to interrupted point
1143	.set	at
1144END(MIPSX(KernIntr))
1145
1146/*----------------------------------------------------------------------------
1147 * XXX this comment block should be updated XXX
1148 * mipsN_UserIntr --
1149 *
1150 *	Handle an interrupt from user mode.
1151 *	Note: we save minimal state in the u.u_pcb struct and use the standard
1152 *	kernel stack since there has to be a u page if we came from user mode.
1153 *	If there is a pending software interrupt, then save the remaining state
1154 *	and call softintr(). This is all because if we call switch() inside
1155 *	cpu_intr(), not all the user registers have been saved in u.u_pcb.
1156 *
1157 * Results:
1158 * 	None.
1159 *
1160 * Side effects:
1161 *	None.
1162 *
1163 *----------------------------------------------------------------------------
1164 */
1165NESTED_NOPROFILE(MIPSX(UserIntr), CALLFRAME_SIZ, ra)
1166	.set	noat
1167	.mask	0x80000000, -4
1168/*
1169 * Save the relevant user registers into the u_pcb.
1170 * We don't need to save s0 - s8 because the compiler does it for us.
1171 */
1172	lw	k1, _C_LABEL(curpcb)
1173	#nop					# -slip-
1174	addu	k1, k1, USPACE - FRAME_SIZ
1175	REG_S	AT, FRAME_AST(k1)
1176	REG_S	v0, FRAME_V0(k1)
1177	REG_S	v1, FRAME_V1(k1)
1178	mflo	v0
1179	REG_S	a0, FRAME_A0(k1)
1180	REG_S	a1, FRAME_A1(k1)
1181	REG_S	a2, FRAME_A2(k1)
1182	REG_S	a3, FRAME_A3(k1)
1183	mfhi	v1
1184	REG_S	t0, FRAME_T0(k1)
1185	REG_S	t1, FRAME_T1(k1)
1186	REG_S	t2, FRAME_T2(k1)
1187	REG_S	t3, FRAME_T3(k1)
1188	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
1189	REG_S	ta0, FRAME_TA0(k1)
1190	REG_S	ta1, FRAME_TA1(k1)
1191	REG_S	ta2, FRAME_TA2(k1)
1192	REG_S	ta3, FRAME_TA3(k1)
1193	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
1194	REG_S	t8, FRAME_T8(k1)
1195	REG_S	t9, FRAME_T9(k1)
1196	REG_S	gp, FRAME_GP(k1)
1197	REG_S	sp, FRAME_SP(k1)
1198	mfc0	a2, MIPS_COP_0_EXC_PC		# 3rd arg is PC
1199	REG_S	ra, FRAME_RA(k1)
1200	REG_S	a0, FRAME_SR(k1)
1201	REG_S	v0, FRAME_MULLO(k1)
1202	REG_S	v1, FRAME_MULHI(k1)
1203	REG_S	a2, FRAME_EPC(k1)
1204#ifdef IPL_ICU_MASK
1205	.set at
1206	lw	t0, _C_LABEL(md_imask)
1207	sw	t0, FRAME_PPL(k1)
1208	.set noat
1209#endif
1210	addu	sp, k1, -CALLFRAME_SIZ		# switch to kernel SP
1211#ifdef __GP_SUPPORT__
1212	la	gp, _C_LABEL(_gp)		# switch to kernel GP
1213#endif
1214/*
1215 * Turn off fpu and enter kernel mode
1216 */
1217	.set	at
1218	and	t0, a0, ~(MIPS_SR_COP_1_BIT | MIPS_SR_EXL | MIPS_SR_INT_IE | MIPS_SR_KSU_MASK)
1219	.set	noat
1220#if defined(DDB) || defined(DEBUG) || defined(KGDB)
1221	move	ra, a2
1222	sw	ra, CALLFRAME_RA(sp)
1223#endif
1224/*
1225 * Call the interrupt handler.
1226 */
1227	mtc0	t0, MIPS_COP_0_STATUS
1228	COP0_SYNC
1229	jal	_C_LABEL(cpu_intr)
1230	and	a3, a0, a1			# 4th is STATUS & CAUSE
1231/*
1232 * Restore registers and return from the interrupt.
1233 */
1234	nop
1235	mtc0	zero, MIPS_COP_0_STATUS
1236	COP0_SYNC
1237	nop					# 3 nop hazard
1238	nop
1239	nop
1240	li	v0, MIPS_SR_EXL
1241	mtc0	v0, MIPS_COP_0_STATUS		# set exception level bit.
1242	COP0_SYNC
1243	nop					# 3 nop hazard
1244	nop
1245	nop
1246	lw	v0, _C_LABEL(curproc)
1247	addu	a1, sp, CALLFRAME_SIZ
1248 #	REG_L	a0, FRAME_SR(a1)
1249	lw	v0, P_MD_ASTPENDING(v0)		# any pending ast?
1250 #	mtc0	a0, MIPS_COP_0_STATUS		# restore the SR, disable intrs
1251/*
1252 * Check pending asynchronous traps.
1253 */
1254	beq	v0, zero, 1f			# if no, skip ast processing
1255	nop					# -delay slot-
1256/*
1257 * We have pending asynchronous traps; save remaining user state in u_pcb.
1258 */
1259	REG_S	s0, FRAME_S0(a1)
1260	REG_S	s1, FRAME_S1(a1)
1261	REG_S	s2, FRAME_S2(a1)
1262	REG_S	s3, FRAME_S3(a1)
1263	REG_S	s4, FRAME_S4(a1)
1264	REG_S	s5, FRAME_S5(a1)
1265	REG_S	s6, FRAME_S6(a1)
1266	REG_S	s7, FRAME_S7(a1)
1267	REG_S	s8, FRAME_S8(a1)
1268	REG_L	a0, FRAME_EPC(a1)	# argument is interrupted PC
1269#ifdef IPL_ICU_MASK
1270	jal	_C_LABEL(spllowersofthigh);
1271	nop
1272#else
1273	li	t0, MIPS_HARD_INT_MASK | MIPS_SR_INT_IE
1274	DYNAMIC_STATUS_MASK(t0, t1)		# machine dependent masking
1275	mtc0	t0, MIPS_COP_0_STATUS		# enable interrupts (spl0)
1276	COP0_SYNC
1277#endif
1278	jal	_C_LABEL(ast)
1279	nop
1280/*
1281 * Restore user registers and return. NOTE: interrupts are enabled.
1282 */
1283	mtc0	zero, MIPS_COP_0_STATUS
1284	COP0_SYNC
1285	nop					# 3 nop delay
1286	nop
1287	nop
1288	li	v0, MIPS_SR_EXL
1289	mtc0	v0, MIPS_COP_0_STATUS		# set exception level bit.
1290	COP0_SYNC
1291	nop					# 3 nop delay
1292	nop
1293	nop
1294
1295	addu	a1, sp, CALLFRAME_SIZ
1296 #	REG_L	a0, FRAME_SR(a1)
1297	REG_L	s0, FRAME_S0(a1)
1298	REG_L	s1, FRAME_S1(a1)
1299	REG_L	s2, FRAME_S2(a1)
1300	REG_L	s3, FRAME_S3(a1)
1301	REG_L	s4, FRAME_S4(a1)
1302	REG_L	s5, FRAME_S5(a1)
1303	REG_L	s6, FRAME_S6(a1)
1304	REG_L	s7, FRAME_S7(a1)
1305	REG_L	s8, FRAME_S8(a1)
1306 #	mtc0	a0, MIPS_COP_0_STATUS		# this should disable interrupts
1307
13081:
1309	REG_L	t0, FRAME_MULLO(a1)
1310	REG_L	t1, FRAME_MULHI(a1)
1311	REG_L	v0, FRAME_EPC(a1)
1312	mtlo	t0
1313	mthi	t1
1314	_MTC0	v0, MIPS_COP_0_EXC_PC		# set return address
1315	COP0_SYNC
1316	nop					# ??? how much delay ???
1317	nop
1318
1319	move	k1, a1
1320#ifdef IPL_ICU_MASK
1321	.set at
1322	lw	t0, FRAME_PPL(k1)
1323	sw	t0, _C_LABEL(md_imask)
1324	jal	_C_LABEL(md_imask_update)
1325	nop
1326	.set noat
1327#endif
1328	REG_L	AT, FRAME_AST(k1)
1329	REG_L	v0, FRAME_V0(k1)
1330	REG_L	v1, FRAME_V1(k1)
1331	REG_L	a0, FRAME_A0(k1)
1332	REG_L	a1, FRAME_A1(k1)
1333	REG_L	a2, FRAME_A2(k1)
1334	REG_L	a3, FRAME_A3(k1)
1335	REG_L	t0, FRAME_T0(k1)
1336	REG_L	t1, FRAME_T1(k1)
1337	REG_L	t2, FRAME_T2(k1)
1338	REG_L	t3, FRAME_T3(k1)
1339	REG_L	ta0, FRAME_TA0(k1)
1340	REG_L	ta1, FRAME_TA1(k1)
1341	REG_L	ta2, FRAME_TA2(k1)
1342	REG_L	ta3, FRAME_TA3(k1)
1343	REG_L	t8, FRAME_T8(k1)
1344	REG_L	t9, FRAME_T9(k1)
1345	REG_L	k0, FRAME_SR(k1)
1346	DYNAMIC_STATUS_MASK_TOUSER(k0, ra)	# machine dependent masking
1347	REG_L	gp, FRAME_GP(k1)
1348	REG_L	sp, FRAME_SP(k1)
1349	REG_L	ra, FRAME_RA(k1)
1350	mtc0	k0, MIPS_COP_0_STATUS		# restore the SR
1351	COP0_SYNC
1352	nop					# required for QED 5230
1353	nop
1354	eret					# return to interrupted point
1355	.set	at
1356END(MIPSX(UserIntr))
1357
1358
1359/*----------------------------------------------------------------------------
1360 *
1361 *	R4000 TLB exception handlers
1362 *
1363 *----------------------------------------------------------------------------
1364 */
1365
1366
1367/*----------------------------------------------------------------------------
1368 *
1369 * mips3_TLBInvalidException --
1370 *
1371 *	Handle a TLB invalid exception from kernel mode in kernel space.
1372 *	The BadVAddr, Context, and EntryHi registers contain the failed
1373 *	virtual address.
1374 *
1375 *	The case of wired TLB entries is special.  The wired TLB entries
1376 *	are used to keep the u area mappings valid.  The PTE entries for these
1377 *	do not have MIPS3_PG_G set; the kernel instead relies
1378 *	on the switch_resume function to set these bits.
1379 *
1380 *	To preserve this situation, we set PG_G bits on the "other" TLB entries
1381 *	when they are wired.
1382 *
1383 * Results:
1384 *	None.
1385 *
1386 * Side effects:
1387 *	None.
1388 *
1389 *----------------------------------------------------------------------------
1390 */
1391LEAF_NOPROFILE(MIPSX(TLBInvalidException))
1392	.set	noat
1393	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# get the fault address
1394	li	k1, VM_MIN_KERNEL_ADDRESS	# compute index
1395	bgez	k0, _C_LABEL(MIPSX(KernGenException))	# full trap processing
1396	subu	k0, k0, k1
1397	lw	k1, _C_LABEL(Sysmapsize)	# index within range?
1398	srl	k0, k0, PGSHIFT
1399	sltu	k1, k0, k1
1400	beq	k1, zero, outofworld		# No. Failing beyond. . .
1401	lw	k1, _C_LABEL(Sysmap)
1402
1403	sll	k0, k0, 2			# compute offset from index
1404	addu	k1, k1, k0
1405	tlbp					# Probe the invalid entry
1406	COP0_SYNC
1407	and	k0, k0, 4			# check even/odd page
1408	nop					# required for QED 5230
1409	bne	k0, zero, KernTLBIOdd
1410	nop
1411
1412	mfc0	k0, MIPS_COP_0_TLB_INDEX
1413	nop
1414	bltz	k0, outofworld			# ASSERT(TLB entry exists)
1415	lw	k0, 0(k1)			# get PTE entry
1416
1417	_SLL	k0, k0, WIRED_SHIFT		# get rid of "wired" bit
1418	_SRL	k0, k0, WIRED_SHIFT
1419	mtc0	k0, MIPS_COP_0_TLB_LO0		# load PTE entry
1420	COP0_SYNC
1421	and	k0, k0, MIPS3_PG_V		# check for valid entry
1422	nop					# required for QED5230
1423	beq	k0, zero, _C_LABEL(MIPSX(KernGenException))	# PTE invalid
1424	lw	k0, 4(k1)			# get odd PTE entry
1425	_SLL	k0, k0, WIRED_SHIFT
1426	mfc0	k1, MIPS_COP_0_TLB_INDEX
1427	_SRL	k0, k0, WIRED_SHIFT
1428	sltiu	k1, k1, MIPS3_TLB_WIRED_UPAGES	# Luckily this is MIPS3_PG_G
1429	or	k1, k1, k0
1430	_MTC0	k0, MIPS_COP_0_TLB_LO1		# load PTE entry
1431	COP0_SYNC
1432	nop
1433	nop					# required for QED5230
1434	tlbwi					# write TLB
1435	COP0_SYNC
1436	nop
1437	nop
1438	nop
1439	nop
1440	nop
1441	eret
1442
1443KernTLBIOdd:
1444	mfc0	k0, MIPS_COP_0_TLB_INDEX
1445	nop
1446	bltz	k0, outofworld			# assert(TLB Entry exists)
1447	lw	k0, 0(k1)			# get PTE entry
1448
1449	_SLL	k0, k0, WIRED_SHIFT		# get rid of wired bit
1450	_SRL	k0, k0, WIRED_SHIFT
1451	_MTC0	k0, MIPS_COP_0_TLB_LO1		# save PTE entry
1452	COP0_SYNC
1453	and	k0, k0, MIPS3_PG_V		# check for valid entry
1454	nop					# required for QED5230
1455	beq	k0, zero, _C_LABEL(MIPSX(KernGenException))	# PTE invalid
1456	lw	k0, -4(k1)			# get even PTE entry
1457	_SLL	k0, k0, WIRED_SHIFT
1458	mfc0	k1, MIPS_COP_0_TLB_INDEX
1459	_SRL	k0, k0, WIRED_SHIFT
1460	sltiu	k1, k1, MIPS3_TLB_WIRED_UPAGES	# Luckily this is MIPS3_PG_G
1461	or	k1, k1, k0
1462	_MTC0	k0, MIPS_COP_0_TLB_LO0		# save PTE entry
1463	COP0_SYNC
1464	nop
1465	nop					# required for QED5230
1466	tlbwi					# update TLB
1467	COP0_SYNC
1468	nop
1469	nop
1470	nop
1471	nop
1472	nop
1473	eret
1474END(MIPSX(TLBInvalidException))
1475
1476/*----------------------------------------------------------------------------
1477 *
1478 * mipsN_TLBMissException --
1479 *
1480 *	Handle a TLB miss exception from kernel mode in kernel space.
1481 *	The BadVAddr, Context, and EntryHi registers contain the failed
1482 *	virtual address.
1483 *
1484 * Results:
1485 *	None.
1486 *
1487 * Side effects:
1488 *	None.
1489 *
1490 *----------------------------------------------------------------------------
1491 */
1492LEAF_NOPROFILE(MIPSX(TLBMissException))
1493	.set	noat
1494	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# get the fault address
1495	li	k1, VM_MIN_KERNEL_ADDRESS	# compute index
1496	subu	k0, k0, k1
1497	lw	k1, _C_LABEL(Sysmapsize)	# index within range?
1498	srl	k0, k0, PGSHIFT
1499	sltu	k1, k0, k1
1500#ifdef newsmips
1501	/* news5000 has ROM work area at 0xfff00000. */
1502	bne	k1, zero, 1f
1503	nop
1504	j	checkromwork
15051:
1506#else
1507	beq	k1, zero, outofworld		# No. Failing beyond. . .
1508#endif
1509	lw	k1, _C_LABEL(Sysmap)
1510	srl	k0, k0, 1
1511	sll	k0, k0, 3			# compute offset from index
1512	addu	k1, k1, k0
1513	lw	k0, 0(k1)			# get PTE entry
1514	lw	k1, 4(k1)			# get odd PTE entry
1515	_SLL	k0, k0, WIRED_SHIFT		# get rid of "wired" bit
1516	_SRL	k0, k0, WIRED_SHIFT
1517	_MTC0	k0, MIPS_COP_0_TLB_LO0		# load PTE entry
1518	COP0_SYNC
1519	_SLL	k1, k1, WIRED_SHIFT
1520	_SRL	k1, k1, WIRED_SHIFT
1521	_MTC0	k1, MIPS_COP_0_TLB_LO1		# load PTE entry
1522	COP0_SYNC
1523	nop
1524	nop					# required for QED5230
1525	tlbwr					# write TLB
1526	COP0_SYNC
1527	nop
1528	nop
1529	nop
1530	nop
1531	nop
1532	eret
1533
1534outofworld:
1535	/* eret to panic so shutdown can use K2.  Try to ensure valid $sp. */
1536	la	a0, _C_LABEL(panic)
1537	_MFC0	a2, MIPS_COP_0_EXC_PC
1538	move	a1, sp
1539	sll	k0, k0, PGSHIFT
1540	_MTC0	a0, MIPS_COP_0_EXC_PC		# return to panic
1541	COP0_SYNC
1542	li	k1, VM_MIN_KERNEL_ADDRESS
1543	addu	a3, k0, k1
1544#if defined(DDB)
1545	bltz	sp, 1f				# for ddb try to keep frame
1546	nop
1547#endif
1548	la	sp, start			# set sp to a valid place
15491:	la	a0, 9f				# string
1550	eret
1551
1552	.set	at
1553END(MIPSX(TLBMissException))
1554
1555	MSG("TLB out of universe: ksp %p epc %p vaddr %p")
1556
1557/*
1558 * Mark where code entered from the exception handler jump table
1559 * ends, for the stack traceback code.
1560 */
1561
1562	.globl	_C_LABEL(MIPSX(exceptionentry_end))
1563_C_LABEL(MIPSX(exceptionentry_end)):
1564
1565/*--------------------------------------------------------------------------
1566 *
1567 * mipsN_SetPID --
1568 *
1569 *	Write the given pid into the TLB pid reg.
1570 *
1571 *	mips3_SetPID(pid)
1572 *		int pid;
1573 *
1574 * Results:
1575 *	None.
1576 *
1577 * Side effects:
1578 *	PID set in the entry hi register.
1579 *
1580 *--------------------------------------------------------------------------
1581 */
1582LEAF(MIPSX(SetPID))
1583	_MTC0	a0, MIPS_COP_0_TLB_HI		# Write the hi reg value
1584	COP0_SYNC
1585	/* XXX simonb: lose these nops for mips32/64? */
1586	nop					# required for QED5230
1587	nop					# required for QED5230
1588	j	ra
1589	nop
1590END(MIPSX(SetPID))
1591
1592/*--------------------------------------------------------------------------
1593 *
1594 * mipsN_TLBUpdate --
1595 *
1596 *	Update the TLB if highreg is found; otherwise, enter the data.
1597 *
1598 *	mips3_TLBUpdate(virpageadr, lowregx)
1599 *		unsigned virpageadr, lowregx;
1600 *
1601 * Results:
1602 *	< 0 if loaded, >= 0 if updated.
1603 *
1604 * Side effects:
1605 *	None.
1606 *
1607 *--------------------------------------------------------------------------
1608 */
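/*
 * In outline: probe for the VPN/ASID given in a0; if a matching entry is
 * found, re-read it and rewrite only the even or odd EntryLo half (chosen
 * by MIPS3_PG_ODDPG) with tlbwi; otherwise build a fresh entry and write
 * it to a random slot with tlbwr.  Interrupts are disabled and the
 * current ASID is saved and restored around the update.
 */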
1609LEAF(MIPSX(TLBUpdate))
1610	mfc0	v1, MIPS_COP_0_STATUS	# Save the status register.
1611	mtc0	zero, MIPS_COP_0_STATUS	# Disable interrupts
1612	COP0_SYNC
1613	and	t1, a0, MIPS3_PG_ODDPG	# t1 = Even/Odd flag
1614	li	v0, (MIPS3_PG_HVPN | MIPS3_PG_ASID)
1615	and	a0, a0, v0
1616	_MFC0	t0, MIPS_COP_0_TLB_HI		# Save current PID
1617	_MTC0	a0, MIPS_COP_0_TLB_HI		# Init high reg
1618	COP0_SYNC
1619	and	a2, a1, MIPS3_PG_G		# Copy global bit
1620	nop
1621	nop
1622	tlbp					# Probe for the entry.
1623	COP0_SYNC
1624	_SLL	a1, a1, WIRED_SHIFT		# Clear top 34 bits of EntryLo
1625	_SRL	a1, a1, WIRED_SHIFT
1626	bne	t1, zero, 2f			# Decide even odd
1627	mfc0	v0, MIPS_COP_0_TLB_INDEX	# See what we got
1628# EVEN
1629	nop
1630	bltz	v0, 1f				# index < 0 => !found
1631	nop
1632	nop					# required for QED5230
1633
1634	tlbr					# update, read entry first
1635	COP0_SYNC
1636	nop
1637	nop
1638	nop
1639	_MTC0	a1, MIPS_COP_0_TLB_LO0		# init low reg0.
1640	COP0_SYNC
1641	nop
1642	nop					# required for QED5230
1643	tlbwi					# update slot found
1644	COP0_SYNC
1645	nop					# required for QED5230
1646	nop					# required for QED5230
1647	b	4f
1648	nop
16491:
1650#if defined(MIPS3) && defined(MIPS3_4100)		/* VR4100 core */
1651	lw	v0, _C_LABEL(default_pg_mask)	# default_pg_mask declared
1652	mtc0	v0, MIPS_COP_0_TLB_PG_MASK	#	in mips_machdep.c
1653#else
1654	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# init mask.
1655#endif
1656	COP0_SYNC
1657	_MTC0	a0, MIPS_COP_0_TLB_HI		# init high reg.
1658	COP0_SYNC
1659	_MTC0	a1, MIPS_COP_0_TLB_LO0		# init low reg0.
1660	COP0_SYNC
1661	_MTC0	a2, MIPS_COP_0_TLB_LO1		# init low reg1.
1662	COP0_SYNC
1663	nop
1664	nop					# required for QED5230
1665	tlbwr					# enter into a random slot
1666	COP0_SYNC
1667	nop					# required for QED5230
1668	nop					# required for QED5230
1669	b	4f
1670	nop
1671# ODD
16722:
1673	nop
1674	bltz	v0, 3f				# index < 0 => !found
1675	nop
1676	nop					# required for QED5230
1677
1678	tlbr					# read the entry first
1679	COP0_SYNC
1680	nop
1681	nop
1682	nop
1683	_MTC0	a1, MIPS_COP_0_TLB_LO1		# init low reg1.
1684	COP0_SYNC
1685	nop
1686	nop					# required for QED5230
1687	tlbwi					# update slot found
1688	COP0_SYNC
1689	nop					# required for QED5230
1690	nop					# required for QED5230
1691	b	4f
1692	nop
16933:
1694#if defined(MIPS3) && defined(MIPS3_4100)		/* VR4100 core */
1695	lw	v0, _C_LABEL(default_pg_mask)	# default_pg_mask declared
1696	mtc0	v0, MIPS_COP_0_TLB_PG_MASK	#	in mips_machdep.c
1697#else
1698	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# init mask.
1699#endif
1700	COP0_SYNC
1701	_MTC0	a0, MIPS_COP_0_TLB_HI		# init high reg.
1702	COP0_SYNC
1703	_MTC0	a2, MIPS_COP_0_TLB_LO0		# init low reg0.
1704	COP0_SYNC
1705	_MTC0	a1, MIPS_COP_0_TLB_LO1		# init low reg1.
1706	COP0_SYNC
1707	nop
1708	nop					# required for QED5230
1709	tlbwr					# enter into a random slot
1710	COP0_SYNC
17114:						# Make sure the pipeline
1712	nop					# advances before we
1713	nop					# use the TLB.
1714	nop
1715	nop
1716	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore PID
1717	COP0_SYNC
1718	nop					# required for QED5230
1719	nop					# required for QED5230
1720	j	ra
1721	mtc0	v1, MIPS_COP_0_STATUS		# Restore the status register
1722	COP0_SYNC				# XXXX - not executed!!
1723END(MIPSX(TLBUpdate))
1724
1725/*--------------------------------------------------------------------------
1726 *
1727 * mipsN_TLBRead --
1728 *
1729 *	Read the TLB entry.
1730 *
1731 *	mips3_TLBRead(entry, tlb)
1732 *		unsigned entry;
1733 *		struct tlb *tlb;
1734 *
1735 * Results:
1736 *	None.
1737 *
1738 * Side effects:
1739 *	tlb will contain the TLB entry found.
1740 *
1741 *--------------------------------------------------------------------------
1742 */
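/*
 * The caller's struct tlb is assumed to be four 32-bit words laid out as
 * { pagemask, entryhi, entrylo0, entrylo1 }, matching the stores at the
 * end of this routine.
 */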
1743LEAF(MIPSX(TLBRead))
1744	mfc0	v1, MIPS_COP_0_STATUS		# Save the status register.
1745	mtc0	zero, MIPS_COP_0_STATUS		# Disable interrupts
1746	COP0_SYNC
1747	nop
1748	mfc0	ta2, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
1749	nop
1750	_MFC0	t0, MIPS_COP_0_TLB_HI		# Get current PID
1751
1752	mtc0	a0, MIPS_COP_0_TLB_INDEX	# Set the index register
1753	COP0_SYNC
1754	nop
1755	nop					# required for QED5230
1756	tlbr					# Read from the TLB
1757	COP0_SYNC
1758	nop
1759	nop
1760	nop
1761	mfc0	t2, MIPS_COP_0_TLB_PG_MASK	# fetch the pgMask
1762	_MFC0	t3, MIPS_COP_0_TLB_HI		# fetch the hi entry
1763	_MFC0	ta0, MIPS_COP_0_TLB_LO0		# See what we got
1764	_MFC0	ta1, MIPS_COP_0_TLB_LO1		# See what we got
1765	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore PID
1766	COP0_SYNC
1767	mtc0	ta2, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
1768	COP0_SYNC
1769	nop
1770	nop
1771	nop					# wait for PID active
1772	mtc0	v1, MIPS_COP_0_STATUS		# Restore the status register
1773	COP0_SYNC
1774	nop
1775	sw	t2, 0(a1)
1776	sw	t3, 4(a1)
1777	sw	ta0, 8(a1)
1778	j	ra
1779	sw	ta1, 12(a1)
1780END(MIPSX(TLBRead))
1781
1782#if defined(MIPS3) && !defined(MIPS3_5900)
1783/*----------------------------------------------------------------------------
1784 *
1785 * mips3_VCED --
1786 *
1787 *	Handle virtual coherency exceptions.
1788 *	Called directly from the mips3 exception-table code.
1789 *	Only k0 and k1 are available on entry.
1790 *
1791 * Results:
1792 *	None.
1793 *
1794 * Side effects:
1795 *	Flushes the conflicting cache lines and returns
1796 *	from the exception.
1797 *
1798 *	NB: cannot be profiled, all registers are user registers on entry.
1799 *
1800 *----------------------------------------------------------------------------
1801 */
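/*
 * In outline: round BadVAddr down to a 16-byte boundary, write back and
 * invalidate that line in the secondary cache, invalidate it in the
 * primary data cache, then restart the faulting access with eret.
 */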
1802LEAF_NOPROFILE(MIPSX(VCED))
1803	.set	noat
1804	mfc0	k0, MIPS_COP_0_BAD_VADDR	# fault addr.
1805	li	k1, -16
1806	and	k0, k1
1807	cache	(CACHE_R4K_SD | CACHEOP_R4K_HIT_WB_INV), 0(k0)
1808	cache	(CACHE_R4K_D | CACHEOP_R4K_HIT_INV), 0(k0)
1809#ifdef DEBUG
1810	mfc0	k0, MIPS_COP_0_BAD_VADDR
1811	la	k1, VCED_vaddr
1812	sw	k0, 0(k1)
1813	mfc0	k0, MIPS_COP_0_EXC_PC
1814	la	k1, VCED_epc
1815	sw	k0, 0(k1)
1816	la	k1, VCED_count		# count number of exceptions
1817	srl	k0, k0, 26		# position upper 4 bits of VA
1818	and	k0, k0, 0x3c		# mask it off
1819	add	k1, k0			# get address of count table
1820	lw	k0, 0(k1)
1821	addu	k0, 1
1822	sw	k0, 0(k1)
1823#endif
1824	eret
1825	.set	at
1826
1827#ifdef DEBUG
1828	.data
1829	.globl	_C_LABEL(VCED_count)
1830_C_LABEL(VCED_count):
1831	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1832	.globl	_C_LABEL(VCED_epc)
1833_C_LABEL(VCED_epc):
1834	.word	0
1835	.globl	_C_LABEL(VCED_vaddr)
1836_C_LABEL(VCED_vaddr):
1837	.word	0
1838	.text
1839#endif
1840END(MIPSX(VCED))
1841
1842LEAF_NOPROFILE(MIPSX(VCEI))
1843	.set	noat
1844	mfc0	k0, MIPS_COP_0_BAD_VADDR	# fault addr.
1845	cache	(CACHE_R4K_SD | CACHEOP_R4K_HIT_WB_INV), 0(k0)
1846	cache	(CACHE_R4K_I | CACHEOP_R4K_HIT_INV), 0(k0)
1847#ifdef DEBUG
1848	mfc0	k0, MIPS_COP_0_BAD_VADDR
1849	la	k1, VCEI_vaddr
1850	sw	k0, 0(k1)
1851	la	k1, VCEI_count		# count number of exceptions
1852	srl	k0, k0, 26		# position upper 4 bits of VA
1853	and	k0, k0, 0x3c		# mask it off
1854	add	k1, k0			# get address of count table
1855	lw	k0, 0(k1)
1856	addu	k0, 1
1857	sw	k0, 0(k1)
1858#endif
1859	eret
1860	.set	at
1861
1862#ifdef DEBUG
1863	.data
1864	.globl	_C_LABEL(VCEI_count)
1865_C_LABEL(VCEI_count):
1866	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1867	.globl	_C_LABEL(VCEI_vaddr)
1868_C_LABEL(VCEI_vaddr):
1869	.word	0
1870	.text
1871#endif
1872END(MIPSX(VCEI))
1873#endif /* MIPS3 && !MIPS3_5900 */
1874
1875/*
1876 * mipsN_proc_trampoline()
1877 *
1878 * Arrange for a function to be invoked neatly after a cpu_switch().
1879 * Call the service function in s0 with the single argument held in s1.
1880 * No register save operation is needed.
1881 */
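/*
 * Illustrative sketch only (not the literal control flow): after
 * cpu_switch() has set up the new context, execution resumes here
 * with s0 = function to call and s1 = its argument, roughly
 *
 *	(*s0)(s1);
 *	... then restore the trap frame above the call frame
 *	    and return to user mode with eret ...
 *
 * The second half is the same register-restore sequence used by the
 * normal exception return path.
 */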
1882LEAF(MIPSX(proc_trampoline))
1883	addu	sp, sp, -CALLFRAME_SIZ
1884	jal	ra, s0
1885	move	a0, s1
1886	.set	noat
1887	#
1888	# Make sure to disable interrupts here, as otherwise
1889	# we can take an interrupt *after* EXL is set, and
1890	# end up returning to a bogus PC since the PC is not
1891	# saved if EXL=1.
1892	#
1893	mtc0	zero, MIPS_COP_0_STATUS		# disable int
1894	COP0_SYNC
1895	nop					# 3 op delay
1896	nop
1897	nop
1898	li	a0, MIPS_SR_EXL			# set exception level
1899	mtc0	a0, MIPS_COP_0_STATUS
1900	COP0_SYNC
1901	nop
1902	nop
1903	addu	a1, sp, CALLFRAME_SIZ
1904 #	REG_L	a0, FRAME_SR(a1)
1905	REG_L	t0, FRAME_MULLO(a1)
1906	REG_L	t1, FRAME_MULHI(a1)
1907	REG_L	v0, FRAME_EPC(a1)
1908	mtlo	t0
1909	mthi	t1
1910	_MTC0	v0, MIPS_COP_0_EXC_PC
1911	COP0_SYNC
1912	nop
1913	move	k1, a1
1914#ifdef IPL_ICU_MASK
1915	.set at
1916	lw	t0, FRAME_PPL(k1)
1917	sw	t0, _C_LABEL(md_imask)
1918	jal	_C_LABEL(md_imask_update)
1919	nop
1920	.set noat
1921#endif
1922	REG_L	AT, FRAME_AST(k1)
1923	REG_L	v0, FRAME_V0(k1)
1924	REG_L	v1, FRAME_V1(k1)
1925	REG_L	a0, FRAME_A0(k1)
1926	REG_L	a1, FRAME_A1(k1)
1927	REG_L	a2, FRAME_A2(k1)
1928	REG_L	a3, FRAME_A3(k1)
1929	REG_L	t0, FRAME_T0(k1)
1930	REG_L	t1, FRAME_T1(k1)
1931	REG_L	t2, FRAME_T2(k1)
1932	REG_L	t3, FRAME_T3(k1)
1933	REG_L	ta0, FRAME_TA0(k1)
1934	REG_L	ta1, FRAME_TA1(k1)
1935	REG_L	ta2, FRAME_TA2(k1)
1936	REG_L	ta3, FRAME_TA3(k1)
1937	REG_L	s0, FRAME_S0(k1)
1938	REG_L	s1, FRAME_S1(k1)
1939	REG_L	s2, FRAME_S2(k1)
1940	REG_L	s3, FRAME_S3(k1)
1941	REG_L	s4, FRAME_S4(k1)
1942	REG_L	s5, FRAME_S5(k1)
1943	REG_L	s6, FRAME_S6(k1)
1944	REG_L	s7, FRAME_S7(k1)
1945	REG_L	t8, FRAME_T8(k1)
1946	REG_L	t9, FRAME_T9(k1)
1947	REG_L	k0, FRAME_SR(k1)
1948	DYNAMIC_STATUS_MASK(k0, sp)		# machine dependent masking
1949	REG_L	gp, FRAME_GP(k1)
1950	REG_L	s8, FRAME_S8(k1)
1951	REG_L	ra, FRAME_RA(k1)
1952	REG_L	sp, FRAME_SP(k1)
1953	mtc0	k0, MIPS_COP_0_STATUS
1954	COP0_SYNC
1955	nop
1956	nop
1957	eret
1958	.set	at
1959END(MIPSX(proc_trampoline))
1960
1961/*
1962 * void mipsN_cpu_switch_resume(struct proc *newproc)
1963 *
1964 * Wire down the USPACE of newproc in TLB entry #0.  Before doing so,
1965 * check whether the target USPACE is already mapped elsewhere in the
1966 * TLB, and invalidate that entry (as TBIS() would) if it is.
1967 */
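/*
 * Illustrative sketch of the logic below (names are descriptive only):
 *
 *	if (p->p_addr is below KSEG2)		// unmapped USPACE
 *		return;				// nothing to wire
 *	if (p->p_addr sits on an odd page)
 *		panic();
 *	probe the TLB for p->p_addr;
 *	if (a matching entry is found)
 *		invalidate it;			// avoid a duplicate VPN
 *	write upte[0]|PG_G and upte[1]|PG_G into wired TLB entry #0;
 */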
1968LEAF_NOPROFILE(MIPSX(cpu_switch_resume))
1969	lw	a1, P_MD_UPTE_0(a0)		# a1 = upte[0]
1970	lw	a2, P_MD_UPTE_1(a0)		# a2 = upte[1]
1971	lw	v0, P_ADDR(a0)			# va = p->p_addr
1972	li	s0, MIPS_KSEG2_START
1973	blt	v0, s0, resume
1974	nop
1975
1976	and	s0, v0, MIPS3_PG_ODDPG
1977	beq	s0, zero, entry0
1978	nop
1979
1980	PANIC("USPACE sat on odd page boundary")
1981
1982entry0:
1983	_MTC0	v0, MIPS_COP_0_TLB_HI		# VPN = va
1984	COP0_SYNC
1985	nop
1986	nop
1987	tlbp					# probe VPN
1988	COP0_SYNC
1989	nop
1990	nop
1991	mfc0	s0, MIPS_COP_0_TLB_INDEX
1992	nop
1993	bltz	s0, entry0set
1994	sll	s0, s0, 13			# PAGE_SHIFT + 1
1995	la	s0, MIPS_KSEG0_START(s0)
1996	_MTC0	s0, MIPS_COP_0_TLB_HI
1997	COP0_SYNC
1998	_MTC0	zero, MIPS_COP_0_TLB_LO0
1999	COP0_SYNC
2000	_MTC0	zero, MIPS_COP_0_TLB_LO1
2001	COP0_SYNC
2002	nop
2003	nop
2004	tlbwi
2005	COP0_SYNC
2006	nop
2007	nop
2008	_MTC0	v0, MIPS_COP_0_TLB_HI		# set VPN again
2009	COP0_SYNC
2010entry0set:
2011	mtc0	zero, MIPS_COP_0_TLB_INDEX	# TLB entry #0
2012	COP0_SYNC
2013	or	a1, MIPS3_PG_G
2014	_MTC0	a1, MIPS_COP_0_TLB_LO0		# upte[0] | PG_G
2015	COP0_SYNC
2016	or	a2, MIPS3_PG_G
2017	_MTC0	a2, MIPS_COP_0_TLB_LO1		# upte[1] | PG_G
2018	COP0_SYNC
2019	nop
2020	nop
2021	tlbwi					# set TLB entry #0
2022	COP0_SYNC
2023	nop
2024	nop
2025
2026resume:
2027	j	ra
2028	nop
2029END(MIPSX(cpu_switch_resume))
2030
2031/*
2032 * void mipsN_TBIS(vaddr_t va)
2033 *
2034 * Invalidate the TLB entry that matches the given vaddr and ASID, if one is found.
2035 */
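/*
 * Illustrative sketch of the probe-and-invalidate sequence below:
 *
 *	s = splhigh();
 *	EntryHi = va & (MIPS3_PG_HVPN | MIPS3_PG_ASID);
 *	tlbp();					// probe for a match
 *	if (Index >= 0) {			// found: clobber it
 *		EntryHi  = KSEG0 + (Index << (PAGE_SHIFT + 1));
 *		EntryLo0 = EntryLo1 = PageMask = 0;
 *		tlbwi();
 *	}
 *	... restore EntryHi (ASID), PageMask and the status register ...
 */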
2036LEAF_NOPROFILE(MIPSX(TBIS))
2037	mfc0	v1, MIPS_COP_0_STATUS		# save status register
2038	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
2039	COP0_SYNC
2040
2041	li	v0, (MIPS3_PG_HVPN | MIPS3_PG_ASID)
2042	_MFC0	t0, MIPS_COP_0_TLB_HI		# save current ASID
2043	mfc0	t3, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
2044	and	a0, a0, v0			# make sure valid entryHi
2045	_MTC0	a0, MIPS_COP_0_TLB_HI		# look for the vaddr & ASID
2046	COP0_SYNC
2047	nop
2048	nop
2049	tlbp					# probe the entry in question
2050	COP0_SYNC
2051	nop
2052	nop
2053	mfc0	v0, MIPS_COP_0_TLB_INDEX	# see what we got
2054	#nop					# -slip-
2055	#nop					# -slip-
2056	bltz	v0, 1f				# index < 0 then skip
2057	li	t1, MIPS_KSEG0_START		# invalid address
2058	sll	v0, v0, 13			# PAGE_SHIFT + 1
2059	addu	t1, t1, v0
2060	_MTC0	t1, MIPS_COP_0_TLB_HI		# make entryHi invalid
2061	COP0_SYNC
2062	_MTC0	zero, MIPS_COP_0_TLB_LO0	# zero out entryLo0
2063	COP0_SYNC
2064	_MTC0	zero, MIPS_COP_0_TLB_LO1	# zero out entryLo1
2065	COP0_SYNC
2066	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out pageMask
2067	COP0_SYNC
2068	nop
2069	nop
2070	tlbwi
2071	COP0_SYNC
2072	nop
2073	nop
20741:
2075	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore current ASID
2076	COP0_SYNC
2077	mtc0	t3, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
2078	COP0_SYNC
2079	nop
2080	nop
2081	j	ra
2082	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
2083	COP0_SYNC				# XXXX - not executed!!
2084END(MIPSX(TBIS))
2085
2086/*
2087 * void mipsN_TBIAP(int sizeofTLB)
2088 *
2089 * Invalidate TLB entries belonging to per-process user space while
2090 * leaving kernel-space entries marked global intact.
2091 */
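/*
 * Illustrative sketch of the loop below: every non-wired entry whose
 * EntryLo1 lacks the global bit is replaced with an invalid KSEG0 VPN.
 *
 *	for (i = Wired; i < sizeofTLB; i++) {
 *		Index = i;
 *		tlbr();				// read entry i
 *		if (EntryLo1 & MIPS3_PG_G)
 *			continue;		// kernel mapping, keep it
 *		EntryHi  = KSEG0 + (i << (PAGE_SHIFT + 1));
 *		EntryLo0 = EntryLo1 = PageMask = 0;
 *		tlbwi();
 *	}
 */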
2092LEAF_NOPROFILE(MIPSX(TBIAP))
2093	mfc0	v1, MIPS_COP_0_STATUS		# save status register
2094	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
2095	COP0_SYNC
2096
2097	move	t2, a0
2098	mfc0	t1, MIPS_COP_0_TLB_WIRED
2099	li	v0, MIPS_KSEG0_START		# invalid address
2100	mfc0	t3, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
2101
2102	# do {} while (t1 < t2)
21031:
2104	mtc0	t1, MIPS_COP_0_TLB_INDEX	# set index
2105	COP0_SYNC
2106	sll	ta0, t1, 13			# PAGE_SHIFT + 1
2107	nop
2108	/* XXX simonb: lose this nop for mips32/64? */
2109	nop
2110	tlbr					# obtain an entry
2111	COP0_SYNC
2112	/* XXX simonb: lose these nops for mips32/64? */
2113	nop
2114	nop
2115	nop
2116	_MFC0	a0, MIPS_COP_0_TLB_LO1
2117	and	a0, a0, MIPS3_PG_G		# check whether it has the G bit
2118	bnez	a0, 2f
2119	addu	ta0, ta0, v0
2120
2121	_MTC0	ta0, MIPS_COP_0_TLB_HI		# make entryHi invalid
2122	COP0_SYNC
2123	_MTC0	zero, MIPS_COP_0_TLB_LO0	# zero out entryLo0
2124	COP0_SYNC
2125	_MTC0	zero, MIPS_COP_0_TLB_LO1	# zero out entryLo1
2126	COP0_SYNC
2127	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out mask entry
2128	COP0_SYNC
2129	/* XXX simonb: lose these nops for mips32/64? */
2130	nop
2131	nop
2132	tlbwi					# invalidate the TLB entry
2133	COP0_SYNC
21342:
2135	addu	t1, t1, 1
2136	bne	t1, t2, 1b
2137	nop
2138
2139	mtc0	t3, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
2140	COP0_SYNC
2141	/* XXX simonb: lose these nops for mips32/64? */
2142	nop
2143	nop
2144	j	ra				# new ASID will be set soon
2145	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
2146	COP0_SYNC				# XXXX - not executed!!
2147END(MIPSX(TBIAP))
2148
2149/*
2150 * void mipsN_TBIA(int sizeofTLB)
2151 *
2152 * Invalidate all non-wired TLB entries.
2153 */
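/*
 * Illustrative sketch of the loop below: every entry from the wired
 * boundary up to sizeofTLB is overwritten with an invalid KSEG0 VPN.
 *
 *	EntryLo0 = EntryLo1 = PageMask = 0;	// once, before the loop
 *	for (i = Wired; i < sizeofTLB; i++) {
 *		Index   = i;
 *		EntryHi = KSEG0 + (i << (PAGE_SHIFT + 1));
 *		tlbwi();
 *	}
 */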
2154LEAF_NOPROFILE(MIPSX(TBIA))
2155	mfc0	v1, MIPS_COP_0_STATUS		# save status register
2156	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
2157	COP0_SYNC
2158
2159	li	v0, MIPS_KSEG0_START		# invalid address
2160	_MFC0	t0, MIPS_COP_0_TLB_HI		# save current ASID
2161	mfc0	t1, MIPS_COP_0_TLB_WIRED
2162	mfc0	t2, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
2163
2164	_MTC0	zero, MIPS_COP_0_TLB_LO0	# zero out entryLo0
2165	COP0_SYNC
2166	_MTC0	zero, MIPS_COP_0_TLB_LO1	# zero out entryLo1
2167	COP0_SYNC
2168	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out pageMask
2169	COP0_SYNC
2170
2171	# do {} while (t1 < a0)
21721:
2173	mtc0	t1, MIPS_COP_0_TLB_INDEX	# set TLBindex
2174	COP0_SYNC
2175	sll	ta0, t1, 13			# PAGE_SHIFT + 1
2176	add	ta0, v0, ta0
2177	_MTC0	ta0, MIPS_COP_0_TLB_HI		# make entryHi invalid
2178	COP0_SYNC
2179	nop
2180	nop
2181	tlbwi					# clear the entry
2182	COP0_SYNC
2183	addu	t1, t1, 1			# increment index
2184	bne	t1, a0, 1b
2185	nop
2186
2187	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore ASID
2188	COP0_SYNC
2189	mtc0	t2, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
2190	COP0_SYNC
2191	nop
2192	nop
2193	j	ra
2194	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
2195	COP0_SYNC				# XXXX - not executed!!
2196END(MIPSX(TBIA))
2197
2198#ifdef USE_64BIT_INSTRUCTIONS
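/*
 * mipsN_pagezero(dst)
 *
 * Zero one page (NBPG bytes) starting at the address in a0, using
 * 64-bit stores, 64 bytes per loop iteration.  The store order is
 * deliberately interleaved so the first touch of each cache line is
 * issued early ("try to miss cache first" below) before the remaining
 * words of that line are filled in.
 */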
2199LEAF(MIPSX(pagezero))
2200	li	a1, NBPG >> 6
2201
22021:	sd	zero, 0(a0)			# try to miss cache first
2203	sd	zero, 32(a0)
2204	subu	a1, 1
2205	sd	zero, 16(a0)
2206	sd	zero, 48(a0)
2207	sd	zero, 8(a0)			# fill in cache lines
2208	sd	zero, 40(a0)
2209	sd	zero, 24(a0)
2210	sd	zero, 56(a0)
2211	bgtz	a1, 1b
2212	addu	a0, 64
2213
2214	j	ra
2215	nop
2216END(MIPSX(pagezero))
2217#endif /* USE_64BIT_INSTRUCTIONS */
2218
2219	.data
2220
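/*
 * locoresw: jump table of CPU-variant locore hooks.  The order of the
 * three words below (cpu_switch_resume, proc_trampoline, idle loop) is
 * what the machine-independent callers expect; keep it in sync with
 * the corresponding definition in the MIPS headers.
 */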
2221	.globl _C_LABEL(MIPSX(locoresw))
2222_C_LABEL(MIPSX(locoresw)):
2223	.word _C_LABEL(MIPSX(cpu_switch_resume))
2224	.word _C_LABEL(MIPSX(proc_trampoline))
2225	.word _C_LABEL(mips_idle)
2226
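/*
 * excpt_sw: exception dispatch table.  The low-level vector code
 * indexes it with the ExcCode field of the Cause register: the first
 * 32 words are the kernel-mode handlers, the following 32 words the
 * user-mode handlers.
 */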
2227MIPSX(excpt_sw):
2228	####
2229	#### The kernel exception handlers.
2230	####
2231	.word _C_LABEL(MIPSX(KernIntr))		#  0 external interrupt
2232	.word _C_LABEL(MIPSX(KernGenException))	#  1 TLB modification
2233	.word _C_LABEL(MIPSX(TLBInvalidException))# 2 TLB miss (LW/I-fetch)
2234	.word _C_LABEL(MIPSX(TLBInvalidException))# 3 TLB miss (SW)
2235	.word _C_LABEL(MIPSX(KernGenException))	#  4 address error (LW/I-fetch)
2236	.word _C_LABEL(MIPSX(KernGenException))	#  5 address error (SW)
2237	.word _C_LABEL(MIPSX(KernGenException))	#  6 bus error (I-fetch)
2238	.word _C_LABEL(MIPSX(KernGenException))	#  7 bus error (load or store)
2239	.word _C_LABEL(MIPSX(KernGenException))	#  8 system call
2240	.word _C_LABEL(MIPSX(KernGenException))	#  9 breakpoint
2241	.word _C_LABEL(MIPSX(KernGenException))	# 10 reserved instruction
2242	.word _C_LABEL(MIPSX(KernGenException))	# 11 coprocessor unusable
2243	.word _C_LABEL(MIPSX(KernGenException))	# 12 arithmetic overflow
2244	.word _C_LABEL(MIPSX(KernGenException))	# 13 r4k trap exception
2245#if defined(MIPS3) && !defined(MIPS3_5900)
2246	.word _C_LABEL(mips3_VCEI)		# 14 r4k virt coherence
2247#else
2248	.word _C_LABEL(MIPSX(KernGenException))	# 14 reserved
2249#endif
2250	.word _C_LABEL(MIPSX(KernGenException))	# 15 r4k FP exception
2251	.word _C_LABEL(MIPSX(KernGenException))	# 16 reserved
2252	.word _C_LABEL(MIPSX(KernGenException))	# 17 reserved
2253	.word _C_LABEL(MIPSX(KernGenException))	# 18 reserved
2254	.word _C_LABEL(MIPSX(KernGenException))	# 19 reserved
2255	.word _C_LABEL(MIPSX(KernGenException))	# 20 reserved
2256	.word _C_LABEL(MIPSX(KernGenException))	# 21 reserved
2257	.word _C_LABEL(MIPSX(KernGenException))	# 22 reserved
2258	.word _C_LABEL(MIPSX(KernGenException))	# 23 watch exception
2259	.word _C_LABEL(MIPSX(KernGenException))	# 24 reserved
2260	.word _C_LABEL(MIPSX(KernGenException))	# 25 reserved
2261	.word _C_LABEL(MIPSX(KernGenException))	# 26 reserved
2262	.word _C_LABEL(MIPSX(KernGenException))	# 27 reserved
2263	.word _C_LABEL(MIPSX(KernGenException))	# 28 reserved
2264	.word _C_LABEL(MIPSX(KernGenException))	# 29 reserved
2265	.word _C_LABEL(MIPSX(KernGenException))	# 30 reserved
2266#if defined(MIPS3) && !defined(MIPS3_5900)
2267	.word _C_LABEL(mips3_VCED)		# 31 v. coherence exception data
2268#else
2269	.word _C_LABEL(MIPSX(KernGenException))	# 31 reserved
2270#endif
2271	#####
2272	##### The user exception handlers.
2273	#####
2274	.word _C_LABEL(MIPSX(UserIntr))		#  0
2275	.word _C_LABEL(MIPSX(UserGenException))	#  1
2276	.word _C_LABEL(MIPSX(UserGenException))	#  2
2277	.word _C_LABEL(MIPSX(UserGenException))	#  3
2278	.word _C_LABEL(MIPSX(UserGenException))	#  4
2279	.word _C_LABEL(MIPSX(UserGenException))	#  5
2280	.word _C_LABEL(MIPSX(UserGenException))	#  6
2281	.word _C_LABEL(MIPSX(UserGenException))	#  7
2282	.word _C_LABEL(MIPSX(SystemCall))	#  8
2283	.word _C_LABEL(MIPSX(UserGenException))	#  9
2284	.word _C_LABEL(MIPSX(UserGenException))	# 10
2285	.word _C_LABEL(MIPSX(UserGenException))	# 11
2286	.word _C_LABEL(MIPSX(UserGenException))	# 12
2287	.word _C_LABEL(MIPSX(UserGenException))	# 13
2288#if defined(MIPS3) && !defined(MIPS3_5900)
2289	.word _C_LABEL(mips3_VCEI)		# 14
2290#else
2291	.word _C_LABEL(MIPSX(UserGenException))	# 14
2292#endif
2293	.word _C_LABEL(MIPSX(UserGenException))	# 15
2294	.word _C_LABEL(MIPSX(UserGenException))	# 16
2295	.word _C_LABEL(MIPSX(UserGenException))	# 17
2296	.word _C_LABEL(MIPSX(UserGenException))	# 18
2297	.word _C_LABEL(MIPSX(UserGenException))	# 19
2298	.word _C_LABEL(MIPSX(UserGenException))	# 20
2299	.word _C_LABEL(MIPSX(UserGenException))	# 21
2300	.word _C_LABEL(MIPSX(UserGenException))	# 22
2301	.word _C_LABEL(MIPSX(UserGenException))	# 23
2302	.word _C_LABEL(MIPSX(UserGenException))	# 24
2303	.word _C_LABEL(MIPSX(UserGenException))	# 25
2304	.word _C_LABEL(MIPSX(UserGenException))	# 26
2305	.word _C_LABEL(MIPSX(UserGenException))	# 27
2306	.word _C_LABEL(MIPSX(UserGenException))	# 28
2307	.word _C_LABEL(MIPSX(UserGenException))	# 29
2308	.word _C_LABEL(MIPSX(UserGenException))	# 30
2309#if defined(MIPS3) && !defined(MIPS3_5900)
2310	.word _C_LABEL(mips3_VCED)		# 31 v. coherence exception data
2311#else
2312	.word _C_LABEL(MIPSX(UserGenException))	# 31
2313#endif
2314