1/*	$NetBSD: mipsX_subr.S,v 1.37 2010/12/22 01:34:17 nisimura Exp $	*/
2
3/*
4 * Copyright 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed for the NetBSD Project by
20 *	Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38/*
39 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 *    notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 *    notice, this list of conditions and the following disclaimer in the
49 *    documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 *    must display the following acknowledgement:
52 *      This product includes software developed by Jonathan R. Stone for
53 *      the NetBSD Project.
54 * 4. The name of the author may not be used to endorse or promote products
55 *    derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
67 * SUCH DAMAGE.
68 */
69
70/*
71 * Copyright (c) 1992, 1993
72 *	The Regents of the University of California.  All rights reserved.
73 *
74 * This code is derived from software contributed to Berkeley by
75 * Digital Equipment Corporation and Ralph Campbell.
76 *
77 * Redistribution and use in source and binary forms, with or without
78 * modification, are permitted provided that the following conditions
79 * are met:
80 * 1. Redistributions of source code must retain the above copyright
81 *    notice, this list of conditions and the following disclaimer.
82 * 2. Redistributions in binary form must reproduce the above copyright
83 *    notice, this list of conditions and the following disclaimer in the
84 *    documentation and/or other materials provided with the distribution.
85 * 3. Neither the name of the University nor the names of its contributors
86 *    may be used to endorse or promote products derived from this software
87 *    without specific prior written permission.
88 *
89 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
90 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
91 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
92 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
93 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
94 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
95 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
96 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
97 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
98 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
99 * SUCH DAMAGE.
100 *
101 * Copyright (C) 1989 Digital Equipment Corporation.
102 * Permission to use, copy, modify, and distribute this software and
103 * its documentation for any purpose and without fee is hereby granted,
104 * provided that the above copyright notice appears in all copies.
105 * Digital Equipment Corporation makes no representations about the
106 * suitability of this software for any purpose.  It is provided "as is"
107 * without express or implied warranty.
108 *
109 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
110 *	v 1.1 89/07/11 17:55:04 nelson Exp  SPRITE (DECWRL)
111 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
112 *	v 9.2 90/01/29 18:00:39 shirriff Exp  SPRITE (DECWRL)
113 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
114 *	v 1.1 89/07/10 14:27:41 nelson Exp  SPRITE (DECWRL)
115 *
116 *	@(#)locore.s	8.5 (Berkeley) 1/4/94
117 */
118#include "opt_cputype.h"
119#include "opt_ddb.h"
120#include "opt_kgdb.h"
121#include "opt_mips3_wired.h"
122#include "opt_vmswap.h"
123
124#include <sys/cdefs.h>
125
126#include <mips/asm.h>
127#include <mips/cpuregs.h>
128#if defined(MIPS3) && !defined(MIPS3_5900)
129#include <mips/cache_r4k.h>
130#endif
131
132#include <machine/param.h>
133#include <machine/endian.h>
134
135#include "assym.h"
136
137#ifdef _LP64
138#define	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(reg) \
139	li reg, MIPS_SR_KX; mtc0 reg, MIPS_COP_0_STATUS
140#define	SET_EXCEPTION_LEVEL(reg) \
141	li reg, MIPS_SR_EXL | MIPS_SR_KX; mtc0 reg, MIPS_COP_0_STATUS
142#else
143#define	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(reg) \
144	mtc0 zero, MIPS_COP_0_STATUS
145#define	SET_EXCEPTION_LEVEL(reg) \
146	li reg, MIPS_SR_EXL; mtc0 reg, MIPS_COP_0_STATUS
147#endif
148
149/*
150 * XXX MIPS3_5900 is still "special" for much of this code.
151 */
152
153#if MIPS1
154#error This file cannot be compiled with MIPS1 defined
155#endif
156
157#if MIPS3 + MIPS32 + MIPS64 != 1
158# error  Only one of MIPS{3,32,64} can be defined
159#endif
160
161/*
162 * Use 64bit cp0 instructions?
163 */
164#if defined(MIPS3)
165#define	USE_64BIT_INSTRUCTIONS
166#if defined(MIPS3_5900)		/* the 5900 has mips32-like mmu registers */
167#undef	USE_64BIT_CP0_FUNCTIONS
168#else
169#define	USE_64BIT_CP0_FUNCTIONS
170#endif
171#endif
172
173#if defined(MIPS32)
174#undef	USE_64BIT_INSTRUCTIONS
175#undef	USE_64BIT_CP0_FUNCTIONS
176#endif
177
178#if defined(MIPS64)
179#define	USE_64BIT_INSTRUCTIONS
180#define	USE_64BIT_CP0_FUNCTIONS
181#endif
182
183#if defined(USE_64BIT_CP0_FUNCTIONS)
184#define	_SLL		dsll
185#define	_SRL		dsrl
186#define	WIRED_SHIFT	34
187#else
188#define	_SLL		sll
189#define	_SRL		srl
190#define	WIRED_SHIFT	2
191#endif
192
193/*
194 * Use correct-sized m?c0/dm?c0 opcodes.
195 */
196#if defined(USE_64BIT_CP0_FUNCTIONS)
197#define	_MFC0	dmfc0
198#define	_MTC0	dmtc0
199#else
200#define	_MFC0	mfc0
201#define	_MTC0	mtc0
202#endif
203
204
205/*
206 * Set ISA level for the assembler.
207 */
208#if defined(MIPS3)
209	.set	mips3
210#endif
211
212#if defined(MIPS32)
213	.set	mips32
214#endif
215
216#if defined(MIPS64)
217	.set	mips64
218#endif
219
220
221/*
222 * CPP function renaming macros.
223 */
224
225#if defined(MIPS3)
226#ifdef __STDC__
227#define	MIPSX(name)	mips3_ ## name
228#else
229#define	MIPSX(name)	mips3_/**/name
230#endif
231#endif
232
233#if defined(MIPS3_5900)
234#undef MIPSX
235#ifdef __STDC__
236#define	MIPSX(name)	mips5900_ ## name
237#else
238#define	MIPSX(name)	mips5900_/**/name
239#endif
240#endif
241
242#if defined(MIPS32)
243#ifdef __STDC__
244#define	MIPSX(name)	mips32_ ## name
245#else
246#define	MIPSX(name)	mips32_/**/name
247#endif
248#endif
249
250#if defined(MIPS64)
251#ifdef __STDC__
252#define	MIPSX(name)	mips64_ ## name
253#else
254#define	MIPSX(name)	mips64_/**/name
255#endif
256#endif
257
258#define	_VECTOR_END(name)	VECTOR_END(name)
259
260/*
261 * XXX We need a cleaner way of handling the instruction hazards of
262 * the various processors.  Here are the relevant rules for the QED 52XX:
263 *	tlbw[ri]	-- two integer ops beforehand
264 *	tlbr		-- two integer ops beforehand
265 *	tlbp		-- two integer ops beforehand
266 *	mtc0	[PageMask,EntryHi,Cp0] -- two integer ops afterwards
267 *	changing JTLB	-- two integer ops afterwards
268 *	mtc0	[EPC,ErrorEPC,Status] -- two int ops afterwards before eret
269 *	config.k0	-- five int ops before kseg0, ckseg0 memref
270 *
271 * For the IDT R4000, some hazards are:
272 *	mtc0/mfc0	one integer op before and after
273 *	tlbp		-- one integer op afterwards
274 * The obvious solution is to take the least common denominator.
275 *
276 * For the Toshiba R5900, TX79:
277 *	mtc0		following sync.p
278 *	tlbw[ri], tlbp	following sync.p or eret
279 * For those CPUs, COP0_SYNC is defined as sync.p.
280 */
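/*
 * Hedged illustration only (the real COP0_SYNC definition comes from the
 * MIPS headers included above, not from this file): the hazard barrier
 * used throughout the code below behaves roughly like
 *
 *	#ifdef MIPS3_5900
 *	#define	COP0_SYNC	sync.p
 *	#else
 *	#define	COP0_SYNC
 *	#endif
 *
 * that is, an explicit pipeline sync on the R5900 and nothing elsewhere,
 * where the surrounding padding nops cover the hazards.
 */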
281
282
283/*
284 *============================================================================
285 *
286 *  MIPS III ISA support, part 1: locore exception vectors.
287 *  The following code is copied to the vector locations to which
288 *  the CPU jumps in response to an exception or a TLB miss.
289 *
290 *============================================================================
291 */
292	.set	noreorder
293
294/*
295 * TLB handling data.   'segbase' points to the base of the segment
296 * table.   This is read and written by C code in mips_machdep.c.
297 *
298 * XXX: use linear mapped PTs at fixed VA in kseg2 in the future?
299 */
300	.text
301
302
303/*
304 * Some useful labels for debugging.
305 */
306.global	mips_kseg0
307.equiv	mips_kseg0,		MIPS_KSEG0_START
308.global	mips_kseg1
309.equiv	mips_kseg1,		MIPS_KSEG1_START
310.global	mips_kseg2
311.equiv	mips_kseg2,		MIPS_KSEG2_START
312.global	mips_xkphys
313.equiv	mips_xkphys,		MIPS_XKPHYS_START
314.global	mips_xkphys_u
315.equiv	mips_xkphys_u,		MIPS_XKPHYS_UNCACHED
316.global	mips_xkphys_cca3
317.equiv	mips_xkphys_cca3,	MIPS_XKPHYS_CCA3
318.global	mips_xkphys_cca4
319.equiv	mips_xkphys_cca4,	MIPS_XKPHYS_CCA4
320.global	mips_xkseg
321.equiv	mips_xkseg,		MIPS_XKSEG_START
322
323
324/*
325 *----------------------------------------------------------------------------
326 *
327 * mips3_TLBMiss --
328 *
329 *	Vector code for the TLB-miss exception vector 0x80000000
330 *	on an r4000.
331 *
332 * This code is copied to the TLB exception vector address to
333 * handle TLB translation misses.
334 * NOTE: This code should be relocatable and max 32 instructions!!!
335 *
336 * Don't check for invalid pte's here. We load them as well and
337 * let the processor trap to load the correct value after service.
338 *----------------------------------------------------------------------------
339 */
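/*
 * A hedged, C-like sketch of the refill done by the vector code below
 * (SEGOFS/PTOFS are illustrative names, not real kernel symbols):
 *
 *	pte_t *pt = segbase[SEGOFS(badvaddr)];		// 1st level: segment table
 *	if (pt == NULL)
 *		goto slowfault;				// no page table, take the slow path
 *	pte_t *pte = &pt[PTOFS(badvaddr) & ~1];		// 2nd level: even/odd PTE pair
 *	EntryLo0 = pte[0];  EntryLo1 = pte[1];		// strip software bits, then tlbwr
 *
 * Negative (kernel) fault addresses branch to the kernel TLB-miss handler
 * instead.
 */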
340VECTOR(MIPSX(TLBMiss), unknown)
341	.set	noat
342	_MFC0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
343	lui	k1, %hi(segbase)		#01: k1=hi of segbase
344	bltz	k0, 4f				#02: k0<0 -> 4f (kernel fault)
345	PTR_SRL	k0, 2*PGSHIFT-2-PTR_SCALESHIFT	#03: k0=seg offset (almost)
346	PTR_L	k1, %lo(segbase)(k1)		#04: k1=segment tab base
347	andi	k0, NBPG-(1<<PTR_SCALESHIFT)	#05: k0=seg offset (mask 0x3)
348	PTR_ADDU k1, k0, k1			#06: k1=seg entry address
349	PTR_L	k1, 0(k1)			#07: k1=seg entry
350	_MFC0	k0, MIPS_COP_0_BAD_VADDR	#08: k0=bad address (again)
351	beq	k1, zero, 5f			#09: ==0 -- no page table
352	PTR_SRL	k0, (PGSHIFT-2)			#0a: k0=VPN (aka va>>10)
353	andi	k0, k0, (NBPG-8)		#0b: k0=page tab offset
354	PTR_ADDU k1, k1, k0			#0c: k1=pte address
355	INT_L	k0, 0(k1)			#0d: k0=lo0 pte
356	INT_L	k1, 4(k1)			#0e: k1=lo1 pte
357	_SLL	k0, WIRED_SHIFT			#0f: chop top 2 bits (part 1a)
358	_SRL	k0, WIRED_SHIFT			#10: chop top 2 bits (part 1b)
359#ifdef MIPS3_5900
360	_MTC0	k0, MIPS_COP_0_TLB_LO0		#11: lo0 is loaded
361	sync.p					#12: R5900 cop0 hazard
362	_SLL	k1, WIRED_SHIFT			#13: chop top 2 bits (part 2a)
363	_SRL	k1, WIRED_SHIFT			#14: chop top 2 bits (part 2b)
364	_MTC0	k1, MIPS_COP_0_TLB_LO1		#15: lo1 is loaded
365	sync.p					#16: R5900 cop0 hazard
366#else /* MIPS3_5900 */
367	_MTC0	k0, MIPS_COP_0_TLB_LO0		#11: lo0 is loaded
368	_SLL	k1, WIRED_SHIFT			#12: chop top 2 bits (part 2a)
369	_SRL	k1, WIRED_SHIFT			#13: chop top 2 bits (part 2b)
370	_MTC0	k1, MIPS_COP_0_TLB_LO1		#14: lo1 is loaded
371	nop					#15: standard nop
372	nop					#16: extra nop for QED5230
373#endif /* MIPS3_5900 */
374	tlbwr					#17: write to tlb
375	nop					#18: standard nop
376	nop					#19: needed by R4000/4400
377	nop					#1a: needed by R4000/4400
378	eret					#1b: return from exception
3794:	j _C_LABEL(MIPSX(TLBMissException))	#1c: kernel exception
380	nop					#1d: branch delay slot
3815:	j	MIPSX(slowfault)		#1e: no page table present
382	nop					#1f: branch delay slot
383	.set	at
384_VECTOR_END(MIPSX(TLBMiss))
385
386#if defined(USE_64BIT_CP0_FUNCTIONS)
387/*
388 * mips3_XTLBMiss routine
389 *
390 *	Vector code for the XTLB-miss exception vector 0x80000080 on an r4000.
391 *
392 * This code is copied to the XTLB exception vector address to
393 * handle TLB translation misses while in 64-bit mode.
394 * NOTE: This code should be relocatable and max 32 instructions!!!
395 *
396 * Note that we do not support the full size of the PTEs, relying
397 * on appropriate truncation/sign extension.
398 *
399 * Don't check for invalid PTEs here.  We load them anyway and let the
400 * processor fault again so the correct value can be loaded after servicing.
401 */
402VECTOR(MIPSX(XTLBMiss), unknown)
403	.set	noat
404	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#00: k0=bad address
405	lui	k1, %hi(segbase)		#01: k1=hi of segbase
406	bltz	k0, 4f				#02: k0<0 -> 4f (kernel fault)
407	PTR_SRL	k0, 2*PGSHIFT-2-PTR_SCALESHIFT	#03: k0=seg offset (almost)
408	PTR_L	k1, %lo(segbase)(k1)		#04: k1=segment tab base
409	andi	k0, NBPG-(1<<PTR_SCALESHIFT)	#05: k0=seg offset (mask 0x3)
410	PTR_ADDU k1, k0, k1			#06: k1=seg entry address
411	PTR_L	k1, 0(k1)			#07: k1=seg entry
412	dmfc0	k0, MIPS_COP_0_BAD_VADDR	#08: k0=bad address (again)
413	beq	k1, zero, 5f			#09: ==0 -- no page table
414	PTR_SRL	k0, (PGSHIFT-2)			#0a: k0=VPN (aka va>>10)
415	andi	k0, k0, (NBPG-8)		#0b: k0=page tab offset
416	PTR_ADDU k1, k1, k0			#0c: k1=pte address
417	INT_L	k0, 0(k1)			#0d: k0=lo0 pte
418	INT_L	k1, 4(k1)			#0e: k1=lo1 pte
419	_SLL	k0, WIRED_SHIFT			#0f: chop top 2 bits (part 1a)
420	_SRL	k0, WIRED_SHIFT			#10: chop top 2 bits (part 1b)
421	_MTC0	k0, MIPS_COP_0_TLB_LO0		#11: lo0 is loaded
422	_SLL	k1, WIRED_SHIFT			#12: chop top 2 bits (part 2a)
423	_SRL	k1, WIRED_SHIFT			#13: chop top 2 bits (part 2b)
424	_MTC0	k1, MIPS_COP_0_TLB_LO1		#14: lo1 is loaded
425	nop					#15: standard nop
426	nop					#16: extra nop for QED5230
427	tlbwr					#17: write to tlb
428	nop					#18: standard nop
429	nop					#19: needed by R4000/4400
430	nop					#1a: needed by R4000/4400
431	eret					#1b: return from exception
4324:	j _C_LABEL(MIPSX(TLBMissException))	#1c: kernel exception
433	nop					#1d: branch delay slot
4345:	j	MIPSX(slowfault)		#1e: no page table present
435	nop					#1f: branch delay slot
436	.set	at
437_VECTOR_END(MIPSX(XTLBMiss))
438#endif /* USE_64BIT_CP0_FUNCTIONS */
439
440/*
441 * Vector to real handler in KSEG1.
442 */
443VECTOR(MIPSX(cache), unknown)
444	PTR_LA	k0, _C_LABEL(MIPSX(cacheException))
445	li	k1, MIPS_PHYS_MASK
446	and	k0, k1
447	li	k1, MIPS_KSEG1_START
448	or	k0, k1
449	j	k0
450	nop
451_VECTOR_END(MIPSX(cache))
452
453/*
454 * Handle MIPS32/MIPS64 style interrupt exception vector.
455 */
456VECTOR(MIPSX(intr), unknown)
457	PTR_LA	k0, MIPSX(KernIntr)
458	j	k0
459	nop
460_VECTOR_END(MIPSX(intr))
461
462/*
463 *----------------------------------------------------------------------------
464 *
465 * mipsN_exception --
466 *
467 *	Vector code for the general exception vector 0x80000180
468 *	on an r4000 or r4400.
469 *
470 * This code is copied to the general exception vector address to
471 * handle most exceptions.
472 * NOTE: This code should be relocatable and max 32 instructions!!!
473 *----------------------------------------------------------------------------
474 */
475VECTOR(MIPSX(exception), unknown)
476/*
477 * Find out what mode we came from and jump to the proper handler.
478 */
479	.set	noat
480	mfc0	k0, MIPS_COP_0_STATUS		#00: get the status register
481	mfc0	k1, MIPS_COP_0_CAUSE		#01: get the cause register
482	and	k0, k0, MIPS3_SR_KSU_USER	#02: test for user mode
483						#    sneaky but the bits are
484						#    with us........
485	sll	k0, k0, 3			#03: shift user bit for cause index
486	and	k1, k1, MIPS3_CR_EXC_CODE	#04: mask out the cause bits.
487	or	k1, k1, k0			#05: change index to user table
488#ifdef _LP64
489	PTR_SLL	k1, k1, 1
490#endif
4911:
492	PTR_LA	k0, MIPSX(excpt_sw)		#06: get base of the jump table
493	PTR_ADDU k0, k0, k1			#08: get the address of the
494						#     function entry.  Note that
495						#     the cause is already
496						#     shifted left by 2 bits so
497						#     we don't have to shift.
498	PTR_L	k0, 0(k0)			#09: get the function address
499	#nop					#    -slip-
500
501	j	k0				#0a: jump to the function
502	nop					#0b: branch delay slot
503	.set	at
504_VECTOR_END(MIPSX(exception))
505
506/*----------------------------------------------------------------------------
507 *
508 * MIPSX(slowfault) --
509 *
510 * Alternate entry point into the mips3_UserGenException or
511 * mips3_KernGenException, when the UTLB miss handler couldn't
512 * find a TLB entry.
513 *
514 * Find out what mode we came from and call the appropriate handler.
515 *
516 *----------------------------------------------------------------------------
517 */
518
519/*
520 * We couldn't find a TLB entry.
521 * Find out what mode we came from and call the appropriate handler.
522 */
523MIPSX(slowfault):
524	.set	noat
525	mfc0	k0, MIPS_COP_0_STATUS
526	nop
527	and	k0, k0, MIPS3_SR_KSU_USER
528	bne	k0, zero, _C_LABEL(MIPSX(UserGenException))
529	nop
530	.set	at
531/*
532 * Fall through ...
533 */
534
535/*
536 * mips3_KernGenException
537 *
538 * Handle an exception from kernel mode.
539 * Build trapframe on stack to hold interrupted kernel context, then
540 * call trap() to process the condition.
541 *
542 * The trapframe is pointed to by the 5th argument, and a dummy sixth
543 * argument is used to avoid alignment problems as follows:
544 *	{
545 *	register_t cf_args[4 + 1];
546 *	register_t cf_pad;		(for 8 word alignment)
547 *	register_t cf_sp;
548 *	register_t cf_ra;
549 *	mips_reg_t kf_regs[17];		- trapframe begins here
550 * 	mips_reg_t kf_sr;		-
551 * 	mips_reg_t kf_mullo;		-
552 * 	mips_reg_t kf_mulhi;		-
553 * 	mips_reg_t kf_epc;		- may be changed by trap() call
554 * };
555 */
556NESTED_NOPROFILE(MIPSX(KernGenException), KERNFRAME_SIZ, ra)
557	.set	noat
558	.mask	0x80000000, -4
559#if defined(DDB) || defined(KGDB)
560	PTR_LA	k0, _C_LABEL(kdbaux)
561	REG_S	s0, SF_REG_S0(k0)
562	REG_S	s1, SF_REG_S1(k0)
563	REG_S	s2, SF_REG_S2(k0)
564	REG_S	s3, SF_REG_S3(k0)
565	REG_S	s4, SF_REG_S4(k0)
566	REG_S	s5, SF_REG_S5(k0)
567	REG_S	s6, SF_REG_S6(k0)
568	REG_S	s7, SF_REG_S7(k0)
569	REG_S	sp, SF_REG_SP(k0)
570	REG_S	s8, SF_REG_S8(k0)
571	REG_S	gp, SF_REG_RA(k0)
572#endif
573/*
574 * Save the relevant kernel registers onto the stack.
575 * We don't need to save s0 - s8, sp and gp because
576 * the compiler does it for us.
577 */
578	PTR_SUBU sp, sp, KERNFRAME_SIZ
579	REG_S	AT, TF_BASE+TF_REG_AST(sp)
580	REG_S	v0, TF_BASE+TF_REG_V0(sp)
581	REG_S	v1, TF_BASE+TF_REG_V1(sp)
582	mflo	v0
583	mfhi	v1
584	REG_S	a0, TF_BASE+TF_REG_A0(sp)
585	REG_S	a1, TF_BASE+TF_REG_A1(sp)
586	REG_S	a2, TF_BASE+TF_REG_A2(sp)
587	REG_S	a3, TF_BASE+TF_REG_A3(sp)
588	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
589	REG_S	t0, TF_BASE+TF_REG_T0(sp)
590	REG_S	t1, TF_BASE+TF_REG_T1(sp)
591	REG_S	t2, TF_BASE+TF_REG_T2(sp)
592	REG_S	t3, TF_BASE+TF_REG_T3(sp)
593	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
594	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
595	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
596	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
597	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
598	_MFC0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
599	REG_S	t8, TF_BASE+TF_REG_T8(sp)
600	REG_S	t9, TF_BASE+TF_REG_T9(sp)
601	REG_S	ra, TF_BASE+TF_REG_RA(sp)
602	REG_S	a0, TF_BASE+TF_REG_SR(sp)
603	_MFC0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
604	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
605	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
606	REG_S	a3, TF_BASE+TF_REG_EPC(sp)
607#if defined(__mips_o32) || defined(__mips_o64)
608	PTR_ADDU v0, sp, TF_BASE
609	REG_S	v0, KERNFRAME_ARG5(sp)		# 5th arg is p. to trapframe
610#endif
611#if defined(__mips_n32) || defined(__mips_n64)
612	PTR_ADDU a4, sp, TF_BASE		# 5th arg is p. to trapframe
613#endif
614#ifdef IPL_ICU_MASK
615	.set at
616	INT_L	v0, _C_LABEL(md_imask)
617	INT_S	v0, TF_BASE+TF_PPL(sp)
618	nop
619	.set noat
620#endif
621/*
622 * Call the trap handler.
623 */
624#if defined(DDB) || defined(DEBUG) || defined(KGDB)
625	PTR_ADDU v0, sp, KERNFRAME_SIZ
626	REG_S	v0, KERNFRAME_SP(sp)
627#endif
628	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
629	COP0_SYNC
630	nop
631	nop
632	nop
633	jal	_C_LABEL(trap)			#
634	REG_S	a3, KERNFRAME_RA(sp)		# for debugging
635
636/*
637 * Restore registers and return from the exception.
638 */
639	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
640	COP0_SYNC
641	nop					# 3 nop delay
642	nop
643	nop
644#ifdef IPL_ICU_MASK
645	.set at
646	INT_L	a0, TF_BASE+TF_PPL(sp)
647	INT_S	a0, _C_LABEL(md_imask)
648	jal	_C_LABEL(md_imask_update)
649	nop
650	.set noat
651#endif
652	REG_L	a0, TF_BASE+TF_REG_SR(sp)	# ??? why differs ???
653	REG_L	t0, TF_BASE+TF_REG_MULLO(sp)
654	REG_L	t1, TF_BASE+TF_REG_MULHI(sp)
655	REG_L	k1, TF_BASE+TF_REG_EPC(sp)	# might be changed inside trap
656	mtc0	a0, MIPS_COP_0_STATUS		# restore the SR, disable intrs
657	COP0_SYNC
658	mtlo	t0
659	mthi	t1
660
661#ifdef notyet
662	/* Check for restartable sequences. */
663	lui	t0, %hi(_C_LABEL(_lock_ras_start))
664	ori	t0, zero, %lo(_C_LABEL(_lock_ras_start))
665	li	t1, -MIPS_LOCK_RAS_SIZE
666	and	t1, t1, k1
667	bne	t1, t0, 1f
668	jal	_C_LABEL(_lock_ras)
669	nop
670#endif
671
6721:	_MTC0	k1, MIPS_COP_0_EXC_PC		# set return address
673	COP0_SYNC
674	REG_L	AT, TF_BASE+TF_REG_AST(sp)
675	REG_L	v0, TF_BASE+TF_REG_V0(sp)
676	REG_L	v1, TF_BASE+TF_REG_V1(sp)
677	REG_L	a0, TF_BASE+TF_REG_A0(sp)
678	REG_L	a1, TF_BASE+TF_REG_A1(sp)
679	REG_L	a2, TF_BASE+TF_REG_A2(sp)
680	REG_L	a3, TF_BASE+TF_REG_A3(sp)
681	REG_L	t0, TF_BASE+TF_REG_T0(sp)
682	REG_L	t1, TF_BASE+TF_REG_T1(sp)
683	REG_L	t2, TF_BASE+TF_REG_T2(sp)
684	REG_L	t3, TF_BASE+TF_REG_T3(sp)
685	REG_L	ta0, TF_BASE+TF_REG_TA0(sp)
686	REG_L	ta1, TF_BASE+TF_REG_TA1(sp)
687	REG_L	ta2, TF_BASE+TF_REG_TA2(sp)
688	REG_L	ta3, TF_BASE+TF_REG_TA3(sp)
689	REG_L	t8, TF_BASE+TF_REG_T8(sp)
690	REG_L	t9, TF_BASE+TF_REG_T9(sp)
691	REG_L	ra, TF_BASE+TF_REG_RA(sp)
692	PTR_ADDU sp, sp, KERNFRAME_SIZ
693#ifdef DDBnotyet
694	PTR_LA	k0, _C_LABEL(kdbaux)
695	REG_L	s0, SF_REG_S0(k0)
696	REG_L	s1, SF_REG_S1(k0)
697	REG_L	s2, SF_REG_S2(k0)
698	REG_L	s3, SF_REG_S3(k0)
699	REG_L	s4, SF_REG_S4(k0)
700	REG_L	s5, SF_REG_S5(k0)
701	REG_L	s6, SF_REG_S6(k0)
702	REG_L	s7, SF_REG_S7(k0)
703	REG_L	sp, SF_REG_SP(k0)
704	REG_L	s8, SF_REG_S8(k0)
705	REG_L	gp, SF_REG_RA(k0)
706#endif
707	eret					# return to interrupted point
708	.set	at
709END(MIPSX(KernGenException))
710
711/*
712 * mipsN_UserGenException
713 *
714 * Handle an exception from user mode.
715 * Save user context atop the kernel stack, then call trap() to process
716 * the condition.  The context can be manipulated alternatively via
717 * curlwp->l_md.md_regs.
718 */
719NESTED_NOPROFILE(MIPSX(UserGenException), CALLFRAME_SIZ, ra)
720	.set	noat
721	.mask	0x80000000, -4
722/*
723 * Save all of the registers except for the kernel temporaries in the PCB.
724 */
725	PTR_L	k1, CPUVAR(CURLWP)
726	PTR_L	k1, L_PCB(k1)			# XXXuvm_lwp_getuarea
727	PTR_ADDU k1, k1, USPACE - FRAME_SIZ
728	REG_S	AT, FRAME_AST(k1)
729	REG_S	v0, FRAME_V0(k1)
730	REG_S	v1, FRAME_V1(k1)
731	mflo	v0
732	REG_S	a0, FRAME_A0(k1)
733	REG_S	a1, FRAME_A1(k1)
734	REG_S	a2, FRAME_A2(k1)
735	REG_S	a3, FRAME_A3(k1)
736	mfhi	v1
737	REG_S	t0, FRAME_T0(k1)
738	REG_S	t1, FRAME_T1(k1)
739	REG_S	t2, FRAME_T2(k1)
740	REG_S	t3, FRAME_T3(k1)
741	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
742	REG_S	ta0, FRAME_TA0(k1)
743	REG_S	ta1, FRAME_TA1(k1)
744	REG_S	ta2, FRAME_TA2(k1)
745	REG_S	ta3, FRAME_TA3(k1)
746	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
747	REG_S	s0, FRAME_S0(k1)
748	REG_S	s1, FRAME_S1(k1)
749	REG_S	s2, FRAME_S2(k1)
750	REG_S	s3, FRAME_S3(k1)
751	_MFC0	a2, MIPS_COP_0_BAD_VADDR	# 3rd arg is fault address
752	REG_S	s4, FRAME_S4(k1)
753	REG_S	s5, FRAME_S5(k1)
754	REG_S	s6, FRAME_S6(k1)
755	REG_S	s7, FRAME_S7(k1)
756	_MFC0	a3, MIPS_COP_0_EXC_PC		# 4th arg is exception PC
757	REG_S	t8, FRAME_T8(k1)
758	REG_S	t9, FRAME_T9(k1)
759	REG_S	gp, FRAME_GP(k1)
760	REG_S	sp, FRAME_SP(k1)
761	REG_S	s8, FRAME_S8(k1)
762	REG_S	ra, FRAME_RA(k1)
763	REG_S	a0, FRAME_SR(k1)
764	REG_S	v0, FRAME_MULLO(k1)
765	REG_S	v1, FRAME_MULHI(k1)
766	REG_S	a3, FRAME_EPC(k1)
767#ifdef IPL_ICU_MASK
768	.set at
769	INT_L	t0, _C_LABEL(md_imask)
770	INT_S	t0, FRAME_PPL(k1)
771	.set noat
772#endif
773	PTR_ADDU sp, k1, -CALLFRAME_SIZ		# switch to kernel SP
774#ifdef __GP_SUPPORT__
775	PTR_LA	gp, _C_LABEL(_gp)		# switch to kernel GP
776#endif
777/*
778 * Turn off fpu and enter kernel mode
779 */
780	.set	at
781	and	t0, a0, ~(MIPS_SR_COP_1_BIT | MIPS_SR_EXL | MIPS_SR_KSU_MASK | MIPS_SR_INT_IE)
782	.set	noat
783/*
784 * Call the trap handler.
785 */
786	PTR_L	MIPS_CURLWP, CPUVAR(CURLWP)
787	mtc0	t0, MIPS_COP_0_STATUS
788	COP0_SYNC
789	jal	_C_LABEL(trap)
790	REG_S	a3, CALLFRAME_RA(sp)		# for debugging
791/*
792 * Check pending asynchronous traps.
793 */
794	INT_L	t0, L_MD_ASTPENDING(MIPS_CURLWP)
795	beq	t0, zero, 1f
796	nop
797/*
798 * We have pending asynchronous traps; all the state is already saved.
799 */
800	jal	_C_LABEL(ast)
801	REG_L	a0, CALLFRAME_SIZ + FRAME_EPC(sp)
8021:
803/*
804 * Restore user registers and return.
805 * First disable interrupts and set exception level.
806 */
807	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
808	COP0_SYNC
809	nop					# 3 clock delay before
810	nop					# exceptions blocked
811	nop					# for R4X
812	SET_EXCEPTION_LEVEL(v0)			# set exception level
813	COP0_SYNC
814	nop					# 3 nop delay
815	nop
816	nop
817	PTR_ADDU a1, sp, CALLFRAME_SIZ
818#ifdef IPL_ICU_MASK
819	.set at
820	INT_L	t0, FRAME_PPL(a1)
821	INT_S	t0, _C_LABEL(md_imask)
822	jal	_C_LABEL(md_imask_update)
823	nop
824	PTR_ADDU a1, sp, CALLFRAME_SIZ
825	.set noat
826#endif
827 #	REG_L	a0, FRAME_SR(a1)
828	REG_L	t0, FRAME_MULLO(a1)
829	REG_L	t1, FRAME_MULHI(a1)
830	REG_L	v0, FRAME_EPC(a1)
831 #	mtc0	a0, MIPS_COP_0_STATUS		# still exception level
832	mtlo	t0
833	mthi	t1
834	_MTC0	v0, MIPS_COP_0_EXC_PC		# set return address
835	COP0_SYNC
836	move	k1, a1
837	REG_L	AT, FRAME_AST(k1)
838	REG_L	v0, FRAME_V0(k1)
839	REG_L	v1, FRAME_V1(k1)
840	REG_L	a0, FRAME_A0(k1)
841	REG_L	a1, FRAME_A1(k1)
842	REG_L	a2, FRAME_A2(k1)
843	REG_L	a3, FRAME_A3(k1)
844	REG_L	t0, FRAME_T0(k1)
845	REG_L	t1, FRAME_T1(k1)
846	REG_L	t2, FRAME_T2(k1)
847	REG_L	t3, FRAME_T3(k1)
848	REG_L	ta0, FRAME_TA0(k1)
849	REG_L	ta1, FRAME_TA1(k1)
850	REG_L	ta2, FRAME_TA2(k1)
851	REG_L	ta3, FRAME_TA3(k1)
852	REG_L	s0, FRAME_S0(k1)
853	REG_L	s1, FRAME_S1(k1)
854	REG_L	s2, FRAME_S2(k1)
855	REG_L	s3, FRAME_S3(k1)
856	REG_L	s4, FRAME_S4(k1)
857	REG_L	s5, FRAME_S5(k1)
858	REG_L	s6, FRAME_S6(k1)
859	REG_L	s7, FRAME_S7(k1)
860	REG_L	t8, FRAME_T8(k1)
861	REG_L	t9, FRAME_T9(k1)
862	REG_L	k0, FRAME_SR(k1)
863	DYNAMIC_STATUS_MASK_TOUSER(k0, ra)	# machine dependent masking
864	REG_L	gp, FRAME_GP(k1)
865	REG_L	sp, FRAME_SP(k1)
866	REG_L	s8, FRAME_S8(k1)
867	REG_L	ra, FRAME_RA(k1)
868	mtc0	k0, MIPS_COP_0_STATUS		# restore status
869	COP0_SYNC
870	nop
871	nop
872	eret					# return to interrupted point
873	.set	at
874END(MIPSX(UserGenException))
875
876/*
877 * mipsN_SystemCall
878 *
879 * Save user context in PCB, then call syscall() to process a system call.
880 * The context can be manipulated alternatively via curlwp->l_md.md_regs.
881 */
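/*
 * A hedged sketch of the dispatch that follows (C-like pseudocode; the
 * exact handler signature is an assumption):
 *
 *	p = curlwp->l_proc;
 *	(*p->p_md.md_syscall)(curlwp, status, cause, pc);
 *	if (curlwp->l_md.md_astpending)
 *		ast(pc);
 */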
882NESTED_NOPROFILE(MIPSX(SystemCall), CALLFRAME_SIZ, ra)
883	.set	noat
884	.mask	0x80000000, -4
885	PTR_L	k1, CPUVAR(CURLWP)
886	PTR_L	k1, L_PCB(k1)			# XXXuvm_lwp_getuarea
887	#nop					# -slip-
888	PTR_ADDU k1, k1, USPACE - FRAME_SIZ
889	#REG_S	AT, FRAME_AST(k1)
890	REG_S	v0, FRAME_V0(k1)		# syscall #
891	REG_S	v1, FRAME_V1(k1)		# used by syscall()
892	mflo	v0
893	REG_S	a0, FRAME_A0(k1)
894	REG_S	a1, FRAME_A1(k1)
895	REG_S	a2, FRAME_A2(k1)
896	REG_S	a3, FRAME_A3(k1)
897	PTR_L	a0, CPUVAR(CURLWP)		# 1st arg is curlwp
898	mfhi	v1
899	mfc0	a2, MIPS_COP_0_CAUSE		# 3rd arg is CAUSE
900	REG_S	s0, FRAME_S0(k1)
901	REG_S	s1, FRAME_S1(k1)
902	REG_S	s2, FRAME_S2(k1)
903	REG_S	s3, FRAME_S3(k1)
904	_MFC0	a3, MIPS_COP_0_EXC_PC		# 4th arg is PC
905	REG_S	s4, FRAME_S4(k1)
906	REG_S	s5, FRAME_S5(k1)
907	REG_S	s6, FRAME_S6(k1)
908	REG_S	s7, FRAME_S7(k1)
909	move	s0, t0				# the callee-saved registers are
910	move	s1, t1				# already saved, so we can use them
911	move	s2, t2				# to stash most of the temp regs
912	REG_S	t3, FRAME_T3(k1)		# syscall saved gp for fork
913	mfc0	a1, MIPS_COP_0_STATUS		# 2nd arg is STATUS
914#if defined(__mips_n32) || defined(__mips_n64)
915	REG_S	a4, FRAME_A4(k1)
916	REG_S	a5, FRAME_A5(k1)
917	REG_S	a6, FRAME_A6(k1)
918	REG_S	a7, FRAME_A7(k1)
919#else
920	move	s4, ta0
921	move	s5, ta1
922	move	s6, ta2
923	move	s7, ta3
924#endif
925	#REG_S	t8, FRAME_T8(k1)
926	#REG_S	t9, FRAME_T9(k1)
927	REG_S	gp, FRAME_GP(k1)
928	REG_S	sp, FRAME_SP(k1)
929	REG_S	s8, FRAME_S8(k1)
930	REG_S	ra, FRAME_RA(k1)
931	REG_S	a1, FRAME_SR(k1)
932	REG_S	v0, FRAME_MULLO(k1)
933	REG_S	v1, FRAME_MULHI(k1)
934	REG_S	a3, FRAME_EPC(k1)
935#ifdef IPL_ICU_MASK
936	.set at
937	INT_L	t0, _C_LABEL(md_imask)
938	INT_S	t0, FRAME_PPL(k1)
939	.set noat
940#endif
941	PTR_L	t0, L_PROC(a0)			# curlwp->l_proc (used below)
942	move	MIPS_CURLWP, a0			# set curlwp reg
943	PTR_ADDU sp, k1, -CALLFRAME_SIZ
944#ifdef __GP_SUPPORT__
945	PTR_LA	gp, _C_LABEL(_gp)		# switch to kernel GP
946#endif
947/*
948 * Turn off fpu and enter kernel mode
949 */
950	.set	at
951	PTR_L	t1, P_MD_SYSCALL(t0)		# t1 = syscall
952	and	t0, a1, ~(MIPS_SR_COP_1_BIT | MIPS_SR_EXL | MIPS_SR_KSU_MASK)
953	.set	noat
954#if defined(DDB) || defined(DEBUG) || defined(KGDB)
955	move	ra, a3
956	REG_S	ra, CALLFRAME_RA(sp)
957#endif
958/*
959 * Call the system call handler.
960 */
961	mtc0	t0, MIPS_COP_0_STATUS		# re-enable interrupts
962	COP0_SYNC
963	jal	t1
964	nop
965/*
966 * Check pending asynchronous traps.
967 */
968	INT_L	t0, L_MD_ASTPENDING(MIPS_CURLWP)
969	beq	t0, zero, 1f
970	nop
971/*
972 * We have pending asynchronous traps; all the state is already saved.
973 */
974	jal	_C_LABEL(ast)
975	PTR_L	a0, CALLFRAME_SIZ + FRAME_EPC(sp)
9761:
977/*
978 * Restore user registers and return.
979 * First disable interrupts and set exception level.
980 */
981	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
982	COP0_SYNC
983	nop					# 3 op delay
984	nop
985	nop
986
987	SET_EXCEPTION_LEVEL(v0)			# set exception level
988	COP0_SYNC
989	nop					# 3 op delay
990	nop
991	nop
992/*
993 * Restore user registers and return.
994 */
995	PTR_ADDU a1, sp, CALLFRAME_SIZ
996#ifdef IPL_ICU_MASK
997	.set at
998	INT_L	t0, FRAME_PPL(a1)
999	INT_S	t0, _C_LABEL(md_imask)
1000	jal	_C_LABEL(md_imask_update)
1001	nop
1002	PTR_ADDU a1, sp, CALLFRAME_SIZ
1003	.set noat
1004#endif
1005 #	REG_L	a0, FRAME_SR(a1)
1006	REG_L	t0, FRAME_MULLO(a1)
1007	REG_L	t1, FRAME_MULHI(a1)
1008	REG_L	v0, FRAME_EPC(a1)		# might be changed in syscall
1009 #	mtc0	a0, MIPS_COP_0_STATUS		# this should disable interrupts
1010	mtlo	t0
1011	mthi	t1
1012	_MTC0	v0, MIPS_COP_0_EXC_PC		# set return address
1013	COP0_SYNC
1014	move	k1, a1
1015	REG_L	AT, FRAME_AST(k1)
1016	REG_L	v0, FRAME_V0(k1)
1017	REG_L	v1, FRAME_V1(k1)
1018	REG_L	a0, FRAME_A0(k1)
1019	REG_L	a1, FRAME_A1(k1)
1020	REG_L	a2, FRAME_A2(k1)
1021	REG_L	a3, FRAME_A3(k1)
1022	move	t0, s0			# restore the temps
1023	move	t1, s1
1024	move	t2, s2
1025	REG_L	t3, FRAME_T3(k1)	# restore the syscall saved gp
1026#if defined(__mips_n32) || defined(__mips_n64)
1027	REG_L	a4, FRAME_A4(k1)
1028	REG_L	a5, FRAME_A5(k1)
1029	REG_L	a6, FRAME_A6(k1)
1030	REG_L	a7, FRAME_A7(k1)
1031#else
1032	move	ta0, s4
1033	move	ta1, s5
1034	move	ta2, s6
1035	move	ta3, s7
1036#endif
1037	REG_L	s0, FRAME_S0(k1)
1038	REG_L	s1, FRAME_S1(k1)
1039	REG_L	s2, FRAME_S2(k1)
1040	REG_L	s3, FRAME_S3(k1)
1041	REG_L	s4, FRAME_S4(k1)
1042	REG_L	s5, FRAME_S5(k1)
1043	REG_L	s6, FRAME_S6(k1)
1044	REG_L	s7, FRAME_S7(k1)
1045	REG_L	t8, FRAME_T8(k1)
1046	REG_L	t9, FRAME_T9(k1)
1047	REG_L	k0, FRAME_SR(k1)
1048	DYNAMIC_STATUS_MASK_TOUSER(k0, ra)	# machine dependent masking
1049	REG_L	gp, FRAME_GP(k1)
1050	REG_L	sp, FRAME_SP(k1)
1051	REG_L	s8, FRAME_S8(k1)
1052	REG_L	ra, FRAME_RA(k1)
1053	mtc0	k0, MIPS_COP_0_STATUS
1054	COP0_SYNC
1055	nop
1056	nop
1057	nop
1058
1059	eret					# return to syscall point
1060	.set	at
1061END(MIPSX(SystemCall))
1062
1063/*
1064 * Panic on cache errors.  A lot more could be done to recover
1065 * from some types of errors but it is tricky.
1066 */
1067NESTED_NOPROFILE(MIPSX(cacheException), KERNFRAME_SIZ, ra)
1068	.set	noat
1069	.mask	0x80000000, -4
1070#ifdef sbmips	/* XXX!  SB-1 needs a real cache error handler */
1071	eret
1072	nop
1073#endif
1074	PTR_LA	k0, panic			# return to panic
1075	PTR_LA	a0, 9f				# panicstr
1076	_MFC0	a1, MIPS_COP_0_ERROR_PC
1077#if defined(MIPS64_XLS)
1078	li	k1, 0x309	/* L1D_CACHE_ERROR_LOG */
1079	mfcr	a2, k1
1080	li	k1, 0x30b	/* L1D_CACHE_INTERRUPT */
1081	mfcr	a3, k1
1082	mfc0	a4, MIPS_COP_0_STATUS
1083	mfc0	a5, MIPS_COP_0_CAUSE
1084#else
1085	mfc0	a2, MIPS_COP_0_ECC
1086	mfc0	a3, MIPS_COP_0_CACHE_ERR
1087#endif
1088
1089	_MTC0	k0, MIPS_COP_0_ERROR_PC		# set return address
1090	COP0_SYNC
1091
1092	mfc0	k0, MIPS_COP_0_STATUS		# restore status
1093	li	k1, MIPS3_SR_DIAG_PE		# ignore further errors
1094	or	k0, k1
1095	mtc0	k0, MIPS_COP_0_STATUS		# restore status
1096	COP0_SYNC
1097	nop
1098	nop
1099	nop
1100
1101	eret
1102
1103#if defined(MIPS64_XLS)
1104	MSG("cache error @ EPC %#x\nL1D_CACHE_ERROR_LOG %#lx\nL1D_CACHE_INTERRUPT %#lx\nstatus %#x, cause %#x");
1105#else
1106	MSG("cache error @ EPC 0x%x ErrCtl 0x%x CacheErr 0x%x");
1107#endif
1108	.set	at
1109END(MIPSX(cacheException))
1110
1111/*
1112 * mipsX_KernIntr
1113 *
1114 * Handle an interrupt from kernel mode.
1115 * Build intrframe on stack to hold interrupted kernel context, then
1116 * call cpu_intr() to process it.
1117 *
1118 */
1119NESTED_NOPROFILE(MIPSX(KernIntr), KERNFRAME_SIZ, ra)
1120	.set	noat
1121	.mask	0x80000000, -4
1122	PTR_SUBU sp, sp, KERNFRAME_SIZ
1123/*
1124 * Save the relevant kernel registers onto the stack.
1125 * We don't need to save s0 - s8, sp and gp because
1126 * the compiler does it for us.
1127 */
1128	REG_S	AT, TF_BASE+TF_REG_AST(sp)
1129	REG_S	v0, TF_BASE+TF_REG_V0(sp)
1130	REG_S	v1, TF_BASE+TF_REG_V1(sp)
1131	mflo	v0
1132	mfhi	v1
1133	REG_S	a0, TF_BASE+TF_REG_A0(sp)
1134	REG_S	a1, TF_BASE+TF_REG_A1(sp)
1135	REG_S	a2, TF_BASE+TF_REG_A2(sp)
1136	REG_S	a3, TF_BASE+TF_REG_A3(sp)
1137	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
1138	REG_S	t0, TF_BASE+TF_REG_T0(sp)
1139	REG_S	t1, TF_BASE+TF_REG_T1(sp)
1140	REG_S	t2, TF_BASE+TF_REG_T2(sp)
1141	REG_S	t3, TF_BASE+TF_REG_T3(sp)
1142	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
1143	REG_S	ta0, TF_BASE+TF_REG_TA0(sp)
1144	REG_S	ta1, TF_BASE+TF_REG_TA1(sp)
1145	REG_S	ta2, TF_BASE+TF_REG_TA2(sp)
1146	REG_S	ta3, TF_BASE+TF_REG_TA3(sp)
1147	_MFC0	a2, MIPS_COP_0_EXC_PC		# 3rd arg is exception PC
1148	REG_S	t8, TF_BASE+TF_REG_T8(sp)
1149	REG_S	t9, TF_BASE+TF_REG_T9(sp)
1150	REG_S	ra, TF_BASE+TF_REG_RA(sp)
1151	REG_S	a0, TF_BASE+TF_REG_SR(sp)
1152	REG_S	v0, TF_BASE+TF_REG_MULLO(sp)
1153	REG_S	v1, TF_BASE+TF_REG_MULHI(sp)
1154	REG_S	a2, TF_BASE+TF_REG_EPC(sp)
1155/*
1156 * Call the interrupt handler.
1157 */
1158#if defined(DDB) || defined(DEBUG) || defined(KGDB)
1159	move	ra, a2
1160	REG_S	ra, KERNFRAME_RA(sp)		# for debugging
1161#endif
1162#ifdef IPL_ICU_MASK
1163	.set at
1164	INT_L	t0, _C_LABEL(md_imask)
1165	INT_S	t0, TF_BASE+TF_PPL(sp)
1166	.set noat
1167#endif
1168	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
1169	COP0_SYNC
1170	jal	_C_LABEL(cpu_intr)
1171	and	a3, a0, a1			# 4th is STATUS & CAUSE
1172/*
1173 * Restore registers and return from the interrupt.
1174 */
1175	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
1176	COP0_SYNC
1177	nop
1178	nop
1179	nop
1180#ifdef IPL_ICU_MASK
1181	.set at
1182	INT_L	a0, TF_BASE+TF_PPL(sp)
1183	INT_S	a0, _C_LABEL(md_imask)
1184	jal	_C_LABEL(md_imask_update)
1185	nop
1186	.set noat
1187#endif
1188	REG_L	a0, TF_BASE+TF_REG_SR(sp)	# ??? why differs ???
1189	DYNAMIC_STATUS_MASK(a0, t0)		# machine dependent masking
1190	REG_L	t0, TF_BASE+TF_REG_MULLO(sp)
1191	REG_L	t1, TF_BASE+TF_REG_MULHI(sp)
1192	REG_L	v0, TF_BASE+TF_REG_EPC(sp)
1193	mtc0	a0, MIPS_COP_0_STATUS		# restore the SR, disable intrs
1194	COP0_SYNC
1195	mtlo	t0
1196	mthi	t1
1197
1198#ifdef notyet
1199	/* Check for restartable sequences. */
1200	lui	t0, %hi(_C_LABEL(_lock_ras_start))
1201	ori	t0, zero, %lo(_C_LABEL(_lock_ras_start))
1202	li	t1, -MIPS_LOCK_RAS_SIZE
1203	and	t1, t1, v0
1204	bne	t1, t0, 1f
1205	move	k1, v0
1206	jal	_C_LABEL(_lock_ras)
1207	nop
1208	move	v0, k1
1209#endif
1210
12111:	_MTC0	v0, MIPS_COP_0_EXC_PC		# set return address
1212	COP0_SYNC
1213
1214	REG_L	AT, TF_BASE+TF_REG_AST(sp)
1215	REG_L	v0, TF_BASE+TF_REG_V0(sp)
1216	REG_L	v1, TF_BASE+TF_REG_V1(sp)
1217	REG_L	a0, TF_BASE+TF_REG_A0(sp)
1218	REG_L	a1, TF_BASE+TF_REG_A1(sp)
1219	REG_L	a2, TF_BASE+TF_REG_A2(sp)
1220	REG_L	a3, TF_BASE+TF_REG_A3(sp)
1221	REG_L	t0, TF_BASE+TF_REG_T0(sp)
1222	REG_L	t1, TF_BASE+TF_REG_T1(sp)
1223	REG_L	t2, TF_BASE+TF_REG_T2(sp)
1224	REG_L	t3, TF_BASE+TF_REG_T3(sp)
1225	REG_L	ta0, TF_BASE+TF_REG_TA0(sp)
1226	REG_L	ta1, TF_BASE+TF_REG_TA1(sp)
1227	REG_L	ta2, TF_BASE+TF_REG_TA2(sp)
1228	REG_L	ta3, TF_BASE+TF_REG_TA3(sp)
1229	REG_L	t8, TF_BASE+TF_REG_T8(sp)
1230	REG_L	t9, TF_BASE+TF_REG_T9(sp)
1231	REG_L	ra, TF_BASE+TF_REG_RA(sp)
1232	PTR_ADDU sp, sp, KERNFRAME_SIZ		# restore kernel SP
1233	eret					# return to interrupted point
1234	.set	at
1235END(MIPSX(KernIntr))
1236
1237/*----------------------------------------------------------------------------
1238 * XXX this comment block should be updated XXX
1239 * mipsN_UserIntr --
1240 *
1241 *	Handle an interrupt from user mode.
1242 *	Note: we save minimal state in the PCB and use the standard kernel
1243 *	stack since there has to be a u page if we came from user mode.
1244 *	If there is a pending software interrupt, then save the remaining state
1245 *	and call softintr(). This is all because if we call switch() inside
1246 *	cpu_intr(), not all of the user registers have been saved in the PCB.
1247 *
1248 * Results:
1249 * 	None.
1250 *
1251 * Side effects:
1252 *	None.
1253 *
1254 *----------------------------------------------------------------------------
1255 */
1256NESTED_NOPROFILE(MIPSX(UserIntr), CALLFRAME_SIZ, ra)
1257	.set	noat
1258	.mask	0x80000000, -4
1259/*
1260 * Save the relevant user registers into the PCB.
1261 * We don't need to save s0 - s8 because the compiler does it for us.
1262 */
1263	PTR_L	k1, CPUVAR(CURLWP)
1264	PTR_L	k1, L_PCB(k1)			# XXXuvm_lwp_getuarea
1265	PTR_ADDU k1, k1, USPACE - FRAME_SIZ
1266	REG_S	AT, FRAME_AST(k1)
1267	REG_S	v0, FRAME_V0(k1)
1268	REG_S	v1, FRAME_V1(k1)
1269	mflo	v0
1270	REG_S	a0, FRAME_A0(k1)
1271	REG_S	a1, FRAME_A1(k1)
1272	REG_S	a2, FRAME_A2(k1)
1273	REG_S	a3, FRAME_A3(k1)
1274	mfhi	v1
1275	REG_S	t0, FRAME_T0(k1)
1276	REG_S	t1, FRAME_T1(k1)
1277	REG_S	t2, FRAME_T2(k1)
1278	REG_S	t3, FRAME_T3(k1)
1279	mfc0	a0, MIPS_COP_0_STATUS		# 1st arg is STATUS
1280	REG_S	ta0, FRAME_TA0(k1)
1281	REG_S	ta1, FRAME_TA1(k1)
1282	REG_S	ta2, FRAME_TA2(k1)
1283	REG_S	ta3, FRAME_TA3(k1)
1284	mfc0	a1, MIPS_COP_0_CAUSE		# 2nd arg is CAUSE
1285	REG_S	t8, FRAME_T8(k1)
1286	REG_S	t9, FRAME_T9(k1)
1287	REG_S	gp, FRAME_GP(k1)
1288	REG_S	sp, FRAME_SP(k1)
1289	_MFC0	a2, MIPS_COP_0_EXC_PC		# 3rd arg is PC
1290	REG_S	ra, FRAME_RA(k1)
1291	REG_S	a0, FRAME_SR(k1)
1292	REG_S	v0, FRAME_MULLO(k1)
1293	REG_S	v1, FRAME_MULHI(k1)
1294	REG_S	a2, FRAME_EPC(k1)
1295#ifdef IPL_ICU_MASK
1296	.set at
1297	INT_L	t0, _C_LABEL(md_imask)
1298	INT_S	t0, FRAME_PPL(k1)
1299	.set noat
1300#endif
1301	PTR_ADDU sp, k1, -CALLFRAME_SIZ		# switch to kernel SP
1302#ifdef __GP_SUPPORT__
1303	PTR_LA	gp, _C_LABEL(_gp)		# switch to kernel GP
1304#endif
1305	PTR_S	MIPS_CURLWP, MIPS_CURLWP_FRAME(k1)# save curlwp reg
1306	PTR_L	MIPS_CURLWP, CPUVAR(CURLWP)	# set curlwp reg
1307/*
1308 * Turn off fpu and enter kernel mode
1309 */
1310	.set	at
1311	and	t0, a0, ~(MIPS_SR_COP_1_BIT | MIPS_SR_EXL | MIPS_SR_INT_IE | MIPS_SR_KSU_MASK)
1312	.set	noat
1313#if defined(DDB) || defined(DEBUG) || defined(KGDB)
1314	move	ra, a2
1315	REG_S	ra, CALLFRAME_RA(sp)
1316#endif
1317/*
1318 * Call the interrupt handler.
1319 */
1320	mtc0	t0, MIPS_COP_0_STATUS
1321	COP0_SYNC
1322	jal	_C_LABEL(cpu_intr)
1323	and	a3, a0, a1			# 4th is STATUS & CAUSE
1324/*
1325 * Restore registers and return from the interrupt.
1326 */
1327	nop
1328	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
1329	COP0_SYNC
1330	nop					# 3 nop hazard
1331	nop
1332	nop
1333	SET_EXCEPTION_LEVEL(v0)			# set exception level bit.
1334	COP0_SYNC
1335	nop					# 3 nop hazard
1336	nop
1337	nop
1338	PTR_ADDU a1, sp, CALLFRAME_SIZ
1339 #	REG_L	a0, FRAME_SR(a1)
1340	INT_L	v0, L_MD_ASTPENDING(MIPS_CURLWP)# any pending ast?
1341 #	mtc0	a0, MIPS_COP_0_STATUS		# restore the SR, disable intrs
1342	nop
1343/*
1344 * Check pending asynchronous traps.
1345 */
1346	beq	v0, zero, 1f			# if no, skip ast processing
1347	PTR_L	MIPS_CURLWP, MIPS_CURLWP_FRAME(a1)# restore curlwp reg
1348/*
1349 * We have pending asynchronous traps; save remaining user state in PCB.
1350 */
1351	REG_S	s0, FRAME_S0(a1)
1352	REG_S	s1, FRAME_S1(a1)
1353	REG_S	s2, FRAME_S2(a1)
1354	REG_S	s3, FRAME_S3(a1)
1355	REG_S	s4, FRAME_S4(a1)
1356	REG_S	s5, FRAME_S5(a1)
1357	REG_S	s6, FRAME_S6(a1)
1358	REG_S	s7, FRAME_S7(a1)
1359	REG_S	s8, FRAME_S8(a1)
1360	REG_L	a0, FRAME_EPC(a1)		# argument is interrupted PC
1361	PTR_L	MIPS_CURLWP, CPUVAR(CURLWP)	# set curlwp reg
1362#ifdef IPL_ICU_MASK
1363	jal	_C_LABEL(spllowersofthigh);
1364	nop
1365#else
1366#ifdef _LP64
1367	li	t0, MIPS_HARD_INT_MASK | MIPS_SR_INT_IE | MIPS_SR_KX
1368#else
1369	li	t0, MIPS_HARD_INT_MASK | MIPS_SR_INT_IE
1370#endif
1371	DYNAMIC_STATUS_MASK(t0, t1)		# machine dependent masking
1372	mtc0	t0, MIPS_COP_0_STATUS		# enable interrupts (spl0)
1373	COP0_SYNC
1374#endif
1375	jal	_C_LABEL(ast)
1376	nop
1377/*
1378 * Restore user registers and return. NOTE: interrupts are enabled.
1379 */
1380	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
1381	COP0_SYNC
1382	nop					# 3 nop delay
1383	nop
1384	nop
1385	SET_EXCEPTION_LEVEL(v0)			# set exception level bit.
1386	COP0_SYNC
1387	nop					# 3 nop delay
1388	nop
1389	nop
1390
1391	PTR_ADDU a1, sp, CALLFRAME_SIZ
1392 #	REG_L	a0, FRAME_SR(a1)
1393	REG_L	s0, FRAME_S0(a1)
1394	REG_L	s1, FRAME_S1(a1)
1395	REG_L	s2, FRAME_S2(a1)
1396	REG_L	s3, FRAME_S3(a1)
1397	REG_L	s4, FRAME_S4(a1)
1398	REG_L	s5, FRAME_S5(a1)
1399	REG_L	s6, FRAME_S6(a1)
1400	REG_L	s7, FRAME_S7(a1)
1401	REG_L	s8, FRAME_S8(a1)
1402 #	mtc0	a0, MIPS_COP_0_STATUS		# this should disable interrupts
1403
14041:
1405	REG_L	t0, FRAME_MULLO(a1)
1406	REG_L	t1, FRAME_MULHI(a1)
1407	REG_L	v0, FRAME_EPC(a1)
1408	mtlo	t0
1409	mthi	t1
1410	_MTC0	v0, MIPS_COP_0_EXC_PC		# set return address
1411	COP0_SYNC
1412	nop					# ??? how much delay ???
1413	nop
1414
1415	move	k1, a1
1416#ifdef IPL_ICU_MASK
1417	.set at
1418	INT_L	t0, FRAME_PPL(k1)
1419	INT_S	t0, _C_LABEL(md_imask)
1420	jal	_C_LABEL(md_imask_update)
1421	nop
1422	.set noat
1423#endif
1424	REG_L	AT, FRAME_AST(k1)
1425	REG_L	v0, FRAME_V0(k1)
1426	REG_L	v1, FRAME_V1(k1)
1427	REG_L	a0, FRAME_A0(k1)
1428	REG_L	a1, FRAME_A1(k1)
1429	REG_L	a2, FRAME_A2(k1)
1430	REG_L	a3, FRAME_A3(k1)
1431	REG_L	t0, FRAME_T0(k1)
1432	REG_L	t1, FRAME_T1(k1)
1433	REG_L	t2, FRAME_T2(k1)
1434	REG_L	t3, FRAME_T3(k1)
1435	REG_L	ta0, FRAME_TA0(k1)
1436	REG_L	ta1, FRAME_TA1(k1)
1437	REG_L	ta2, FRAME_TA2(k1)
1438	REG_L	ta3, FRAME_TA3(k1)
1439	REG_L	t8, FRAME_T8(k1)
1440	REG_L	t9, FRAME_T9(k1)
1441	REG_L	k0, FRAME_SR(k1)
1442	DYNAMIC_STATUS_MASK_TOUSER(k0, ra)	# machine dependent masking
1443	REG_L	gp, FRAME_GP(k1)
1444	REG_L	sp, FRAME_SP(k1)
1445	REG_L	ra, FRAME_RA(k1)
1446	mtc0	k0, MIPS_COP_0_STATUS		# restore the SR
1447	COP0_SYNC
1448	nop					# required for QED 5230
1449	nop
1450	eret					# return to interrupted point
1451	.set	at
1452END(MIPSX(UserIntr))
1453
1454
1455/*----------------------------------------------------------------------------
1456 *
1457 *	R4000 TLB exception handlers
1458 *
1459 *----------------------------------------------------------------------------
1460 */
1461
1462
1463/*----------------------------------------------------------------------------
1464 *
1465 * mips3_TLBInvalidException --
1466 *
1467 *	Handle a TLB invalid exception from kernel mode in kernel space.
1468 *	The BadVAddr, Context, and EntryHi registers contain the failed
1469 *	virtual address.
1470 *
1471 *	The case of wired TLB entries is special.  The wired TLB entries
1472 *	are used to keep the u area TLB entries valid.  The PTE entries for these
1473 *	do not have MIPS3_PG_G set; the kernel instead relies
1474 *	on the switch_resume function to set these bits.
1475 *
1476 *	To preserve this situation, we set PG_G bits on the "other" TLB entries
1477 *	when they are wired.
1478 *
1479 * Results:
1480 *	None.
1481 *
1482 * Side effects:
1483 *	None.
1484 *
1485 *----------------------------------------------------------------------------
1486 */
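/*
 * A hedged, C-like sketch of the lookup below (Sysmap, Sysmapsize and
 * VM_MIN_KERNEL_ADDRESS are the real kernel symbols; the rest is
 * illustrative):
 *
 *	if (badvaddr >= 0)				// not a kernel (kseg2) address
 *		goto kern_gen_exception;
 *	idx = (badvaddr - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT;
 *	if (idx >= Sysmapsize)
 *		goto outofworld;
 *	pte = &Sysmap[idx];				// kernel PTE for the faulting page
 *	// probe the TLB, reload EntryLo0/EntryLo1 from the even/odd pair, tlbwi
 */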
1487LEAF_NOPROFILE(MIPSX(TLBInvalidException))
1488	.set	noat
1489	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# get the fault address
1490#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START
1491	li	k1, VM_MIN_KERNEL_ADDRESS	# compute index
1492#else
1493	li	k1, VM_MIN_KERNEL_ADDRESS>>32	# compute index
1494	dsll32	k1, k1, 0
1495#endif
1496	bgez	k0, _C_LABEL(MIPSX(KernGenException))	# full trap processing
1497	PTR_SUBU k0, k0, k1
1498	INT_L	k1, _C_LABEL(Sysmapsize)	# index within range?
1499	PTR_SRL	k0, k0, PGSHIFT
1500	sltu	k1, k0, k1
1501	beq	k1, zero, MIPSX(outofworld)	# No. Failing beyond. . .
1502	nop					# - delay slot -
1503	PTR_L	k1, _C_LABEL(Sysmap)
1504
1505	PTR_SLL	k0, k0, 2			# compute offset from index
1506	PTR_ADDU k1, k1, k0
1507	tlbp					# Probe the invalid entry
1508	COP0_SYNC
1509	and	k0, k0, 4			# check even/odd page
1510	nop					# required for QED 5230
1511	bne	k0, zero, KernTLBIOdd
1512	nop
1513
1514	mfc0	k0, MIPS_COP_0_TLB_INDEX
1515	nop
1516	bltz	k0, MIPSX(outofworld)		# ASSERT(TLB entry exists)
1517	INT_L	k0, 0(k1)			# get PTE entry
1518
1519	_SLL	k0, k0, WIRED_SHIFT		# get rid of "wired" bit
1520	_SRL	k0, k0, WIRED_SHIFT
1521	_MTC0	k0, MIPS_COP_0_TLB_LO0		# load PTE entry
1522	COP0_SYNC
1523	and	k0, k0, MIPS3_PG_V		# check for valid entry
1524	nop					# required for QED5230
1525	beq	k0, zero, _C_LABEL(MIPSX(KernGenException))	# PTE invalid
1526	INT_L	k0, 4(k1)			# get odd PTE entry
1527	_SLL	k0, k0, WIRED_SHIFT
1528	mfc0	k1, MIPS_COP_0_TLB_INDEX
1529	_SRL	k0, k0, WIRED_SHIFT
1530	sltiu	k1, k1, MIPS3_TLB_WIRED_UPAGES	# Luckily this is MIPS3_PG_G
1531	or	k1, k1, k0
1532	_MTC0	k0, MIPS_COP_0_TLB_LO1		# load PTE entry
1533	COP0_SYNC
1534	nop
1535	nop					# required for QED5230
1536	tlbwi					# write TLB
1537	COP0_SYNC
1538	nop
1539	nop
1540	nop
1541	nop
1542	nop
1543	eret
1544
1545KernTLBIOdd:
1546	mfc0	k0, MIPS_COP_0_TLB_INDEX
1547	nop
1548	bltz	k0, MIPSX(outofworld)		# assert(TLB Entry exists)
1549	INT_L	k0, 0(k1)			# get PTE entry
1550
1551	_SLL	k0, k0, WIRED_SHIFT		# get rid of wired bit
1552	_SRL	k0, k0, WIRED_SHIFT
1553	_MTC0	k0, MIPS_COP_0_TLB_LO1		# save PTE entry
1554	COP0_SYNC
1555	and	k0, k0, MIPS3_PG_V		# check for valid entry
1556	nop					# required for QED5230
1557	beq	k0, zero, _C_LABEL(MIPSX(KernGenException))	# PTE invalid
1558	INT_L	k0, -4(k1)			# get even PTE entry
1559	_SLL	k0, k0, WIRED_SHIFT
1560	mfc0	k1, MIPS_COP_0_TLB_INDEX
1561	_SRL	k0, k0, WIRED_SHIFT
1562	sltiu	k1, k1, MIPS3_TLB_WIRED_UPAGES	# Luckily this is MIPS3_PG_G
1563	or	k1, k1, k0
1564	_MTC0	k0, MIPS_COP_0_TLB_LO0		# save PTE entry
1565	COP0_SYNC
1566	nop
1567	nop					# required for QED5230
1568	tlbwi					# update TLB
1569	COP0_SYNC
1570	nop
1571	nop
1572	nop
1573	nop
1574	nop
1575	eret
1576END(MIPSX(TLBInvalidException))
1577
1578/*----------------------------------------------------------------------------
1579 *
1580 * mipsN_TLBMissException --
1581 *
1582 *	Handle a TLB miss exception from kernel mode in kernel space.
1583 *	The BadVAddr, Context, and EntryHi registers contain the failed
1584 *	virtual address.
1585 *
1586 * Results:
1587 *	None.
1588 *
1589 * Side effects:
1590 *	None.
1591 *
1592 *----------------------------------------------------------------------------
1593 */
1594LEAF_NOPROFILE(MIPSX(TLBMissException))
1595	.set	noat
1596	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# get the fault address
1597#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START
1598	li	k1, VM_MIN_KERNEL_ADDRESS	# compute index
1599#else
1600	li	k1, VM_MIN_KERNEL_ADDRESS>>32	# compute index
1601	dsll32	k1, k1, 0
1602#endif
1603	PTR_SUBU k0, k0, k1
1604	INT_L	k1, _C_LABEL(Sysmapsize)	# index within range?
1605	PTR_SRL	k0, k0, PGSHIFT
1606	sltu	k1, k0, k1
1607#ifdef newsmips
1608	/* news5000 has ROM work area at 0xfff00000. */
1609	bne	k1, zero, 1f
1610	nop
1611	j	checkromwork
1612	nop					# - delay slot -
16131:
1614#else
1615	beq	k1, zero, MIPSX(outofworld)		# No. Failing beyond. . .
1616	nop					# - delay slot -
1617#endif
1618	PTR_L	k1, _C_LABEL(Sysmap)
1619	PTR_SRL	k0, k0, 1
1620	PTR_SLL	k0, k0, 3			# compute offset from index
1621	PTR_ADDU k1, k1, k0
1622	INT_L	k0, 0(k1)			# get PTE entry
1623	INT_L	k1, 4(k1)			# get odd PTE entry
1624	_SLL	k0, k0, WIRED_SHIFT		# get rid of "wired" bit
1625	_SRL	k0, k0, WIRED_SHIFT
1626	_MTC0	k0, MIPS_COP_0_TLB_LO0		# load PTE entry
1627	COP0_SYNC
1628	_SLL	k1, k1, WIRED_SHIFT
1629	_SRL	k1, k1, WIRED_SHIFT
1630	_MTC0	k1, MIPS_COP_0_TLB_LO1		# load PTE entry
1631	COP0_SYNC
1632	nop
1633	nop					# required for QED5230
1634	tlbwr					# write TLB
1635	COP0_SYNC
1636	nop
1637	nop
1638	nop
1639	nop
1640	nop
1641	eret
1642
1643MIPSX(outofworld):
1644	/* eret to panic so shutdown can use K2.  Try to ensure valid $sp. */
1645	PTR_LA	a0, _C_LABEL(panic)
1646	_MFC0	a2, MIPS_COP_0_EXC_PC
1647	move	a1, sp
1648	PTR_SLL	k0, k0, PGSHIFT
1649	_MTC0	a0, MIPS_COP_0_EXC_PC		# return to panic
1650	COP0_SYNC
1651#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START
1652	li	k1, VM_MIN_KERNEL_ADDRESS	# compute index
1653#else
1654	li	k1, VM_MIN_KERNEL_ADDRESS>>32	# compute index
1655	dsll32	k1, k1, 0
1656#endif
1657	PTR_ADDU a3, k0, k1
1658#if defined(DDB)
1659	bltz	sp, 1f				# for ddb try to keep frame
1660	nop
1661#endif
1662	PTR_LA	sp, start			# set sp to a valid place
16631:	PTR_LA	a0, 9f				# string
1664	eret
1665
1666	.set	at
1667END(MIPSX(TLBMissException))
1668
1669	MSG("TLB out of universe: ksp %p epc %p vaddr %p")
1670
1671/*
1672 * Mark where the code entered from the exception handler jump table
1673 * ends, for the stack traceback code.
1674 */
1675
1676	.globl	_C_LABEL(MIPSX(exceptionentry_end))
1677_C_LABEL(MIPSX(exceptionentry_end)):
1678
1679/*--------------------------------------------------------------------------
1680 *
1681 * mipsN_SetPID --
1682 *
1683 *	Write the given pid into the TLB pid reg.
1684 *
1685 *	mips3_SetPID(pid)
1686 *		int pid;
1687 *
1688 * Results:
1689 *	None.
1690 *
1691 * Side effects:
1692 *	PID set in the entry hi register.
1693 *
1694 *--------------------------------------------------------------------------
1695 */
1696LEAF(MIPSX(SetPID))
1697	_MTC0	a0, MIPS_COP_0_TLB_HI		# Write the hi reg value
1698	COP0_SYNC
1699	/* XXX simonb: lose these nops for mips32/64? */
1700	nop					# required for QED5230
1701	nop					# required for QED5230
1702	j	ra
1703	nop
1704END(MIPSX(SetPID))
1705
1706#if defined(ENABLE_MIPS3_WIRED_MAP)
1707/*--------------------------------------------------------------------------
1708 *
1709 * mipsN_TLBWriteIndexedVPS --
1710 *
1711 *      Write the given entry into the TLB at the given index.
1712 *      Pass full R4000 style TLB info including variable page size mask.
1713 *
1714 *      mipsN_TLBWriteIndexedVPS(unsigned int index, struct tlb *tlb)
1715 *
1716 * Results:
1717 *      None.
1718 *
1719 * Side effects:
1720 *      TLB entry set.
1721 *
1722 *--------------------------------------------------------------------------
1723 */
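/*
 * The loads below assume a 32-bit layout of the tlb argument roughly like
 * the following (a hedged sketch; the authoritative struct tlb definition
 * lives in the MIPS headers):
 *
 *	struct tlb {
 *		uint32_t tlb_mask;	// 0(a1)  PageMask
 *		uint32_t tlb_hi;	// 4(a1)  EntryHi (VPN2 | ASID)
 *		uint32_t tlb_lo0;	// 8(a1)  EntryLo0 (even page)
 *		uint32_t tlb_lo1;	// 12(a1) EntryLo1 (odd page)
 *	};
 */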
1724LEAF(MIPSX(TLBWriteIndexedVPS))
1725	mfc0	v1, MIPS_COP_0_STATUS		# Save the status register.
1726	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
1727	COP0_SYNC
1728	nop
1729	lw	a2, 8(a1)			# fetch tlb->tlb_lo0
1730	lw	a3, 12(a1)			# fetch tlb->tlb_lo1
1731	mfc0	v0, MIPS_COP_0_TLB_PG_MASK	# Save current page mask.
1732	_MFC0	t0, MIPS_COP_0_TLB_HI		# Save the current PID.
1733
1734	_MTC0	a2, MIPS_COP_0_TLB_LO0		# Set up entry low0.
1735	COP0_SYNC
1736	_MTC0	a3, MIPS_COP_0_TLB_LO1		# Set up entry low1.
1737	COP0_SYNC
1738	nop
1739	lw	a2, 0(a1)			# fetch tlb->tlb_mask
1740	lw	a3, 4(a1)			# fetch tlb->tlb_hi
1741	nop
1742	mtc0	a0, MIPS_COP_0_TLB_INDEX	# Set the index.
1743	COP0_SYNC
1744	mtc0	a2, MIPS_COP_0_TLB_PG_MASK	# Set up entry pagemask.
1745	COP0_SYNC
1746	_MTC0	a3, MIPS_COP_0_TLB_HI		# Set up entry high.
1747	COP0_SYNC
1748	nop
1749	nop
1750	tlbwi					# Write the TLB
1751	COP0_SYNC
1752	nop
1753	nop
1754	nop					# Delay for effect
1755	nop
1756
1757	_MTC0	t0, MIPS_COP_0_TLB_HI		# Restore the PID.
1758	COP0_SYNC
1759	mtc0	v0, MIPS_COP_0_TLB_PG_MASK	# Restore page mask.
1760	COP0_SYNC
1761	nop
1762	nop
1763	j       ra
1764	mtc0	v1, MIPS_COP_0_STATUS		# Restore the status register
1765END(MIPSX(TLBWriteIndexedVPS))
1766#endif /* ENABLE_MIPS3_WIRED_MAP */
1767
1768/*--------------------------------------------------------------------------
1769 *
1770 * mipsN_TLBUpdate --
1771 *
1772 *	Update the TLB if highreg is found; otherwise do nothing.
1773 *
1774 *	mips3_TLBUpdate(virpageadr, lowregx)
1775 *		unsigned virpageadr, lowregx;
1776 *
1777 * Results:
1778 *	< 0 if skipped, >= 0 if updated.
1779 *
1780 * Side effects:
1781 *	None.
1782 *
1783 *--------------------------------------------------------------------------
1784 */
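/*
 * A hedged, C-like sketch of the probe-and-update done below:
 *
 *	EntryHi = va & (MIPS3_PG_HVPN | MIPS3_PG_ASID);
 *	tlbp();
 *	if (Index < 0)
 *		return Index;			// not resident, nothing to update
 *	tlbr();					// fetch the existing even/odd pair
 *	if (va & MIPS3_PG_ODDPG)
 *		EntryLo1 = lowreg;
 *	else
 *		EntryLo0 = lowreg;
 *	tlbwi();
 *	return Index;
 */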
1785LEAF(MIPSX(TLBUpdate))
1786	mfc0	v1, MIPS_COP_0_STATUS	# Save the status register.
1787	mtc0	zero, MIPS_COP_0_STATUS	# Disable interrupts
1788	COP0_SYNC
1789	and	t1, a0, MIPS3_PG_ODDPG	# t1 = Even/Odd flag
1790	li	v0, (MIPS3_PG_HVPN | MIPS3_PG_ASID)
1791	and	a0, a0, v0
1792	_MFC0	t0, MIPS_COP_0_TLB_HI		# Save current PID
1793	_MTC0	a0, MIPS_COP_0_TLB_HI		# Init high reg
1794	COP0_SYNC
1795	and	a2, a1, MIPS3_PG_G		# Copy global bit
1796	nop
1797	nop
1798	tlbp					# Probe for the entry.
1799	COP0_SYNC
1800	_SLL	a1, a1, WIRED_SHIFT		# Clear top 34 bits of EntryLo
1801	_SRL	a1, a1, WIRED_SHIFT
1802	bne	t1, zero, 1f			# Decide even odd
1803	mfc0	v0, MIPS_COP_0_TLB_INDEX	# See what we got
1804# EVEN
1805	nop
1806	bltz	v0, 1f				# index < 0 => !found
1807	nop
1808	nop					# required for QED5230
1809
1810	tlbr					# update, read entry first
1811	COP0_SYNC
1812	nop
1813	nop
1814	nop
1815	_MTC0	a1, MIPS_COP_0_TLB_LO0		# init low reg0.
1816	COP0_SYNC
1817	nop
1818	nop					# required for QED5230
1819	tlbwi					# update slot found
1820	COP0_SYNC
1821	nop					# required for QED5230
1822	nop					# required for QED5230
1823	b	4f
1824	nop
18251:
1826# ODD
1827	nop
1828	bltz	v0, 4f				# index < 0 => !found
1829	nop
1830	nop					# required for QED5230
1831
1832	tlbr					# read the entry first
1833	COP0_SYNC
1834	nop
1835	nop
1836	nop
1837	_MTC0	a1, MIPS_COP_0_TLB_LO1		# init low reg1.
1838	COP0_SYNC
1839	nop
1840	nop					# required for QED5230
1841	tlbwi					# update slot found
1842	COP0_SYNC
1843	nop					# required for QED5230
1844	nop					# required for QED5230
1845	nop
18464:
1847	nop					# Make sure pipeline
1848	nop					# advances before we
1849	nop					# use the TLB.
1850	nop
1851	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore PID
1852	COP0_SYNC
1853	nop					# required for QED5230
1854	nop					# required for QED5230
1855	j	ra
1856	mtc0	v1, MIPS_COP_0_STATUS		# Restore the status register
1857	COP0_SYNC				# XXXX - not executed!!
1858END(MIPSX(TLBUpdate))
1859
1860/*--------------------------------------------------------------------------
1861 *
1862 * mipsN_TLBRead --
1863 *
1864 *	Read the TLB entry.
1865 *
1866 *	mips3_TLBRead(entry, tlb)
1867 *		unsigned entry;
1868 *		struct tlb *tlb;
1869 *
1870 * Results:
1871 *	None.
1872 *
1873 * Side effects:
1874 *	tlb will contain the TLB entry found.
1875 *
1876 *--------------------------------------------------------------------------
1877 */
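/*
 * A hypothetical caller, for illustration only (tlbsize and the printf
 * are assumptions, not taken from this file):
 *
 *	struct tlb tlb;
 *	int i;
 *
 *	for (i = 0; i < tlbsize; i++) {
 *		mips3_TLBRead(i, &tlb);
 *		printf("%3d: hi %08x lo0 %08x lo1 %08x mask %08x\n", i,
 *		    tlb.tlb_hi, tlb.tlb_lo0, tlb.tlb_lo1, tlb.tlb_mask);
 *	}
 */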
1878LEAF(MIPSX(TLBRead))
1879	mfc0	v1, MIPS_COP_0_STATUS		# Save the status register.
1880	mtc0	zero, MIPS_COP_0_STATUS		# Disable interrupts
1881	COP0_SYNC
1882	nop
1883	mfc0	ta2, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
1884	nop
1885	_MFC0	t0, MIPS_COP_0_TLB_HI		# Get current PID
1886
1887	mtc0	a0, MIPS_COP_0_TLB_INDEX	# Set the index register
1888	COP0_SYNC
1889	nop
1890	nop					# required for QED5230
1891	tlbr					# Read from the TLB
1892	COP0_SYNC
1893	nop
1894	nop
1895	nop
1896	mfc0	t2, MIPS_COP_0_TLB_PG_MASK	# fetch the pgMask
1897	_MFC0	t3, MIPS_COP_0_TLB_HI		# fetch the hi entry
1898	_MFC0	ta0, MIPS_COP_0_TLB_LO0		# See what we got
1899	_MFC0	ta1, MIPS_COP_0_TLB_LO1		# See what we got
1900	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore PID
1901	COP0_SYNC
1902	mtc0	ta2, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
1903	COP0_SYNC
1904	nop
1905	nop
1906	nop					# wait for PID active
1907	mtc0	v1, MIPS_COP_0_STATUS		# Restore the status register
1908	COP0_SYNC
1909	nop
1910	sw	t2, 0(a1)
1911	sw	t3, 4(a1)
1912	sw	ta0, 8(a1)
1913	j	ra
1914	sw	ta1, 12(a1)
1915END(MIPSX(TLBRead))
1916
1917#if defined(MIPS3) && !defined(MIPS3_5900)
1918/*----------------------------------------------------------------------------
1919 *
1920 * mips3_VCED --
1921 *
1922 *	Handle virtual coherency exceptions.
1923 *	Called directly from the mips3 exception-table code.
1924 *	Only k0 and k1 are available on entry.
1925 *
1926 * Results:
1927 *	None.
1928 *
1929 * Side effects:
1930 *	Writes back and invalidates the conflicting cache lines and
1931 *	returns from the exception.
1932 *
1933 *	NB: cannot be profiled, all registers are user registers on entry.
1934 *
1935 *----------------------------------------------------------------------------
1936 */
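/*
 * In outline (a descriptive note added to the header above): align the
 * fault address down to a 16-byte sub-block, writeback-invalidate that
 * line in the secondary cache, invalidate it in the primary data
 * cache, then eret.  Under DEBUG the fault address, the EPC and a
 * 16-entry hit-count table indexed by the upper 4 bits of the VA are
 * also recorded.
 */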
1937LEAF_NOPROFILE(MIPSX(VCED))
1938	.set	noat
1939	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# fault addr.
1940	li	k1, -16
1941	and	k0, k1
1942	cache	(CACHE_R4K_SD | CACHEOP_R4K_HIT_WB_INV), 0(k0)
1943	cache	(CACHE_R4K_D | CACHEOP_R4K_HIT_INV), 0(k0)
1944#ifdef DEBUG
1945	_MFC0	k0, MIPS_COP_0_BAD_VADDR
1946	PTR_LA	k1, VCED_vaddr
1947	PTR_S	k0, 0(k1)
1948	_MFC0	k0, MIPS_COP_0_EXC_PC
1949	PTR_LA	k1, VCED_epc
1950	PTR_S	k0, 0(k1)
1951	PTR_LA	k1, VCED_count		# count number of exceptions
1952	PTR_SRL	k0, k0, 26		# position upper 4 bits of VA
1953	and	k0, k0, 0x3c		# mask it off
1954	PTR_ADDU k1, k0			# get address of count table
1955	LONG_L	k0, 0(k1)
1956	LONG_ADDU k0, 1
1957	LONG_S	k0, 0(k1)
1958#endif
1959	eret
1960	.set	at
1961
1962#ifdef DEBUG
1963	.data
1964	.globl	_C_LABEL(VCED_count)
1965_C_LABEL(VCED_count):
1966	LONG_WORD	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1967	.globl	_C_LABEL(VCED_epc)
1968_C_LABEL(VCED_epc):
1969	PTR_WORD	0
1970	.globl	_C_LABEL(VCED_vaddr)
1971_C_LABEL(VCED_vaddr):
1972	PTR_WORD	0
1973	.text
1974#endif
1975END(MIPSX(VCED))
1976
1977LEAF_NOPROFILE(MIPSX(VCEI))
1978	.set	noat
1979	_MFC0	k0, MIPS_COP_0_BAD_VADDR	# fault addr.
1980	cache	(CACHE_R4K_SD | CACHEOP_R4K_HIT_WB_INV), 0(k0)
1981	cache	(CACHE_R4K_I | CACHEOP_R4K_HIT_INV), 0(k0)
1982#ifdef DEBUG
1983	_MFC0	k0, MIPS_COP_0_BAD_VADDR
1984	PTR_LA	k1, VCEI_vaddr
1985	PTR_S	k0, 0(k1)
1986	PTR_LA	k1, VCEI_count		# count number of exceptions
1987	PTR_SRL	k0, k0, 26		# position upper 4 bits of VA
1988	and	k0, k0, 0x3c		# mask it off
1989	PTR_ADDU k1, k0			# get address of count table
1990	LONG_L	k0, 0(k1)
1991	PTR_ADDU k0, 1
1992	LONG_S	k0, 0(k1)
1993#endif
1994	eret
1995	.set	at
1996
1997#ifdef DEBUG
1998	.data
1999	.globl	_C_LABEL(VCEI_count)
2000_C_LABEL(VCEI_count):
2001	LONG_WORD	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2002	.globl	_C_LABEL(VCEI_vaddr)
2003_C_LABEL(VCEI_vaddr):
2004	PTR_WORD	0
2005	.text
2006#endif
2007END(MIPSX(VCEI))
2008#endif /* MIPS3 && !MIPS3_5900 */
2009
2010/*
2011 * mipsN_lwp_trampoline()
2012 *
2013 * Arrange for a function to be invoked neatly after a cpu_switch().
2014 * Call the service function in s0 with the single argument in s1.
2015 * No register save operation is needed.
2016 */
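/*
 * Conceptually (a hedged sketch, not compilable kernel code; prev,
 * func and arg name the values handed over in v0, s0 and s1):
 *
 *	lwp_startup(prev, curlwp);	// prev comes back in v0 from cpu_switchto()
 *	(*func)(arg);			// func/arg set up by cpu_setfunc()
 *	// then restore the user trapframe and eret (label 1: below),
 *	// unless this is a kernel thread that never returns.
 */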
2017LEAF(MIPSX(lwp_trampoline))
2018	PTR_ADDU sp, sp, -CALLFRAME_SIZ
2019
2020	# Call lwp_startup(), with args from cpu_switchto()/cpu_setfunc()
2021	move	a0, v0
2022	jal	_C_LABEL(lwp_startup)
2023	move	a1, MIPS_CURLWP
2024
2025	# Call the routine specified by cpu_setfunc()
2026	jal	ra, s0
2027	move	a0, s1
2028
2029	#
2030	# Return to user (won't happen if a kernel thread)
2031	#
2032	# Make sure to disable interrupts here, as otherwise
2033	# we can take an interrupt *after* EXL is set, and
2034	# end up returning to a bogus PC since the PC is not
2035	# saved if EXL=1.
2036	#
2037	.set	noat
20381:
2039	RESET_EXCEPTION_LEVEL_DISABLE_INTERRUPTS(v0)
2040	COP0_SYNC
2041	nop					# 3 op delay
2042	nop
2043	nop
2044	SET_EXCEPTION_LEVEL(v0)			# set exception level
2045	COP0_SYNC
2046	nop
2047	nop
2048	PTR_ADDU a1, sp, CALLFRAME_SIZ
2049 #	REG_L	a0, FRAME_SR(a1)
2050	REG_L	t0, FRAME_MULLO(a1)
2051	REG_L	t1, FRAME_MULHI(a1)
2052	REG_L	v0, FRAME_EPC(a1)
2053	mtlo	t0
2054	mthi	t1
2055	_MTC0	v0, MIPS_COP_0_EXC_PC
2056	COP0_SYNC
2057	nop
2058	move	k1, a1
2059#ifdef IPL_ICU_MASK
2060	.set at
2061	INT_L	t0, FRAME_PPL(k1)
2062	INT_S	t0, _C_LABEL(md_imask)
2063	jal	_C_LABEL(md_imask_update)
2064	nop
2065	.set noat
2066#endif
2067	REG_L	AT, FRAME_AST(k1)
2068	REG_L	v0, FRAME_V0(k1)
2069	REG_L	v1, FRAME_V1(k1)
2070	REG_L	a0, FRAME_A0(k1)
2071	REG_L	a1, FRAME_A1(k1)
2072	REG_L	a2, FRAME_A2(k1)
2073	REG_L	a3, FRAME_A3(k1)
2074	REG_L	t0, FRAME_T0(k1)
2075	REG_L	t1, FRAME_T1(k1)
2076	REG_L	t2, FRAME_T2(k1)
2077	REG_L	t3, FRAME_T3(k1)
2078	REG_L	ta0, FRAME_TA0(k1)
2079	REG_L	ta1, FRAME_TA1(k1)
2080	REG_L	ta2, FRAME_TA2(k1)
2081	REG_L	ta3, FRAME_TA3(k1)
2082	REG_L	s0, FRAME_S0(k1)
2083	REG_L	s1, FRAME_S1(k1)
2084	REG_L	s2, FRAME_S2(k1)
2085	REG_L	s3, FRAME_S3(k1)
2086	REG_L	s4, FRAME_S4(k1)
2087	REG_L	s5, FRAME_S5(k1)
2088	REG_L	s6, FRAME_S6(k1)
2089	REG_L	s7, FRAME_S7(k1)
2090	REG_L	t8, FRAME_T8(k1)
2091	REG_L	t9, FRAME_T9(k1)
2092	REG_L	k0, FRAME_SR(k1)
2093	DYNAMIC_STATUS_MASK(k0, sp)		# machine dependent masking
2094	REG_L	gp, FRAME_GP(k1)
2095	REG_L	s8, FRAME_S8(k1)
2096	REG_L	ra, FRAME_RA(k1)
2097	REG_L	sp, FRAME_SP(k1)
2098	mtc0	k0, MIPS_COP_0_STATUS
2099	COP0_SYNC
2100	nop
2101	nop
2102	eret
2103	.set	at
2104END(MIPSX(lwp_trampoline))
2105
2106/*
2107 * Like lwp_trampoline, but do not call lwp_startup
2108 */
2109LEAF(MIPSX(setfunc_trampoline))
2110	PTR_ADDU sp, sp, -CALLFRAME_SIZ
2111
2112	# Call the routine specified by cpu_setfunc()
2113	jal	ra, s0
2114	move	a0, s1
2115
2116	j	1b
2117	nop
2118
2119END(MIPSX(setfunc_trampoline))
2120
2121
2122/*
2123 * void mipsN_cpu_switch_resume(struct lwp *newlwp)
2124 *
2125 * Wire down the USPACE of newlwp in TLB entry #0.  Before doing so,
2126 * check whether the target USPACE is already present elsewhere in the
2127 * TLB, and invalidate that entry (TBIS) if it is.
2128 */
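/*
 * Hedged C-level outline of the routine below; tlb_probe(),
 * invalidate_entry() and write_wired_entry0() are illustrative names
 * for the tlbp/tlbwi sequences, not real kernel functions:
 *
 *	va = l->l_addr;				// the lwp's USPACE
 *	if (va is direct-mapped, i.e. not in KSEG2/XKSEG)
 *		return;				// no TLB entry needed
 *	if ((idx = tlb_probe(va)) >= 0)
 *		invalidate_entry(idx);		// drop any stale mapping first
 *	write_wired_entry0(va, upte[0] | MIPS3_PG_G, upte[1] | MIPS3_PG_G);
 */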
2129LEAF_NOPROFILE(MIPSX(cpu_switch_resume))
2130#if !defined(ENABLE_MIPS_16KB_PAGE)
2131	INT_L	a1, L_MD_UPTE_0(a0)		# a1 = upte[0]
2132	INT_L	a2, L_MD_UPTE_1(a0)		# a2 = upte[1]
2133	PTR_L	v0, L_PCB(a0)			# va = l->l_addr
2134#if VM_MIN_KERNEL_ADDRESS == MIPS_KSEG2_START
2135	li	t0, VM_MIN_KERNEL_ADDRESS	# compute index
2136	blt	v0, t0, resume
2137	 nop
2138#else
2139	li	t0, MIPS_KSEG0_START		# above XKSEG?
2140	blt	t0, v0, resume
2141	 nop
2142	li	t0, VM_MIN_KERNEL_ADDRESS>>32	# below XKSEG?
2143	dsll32	t0, t0, 0
2144	blt	v0, t0, resume
2145	 nop
2146#endif
2147
2148	and	t0, v0, MIPS3_PG_ODDPG
2149	beq	t0, zero, entry0
2150	 nop
2151
2152	PANIC("USPACE sat on odd page boundary")
2153
2154entry0:
2155	_MTC0	v0, MIPS_COP_0_TLB_HI		# VPN = va
2156	COP0_SYNC
2157	nop
2158	nop
2159	tlbp					# probe VPN
2160	COP0_SYNC
2161	nop
2162	nop
2163	mfc0	t0, MIPS_COP_0_TLB_INDEX
2164	nop
2165	bltz	t0, entry0set
2166	sll	t0, t0, PGSHIFT + 1		# PAGE_SHIFT + 1
2167	PTR_LA	t0, MIPS_KSEG0_START(t0)
2168	_MTC0	t0, MIPS_COP_0_TLB_HI
2169	COP0_SYNC
2170	_MTC0	zero, MIPS_COP_0_TLB_LO0
2171	COP0_SYNC
2172	_MTC0	zero, MIPS_COP_0_TLB_LO1
2173	COP0_SYNC
2174	nop
2175	nop
2176	tlbwi
2177	COP0_SYNC
2178	nop
2179	nop
2180	_MTC0	v0, MIPS_COP_0_TLB_HI		# set VPN again
2181	COP0_SYNC
2182entry0set:
2183	mtc0	zero, MIPS_COP_0_TLB_INDEX	# TLB entry #0
2184	COP0_SYNC
2185	or	a1, MIPS3_PG_G
2186	_MTC0	a1, MIPS_COP_0_TLB_LO0		# upte[0] | PG_G
2187	COP0_SYNC
2188	or	a2, MIPS3_PG_G
2189	_MTC0	a2, MIPS_COP_0_TLB_LO1		# upte[1] | PG_G
2190	COP0_SYNC
2191	nop
2192	nop
2193	tlbwi					# set TLB entry #0
2194	COP0_SYNC
2195	nop
2196	nop
2197
2198resume:
2199#endif /* !ENABLE_MIPS_16KB_PAGE */
2200	j	ra
2201	nop
2202END(MIPSX(cpu_switch_resume))
2203
2204/*
2205 * void mipsN_TBIS(vaddr_t va)
2206 *
2207 * Invalidate the TLB entry that matches the given vaddr and ASID, if
2207 * one is found.
2208 */
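/*
 * Roughly equivalent C (a hedged sketch; tlb_probe() and
 * tlb_write_indexed() stand in for tlbp/tlbwi; the real code also
 * restores the previous EntryHi/ASID and page mask afterwards):
 *
 *	void mipsN_TBIS(vaddr_t va)
 *	{
 *		int idx;
 *
 *		set_entryhi(va & (MIPS3_PG_HVPN | MIPS3_PG_ASID));
 *		if ((idx = tlb_probe()) < 0)
 *			return;			// not in the TLB
 *		// retarget the slot at an unmatchable KSEG0 address and
 *		// clear both page entries
 *		set_entryhi(MIPS_KSEG0_START + (idx << (PGSHIFT + 1)));
 *		set_entrylo0(0);
 *		set_entrylo1(0);
 *		tlb_write_indexed(idx);
 *	}
 */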
2209LEAF_NOPROFILE(MIPSX(TBIS))
2210	mfc0	v1, MIPS_COP_0_STATUS		# save status register
2211	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
2212	COP0_SYNC
2213
2214	li	v0, (MIPS3_PG_HVPN | MIPS3_PG_ASID)
2215	_MFC0	t0, MIPS_COP_0_TLB_HI		# save current ASID
2216	mfc0	t3, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
2217	and	a0, a0, v0			# make sure valid entryHi
2218	_MTC0	a0, MIPS_COP_0_TLB_HI		# look for the vaddr & ASID
2219	COP0_SYNC
2220	nop
2221	nop
2222	tlbp					# probe the entry in question
2223	COP0_SYNC
2224	nop
2225	nop
2226	mfc0	v0, MIPS_COP_0_TLB_INDEX	# see what we got
2227	#nop					# -slip-
2228	#nop					# -slip-
2229	bltz	v0, 1f				# index < 0 then skip
2230	li	t1, MIPS_KSEG0_START		# invalid address
2231	PTR_SLL	v0, v0, PGSHIFT + 1		# PAGE_SHIFT + 1
2232	PTR_ADDU t1, t1, v0
2233	_MTC0	t1, MIPS_COP_0_TLB_HI		# make entryHi invalid
2234	COP0_SYNC
2235	_MTC0	zero, MIPS_COP_0_TLB_LO0	# zero out entryLo0
2236	COP0_SYNC
2237	_MTC0	zero, MIPS_COP_0_TLB_LO1	# zero out entryLo1
2238	COP0_SYNC
2239#if 1
2240	nop
2241#else
2242	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out pageMask
2243	COP0_SYNC
2244#endif
2245	nop
2246	nop
2247
2248	tlbwi
2249	COP0_SYNC
2250	nop
2251	nop
22521:
2253	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore current ASID
2254	COP0_SYNC
2255	mtc0	t3, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
2256	COP0_SYNC
2257	nop
2258	nop
2259	j	ra
2260	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
2261	COP0_SYNC				# XXXX - not executed!!
2262END(MIPSX(TBIS))
2263
2264/*
2265 * void mips3_TBIAP(int sizeofTLB)
2266 *
2267 * Invalidate TLB entries belonging to per-process user spaces while
2268 * leaving kernel-space entries, which are marked global, intact.
2269 */
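/*
 * Roughly equivalent C (a hedged sketch, same illustrative helper
 * names as in the TBIS comment above):
 *
 *	void mips3_TBIAP(int sizeofTLB)
 *	{
 *		int i;
 *
 *		for (i = tlb_wired_count(); i < sizeofTLB; i++) {
 *			tlb_read(i);
 *			if (get_entrylo1() & MIPS3_PG_G)
 *				continue;	// global entry: kernel, keep it
 *			set_entryhi(MIPS_KSEG0_START + (i << (PGSHIFT + 1)));
 *			set_entrylo0(0);
 *			set_entrylo1(0);
 *			set_pagemask(0);
 *			tlb_write_indexed(i);
 *		}
 *	}
 */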
2270LEAF_NOPROFILE(MIPSX(TBIAP))
2271	mfc0	v1, MIPS_COP_0_STATUS		# save status register
2272	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
2273	COP0_SYNC
2274
2275	move	t2, a0
2276	mfc0	t1, MIPS_COP_0_TLB_WIRED
2277	li	v0, MIPS_KSEG0_START		# invalid address
2278	mfc0	t3, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
2279
2280	# do {} while (t1 < t2)
22811:
2282	mtc0	t1, MIPS_COP_0_TLB_INDEX	# set index
2283	COP0_SYNC
2284	sll	ta0, t1, PGSHIFT + 1		# PAGE_SHIFT + 1
2285	nop
2286	/* XXX simonb: lose this nop for mips32/64? */
2287	nop
2288	tlbr					# obtain an entry
2289	COP0_SYNC
2290	/* XXX simonb: lose these nops for mips32/64? */
2291	nop
2292	nop
2293	nop
2294	_MFC0	a0, MIPS_COP_0_TLB_LO1
2295	and	a0, a0, MIPS3_PG_G		# check whether it has the G bit
2296	bnez	a0, 2f
2297	PTR_ADDU ta0, ta0, v0
2298
2299	_MTC0	ta0, MIPS_COP_0_TLB_HI		# make entryHi invalid
2300	COP0_SYNC
2301	_MTC0	zero, MIPS_COP_0_TLB_LO0	# zero out entryLo0
2302	COP0_SYNC
2303	_MTC0	zero, MIPS_COP_0_TLB_LO1	# zero out entryLo1
2304	COP0_SYNC
2305	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out mask entry
2306	COP0_SYNC
2307	/* XXX simonb: lose these nops for mips32/64? */
2308	nop
2309	nop
2310	tlbwi					# invalidate the TLB entry
2311	COP0_SYNC
23122:
2313	addu	t1, t1, 1
2314	bne	t1, t2, 1b
2315	nop
2316
2317	mtc0	t3, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
2318	COP0_SYNC
2319	/* XXX simonb: lose these nops for mips32/64? */
2320	nop
2321	nop
2322	j	ra				# new ASID will be set soon
2323	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
2324	COP0_SYNC				# XXXX - not executed!!
2325END(MIPSX(TBIAP))
2326
2327/*
2328 * void mipsN_TBIA(int sizeofTLB)
2329 *
2330 * Invalidate all non-wired TLB entries.
2331 */
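/*
 * Roughly equivalent C (a hedged sketch, same illustrative helper
 * names as above); EntryLo0/1 and the page mask only need to be
 * zeroed once, since tlbwi rewrites them into every slot:
 *
 *	void mipsN_TBIA(int sizeofTLB)
 *	{
 *		int i;
 *
 *		set_entrylo0(0);
 *		set_entrylo1(0);
 *		set_pagemask(0);
 *		for (i = tlb_wired_count(); i < sizeofTLB; i++) {
 *			set_entryhi(MIPS_KSEG0_START + (i << (PGSHIFT + 1)));
 *			tlb_write_indexed(i);
 *		}
 *	}
 */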
2332LEAF_NOPROFILE(MIPSX(TBIA))
2333	mfc0	v1, MIPS_COP_0_STATUS		# save status register
2334	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts
2335	COP0_SYNC
2336
2337	li	v0, MIPS_KSEG0_START		# invalid address
2338	_MFC0	t0, MIPS_COP_0_TLB_HI		# save current ASID
2339	mfc0	t1, MIPS_COP_0_TLB_WIRED
2340	mfc0	t2, MIPS_COP_0_TLB_PG_MASK	# save current pgMask
2341
2342	_MTC0	zero, MIPS_COP_0_TLB_LO0	# zero out entryLo0
2343	COP0_SYNC
2344	_MTC0	zero, MIPS_COP_0_TLB_LO1	# zero out entryLo1
2345	COP0_SYNC
2346	mtc0	zero, MIPS_COP_0_TLB_PG_MASK	# zero out pageMask
2347	COP0_SYNC
2348
2349	# do {} while (t1 < a0)
23501:
2351	mtc0	t1, MIPS_COP_0_TLB_INDEX	# set TLBindex
2352	COP0_SYNC
2353	sll	ta0, t1, PGSHIFT + 1		# PAGE_SHIFT + 1
2354	add	ta0, v0, ta0
2355	_MTC0	ta0, MIPS_COP_0_TLB_HI		# make entryHi invalid
2356	COP0_SYNC
2357	nop
2358	nop
2359	tlbwi					# clear the entry
2360	COP0_SYNC
2361	addu	t1, t1, 1			# increment index
2362	bne	t1, a0, 1b
2363	nop
2364
2365	_MTC0	t0, MIPS_COP_0_TLB_HI		# restore ASID
2366	COP0_SYNC
2367	mtc0	t2, MIPS_COP_0_TLB_PG_MASK	# restore pgMask
2368	COP0_SYNC
2369	nop
2370	nop
2371	j	ra
2372	mtc0	v1, MIPS_COP_0_STATUS		# restore status register
2373	COP0_SYNC				# XXXX - not executed!!
2374END(MIPSX(TBIA))
2375
2376#ifdef USE_64BIT_INSTRUCTIONS
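/*
 * mipsN_pagezero(va)
 *
 * Zero one page with 64-bit sd stores, 64 bytes per loop iteration
 * (PAGE_SIZE >> 6 iterations); the page address arrives in a0.  The
 * stores are interleaved across each 64-byte block so that cache-line
 * misses can overlap (see the inline comments).  This header is an
 * added sketch in the style of the function headers above.
 */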
2377LEAF(MIPSX(pagezero))
2378	li	a1, PAGE_SIZE >> 6
2379
23801:	sd	zero, 0(a0)			# try to miss cache first
2381	sd	zero, 32(a0)
2382	subu	a1, 1
2383	sd	zero, 16(a0)
2384	sd	zero, 48(a0)
2385	sd	zero, 8(a0)			# fill in cache lines
2386	sd	zero, 40(a0)
2387	sd	zero, 24(a0)
2388	sd	zero, 56(a0)
2389	bgtz	a1, 1b
2390	addu	a0, 64
2391
2392	j	ra
2393	nop
2394END(MIPSX(pagezero))
2395#endif /* USE_64BIT_INSTRUCTIONS */
2396
2397	.rdata
2398
2399	.globl _C_LABEL(MIPSX(locoresw))
2400_C_LABEL(MIPSX(locoresw)):
2401	PTR_WORD _C_LABEL(MIPSX(cpu_switch_resume))
2402	PTR_WORD _C_LABEL(MIPSX(lwp_trampoline))
2403	PTR_WORD _C_LABEL(nullop)
2404	PTR_WORD _C_LABEL(MIPSX(setfunc_trampoline))
2405
2406MIPSX(excpt_sw):
2407	####
2408	#### The kernel exception handlers.
2409	####
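	#### Note (added): each half of this table has 32 entries, indexed
	#### by the ExcCode field of the Cause register; this first half
	#### holds the kernel-mode handlers and the second half, further
	#### below, the user-mode handlers.
	####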
2410	PTR_WORD _C_LABEL(MIPSX(KernIntr))		#  0 external interrupt
2411	PTR_WORD _C_LABEL(MIPSX(KernGenException))	#  1 TLB modification
2412	PTR_WORD _C_LABEL(MIPSX(TLBInvalidException))# 2 TLB miss (LW/I-fetch)
2413	PTR_WORD _C_LABEL(MIPSX(TLBInvalidException))# 3 TLB miss (SW)
2414	PTR_WORD _C_LABEL(MIPSX(KernGenException))	#  4 address error (LW/I-fetch)
2415	PTR_WORD _C_LABEL(MIPSX(KernGenException))	#  5 address error (SW)
2416	PTR_WORD _C_LABEL(MIPSX(KernGenException))	#  6 bus error (I-fetch)
2417	PTR_WORD _C_LABEL(MIPSX(KernGenException))	#  7 bus error (load or store)
2418	PTR_WORD _C_LABEL(MIPSX(KernGenException))	#  8 system call
2419	PTR_WORD _C_LABEL(MIPSX(KernGenException))	#  9 breakpoint
2420	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 10 reserved instruction
2421	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 11 coprocessor unusable
2422	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 12 arithmetic overflow
2423	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 13 r4k trap exception
2424#if defined(MIPS3) && !defined(MIPS3_5900)
2425	PTR_WORD _C_LABEL(mips3_VCEI)			# 14 r4k virt coherence
2426#else
2427	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 14 reserved
2428#endif
2429	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 15 r4k FP exception
2430	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 16 reserved
2431	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 17 reserved
2432	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 18 reserved
2433	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 19 reserved
2434	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 20 reserved
2435	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 21 reserved
2436	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 22 reserved
2437	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 23 watch exception
2438	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 24 reserved
2439	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 25 reserved
2440	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 26 reserved
2441	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 27 reserved
2442	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 28 reserved
2443	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 29 reserved
2444	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 30 reserved
2445#if defined(MIPS3) && !defined(MIPS3_5900)
2446	PTR_WORD _C_LABEL(mips3_VCED)		# 31 v. coherence exception data
2447#else
2448	PTR_WORD _C_LABEL(MIPSX(KernGenException))	# 31 reserved
2449#endif
2450	#####
2451	##### The user exception handlers.
2452	#####
2453	PTR_WORD _C_LABEL(MIPSX(UserIntr))		#  0
2454	PTR_WORD _C_LABEL(MIPSX(UserGenException))	#  1
2455	PTR_WORD _C_LABEL(MIPSX(UserGenException))	#  2
2456	PTR_WORD _C_LABEL(MIPSX(UserGenException))	#  3
2457	PTR_WORD _C_LABEL(MIPSX(UserGenException))	#  4
2458	PTR_WORD _C_LABEL(MIPSX(UserGenException))	#  5
2459	PTR_WORD _C_LABEL(MIPSX(UserGenException))	#  6
2460	PTR_WORD _C_LABEL(MIPSX(UserGenException))	#  7
2461	PTR_WORD _C_LABEL(MIPSX(SystemCall))		#  8
2462	PTR_WORD _C_LABEL(MIPSX(UserGenException))	#  9
2463	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 10
2464	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 11
2465	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 12
2466	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 13
2467#if defined(MIPS3) && !defined(MIPS3_5900)
2468	PTR_WORD _C_LABEL(mips3_VCEI)			# 14
2469#else
2470	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 14
2471#endif
2472	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 15
2473	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 16
2474	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 17
2475	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 18
2476	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 19
2477	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 20
2478	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 21
2479	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 22
2480	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 23
2481	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 24
2482	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 25
2483	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 26
2484	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 27
2485	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 28
2486	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 29
2487	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 30
2488#if defined(MIPS3) && !defined(MIPS3_5900)
2489	PTR_WORD _C_LABEL(mips3_VCED)			# 31 v. coherence exception data
2490#else
2491	PTR_WORD _C_LABEL(MIPSX(UserGenException))	# 31
2492#endif
2493