/* xref: /netbsd/sys/arch/mips/mips/locore.S (revision c4a72b64) */
1/*	$NetBSD: locore.S,v 1.148 2002/11/09 02:02:31 nisimura Exp $	*/
2
3/*
4 * Copyright (c) 1992, 1993
5 *	The Regents of the University of California.  All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Digital Equipment Corporation and Ralph Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by the University of
21 *	California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * Copyright (C) 1989 Digital Equipment Corporation.
39 * Permission to use, copy, modify, and distribute this software and
40 * its documentation for any purpose and without fee is hereby granted,
41 * provided that the above copyright notice appears in all copies.
42 * Digital Equipment Corporation makes no representations about the
43 * suitability of this software for any purpose.  It is provided "as is"
44 * without express or implied warranty.
45 *
46 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
47 *	v 1.1 89/07/11 17:55:04 nelson Exp  SPRITE (DECWRL)
48 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
49 *	v 9.2 90/01/29 18:00:39 shirriff Exp  SPRITE (DECWRL)
50 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
51 *	v 1.1 89/07/10 14:27:41 nelson Exp  SPRITE (DECWRL)
52 *
53 *	@(#)locore.s	8.5 (Berkeley) 1/4/94
54 */
55
56#include "opt_cputype.h"	/* which mips CPU levels do we support? */
57#include "opt_ddb.h"
58#include "opt_kgdb.h"
59#include "opt_ns.h"
60#include "opt_ccitt.h"
61#include "opt_iso.h"
62#include "opt_lockdebug.h"
63#include "fs_coda.h"
64
65#include <sys/cdefs.h>
66
67#include <machine/param.h>
68#include <mips/asm.h>
69#include <mips/cpuregs.h>
70#include <mips/trap.h>
71
72#include "assym.h"
73
	.set	noreorder

/*
 * Kernel entry point (also the address libkvm uses as kernel_text).
 *
 * Entered from the firmware/bootloader with interrupts in an unknown
 * state and a0-a3 holding the boot arguments, which are passed through
 * untouched to mach_init().  Falls through into main() on the proc0
 * stack and never returns.
 */
	.globl	start
	.globl	_C_LABEL(kernel_text)		# libkvm refers this
start:
_C_LABEL(kernel_text):
#if defined(MIPS3_PLUS) && !defined(MIPS1)
	/* keep firmware exception handler until we hook. */
	mfc0	v0, MIPS_COP_0_STATUS
	and	v0, MIPS_SR_BEV			# preserve only the BEV bit
	mtc0	v0, MIPS_COP_0_STATUS		# Disable interrupts
	COP0_SYNC
#else
	mtc0	zero, MIPS_COP_0_STATUS		# Disable interrupts
	COP0_SYNC
#endif
/*
 * Initialize stack and call machine startup.
 *
 * If the firmware gave us a usable stack (above start, or within 4KB
 * below it), keep it; otherwise leave sp as handed to us.  In either
 * kept case sp becomes start - CALLFRAME_SIZ.
 */
	la	v1, start
	slt	v0, v1, sp			# is sp above the kernel text?
	bne	v0, zero, 1f
	addu	v0, v1, -CALLFRAME_SIZ		# BDSLOT: candidate new sp
	subu	v0, v1, sp
	slt	v0, v0, 4096			# within 4KB of _start
	beq	v0, zero, 2f			# too far below: keep firmware sp
	addu	v0, v1, -CALLFRAME_SIZ		# BDSLOT: candidate new sp
1:
	move	sp, v0
2:
#ifdef __GP_SUPPORT__
	la	gp, _C_LABEL(_gp)		# set up small-data base pointer
#endif

#ifdef NOFPU /* No FPU; avoid touching FPU registers */
	li	t0, 0				# Disable interrupts and
	mtc0	t0, MIPS_COP_0_STATUS		# the fp coprocessor
	COP0_SYNC
#ifdef HPCMIPS_L1CACHE_DISABLE
	mfc0	t0, MIPS_COP_0_CONFIG
	li	t1, 0xfffffff8			# clear K0 cacheability field
	and	t0, t0, t1
	or	t0, 0x00000002			# XXX, KSEG0 is uncached
	mtc0	t0, MIPS_COP_0_CONFIG
	COP0_SYNC
#endif /* HPCMIPS_L1CACHE_DISABLE */
#else
	mfc0	t0, MIPS_COP_0_STATUS
	or	t0, MIPS_SR_COP_1_BIT		# Disable interrupts, and
	mtc0	t0, MIPS_COP_0_STATUS		# enable the fp coprocessor
	COP0_SYNC
#endif
	nop
	nop
	mfc0	t0, MIPS_COP_0_PRID		# read product revision ID
	nop					# XXX r4000 pipeline:
	nop					# wait for new SR
	nop					# to be effective
	nop
#ifdef NOFPU /* No FPU; avoid touching FPU registers */
	add	t1, zero, zero			# fake FPU ID of 0
#else
	cfc1	t1, MIPS_FPU_ID			# read FPU ID register
#endif
	sw	t0, _C_LABEL(cpu_id)		# save PRID register
	sw	t1, _C_LABEL(fpu_id)		# save FPU ID register
	jal	_C_LABEL(mach_init)		# mach_init(a0, a1, a2, a3)
	nop

	lw	sp, _C_LABEL(proc0paddr)	# switch to proc0 stack
	nop
	addu	sp, sp, USPACE - FRAME_SIZ - CALLFRAME_SIZ
	jal	_C_LABEL(main)			# main(void)
	nop
	PANIC("main() returned")		# main never returns
	.set	at
	.globl _C_LABEL(verylocore)
_C_LABEL(verylocore):
152
153/*
154 * When no processes are on the runq, cpu_switch branches to idle
155 * to wait for something to come ready.
156 * Note: this is really a part of cpu_switch() but defined here for kernel
157 * profiling.
158 */
/*
 * mips_idle -- wait with interrupts enabled until sched_whichqs becomes
 * non-empty, opportunistically zeroing free pages via uvm_pageidlezero().
 * On exit, interrupts are disabled (and sched_lock held under LOCKDEBUG)
 * and control jumps to cpu_switch_queuescan.  Clobbers t0-t3, ra.
 */
LEAF(mips_idle)
	sw	zero, _C_LABEL(curproc)		# set curproc NULL for stats
#if defined(LOCKDEBUG)
	jal	_C_LABEL(sched_unlock_idle)	# release sched_lock
	nop
#endif
#ifdef 	IPL_ICU_MASK
	# all interrupts enable.
	sw	zero, _C_LABEL(md_imask)
	jal	_C_LABEL(md_imask_update)
	nop
#endif
	li	t0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
	DYNAMIC_STATUS_MASK(t0,t1)		# machine dependent masking
	mtc0	t0, MIPS_COP_0_STATUS		# enable all interrupts
	COP0_SYNC
	nop

	/* Try to zero some free pages. */
	lw	t0, _C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO
	nop
	beq	t0, zero, 1f			# nothing wants idle zeroing
	nop
	jal	_C_LABEL(uvm_pageidlezero)
	nop
1:
#ifdef MIPS_DYNAMIC_STATUS_MASK
	# Do this again since the mask may have changed.
	li	t3, (MIPS_INT_MASK | MIPS_SR_INT_IE)
	DYNAMIC_STATUS_MASK(t3,t1)		# machine dependent masking
	mtc0	t3, MIPS_COP_0_STATUS		# enable all interrupts
	COP0_SYNC
	nop
#endif
	lw	t0, _C_LABEL(sched_whichqs)	# look for non-empty queue
	nop
	beq	t0, zero, 1b			# still idle: loop
	nop
	mtc0	zero, MIPS_COP_0_STATUS		# disable all interrupts
	COP0_SYNC
	nop
	nop
	nop
#if defined(LOCKDEBUG)
	nop
	jal	_C_LABEL(sched_lock_idle)	# acquire sched_lock
	nop
#endif
	la	ra, cpu_switch_queuescan	# resume the run-queue scan
	j	ra
	nop
END(mips_idle)
211
212/*
213 * cpu_switch(struct proc *)
214 * Find the highest priority process and resume it.
215 */
/*
 * cpu_switch(struct proc *p)
 *
 * Save the callee-saved register context of the outgoing proc (a0) into
 * its PCB, pick the highest-priority runnable proc off sched_qs[], and
 * restore that proc's context.  Returns 1 in v0 so that the return path
 * shared with savectx() can be distinguished.
 *
 * Register roles after the queue scan:
 *	s7 = new proc (survives the calls below)
 *	t2 = run-queue index, t3 = working copy of sched_whichqs
 * Must be entered with interrupts blocked; sched_lock is held across
 * the queue manipulation (explicitly so under LOCKDEBUG).
 */
NESTED(cpu_switch, CALLFRAME_SIZ, ra)
	lw	a0, P_ADDR(a0)			# a0 = outgoing proc's u-area
	mfc0	t0, MIPS_COP_0_STATUS
	REG_PROLOGUE
	REG_S	s0, U_PCB_CONTEXT+SF_REG_S0(a0)
	REG_S	s1, U_PCB_CONTEXT+SF_REG_S1(a0)
	REG_S	s2, U_PCB_CONTEXT+SF_REG_S2(a0)
	REG_S	s3, U_PCB_CONTEXT+SF_REG_S3(a0)
	REG_S	s4, U_PCB_CONTEXT+SF_REG_S4(a0)
	REG_S	s5, U_PCB_CONTEXT+SF_REG_S5(a0)
	REG_S	s6, U_PCB_CONTEXT+SF_REG_S6(a0)
	REG_S	s7, U_PCB_CONTEXT+SF_REG_S7(a0)
	REG_S	sp, U_PCB_CONTEXT+SF_REG_SP(a0)
	REG_S	s8, U_PCB_CONTEXT+SF_REG_S8(a0)
	REG_S	ra, U_PCB_CONTEXT+SF_REG_RA(a0)
	REG_S	t0, U_PCB_CONTEXT+SF_REG_SR(a0)
#ifdef IPL_ICU_MASK
	lw	t0, _C_LABEL(md_imask)		# also save the software
	sw	t0, U_PCB_PPL(a0)		# interrupt mask level
#endif
	REG_EPILOGUE
	subu	sp, sp, CALLFRAME_SIZ
	sw	ra, CALLFRAME_RA(sp)
	.mask	0x80000000, -4
/*
 * Entered here from idle() and switch_exit().  Interrupts are
 * blocked, and sched_lock is held.
 */
	.globl	cpu_switch_queuescan
cpu_switch_queuescan:
	lw	t0, _C_LABEL(sched_whichqs)	# look for non-empty queue
	li	t2, -1				# t2 = lowest bit set
	bne	t0, zero, 1f
	lw	t3, _C_LABEL(mips_locoresw) + MIPSX_CPU_IDLE	# BDSLOT
	nop					# for r2000/r3000
	jal	ra, t3				# no work: call MD idle hook
	nop
1:
	move	t3, t0				# t3 = saved whichqs
1:
#if defined(MIPS3_5900)	/* work around for branch prediction miss. */
	nop
	nop
	nop
#endif
	addu	t2, t2, 1			# find-first-set loop:
	and	t1, t0, 1			# bit set?
	beq	t1, zero, 1b
	srl	t0, t0, 1			# try next bit (BDSLOT)
/*
 * Remove process from queue.
 */
	sll	t0, t2, 3			# 8 bytes per queue head
	la	t1, _C_LABEL(sched_qs)
	addu	t0, t0, t1			# t0 = qp = &qs[highbit]
	lw	a0, P_FORW(t0)			# a0 = p = highest pri process
	nop
	lw	v0, P_FORW(a0)			# v0 = p->p_forw
	bne	t0, a0, 2f			# make sure something in queue
	sw	v0, P_FORW(t0)			# qp->ph_link = p->p_forw;
	PANIC("cpu_switch")			# nothing in queue
2:
	sw	t0, P_BACK(v0)			# p->p_forw->p_back = qp
	bne	v0, t0, 3f			# queue still not empty
	sw	zero, P_BACK(a0)		## for firewall checking
	li	v1, 1				# compute bit in 'whichqs'
	sll	v1, v1, t2
	xor	t3, t3, v1			# clear bit in 'whichqs'
	sw	t3, _C_LABEL(sched_whichqs)
3:
	/* Squirrel away proc pointer. */
	move	s7, a0				# s7 survives the calls below
#if defined(LOCKDEBUG)
	/*
	 * Done mucking with the run queues, release the
	 * scheduler lock, but keep interrupts out.
	 */
	jal	_C_LABEL(sched_unlock_idle)
	nop
	move	a0, s7				# restore proc
#endif

/*
 * Switch to new context.
 */
#if defined(MULTIPROCESSOR)
	/*
	 * XXXSMP
	 * p->p_cpu = curcpu();
	 */
#endif
	li	v1, SONPROC			# p->p_stat = SONPROC
	sb	v1, P_STAT(a0)
	lw	t2, _C_LABEL(mips_locoresw) + MIPSX_CPU_SWITCH_RESUME
	sw	a0, _C_LABEL(curproc)
	jal	ra, t2				# MD switch-resume hook
	nop

#if 1	/* XXX XXX XXX */
	/* run pmap_activate() below on the new proc's kernel stack */
	REG_PROLOGUE
	lw	a0, P_ADDR(s7)
	nop
	REG_L	sp, U_PCB_CONTEXT+SF_REG_SP(a0)
	nop
	REG_EPILOGUE
#endif	/* XXX XXX XXX */

	jal	_C_LABEL(pmap_activate)		# switch address space
	move	a0, s7				# -BDSLOT-

	lw	a0, P_ADDR(s7)
	sw	zero, _C_LABEL(want_resched)	# we've context switched
	sw	a0, _C_LABEL(curpcb)

#ifdef IPL_ICU_MASK
	# restore ICU state
	lw	t0, U_PCB_PPL(a0)
	sw	t0, _C_LABEL(md_imask)
	jal	_C_LABEL(md_imask_update)
	nop
	lw	a0, P_ADDR(s7)			# restore pcb_context pointer.
#endif /* IPL_ICU_MASK */
	/*
	 * Check for restartable atomic sequences (RAS): if the new proc
	 * was interrupted inside a registered RAS, rewind its saved EPC
	 * to the sequence start returned by ras_lookup().
	 */
	lw	v1, P_NRAS(s7)
	addu	t0, a0, USPACE - FRAME_SIZ	# t0 = proc's trapframe
	beq	v1, zero, 1f			# no RAS registered
	nop
	move	a0, s7
	jal	_C_LABEL(ras_lookup)
	lw	a1, FRAME_EPC(t0)		# BDSLOT: a1 = interrupted PC
	lw	a0, P_ADDR(s7)
	li	v1, -1
	beq	v1, v0, 1f			# -1 => not in a RAS
	addu	t0, a0, USPACE - FRAME_SIZ	# BDSLOT: recompute trapframe
	sw	v0, FRAME_EPC(t0)		# restart the sequence
1:
	REG_PROLOGUE
	REG_L	v0, U_PCB_CONTEXT+SF_REG_SR(a0)
	DYNAMIC_STATUS_MASK(v0,ra)		# machine dependent masking
	REG_L	ra, U_PCB_CONTEXT+SF_REG_RA(a0)
	REG_L	s0, U_PCB_CONTEXT+SF_REG_S0(a0)
	REG_L	s1, U_PCB_CONTEXT+SF_REG_S1(a0)
	REG_L	s2, U_PCB_CONTEXT+SF_REG_S2(a0)
	REG_L	s3, U_PCB_CONTEXT+SF_REG_S3(a0)
	REG_L	s4, U_PCB_CONTEXT+SF_REG_S4(a0)
	REG_L	s5, U_PCB_CONTEXT+SF_REG_S5(a0)
	REG_L	s6, U_PCB_CONTEXT+SF_REG_S6(a0)
	REG_L	s7, U_PCB_CONTEXT+SF_REG_S7(a0)
	REG_L	sp, U_PCB_CONTEXT+SF_REG_SP(a0)
	REG_L	s8, U_PCB_CONTEXT+SF_REG_S8(a0)
	REG_EPILOGUE
	mtc0	v0, MIPS_COP_0_STATUS		# restore saved SR
	COP0_SYNC
	j	ra
	li	v0, 1				# possible return to 'savectx()'
END(cpu_switch)
374
375/*
376 * switch_exit(struct proc *)
377 *
378 * Make the named process exit.  Switch SP to proc0 stack, then
379 * call exit2() to schedule exiting proc's vmspace and stack to be
380 * released by the reaper thread.  MUST BE CALLED AT SPLHIGH.
381 */
/*
 * switch_exit(struct proc *p) -- final context switch away from an
 * exiting proc.  Moves onto proc0's stack, hands the dead proc (still
 * in a0) to exit2() for reaping, then falls into the run-queue scan in
 * cpu_switch.  Never returns to the caller.
 */
LEAF(switch_exit)
	lw	v0, _C_LABEL(proc0paddr)	# get proc0 p_addr
	nop
	sw	v0, _C_LABEL(curpcb)		# set current pcb
	REG_PROLOGUE
	REG_L	sp, U_PCB_CONTEXT+SF_REG_SP(v0)	# restore stack pointer
	REG_EPILOGUE
	jal	_C_LABEL(exit2)			# proc already in a0
	nop
#if defined(LOCKDEBUG)
	jal	_C_LABEL(sched_lock_idle)	# acquire sched_lock
	nop
#endif
	la	ra, cpu_switch_queuescan	# rathole to cpu_switch()
	j	ra
	sub	sp, sp, CALLFRAME_SIZ		#BDSlot: set stack call frame
END(switch_exit)
399
400/*
401 * savectx(struct user *up)
402 */
/*
 * savectx(struct user *up) -- save the callee-saved registers, sp, ra
 * and SR into up->u_pcb.pcb_context.  Returns 0 here; a later resume of
 * this context via cpu_switch appears as a second return with value 1.
 */
LEAF(savectx)
	mfc0	v0, MIPS_COP_0_STATUS
	REG_PROLOGUE
	REG_S	s0, U_PCB_CONTEXT+SF_REG_S0(a0)
	REG_S	s1, U_PCB_CONTEXT+SF_REG_S1(a0)
	REG_S	s2, U_PCB_CONTEXT+SF_REG_S2(a0)
	REG_S	s3, U_PCB_CONTEXT+SF_REG_S3(a0)
	REG_S	s4, U_PCB_CONTEXT+SF_REG_S4(a0)
	REG_S	s5, U_PCB_CONTEXT+SF_REG_S5(a0)
	REG_S	s6, U_PCB_CONTEXT+SF_REG_S6(a0)
	REG_S	s7, U_PCB_CONTEXT+SF_REG_S7(a0)
	REG_S	sp, U_PCB_CONTEXT+SF_REG_SP(a0)
	REG_S	s8, U_PCB_CONTEXT+SF_REG_S8(a0)
	REG_S	ra, U_PCB_CONTEXT+SF_REG_RA(a0)
	REG_S	v0, U_PCB_CONTEXT+SF_REG_SR(a0)
	REG_EPILOGUE
	j	ra
	move	v0, zero			# BDSLOT: return 0
END(savectx)
422
423#if defined(DDB) || defined(KGDB)
424/*
425 * setjmp(label_t *)
426 * longjmp(label_t *)
427 */
/*
 * setjmp(label_t *lp) -- debugger-only context save.  Stores the
 * callee-saved registers, sp, ra and SR into *lp and returns 0; a
 * matching longjmp() returns 1 from this call site.
 */
LEAF(setjmp)
	mfc0	v0, MIPS_COP_0_STATUS
	REG_PROLOGUE
	REG_S	s0, SF_REG_S0(a0)
	REG_S	s1, SF_REG_S1(a0)
	REG_S	s2, SF_REG_S2(a0)
	REG_S	s3, SF_REG_S3(a0)
	REG_S	s4, SF_REG_S4(a0)
	REG_S	s5, SF_REG_S5(a0)
	REG_S	s6, SF_REG_S6(a0)
	REG_S	s7, SF_REG_S7(a0)
	REG_S	sp, SF_REG_SP(a0)
	REG_S	s8, SF_REG_S8(a0)
	REG_S	ra, SF_REG_RA(a0)
	REG_S	v0, SF_REG_SR(a0)
	REG_EPILOGUE
	j	ra
	move	v0, zero			# BDSLOT: return 0
END(setjmp)
447
/*
 * longjmp(label_t *lp) -- restore the context saved by setjmp(lp),
 * including SR, and resume at the saved ra with return value 1.
 */
LEAF(longjmp)
	REG_PROLOGUE
	REG_L	v0, SF_REG_SR(a0)
	DYNAMIC_STATUS_MASK(v0,ra)		# machine dependent masking
	REG_L	ra, SF_REG_RA(a0)
	REG_L	s0, SF_REG_S0(a0)
	REG_L	s1, SF_REG_S1(a0)
	REG_L	s2, SF_REG_S2(a0)
	REG_L	s3, SF_REG_S3(a0)
	REG_L	s4, SF_REG_S4(a0)
	REG_L	s5, SF_REG_S5(a0)
	REG_L	s6, SF_REG_S6(a0)
	REG_L	s7, SF_REG_S7(a0)
	REG_L	sp, SF_REG_SP(a0)
	REG_L	s8, SF_REG_S8(a0)
	REG_EPILOGUE
	mtc0	v0, MIPS_COP_0_STATUS		# restore saved SR
	COP0_SYNC
	j	ra
	li	v0, 1				# BDSLOT: setjmp returns 1
END(longjmp)
469#endif
470
471
472/*
473 * MIPS processor interrupt control
474 *
475 * Used as building blocks for spl(9) kernel interface.
476 */
/*
 * _splraise(mask) -- block the interrupt lines named in a0 by clearing
 * those INT bits in SR; all other SR bits are preserved.  Returns the
 * previous (INT | IE) state in v0 for a later _splrestore/_splset.
 * Clobbers t0 (inside DYNAMIC_STATUS_MASK).
 */
LEAF(_splraise)
XLEAF(_splraise_noprof)				# does not get mcount hooks
	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
	and	a0, a0, MIPS_INT_MASK		# extract INT bits
	nor	a0, zero, a0			# bitwise inverse of A0
	and	a0, a0, v0			# disable retaining other bits
	DYNAMIC_STATUS_MASK(a0,t0)		# machine dependent masking
	mtc0	a0, MIPS_COP_0_STATUS		# store back
	COP0_SYNC
	and	v0, v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
	j	ra
	nop
END(_splraise)
490
/*
 * _spllower(mask) -- set the SR INT field to exactly the complement of
 * a0 (enable everything not named in a0), preserving all non-INT bits.
 * Returns the previous (INT | IE) state in v0.  Clobbers t0, v1.
 */
LEAF(_spllower)
	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
	li	v1, ~MIPS_INT_MASK
	and	v1, v0, v1			# turn off INT bit
	nor	a0, zero, a0			# bitwise inverse of A0
	and	a0, a0, MIPS_INT_MASK		# extract INT bits
	or	a0, a0, v1			# disable making other bits on
	DYNAMIC_STATUS_MASK(a0,t0)		# machine dependent masking
	mtc0	a0, MIPS_COP_0_STATUS		# store back
	COP0_SYNC
	and	v0, v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
	j	ra
	nop
END(_spllower)
505
/*
 * _splrestore(mask) -- restore the SR INT field to the value previously
 * returned by an _spl* routine (only the INT bits of a0 are used; IE is
 * left as-is).  Returns the prior INT bits in v0.  Clobbers t0, v1.
 */
LEAF(_splrestore)
	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
	and	a0, a0, MIPS_INT_MASK
	li	v1, ~MIPS_INT_MASK
	and	v1, v1, v0			# turn off every INT bit
	or	v1, v1, a0			# set old INT bits
	DYNAMIC_STATUS_MASK(v1,t0)		# machine dependent masking
	mtc0	v1, MIPS_COP_0_STATUS		# store back
	COP0_SYNC
	and	v0, v0, MIPS_INT_MASK
	j	ra
	nop
END(_splrestore)
519
/*
 * _splset(mask) -- like _splrestore, but also restores the global
 * interrupt-enable (IE) bit along with the INT field from a0.
 * Returns the previous (INT | IE) state in v0.  Clobbers t0, v1.
 */
LEAF(_splset)
XLEAF(_splset_noprof)				# does not get mcount hooks
	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
	and	a0, a0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
	li	v1, ~(MIPS_INT_MASK | MIPS_SR_INT_IE)
	and	v1, v1, v0			# turn off every INT bit
	or	v1, v1, a0			# set old INT bits
	DYNAMIC_STATUS_MASK(v1,t0)		# machine dependent masking
	mtc0	v1, MIPS_COP_0_STATUS		# store back
	COP0_SYNC
	and	v0, v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
	j	ra
	nop
END(_splset)
534
/*
 * _splget() -- return the current (INT | IE) bits of SR in v0,
 * without modifying anything.
 */
LEAF(_splget)
	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
	and	v0, v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
	j	ra
	nop
END(_splget)
541
/*
 * _setsoftintr(mask) -- post the software-interrupt bits in a0 into the
 * CP0 Cause register.  Interrupts are disabled around the read-modify-
 * write so a real interrupt cannot clobber the update.  Clobbers v0, v1.
 */
LEAF(_setsoftintr)
	mfc0	v1, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts (2 cycles)
	COP0_SYNC
	nop
	nop
	mfc0	v0, MIPS_COP_0_CAUSE		# fetch cause register
	nop
	or	v0, v0, a0			# set soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE		# store back
	COP0_SYNC
	mtc0	v1, MIPS_COP_0_STATUS		# enable interrupts
	COP0_SYNC
	j	ra
	nop
END(_setsoftintr)
558
/*
 * _clrsoftintr(mask) -- clear the software-interrupt bits in a0 from
 * the CP0 Cause register, with interrupts disabled around the
 * read-modify-write.  Clobbers v0, v1; a0 is inverted in place.
 */
LEAF(_clrsoftintr)
	mfc0	v1, MIPS_COP_0_STATUS		# save status register
	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts (2 cycles)
	COP0_SYNC
	nop
	nop
	mfc0	v0, MIPS_COP_0_CAUSE		# fetch cause register
	nor	a0, zero, a0			# bitwise inverse of A0
	and	v0, v0, a0			# clear soft intr. bits
	mtc0	v0, MIPS_COP_0_CAUSE		# store back
	COP0_SYNC
	mtc0	v1, MIPS_COP_0_STATUS		# enable interrupts
	COP0_SYNC
	j	ra
	nop
END(_clrsoftintr)
575
/*
 * _splnone() -- drop to the lowest interrupt priority: clear all
 * pending soft-interrupt bits in Cause and enable every interrupt
 * source plus IE in SR.  Clobbers v0, t0.
 */
LEAF(_splnone)
	mtc0	zero, MIPS_COP_0_CAUSE		# clear SOFT_INT bits
	COP0_SYNC
	li	v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
	DYNAMIC_STATUS_MASK(v0,t0)		# machine dependent masking
	mtc0	v0, MIPS_COP_0_STATUS		# enable all sources
	COP0_SYNC
	nop
	j	ra
	nop
END(_splnone)
587
588#if defined(NS) || defined(ISO) || defined(CCITT) || defined(CODA)
589/*
590 * Insert 'p' after 'q'.
591 *	_insque(p, q)
592 *		caddr_t p, q;
593 */
/*
 * _insque(p, q) -- insert element p immediately after q on a doubly
 * linked queue whose links are at offset 0 (next) and 4 (prev).
 */
LEAF(_insque)
	lw	v0, 0(a1)		# v0 = q->next
	sw	a1, 4(a0)		# p->prev = q
	sw	v0, 0(a0)		# p->next = q->next
	sw	a0, 4(v0)		# q->next->prev = p
	j	ra
	sw	a0, 0(a1)		# q->next = p (BDSLOT)
END(_insque)
602
603/*
604 * Remove item 'p' from queue.
605 *	_remque(p)
606 *		caddr_t p;
607 */
/*
 * _remque(p) -- unlink element p from its doubly linked queue
 * (links at offset 0 = next, 4 = prev).  p's own links are left stale.
 */
LEAF(_remque)
	lw	v0, 0(a0)		# v0 = p->next
	lw	v1, 4(a0)		# v1 = p->prev
	nop
	sw	v0, 0(v1)		# p->prev->next = p->next
	j	ra
	sw	v1, 4(v0)		# p->next->prev = p->prev (BDSLOT)
END(_remque)
616#endif
617
618
619/*
620 * u_int32_t mips_cp0_cause_read(void)
621 *
622 *	Return the current value of the CP0 Cause register.
623 *
624 *	Note: Not profiled, skews CPU-clock measurement (mips_mcclock.c)
625 *	to uselessness.
626 */
LEAF_NOPROFILE(mips_cp0_cause_read)
	mfc0	v0, MIPS_COP_0_CAUSE	# return current Cause register
	j	ra
	nop
END(mips_cp0_cause_read)
632
633/*
634 * void mips_cp0_cause_write(u_int32_t)
635 *
636 *	Set the value of the CP0 Cause register.
637 */
LEAF(mips_cp0_cause_write)
	mtc0	a0, MIPS_COP_0_CAUSE	# Cause = a0
	COP0_SYNC
	nop				# let the write settle before
	nop				# the caller can observe it
	j	ra
	nop
END(mips_cp0_cause_write)
646
647
648/*
649 * u_int32_t mips_cp0_status_read(void)
650 *
651 *	Return the current value of the CP0 Status register.
652 */
LEAF(mips_cp0_status_read)
	mfc0	v0, MIPS_COP_0_STATUS	# return current Status register
	j	ra
	nop
END(mips_cp0_status_read)
658
659/*
660 * void mips_cp0_status_write(u_int32_t)
661 *
662 *	Set the value of the CP0 Status register.
663 *
664 *	Note: This is almost certainly not the way you want to write a
665 *	"permanent" value to to the CP0 Status register, since it gets
666 *	saved in trap frames and restores.
667 */
LEAF(mips_cp0_status_write)
	mtc0	a0, MIPS_COP_0_STATUS	# Status = a0 (see caveat above)
	COP0_SYNC
	nop				# let the write settle before
	nop				# the caller can observe it
	j	ra
	nop
END(mips_cp0_status_write)
676
677
678#if !defined(NOFPU) && !defined(SOFTFLOAT)
/*----------------------------------------------------------------------------
 *
 * MachFPInterrupt --
 * MachFPTrap --
 *
 *	Handle a floating point interrupt (r3k) or trap (r4k).
 *	the handlers are identical, only the reporting mechanisms differ.
 *
 *	MachFPInterrupt(status, cause, pc, frame)
 *		unsigned status;
 *		unsigned cause;
 *		unsigned pc;
 *		int *frame;
 *
 *	MachFPTrap(status, cause, pc, frame)
 *		unsigned status;
 *		unsigned cause;
 *		unsigned pc;
 *		int *frame;
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Either emulates the faulting FP instruction via MachEmulateFP(),
 *	or posts SIGFPE/SIGILL to curproc.  The FP coprocessor is
 *	re-disabled on return.
 *
 *----------------------------------------------------------------------------
 */
NESTED(MachFPInterrupt, CALLFRAME_SIZ, ra)
XNESTED(MachFPTrap)
	.mask	0x80000000, -4
	subu	sp, sp, CALLFRAME_SIZ
	mfc0	t0, MIPS_COP_0_STATUS
	sw	ra, CALLFRAME_RA(sp)
	or	t0, t0, MIPS_SR_COP_1_BIT	# enable CP1 so we can
	mtc0	t0, MIPS_COP_0_STATUS		# inspect the FPU state
	COP0_SYNC
	nop
	nop
	nop				# 1st extra nop for r4k
	nop				# 2nd extra nop for r4k

	cfc1	t0, MIPS_FPU_CSR	# stall til FP done
	cfc1	t0, MIPS_FPU_CSR	# now get status
	nop
	sll	t2, t0, (31 - 17)	# unimplemented operation?
	bgez	t2, 3f			# no, normal trap
	nop
/*
 * We received an unimplemented operation trap.
 *
 * We check whether it's an unimplemented FP instruction here rather
 * than invoking MachEmulateInst(), since it is faster.
 *
 * Fetch the instruction and emulate it.
 */
	bgez	a1, 1f			# Check the branch delay bit.
	nop
/*
 * The instruction is in the branch delay slot.
 */
	b	2f
	lw	a0, 4(a2)			# a0 = coproc instruction
/*
 * This is not in the branch delay slot so calculate the resulting
 * PC (epc + 4) into v0 and continue to MachEmulateFP().
 * NOTE(review): no PC is actually computed here; the comment above
 * appears stale -- confirm against MachEmulateFP()'s contract.
 */
1:
	lw	a0, 0(a2)			# a0 = coproc instruction
2:
	move	a2, a1				# a2 = cause for MachEmulateFP

/*
 * Check to see if the instruction to be emulated is a floating-point
 * instruction.
 */
	srl	t0, a0, MIPS_OPCODE_SHIFT
	beq	t0, MIPS_OPCODE_C1, 4f		# COP1 opcode: emulate it
	nop

/*
 * Send a floating point exception signal to the current process.
 */
	li	t0, 0xFFFFFF00			# replace the exception code
	and	a1, a1, t0			# in the saved cause with
	ori	a1, a1, T_RES_INST << MIPS_CR_EXC_CODE_SHIFT	# T_RES_INST
	REG_PROLOGUE
	REG_S	a1, FRAME_CAUSE(a3)
	REG_EPILOGUE

	move	a2, a0				# code = instruction
	lw	a0, _C_LABEL(curproc)		# get current process
	jal	_C_LABEL(trapsignal)
	li	a1, SIGILL			# BDSLOT

	b	FPReturn
	nop

/*
 * Send a FPE signal to the current process if it tripped any of
 * the VZOUI bits.
 */
3:
	REG_PROLOGUE
	REG_S	a1, FRAME_CAUSE(a3)
	REG_EPILOGUE

	move	a2, a0				# code = instruction
	lw	a0, _C_LABEL(curproc)		# get current process
	jal	_C_LABEL(trapsignal)
	li	a1, SIGFPE			# BDSLOT

	b	FPReturn
	nop

/*
 * Finally, we can call MachEmulateFP() where a0 is the instruction to emulate.
 */
4:
	jal	_C_LABEL(MachEmulateFP)
	move	a1, a3				# BDSLOT: a1 = trap frame

/*
 * Turn off the floating point coprocessor and return.
 */
FPReturn:
	mfc0	t0, MIPS_COP_0_STATUS
	lw	ra, CALLFRAME_RA(sp)
	and	t0, t0, ~MIPS_SR_COP_1_BIT
	mtc0	t0, MIPS_COP_0_STATUS
	COP0_SYNC
	j	ra
	addu	sp, sp, CALLFRAME_SIZ		# BDSLOT: pop frame
END(MachFPInterrupt)
814#endif /* !defined(NOFPU) && !defined(SOFTFLOAT) */
815
/*
 * mips_pagecopy(void *dst /* a0 */, void *src /* a1 */)
 *
 * Copy one page (NBPG bytes) from src to dst, unrolled 64 bytes
 * (n32/LP64, 64-bit loads) or 32 bytes (o32, 32-bit loads) per
 * iteration.  Loads are interleaved with unrelated stores to hide
 * memory latency.  Both pointers are assumed page-aligned.
 */
LEAF(mips_pagecopy)
#if defined(__mips_n32) || defined(_LP64)
	li	a2, NBPG >> 6			# a2 = 64-byte chunks left

1:	ld	t0, 0(a1)
	ld	ta0, 32(a1)
	ld	t2, 16(a1)
	ld	ta2, 48(a1)
	subu	a2, 1
	ld	t1, 8(a1)
	ld	t3, 24(a1)
	ld	ta1, 40(a1)
	ld	ta3, 56(a1)

	sd	t0, 0(a0)
	sd	ta0, 32(a0)
	sd	t2, 16(a0)
	sd	ta2, 48(a0)
	addu	a1, 64
	sd	t1, 8(a0)
	sd	t3, 24(a0)
	sd	ta1, 40(a0)
	sd	ta3, 56(a0)
	bgtz	a2,1b
	addu	a0, 64				# BDSLOT: advance dst
#else
	/* o32 */
	li	a2, NBPG >> 5			# a2 = 32-byte chunks left

1:	lw	t0, 0(a1)
	lw	ta0, 16(a1)
	subu	a2, 1
	lw	t1, 4(a1)
	lw	t2, 8(a1)
	lw	t3, 12(a1)
	lw	ta1, 20(a1)
	lw	ta2, 24(a1)
	lw	ta3, 28(a1)

	sw	t0, 0(a0)
	sw	ta0, 16(a0)
	addu	a1, 32
	sw	t1, 4(a0)
	sw	t2, 8(a0)
	sw	t3, 12(a0)
	sw	ta1, 20(a0)
	sw	ta2, 24(a0)
	sw	ta3, 28(a0)
	bgtz	a2,1b
	addu	a0, 32				# BDSLOT: advance dst
#endif /* __mips_n32 || _LP64 */
	j	ra
	nop
END(mips_pagecopy)
870
/*
 * mips_pagezero(void *page /* a0 */)
 *
 * Zero one page (NBPG bytes), unrolled 64 bytes (n32/LP64) or 32 bytes
 * (o32) per iteration.  Stores are ordered to touch distinct cache
 * lines first, then fill them in.  The pointer is assumed page-aligned.
 */
LEAF(mips_pagezero)
#if defined(__mips_n32) || defined(_LP64)
	li	a1, NBPG >> 6			# a1 = 64-byte chunks left

1:	sd	zero, 0(a0)			# try to miss cache first
	sd	zero, 32(a0)
	subu	a1, 1
	sd	zero, 16(a0)
	sd	zero, 48(a0)
	sd	zero, 8(a0)			# fill in cache lines
	sd	zero, 40(a0)
	sd	zero, 24(a0)
	sd	zero, 56(a0)
	bgtz	a1,1b
	addu	a0, 64				# BDSLOT: advance pointer
#else
	/* o32 */
	li	a1, NBPG >> 5			# a1 = 32-byte chunks left

1:	sw	zero, 0(a0)
	sw	zero, 16(a0)			# try to miss cache first
	subu	a1, 1
	sw	zero, 4(a0)
	sw	zero, 8(a0)
	sw	zero, 12(a0)
	sw	zero, 20(a0)
	sw	zero, 24(a0)
	sw	zero, 28(a0)
	bgtz	a1,1b
	addu	a0, 32				# BDSLOT: advance pointer
#endif /* __mips_n32 || _LP64 */
	j	ra
	nop
END(mips_pagezero)
905
906
907#ifndef DDB_TRACE
908
909#if defined(DEBUG) || defined(DDB) || defined(KGDB) || defined(geo)
910/*
 * Stacktrace support hooks which use type punning to access
912 * the caller's registers.
913 */
914
915
916/*
917 * stacktrace() -- print a stack backtrace to the console.
918 *	implicitly accesses caller's a0-a3.
919 */
/*
 * stacktrace() -- print a stack backtrace to the console.
 *	implicitly accesses caller's a0-a3.
 *
 * Builds the 8 stack-passed arguments of
 * stacktrace_subr(a0..a3, pc, sp, fp, ra-slot, printfn) and calls it,
 * passing _C_LABEL(printf) as the output function.
 */
NESTED(stacktrace, CALLFRAME_SIZ+24, ra)
XNESTED(logstacktrace)
	subu	sp, sp, CALLFRAME_SIZ+24	# four arg-passing slots

	move	t0, ra				# save caller's PC
	addu	t1, sp, CALLFRAME_SIZ+24	# compute caller's SP
	move	t2, s8				# non-virtual frame pointer

	la	v0, _C_LABEL(printf)

	sw	ra, 36(sp)			# save return address

	/* a0-a3 are still caller's a0-a3, pass in-place as given. */
	sw	t0, 16(sp)			# push caller's PC
	sw	t1, 20(sp)			# push caller's SP
	sw	t2, 24(sp)			# push caller's FP, in case
	sw	zero, 28(sp)			# caller's RA on stack
	jal	_C_LABEL(stacktrace_subr)
	sw	v0, 32(sp)			# push printf (BDSLOT)

	lw	ra, 36(sp)
	addu	sp, sp, CALLFRAME_SIZ+24
	j	ra
	nop
END(stacktrace)
945#endif	/* DEBUG || DDB */
946#endif	/* DDB_TRACE */
947
	.sdata
	/* End-of-kernel-symbols pointer, filled in at boot. */
	.globl	_C_LABEL(esym)
_C_LABEL(esym):
	.word 0

	/* PRID and FPU ID captured by the startup code above. */
	.globl	_C_LABEL(cpu_id)
	.globl	_C_LABEL(fpu_id)
_C_LABEL(cpu_id):
	.word	0
_C_LABEL(fpu_id):
	.word	0

#ifdef MIPS_DYNAMIC_STATUS_MASK
	/* SR bits permitted by the platform; ANDed in by DYNAMIC_STATUS_MASK. */
	.globl	_C_LABEL(mips_dynamic_status_mask)
_C_LABEL(mips_dynamic_status_mask):
	.word	0xffffffff
#endif
965