xref: /netbsd/sys/arch/mips/mips/locore.S (revision bf9ec67e)
1/*	$NetBSD: locore.S,v 1.139 2002/04/25 06:55:53 simonb Exp $	*/
2
3/*
4 * Copyright (c) 1992, 1993
5 *	The Regents of the University of California.  All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Digital Equipment Corporation and Ralph Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by the University of
21 *	California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * Copyright (C) 1989 Digital Equipment Corporation.
39 * Permission to use, copy, modify, and distribute this software and
40 * its documentation for any purpose and without fee is hereby granted,
41 * provided that the above copyright notice appears in all copies.
42 * Digital Equipment Corporation makes no representations about the
43 * suitability of this software for any purpose.  It is provided "as is"
44 * without express or implied warranty.
45 *
46 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
47 *	v 1.1 89/07/11 17:55:04 nelson Exp  SPRITE (DECWRL)
48 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
49 *	v 9.2 90/01/29 18:00:39 shirriff Exp  SPRITE (DECWRL)
50 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
51 *	v 1.1 89/07/10 14:27:41 nelson Exp  SPRITE (DECWRL)
52 *
53 *	@(#)locore.s	8.5 (Berkeley) 1/4/94
54 */
55
56#include "opt_cputype.h"	/* which mips CPU levels do we support? */
57#include "opt_ddb.h"
58#include "opt_kgdb.h"
59#include "opt_compat_ultrix.h"
60#include "opt_compat_linux.h"
61#include "opt_compat_irix.h"
62#include "opt_ns.h"
63#include "opt_ccitt.h"
64#include "opt_iso.h"
65#include "opt_lockdebug.h"
66#include "fs_coda.h"
67
68#include <sys/cdefs.h>
69
70#include <sys/errno.h>
71#include <sys/syscall.h>
72#ifdef COMPAT_ULTRIX
73#include <compat/ultrix/ultrix_syscall.h>
74#endif
75#ifdef COMPAT_LINUX
76#include <compat/linux/linux_syscall.h>
77#endif
78#ifdef COMPAT_IRIX
79#include <compat/irix/irix_syscall.h>
80#endif
81
82#include <machine/param.h>
83#include <mips/asm.h>
84#include <mips/cpuregs.h>
85#include <mips/trap.h>
86
87#include "assym.h"
88
89	.set	noreorder
90
91	.globl	start
92	.globl	_C_LABEL(kernel_text)		# libkvm refers this
93start:
94_C_LABEL(kernel_text):
95#if defined(MIPS3_PLUS) && !defined(MIPS1)
96	/* keep firmware exception handler until we hook. */
97	mfc0	v0, MIPS_COP_0_STATUS
98	and	v0, MIPS3_SR_DIAG_BEV		# preserve only the BEV bit
99	mtc0	v0, MIPS_COP_0_STATUS		# Disable interrupts
100	COP0_SYNC
101#else
102	mtc0	zero, MIPS_COP_0_STATUS		# Disable interrupts
103	COP0_SYNC
104#endif
105/*
106 * Initialize stack and call machine startup.
107 */
/*
 * Keep the firmware-provided sp only if it already lies at least 4KB
 * below 'start'; if sp is above the kernel text, or too close beneath
 * it, switch to a fresh stack just below 'start' instead.
 */
108	la	v1, start
109	slt	v0, v1, sp			# v0 = (start < sp)?
110	bne	v0, zero, 1f
111	addu	v0, v1, -CALLFRAME_SIZ		# BDSLOT: candidate new sp
112	subu	v0, v1, sp			# v0 = start - sp
113	slt	v0, v0, 4096			# within 4KB of _start
114	beq	v0, zero, 2f			# no: keep firmware sp
115	addu	v0, v1, -CALLFRAME_SIZ		# BDSLOT: candidate new sp
1161:
117	move	sp, v0				# use stack just below 'start'
1182:
119#ifdef __GP_SUPPORT__
120	la	gp, _C_LABEL(_gp)
121#endif
122
123#ifdef NOFPU /* No FPU; avoid touching FPU registers */
124	li	t0, 0				# Disable interrupts and
125	mtc0	t0, MIPS_COP_0_STATUS		# the fp coprocessor
126	COP0_SYNC
127#ifdef HPCMIPS_L1CACHE_DISABLE
128	mfc0	t0, MIPS_COP_0_CONFIG
129	li	t1, 0xfffffff8
130	and	t0, t0, t1			# clear K0 cacheability field
131	or	t0, 0x00000002			# XXX, KSEG0 is uncached
132	mtc0	t0, MIPS_COP_0_CONFIG
133	COP0_SYNC
134#endif /* HPCMIPS_L1CACHE_DISABLE */
135#else
136	mfc0	t0, MIPS_COP_0_STATUS
137	or	t0, MIPS_SR_COP_1_BIT		# Disable interrupts, and
138	mtc0	t0, MIPS_COP_0_STATUS		# enable the fp coprocessor
139	COP0_SYNC
140#endif
141	nop
142	nop
143	mfc0	t0, MIPS_COP_0_PRID		# read product revision ID
144	nop					# XXX r4000 pipeline:
145	nop					# wait for new SR
146	nop					# to be effective
147	nop
148#ifdef NOFPU /* No FPU; avoid touching FPU registers */
149	add	t1, zero, zero			# fake FPU ID of 0
150#else
151	cfc1	t1, MIPS_FPU_ID			# read FPU ID register
152#endif
153	sw	t0, _C_LABEL(cpu_id)		# save PRID register
154	sw	t1, _C_LABEL(fpu_id)		# save FPU ID register
155	jal	_C_LABEL(mach_init)		# mach_init(a0, a1, a2, a3)
156	nop					# BDSLOT
157
158	lw	sp, _C_LABEL(proc0paddr)	# switch to proc0 stack
159	nop					# load delay
160	addu	sp, sp, USPACE - FRAME_SIZ - CALLFRAME_SIZ
161	jal	_C_LABEL(main)			# main(void)
162	nop					# BDSLOT
163	PANIC("main() returned")		# main never returns
164	.set	at
165	.globl _C_LABEL(verylocore)
166_C_LABEL(verylocore):
167
167
168/*
169 * This code is copied to the user's stack for returning from signal handlers
170 * (see sendsig() and sigreturn()). We have to compute the address
171 * of the sigcontext struct for the sigreturn call.
172 *
173 * NB: we cannot profile sigcode(), it executes from userspace.
174 */
175LEAF_NOPROFILE(sigcode)
176	addu	a0, sp, 16		# address of sigcontext
177	li	v0, SYS___sigreturn14	# sigreturn(scp)
178	syscall
179	break	0			# just in case sigreturn fails
180END(sigcode)
181XLEAF(esigcode)			# end marker (presumably used to size
					# the copied trampoline -- see above)
182
183#ifdef COMPAT_ULTRIX
/*
 * Ultrix-compatible signal trampoline; same layout as the native
 * sigcode above, but invokes the Ultrix sigreturn syscall number.
 */
184LEAF_NOPROFILE(ultrix_sigcode)
185	addu	a0, sp, 16		# address of sigcontext
186	li	v0, ULTRIX_SYS_sigreturn	# sigreturn(scp)
187	syscall
188	break	0			# just in case sigreturn fails
189END(ultrix_sigcode)
190XLEAF(ultrix_esigcode)			# end marker for the Ultrix trampoline
191#endif
192
193#ifdef COMPAT_LINUX
194#define SYSCALL_SHIFT 4000		# 4000 shift as in linux_syscall.c
/*
 * Linux-compatible signal trampoline; note the sigframe pointer is
 * passed as sp itself (no offset) and the syscall number is biased
 * by SYSCALL_SHIFT.
 */
195LEAF_NOPROFILE(linux_sigcode)
196	move	a0, sp			# a0 = sigframe
197	li	v0, LINUX_SYS_sigreturn	+ SYSCALL_SHIFT # sigreturn(sf)
198	syscall
199	break	0			# just in case sigreturn fails
200END(linux_sigcode)
201XLEAF(linux_esigcode)			# end marker for the Linux trampoline
202#undef SYSCALL_SHIFT
203#endif
204
205/*
206 * The following primitives manipulate the run queues.  whichqs tells which
207 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
208 * into queues, remrunqueue removes them from queues.  The running process is
209 * on no queue, other processes are on a queue related to p->p_priority,
210 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
211 * available queues.
212 */
213/*
214 * setrunqueue(struct proc *)
215 *
216 * Call should be made at splclock(), and p->p_stat should be SRUN.
217 */
218NESTED(setrunqueue, CALLFRAME_SIZ, ra)
219	.mask	0x80000000, -4
220	subu	sp, sp, CALLFRAME_SIZ
221	lw	t0, P_BACK(a0)		# firewall: p->p_back must be 0
222	sw	ra, CALLFRAME_RA(sp)
223	beq	t0, zero, 1f
224	lbu	t0, P_PRIORITY(a0)	# BDSLOT: put on p->p_priority / 4 queue
225	PANIC("setrunqueue")		# p was already on a queue
2261:
227	li	t1, 1			# compute corresponding bit
228	srl	t0, t0, 2		# compute index into 'whichqs'
229	sll	t1, t1, t0
230	lw	t2, _C_LABEL(sched_whichqs)	# set corresponding bit
231	nop				# load delay
232	or	t2, t2, t1
233	sw	t2, _C_LABEL(sched_whichqs)
234	sll	t0, t0, 3		# compute index into 'qs'
235	la	t1, _C_LABEL(sched_qs)
236	addu	t0, t0, t1		# t0 = qp = &qs[pri >> 2]
	# Insert p at the tail of the doubly-linked queue headed at qp.
237	lw	t1, P_BACK(t0)		# t1 = qp->ph_rlink
238	sw	t0, P_FORW(a0)		# p->p_forw = qp
239	sw	t1, P_BACK(a0)		# p->p_back = qp->ph_rlink
240	sw	a0, P_FORW(t1)		# p->p_back->p_forw = p;
241	sw	a0, P_BACK(t0)		# qp->ph_rlink = p
242	j	ra
243	addu	sp, sp, CALLFRAME_SIZ	# BDSLOT: pop frame
244END(setrunqueue)
245
246/*
247 * remrunqueue(struct proc *)
248 *
249 * Call should be made at splclock().
250 */
251NESTED(remrunqueue, CALLFRAME_SIZ, ra)
252	.mask	0x80000000, -4
253	subu	sp, sp, CALLFRAME_SIZ
254	lbu	t0, P_PRIORITY(a0)	# get from p->p_priority / 4 queue
255	li	t1, 1			# compute corresponding bit
256	srl	t0, t0, 2		# compute index into 'whichqs'
257	lw	t2, _C_LABEL(sched_whichqs)	# check corresponding bit
258	sll	t1, t1, t0
259	and	v0, t2, t1
260	sw	ra, CALLFRAME_RA(sp)
261	bne	v0, zero, 1f
262	lw	v0, P_BACK(a0)		# BDSLOT: v0 = p->p_back
263	PANIC("remrunqueue")		# it wasn't recorded to be on its q
2641:
	# Unlink p from its queue.
265	lw	v1, P_FORW(a0)		# v1 = p->p_forw
266	nop				# load delay
267	sw	v1, P_FORW(v0)		# p->p_back->p_forw = p->p_forw;
268	sw	v0, P_BACK(v1)		# p->p_forw->p_back = p->p_back
269	sll	t0, t0, 3		# compute index into 'qs'
270	la	v0, _C_LABEL(sched_qs)
271	addu	t0, t0, v0		# t0 = qp = &qs[pri >> 2]
272	lw	v0, P_FORW(t0)		# check if queue empty
273	nop				# load delay
274	bne	v0, t0, 2f		# No. qp->ph_link != qp
275	nop				# BDSLOT
276	xor	t2, t2, t1		# clear corresponding bit in 'whichqs'
277	sw	t2, _C_LABEL(sched_whichqs)
2782:
279	sw	zero, P_BACK(a0)	# for firewall checking
280	j	ra
281	addu	sp, sp, CALLFRAME_SIZ	# BDSLOT: pop frame
282END(remrunqueue)
283
284/*
285 * When no processes are on the runq, cpu_switch branches to idle
286 * to wait for something to come ready.
287 * Note: this is really a part of cpu_switch() but defined here for kernel
288 * profiling.
289 */
290LEAF(mips_idle)
291#ifdef 	IPL_ICU_MASK
292	# all interrupts enable.
293	sw	zero, _C_LABEL(md_imask)
294	jal	_C_LABEL(md_imask_update)
295	nop				# BDSLOT
296#endif
297	li	t0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
298	DYNAMIC_STATUS_MASK(t0,t1)		# machine dependent masking
299	mtc0	t0, MIPS_COP_0_STATUS		# enable all interrupts
300	COP0_SYNC
301	nop
302	sw	zero, _C_LABEL(curproc)		# set curproc NULL for stats
303#if defined(LOCKDEBUG)
304	jal	_C_LABEL(sched_unlock_idle)	# release sched_lock
305	nop				# BDSLOT
306#endif
307
308	/* Try to zero some free pages. */
309	lw	t0, _C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO
310	nop				# load delay
311	beq	t0, zero, 1f		# nothing to zero; just poll whichqs
312	nop				# BDSLOT
313	jal	_C_LABEL(uvm_pageidlezero)
314	nop				# BDSLOT
3151:
316#ifdef MIPS_DYNAMIC_STATUS_MASK
317	# Do this again since the mask may have changed.
318	li	t3, (MIPS_INT_MASK | MIPS_SR_INT_IE)
319	DYNAMIC_STATUS_MASK(t3,t1)		# machine dependent masking
320	mtc0	t3, MIPS_COP_0_STATUS		# enable all interrupts
321	COP0_SYNC
322	nop
323#endif
	# Spin until some run queue becomes non-empty.
324	lw	t0, _C_LABEL(sched_whichqs)	# look for non-empty queue
325	nop				# load delay
326	beq	t0, zero, 1b
327	nop				# BDSLOT
328#if defined(LOCKDEBUG)
329	mtc0	zero, MIPS_COP_0_STATUS		# disable all interrupts
330	COP0_SYNC
331	nop
332	nop
333	nop
334	nop
335	jal	_C_LABEL(sched_lock_idle)	# acquire sched_lock
336	nop				# BDSLOT
	# Return into the queue-scan portion of cpu_switch().
337	la	ra, cpu_switch_queuescan
338	j	ra
339	nop				# BDSLOT
340#else
341	mtc0	zero, MIPS_COP_0_STATUS		# disable all interrupts
342	COP0_SYNC
343	nop
344	nop
345	nop
	# Return into the queue-scan portion of cpu_switch().
346	la	ra, cpu_switch_queuescan
347	j	ra
348	nop				# BDSLOT
349#endif
350END(mips_idle)
351
352/*
353 * cpu_switch(struct proc *)
354 * Find the highest priority process and resume it.
355 */
356NESTED(cpu_switch, CALLFRAME_SIZ, ra)
	# Save the outgoing process's callee-saved state into its PCB
	# (a0 = p; after the first load, a0 = p->p_addr = PCB).
357	lw	a0, P_ADDR(a0)
358	mfc0	t0, MIPS_COP_0_STATUS
359	REG_PROLOGUE
360	REG_S	s0, U_PCB_CONTEXT+SF_REG_S0(a0)
361	REG_S	s1, U_PCB_CONTEXT+SF_REG_S1(a0)
362	REG_S	s2, U_PCB_CONTEXT+SF_REG_S2(a0)
363	REG_S	s3, U_PCB_CONTEXT+SF_REG_S3(a0)
364	REG_S	s4, U_PCB_CONTEXT+SF_REG_S4(a0)
365	REG_S	s5, U_PCB_CONTEXT+SF_REG_S5(a0)
366	REG_S	s6, U_PCB_CONTEXT+SF_REG_S6(a0)
367	REG_S	s7, U_PCB_CONTEXT+SF_REG_S7(a0)
368	REG_S	sp, U_PCB_CONTEXT+SF_REG_SP(a0)
369	REG_S	s8, U_PCB_CONTEXT+SF_REG_S8(a0)
370	REG_S	ra, U_PCB_CONTEXT+SF_REG_RA(a0)
371	REG_S	t0, U_PCB_CONTEXT+SF_REG_SR(a0)
372#ifdef IPL_ICU_MASK
373	lw	t0, _C_LABEL(md_imask)
374	sw	t0, U_PCB_PPL(a0)
375#endif
376	REG_EPILOGUE
377	subu	sp, sp, CALLFRAME_SIZ
378	sw	ra, CALLFRAME_RA(sp)
379	.mask	0x80000000, -4
380/*
381 * Entered here from idle() and switch_exit().  Interrupts are
382 * blocked, and sched_lock is held.
383 */
384	.globl	cpu_switch_queuescan
385cpu_switch_queuescan:
386	lw	t0, _C_LABEL(sched_whichqs)	# look for non-empty queue
387	li	t2, -1				# t2 = lowest bit set
388	bne	t0, zero, 1f
389	lw	t3, _C_LABEL(mips_locoresw) + MIPSX_CPU_IDLE	# BDSLOT
390	nop					# for r2000/r3000
391	jal	ra, t3				# no work: call md idle routine
392	nop					# BDSLOT
3931:
394	move	t3, t0				# t3 = saved whichqs
3951:
396#if defined(MIPS3_5900)	/* work around for branch prediction miss. */
397	nop
398	nop
399	nop
400#endif
	# Find the lowest set bit: on exit t2 = index of first non-empty queue.
401	addu	t2, t2, 1
402	and	t1, t0, 1			# bit set?
403	beq	t1, zero, 1b
404	srl	t0, t0, 1			# BDSLOT: try next bit
405/*
406 * Remove process from queue.
407 */
408	sll	t0, t2, 3
409	la	t1, _C_LABEL(sched_qs)
410	addu	t0, t0, t1			# t0 = qp = &qs[highbit]
411	lw	a0, P_FORW(t0)			# a0 = p = highest pri process
412	nop					# load delay
413	lw	v0, P_FORW(a0)			# v0 = p->p_forw
414	bne	t0, a0, 2f			# make sure something in queue
415	sw	v0, P_FORW(t0)			# BDSLOT: qp->ph_link = p->p_forw;
416	PANIC("cpu_switch")			# nothing in queue
4172:
418	sw	t0, P_BACK(v0)			# p->p_forw->p_back = qp
419	bne	v0, t0, 3f			# queue still not empty
420	sw	zero, P_BACK(a0)		# BDSLOT: for firewall checking
421	li	v1, 1				# compute bit in 'whichqs'
422	sll	v1, v1, t2
423	xor	t3, t3, v1			# clear bit in 'whichqs'
424	sw	t3, _C_LABEL(sched_whichqs)
4253:
426	/* Squirrel away proc pointer. */
427	move	s7, a0				# s7 = p (survives calls below)
428#if defined(LOCKDEBUG)
429	/*
430	 * Done mucking with the run queues, release the
431	 * scheduler lock, but keep interrupts out.
432	 */
433	jal	_C_LABEL(sched_unlock_idle)
434	nop					# BDSLOT
435	move	a0, s7				# restore proc
436#endif
437
438/*
439 * Switch to new context.
440 */
441#if defined(MULTIPROCESSOR)
442	/*
443	 * XXXSMP
444	 * p->p_cpu = curcpu();
445	 */
446#endif
447	li	v1, SONPROC			# p->p_stat = SONPROC
448	sb	v1, P_STAT(a0)
449	lw	t2, _C_LABEL(mips_locoresw) + MIPSX_CPU_SWITCH_RESUME
450	sw	a0, _C_LABEL(curproc)
451	jal	ra, t2				# md switch-resume hook
452	nop					# BDSLOT
453
454#if 1	/* XXX XXX XXX */
	# Run the rest on the incoming process's saved kernel stack.
455	REG_PROLOGUE
456	lw	a0, P_ADDR(s7)
457	nop					# load delay
458	REG_L	sp, U_PCB_CONTEXT+SF_REG_SP(a0)
459	nop
460	REG_EPILOGUE
461#endif	/* XXX XXX XXX */
462
463	jal	_C_LABEL(pmap_activate)		# activate p's address space
464	move	a0, s7				# -BDSLOT-
465
466	lw	a0, P_ADDR(s7)
467	sw	zero, _C_LABEL(want_resched)	# we've context switched
468	sw	a0, _C_LABEL(curpcb)
469
470#ifdef IPL_ICU_MASK
471	# restore ICU state
472	lw	t0, U_PCB_PPL(a0)
473	sw	t0, _C_LABEL(md_imask)
474	jal	_C_LABEL(md_imask_update)
475	nop					# BDSLOT
476	lw	a0, P_ADDR(s7)			# restore pcb_context pointer.
477#endif /* IPL_ICU_MASK */
	# Reload the incoming process's callee-saved state from its PCB.
478	REG_PROLOGUE
479	REG_L	v0, U_PCB_CONTEXT+SF_REG_SR(a0)
480	DYNAMIC_STATUS_MASK(v0,ra)		# machine dependent masking
481	REG_L	ra, U_PCB_CONTEXT+SF_REG_RA(a0)
482	REG_L	s0, U_PCB_CONTEXT+SF_REG_S0(a0)
483	REG_L	s1, U_PCB_CONTEXT+SF_REG_S1(a0)
484	REG_L	s2, U_PCB_CONTEXT+SF_REG_S2(a0)
485	REG_L	s3, U_PCB_CONTEXT+SF_REG_S3(a0)
486	REG_L	s4, U_PCB_CONTEXT+SF_REG_S4(a0)
487	REG_L	s5, U_PCB_CONTEXT+SF_REG_S5(a0)
488	REG_L	s6, U_PCB_CONTEXT+SF_REG_S6(a0)
489	REG_L	s7, U_PCB_CONTEXT+SF_REG_S7(a0)
490	REG_L	sp, U_PCB_CONTEXT+SF_REG_SP(a0)
491	REG_L	s8, U_PCB_CONTEXT+SF_REG_S8(a0)
492	REG_EPILOGUE
493	mtc0	v0, MIPS_COP_0_STATUS		# restore saved SR
494	COP0_SYNC
495	j	ra
496	li	v0, 1				# BDSLOT: possible return to 'savectx()'
497END(cpu_switch)
498
499/*
500 * switch_exit(struct proc *)
501 *
502 * Make the named process exit.  Switch SP to proc0 stack, then
503 * call exit2() to schedule exiting proc's vmspace and stack to be
504 * released by the reaper thread.  MUST BE CALLED AT SPLHIGH.
505 */
506LEAF(switch_exit)
507	lw	v0, _C_LABEL(proc0paddr)	# get proc0 p_addr
508	nop					# load delay
509	sw	v0, _C_LABEL(curpcb)		# set current pcb
510	REG_PROLOGUE
511	REG_L	sp, U_PCB_CONTEXT+SF_REG_SP(v0)	# restore stack pointer
512	REG_EPILOGUE
513	jal	_C_LABEL(exit2)			# proc already in a0
514	nop					# BDSLOT
515#if defined(LOCKDEBUG)
516	jal	_C_LABEL(sched_lock_idle)	# acquire sched_lock
517	nop					# BDSLOT
518#endif
	# Never returns here: jump into cpu_switch()'s queue scan.
519	la	ra, cpu_switch_queuescan	# rathole to cpu_switch()
520	j	ra
521	sub	sp, sp, CALLFRAME_SIZ		#BDSlot: set stack call frame
522END(switch_exit)
523
524/*
525 * savectx(struct user *up)
526 */
527LEAF(savectx)
	# Save callee-saved registers, sp, ra and SR into up->u_pcb context.
	# Returns 0 here; a later cpu_switch() back to this context resumes
	# at the saved ra with v0 == 1 (see end of cpu_switch).
528	mfc0	v0, MIPS_COP_0_STATUS
529	REG_PROLOGUE
530	REG_S	s0, U_PCB_CONTEXT+SF_REG_S0(a0)
531	REG_S	s1, U_PCB_CONTEXT+SF_REG_S1(a0)
532	REG_S	s2, U_PCB_CONTEXT+SF_REG_S2(a0)
533	REG_S	s3, U_PCB_CONTEXT+SF_REG_S3(a0)
534	REG_S	s4, U_PCB_CONTEXT+SF_REG_S4(a0)
535	REG_S	s5, U_PCB_CONTEXT+SF_REG_S5(a0)
536	REG_S	s6, U_PCB_CONTEXT+SF_REG_S6(a0)
537	REG_S	s7, U_PCB_CONTEXT+SF_REG_S7(a0)
538	REG_S	sp, U_PCB_CONTEXT+SF_REG_SP(a0)
539	REG_S	s8, U_PCB_CONTEXT+SF_REG_S8(a0)
540	REG_S	ra, U_PCB_CONTEXT+SF_REG_RA(a0)
541	REG_S	v0, U_PCB_CONTEXT+SF_REG_SR(a0)
542	REG_EPILOGUE
543	j	ra
544	move	v0, zero			# BDSLOT: return 0
545END(savectx)
546
547#if defined(DDB) || defined(KGDB)
548/*
549 * setjmp(label_t *)
550 * longjmp(label_t *)
551 */
552LEAF(setjmp)
	# Save callee-saved registers, sp, ra and SR into the label_t at a0.
	# Returns 0; longjmp() to the same label_t returns 1 at the saved ra.
553	mfc0	v0, MIPS_COP_0_STATUS
554	REG_PROLOGUE
555	REG_S	s0, SF_REG_S0(a0)
556	REG_S	s1, SF_REG_S1(a0)
557	REG_S	s2, SF_REG_S2(a0)
558	REG_S	s3, SF_REG_S3(a0)
559	REG_S	s4, SF_REG_S4(a0)
560	REG_S	s5, SF_REG_S5(a0)
561	REG_S	s6, SF_REG_S6(a0)
562	REG_S	s7, SF_REG_S7(a0)
563	REG_S	sp, SF_REG_SP(a0)
564	REG_S	s8, SF_REG_S8(a0)
565	REG_S	ra, SF_REG_RA(a0)
566	REG_S	v0, SF_REG_SR(a0)
567	REG_EPILOGUE
568	j	ra
569	move	v0, zero			# BDSLOT: return 0
570END(setjmp)
571
572LEAF(longjmp)
	# Restore the context saved by setjmp() from the label_t at a0 and
	# "return" 1 at setjmp's call site (via the saved ra).
573	REG_PROLOGUE
574	REG_L	v0, SF_REG_SR(a0)
575	DYNAMIC_STATUS_MASK(v0,ra)		# machine dependent masking
576	REG_L	ra, SF_REG_RA(a0)
577	REG_L	s0, SF_REG_S0(a0)
578	REG_L	s1, SF_REG_S1(a0)
579	REG_L	s2, SF_REG_S2(a0)
580	REG_L	s3, SF_REG_S3(a0)
581	REG_L	s4, SF_REG_S4(a0)
582	REG_L	s5, SF_REG_S5(a0)
583	REG_L	s6, SF_REG_S6(a0)
584	REG_L	s7, SF_REG_S7(a0)
585	REG_L	sp, SF_REG_SP(a0)
586	REG_L	s8, SF_REG_S8(a0)
587	REG_EPILOGUE
588	mtc0	v0, MIPS_COP_0_STATUS		# restore saved SR
589	COP0_SYNC
590	j	ra
591	li	v0, 1				# BDSLOT: setjmp returns 1
592END(longjmp)
593#endif
594
595
596/*
597 * MIPS processor interrupt control
598 *
599 * Used as building blocks for spl(9) kernel interface.
600 */
601LEAF(_splraise)
602XLEAF(_splraise_noprof)				# does not get mcount hooks
	# Block the interrupts whose SR mask bits are set in a0 (clears those
	# bits from SR, leaving everything else, incl. already-blocked bits).
	# Returns the previous SR & (MIPS_INT_MASK | MIPS_SR_INT_IE).
603	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
604	and	a0, a0, MIPS_INT_MASK		# extract INT bits
605	nor	a0, zero, a0			# bitwise inverse of A0
606	and	a0, a0, v0			# disable retaining other bits
607	DYNAMIC_STATUS_MASK(a0,t0)		# machine dependent masking
608	mtc0	a0, MIPS_COP_0_STATUS		# store back
609	COP0_SYNC
610	and	v0, v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
611	j	ra
612	nop					# BDSLOT
613END(_splraise)
614
615LEAF(_spllower)
	# Set SR's INT-mask bits to exactly the complement of a0's INT bits
	# (unlike _splraise, this can re-enable interrupt lines).
	# Returns the previous SR & (MIPS_INT_MASK | MIPS_SR_INT_IE).
616	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
617	li	v1, ~MIPS_INT_MASK
618	and	v1, v0, v1			# turn off INT bit
619	nor	a0, zero, a0			# bitwise inverse of A0
620	and	a0, a0, MIPS_INT_MASK		# extract INT bits
621	or	a0, a0, v1			# disable making other bits on
622	DYNAMIC_STATUS_MASK(a0,t0)		# machine dependent masking
623	mtc0	a0, MIPS_COP_0_STATUS		# store back
624	COP0_SYNC
625	and	v0, v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
626	j	ra
627	nop					# BDSLOT
628END(_spllower)
629
630LEAF(_splrestore)
	# Replace SR's INT-mask bits with a0's INT bits (a previously
	# returned value); other SR bits are preserved.
	# Returns the previous SR & MIPS_INT_MASK.
631	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
632	and	a0, a0, MIPS_INT_MASK
633	li	v1, ~MIPS_INT_MASK
634	and	v1, v1, v0			# turn off every INT bit
635	or	v1, v1, a0			# set old INT bits
636	DYNAMIC_STATUS_MASK(v1,t0)		# machine dependent masking
637	mtc0	v1, MIPS_COP_0_STATUS		# store back
638	COP0_SYNC
639	and	v0, v0, MIPS_INT_MASK
640	j	ra
641	nop					# BDSLOT
642END(_splrestore)
643
644LEAF(_splset)
645XLEAF(_splset_noprof)				# does not get mcount hooks
	# Like _splrestore, but also replaces the global interrupt-enable
	# (MIPS_SR_INT_IE) bit from a0.
	# Returns the previous SR & (MIPS_INT_MASK | MIPS_SR_INT_IE).
646	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
647	and	a0, a0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
648	li	v1, ~(MIPS_INT_MASK | MIPS_SR_INT_IE)
649	and	v1, v1, v0			# turn off every INT bit
650	or	v1, v1, a0			# set old INT bits
651	DYNAMIC_STATUS_MASK(v1,t0)		# machine dependent masking
652	mtc0	v1, MIPS_COP_0_STATUS		# store back
653	COP0_SYNC
654	and	v0, v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
655	j	ra
656	nop					# BDSLOT
657END(_splset)
658
659LEAF(_splget)
	# Return the current SR & (MIPS_INT_MASK | MIPS_SR_INT_IE)
	# without modifying anything.
660	mfc0	v0, MIPS_COP_0_STATUS		# fetch status register
661	and	v0, v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
662	j	ra
663	nop					# BDSLOT
664END(_splget)
665
666LEAF(_setsoftintr)
	# Atomically OR the soft-interrupt bits in a0 into the CAUSE
	# register (interrupts disabled around the read-modify-write).
667	mfc0	v1, MIPS_COP_0_STATUS		# save status register
668	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts (2 cycles)
669	COP0_SYNC
670	nop
671	nop
672	mfc0	v0, MIPS_COP_0_CAUSE		# fetch cause register
673	nop
674	or	v0, v0, a0			# set soft intr. bits
675	mtc0	v0, MIPS_COP_0_CAUSE		# store back
676	COP0_SYNC
677	mtc0	v1, MIPS_COP_0_STATUS		# enable interrupts
678	COP0_SYNC
679	j	ra
680	nop					# BDSLOT
681END(_setsoftintr)
682
683LEAF(_clrsoftintr)
	# Atomically clear the soft-interrupt bits in a0 from the CAUSE
	# register (interrupts disabled around the read-modify-write).
684	mfc0	v1, MIPS_COP_0_STATUS		# save status register
685	mtc0	zero, MIPS_COP_0_STATUS		# disable interrupts (2 cycles)
686	COP0_SYNC
687	nop
688	nop
689	mfc0	v0, MIPS_COP_0_CAUSE		# fetch cause register
690	nor	a0, zero, a0			# bitwise inverse of A0
691	and	v0, v0, a0			# clear soft intr. bits
692	mtc0	v0, MIPS_COP_0_CAUSE		# store back
693	COP0_SYNC
694	mtc0	v1, MIPS_COP_0_STATUS		# enable interrupts
695	COP0_SYNC
696	j	ra
697	nop					# BDSLOT
698END(_clrsoftintr)
699
700LEAF(_splnone)
	# Drop to the lowest priority: clear pending soft interrupts in
	# CAUSE, then unmask and enable every interrupt source in SR.
701	mtc0	zero, MIPS_COP_0_CAUSE		# clear SOFT_INT bits
702	COP0_SYNC
703	li	v0, (MIPS_INT_MASK | MIPS_SR_INT_IE)
704	DYNAMIC_STATUS_MASK(v0,t0)		# machine dependent masking
705	mtc0	v0, MIPS_COP_0_STATUS		# enable all sources
706	COP0_SYNC
707	nop
708	j	ra
709	nop					# BDSLOT
710END(_splnone)
711
712
713/*
714 * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
715 * Copy a NIL-terminated string, at most maxlen characters long.  Return the
716 * number of characters copied (including the NIL) in *lencopied.  If the
717 * string is too long, return ENAMETOOLONG; else return 0.
718 */
719LEAF(copystr)
	# a0 = src, a1 = dst, a2 = maxlen, a3 = &lencopied (may be 0).
	# t0 holds the original maxlen so t0 - a2 = bytes copied.
720	move	t0, a2
721	beq	a2, zero, 4f			# maxlen == 0: ENAMETOOLONG
7221:
723	lbu	v0, 0(a0)			# BDSLOT on first pass
724	subu	a2, a2, 1
725	beq	v0, zero, 2f			# hit NIL: done, v0 == 0
726	sb	v0, 0(a1)			# each byte until NIL
727	addu	a0, a0, 1
728	bne	a2, zero, 1b			# less than maxlen
729	addu	a1, a1, 1			# BDSLOT
7304:
731	li	v0, ENAMETOOLONG		# run out of space
7322:
733	beq	a3, zero, 3f			# return num. of copied bytes
734	subu	a2, t0, a2			# if the 4th arg was non-NULL
735	sw	a2, 0(a3)
7363:
737	j	ra				# v0 is 0 or ENAMETOOLONG
738	nop					# BDSLOT
739END(copystr)
740
741/*
742 * int copyinstr(void *uaddr, void *kaddr, size_t maxlen, size_t *lencopied)
743 * Copy a NIL-terminated string, at most maxlen characters long, from the
744 * user's address space.  Return the number of characters copied (including
745 * the NIL) in *lencopied.  If the string is too long, return ENAMETOOLONG;
746 * else return 0 or EFAULT.
747 */
748LEAF(copyinstr)
	# Same loop as copystr, but the source is a user address: install
	# copystrerr as the pcb_onfault handler and reject kernel-space
	# (sign-bit set) source addresses up front.
749	lw	v1, _C_LABEL(curpcb)
750	la	v0, _C_LABEL(copystrerr)
751	blt	a0, zero, _C_LABEL(copystrerr)	# kernel address: EFAULT
752	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
753	move	t0, a2				# t0 = original maxlen
754	beq	a2, zero, 4f
7551:
756	lbu	v0, 0(a0)			# may fault -> copystrerr
757	subu	a2, a2, 1
758	beq	v0, zero, 2f			# hit NIL: done, v0 == 0
759	sb	v0, 0(a1)
760	addu	a0, a0, 1
761	bne	a2, zero, 1b
762	addu	a1, a1, 1			# BDSLOT
7634:
764	li	v0, ENAMETOOLONG
7652:
766	beq	a3, zero, 3f
767	subu	a2, t0, a2			# bytes copied
768	sw	a2, 0(a3)
7693:
770	j	ra				# v0 is 0 or ENAMETOOLONG
771	sw	zero, U_PCB_ONFAULT(v1)		# BDSLOT: disarm fault handler
772END(copyinstr)
773
774/*
775 * int copyoutstr(void *uaddr, void *kaddr, size_t maxlen, size_t *lencopied);
776 * Copy a NIL-terminated string, at most maxlen characters long, into the
777 * user's address space.  Return the number of characters copied (including
778 * the NIL) in *lencopied.  If the string is too long, return ENAMETOOLONG;
779 * else return 0 or EFAULT.
780 */
781LEAF(copyoutstr)
	# Same loop as copystr, but the destination is a user address:
	# install copystrerr as the pcb_onfault handler and reject
	# kernel-space (sign-bit set) destination addresses up front.
782	lw	v1, _C_LABEL(curpcb)
783	la	v0, _C_LABEL(copystrerr)
784	blt	a1, zero, _C_LABEL(copystrerr)	# kernel address: EFAULT
785	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
786	move	t0, a2				# t0 = original maxlen
787	beq	a2, zero, 4f
7881:
789	lbu	v0, 0(a0)
790	subu	a2, a2, 1
791	beq	v0, zero, 2f			# hit NIL: done, v0 == 0
792	sb	v0, 0(a1)			# may fault -> copystrerr
793	addu	a0, a0, 1
794	bne	a2, zero, 1b
795	addu	a1, a1, 1			# BDSLOT
7964:
797	li	v0, ENAMETOOLONG
7982:
799	beq	a3, zero, 3f
800	subu	a2, t0, a2			# bytes copied
801	sw	a2, 0(a3)
8023:
803	j	ra				# v0 is 0 or ENAMETOOLONG
804	sw	zero, U_PCB_ONFAULT(v1)		# BDSLOT: disarm fault handler
805END(copyoutstr)
806
807LEAF(copystrerr)
	# Fault handler for copyinstr/copyoutstr; entered with v1 = curpcb.
	# Disarms pcb_onfault and returns EFAULT to the original caller.
808	sw	zero, U_PCB_ONFAULT(v1)
809	j	ra
810	li	v0, EFAULT			# BDSLOT: return EFAULT
811END(copystrerr)
812
813/*
814 * kcopy(const void *src, void *dst, size_t len);
815 *
816 * Copy len bytes from src to dst, aborting if we encounter a fatal
817 * page fault.
818 *
819 * kcopy() _must_ save and restore the old fault handler since it is
820 * called by uiomove(), which may be in the path of servicing a non-fatal
821 * page fault.
822 */
823NESTED(kcopy, 48, ra)
824	subu	sp, sp, 48			# set up stack frame
825	/* Frame contains RA (31) and S0 (16). */
826	.mask	0x80010000, -4
827	sw	ra, 44(sp)			# save ra
828	sw	s0, 32(sp)			# save s0
829	move	v0, a0				# swap a0, a1 for call to memcpy
830	move	a0, a1				# (memcpy wants dst first)
831	move	a1, v0
832	lw	v1, _C_LABEL(curpcb)		# set up fault handler
833	la	v0, _C_LABEL(kcopyerr)
834	lw	s0, U_PCB_ONFAULT(v1)		# save old handler
835	jal	memcpy
836	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm kcopyerr handler
837
838	lw	v1, _C_LABEL(curpcb)		# restore the old handler
839	lw	ra, 44(sp)			# restore ra
840	sw	s0, U_PCB_ONFAULT(v1)
841	lw	s0, 32(sp)			# restore s0
842	addu	sp, sp, 48			# kill stack frame
843	j	ra
844	move	v0, zero			# BDSLOT: success!
845END(kcopy)
846
847LEAF(kcopyerr)
	# Fault handler for kcopy; unwinds kcopy's frame (s0 = saved old
	# pcb_onfault handler, re-installed here) and returns EFAULT.
848	lw	v1, _C_LABEL(curpcb)		# restore the old handler
849	lw	ra, 44(sp)			# restore ra
850	sw	s0, U_PCB_ONFAULT(v1)
851	lw	s0, 32(sp)			# restore s0
852	addu	sp, sp, 48			# kill stack frame
853	j	ra
854	li	v0, EFAULT			# BDSLOT: return EFAULT
855END(kcopyerr)
856
857/*
858 * int copyin(void *uaddr, void *kaddr, size_t len)
859 * Copies len bytes of data from the user-space address uaddr to the
860 * kernel-space address kaddr.  copyin returns 0 on success or EFAULT
861 * if a bad address is encountered.
862 */
863NESTED(copyin, CALLFRAME_SIZ, ra)
864	subu	sp, sp, CALLFRAME_SIZ
865	.mask	0x80000000, -4
866	sw	ra, CALLFRAME_RA(sp)
867	blt	a0, zero, _C_LABEL(copyerr)	# reject kernel-space src addr
868	move	v0, a0				# swap a0, a1 for call to memcpy
869	move	a0, a1				# (memcpy wants dst first)
870	move	a1, v0
871	lw	v1, _C_LABEL(curpcb)
872	la	v0, _C_LABEL(copyerr)
873	jal	memcpy
874	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm copyerr handler
875
876	lw	v1, _C_LABEL(curpcb)
877	lw	ra, CALLFRAME_RA(sp)
878	addu	sp, sp, CALLFRAME_SIZ
879	sw	zero, U_PCB_ONFAULT(v1)		# disarm fault handler
880	j	ra
881	move	v0, zero			# BDSLOT: success
882END(copyin)
883
884/*
885 * int copyout(void *kaddr, void *uaddr, size_t len)
886 * Copies len bytes of data from the kernel-space address kaddr to the
887 * user-space address uaddr.  copyout returns 0 on success or EFAULT
888 * if a bad address is encountered.
889 */
890NESTED(copyout, CALLFRAME_SIZ, ra)
891	subu	sp, sp, CALLFRAME_SIZ
892	.mask	0x80000000, -4
893	sw	ra, CALLFRAME_RA(sp)
894	blt	a1, zero, _C_LABEL(copyerr)	# reject kernel-space dst addr
895	move	v0, a0				# swap a0, a1 for call to memcpy
896	move	a0, a1				# (memcpy wants dst first)
897	move	a1, v0
898	lw	v1, _C_LABEL(curpcb)
899	la	v0, _C_LABEL(copyerr)
900	jal	memcpy
901	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm copyerr handler
902
903	lw	v1, _C_LABEL(curpcb)
904	lw	ra, CALLFRAME_RA(sp)
905	addu	sp, sp, CALLFRAME_SIZ
906	sw	zero, U_PCB_ONFAULT(v1)		# disarm fault handler
907	j	ra
908	move	v0, zero			# BDSLOT: success
909END(copyout)
910
911LEAF(copyerr)
	# Fault handler for copyin/copyout; pops their call frame, disarms
	# pcb_onfault, and returns EFAULT to the original caller.
912	lw	v1, _C_LABEL(curpcb)
913	lw	ra, CALLFRAME_RA(sp)
914	addu	sp, sp, CALLFRAME_SIZ
915	sw	zero, U_PCB_ONFAULT(v1)
916	j	ra
917	li	v0, EFAULT			# BDSLOT: return EFAULT
918END(copyerr)
919
920/*
921 * int fuswintr(void *)
922 * Fetches a short word of data from the user-space address.
923 * This function is safe to call during an interrupt context.
924 */
925LEAF(fuswintr)
	# Interrupt-safe variant: saves the current pcb_onfault in a2 and
	# arms fswintrberr (which trap.c recognizes and bails out on).
926	lw	v1, _C_LABEL(curpcb)
927	la	v0, _C_LABEL(fswintrberr)
928	lw	a2, U_PCB_ONFAULT(v1)		# a2 = old handler
929	blt	a0, zero, _C_LABEL(fswintrberr)	# reject kernel address
930	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm handler
931	lhu	v0, 0(a0)			# fetch short
932	j	ra
933	sw	a2, U_PCB_ONFAULT(v1)		# BDSLOT: restore old handler
934END(fuswintr)
935
936/*
937 * int suswintr(void *, short);
938 * Stores a short word of data to the user-space address.
939 * This function is safe to call during an interrupt context.
940 */
941LEAF(suswintr)
	# Interrupt-safe variant: saves the current pcb_onfault in a2 and
	# arms fswintrberr (which trap.c recognizes and bails out on).
942	lw	v1, _C_LABEL(curpcb)
943	la	v0, _C_LABEL(fswintrberr)
944	lw	a2, U_PCB_ONFAULT(v1)		# a2 = old handler
945	blt	a0, zero, _C_LABEL(fswintrberr)	# reject kernel address
946	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm handler
947	sh	a1, 0(a0)			# store short
948	sw	a2, U_PCB_ONFAULT(v1)		# restore old handler
949	j	ra
950	move	v0, zero			# BDSLOT: success
951END(suswintr)
952
953/*
954 * int fuword(void *)
955 * Fetches a word of data from the user-space address.
956 */
957LEAF(fuword)
958XLEAF(fuiword)
	# Fetch a word from user space; a fault vectors to fswberr (-1).
959	lw	v1, _C_LABEL(curpcb)
960	la	v0, _C_LABEL(fswberr)
961	blt	a0, zero, _C_LABEL(fswberr)	# reject kernel address
962	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
963	lw	v0, 0(a0)			# fetch word
964	j	ra
965	sw	zero, U_PCB_ONFAULT(v1)		# BDSLOT: disarm fault handler
966END(fuword)
967
968/*
969 * int fusword(void *)
970 * Fetches a short word of data from the user-space address.
971 */
972LEAF(fusword)
973XLEAF(fuisword)
	# Fetch a short from user space; a fault vectors to fswberr (-1).
974	lw	v1, _C_LABEL(curpcb)
975	la	v0, _C_LABEL(fswberr)
976	blt	a0, zero, _C_LABEL(fswberr)	# reject kernel address
977	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
978	lhu	v0, 0(a0)			# fetch short
979	j	ra
980	sw	zero, U_PCB_ONFAULT(v1)		# BDSLOT: disarm fault handler
981END(fusword)
982
983/*
984 * int fubyte(void *)
985 * Fetch a byte from the user's address space.
986 */
987LEAF(fubyte)
988XLEAF(fuibyte)
	# Fetch a byte from user space; a fault vectors to fswberr (-1).
989	lw	v1, _C_LABEL(curpcb)
990	la	v0, _C_LABEL(fswberr)
991	blt	a0, zero, _C_LABEL(fswberr)	# reject kernel address
992	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
993	lbu	v0, 0(a0)			# fetch byte
994	j	ra
995	sw	zero, U_PCB_ONFAULT(v1)		# BDSLOT: disarm fault handler
996END(fubyte)
997
998/*
999 * int suword(void *, int)
1000 * Stores a word of data to the user-space address.
1001 */
1002LEAF(suword)
	# Store a word to user space; a fault vectors to fswberr (-1).
1003	lw	v1, _C_LABEL(curpcb)
1004	la	v0, _C_LABEL(fswberr)
1005	blt	a0, zero, _C_LABEL(fswberr)	# reject kernel address
1006	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
1007	sw	a1, 0(a0)			# store word
1008	sw	zero, U_PCB_ONFAULT(v1)		# disarm fault handler
1009	j	ra
1010	move	v0, zero			# BDSLOT: success
1011END(suword)
1012
1013/*
1014 * int suiword(void *, int)
1015 * Have to flush instruction cache afterwards.
1016 */
1017LEAF(suiword)
	# Store an instruction word to user space, then tail-jump into the
	# md icache-flush routine with a0 = addr, a1 = 4; v0 (our return
	# value, 0) must survive the flush.
1018	lw	v1, _C_LABEL(curpcb)
1019	la	v0, _C_LABEL(fswberr)
1020	blt	a0, zero, _C_LABEL(fswberr)	# reject kernel address
1021	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
1022	sw	a1, 0(a0)			# store word
1023	sw	zero, U_PCB_ONFAULT(v1)		# disarm fault handler
1024	move	v0, zero			# return value = 0
1025	lw	v1, _C_LABEL(mips_cache_ops) + MIPSX_FLUSHICACHE
1026	j	v1				# NOTE: must not clobber v0!
1027	li	a1, 4				# BDSLOT: size of word
1028END(suiword)
1029
1030/*
1031 * int susword(void *, short)
1032 * Stores a short word of data to the user-space address.
1033 */
1034LEAF(susword)
1035XLEAF(suisword)
	# Store a short to user space; a fault vectors to fswberr (-1).
1036	lw	v1, _C_LABEL(curpcb)
1037	la	v0, _C_LABEL(fswberr)
1038	blt	a0, zero, _C_LABEL(fswberr)	# reject kernel address
1039	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
1040	sh	a1, 0(a0)			# store short
1041	sw	zero, U_PCB_ONFAULT(v1)		# disarm fault handler
1042	j	ra
1043	move	v0, zero			# BDSLOT: success
1044END(susword)
1045
1046/*
1047 * int subyte(void *, int)
1048 * Stores a byte of data to the user-space address.
1049 */
1050LEAF(subyte)
1051XLEAF(suibyte)
	# Store a byte to user space; a fault vectors to fswberr (-1).
1052	lw	v1, _C_LABEL(curpcb)
1053	la	v0, _C_LABEL(fswberr)
1054	blt	a0, zero, _C_LABEL(fswberr)	# reject kernel address
1055	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
1056	sb	a1, 0(a0)			# store byte
1057	sw	zero, U_PCB_ONFAULT(v1)		# disarm fault handler
1058	j	ra
1059	move	v0, zero			# BDSLOT: success
1060END(subyte)
1061
1062/*
1063 * int badaddr(void *addr, int len)
1064 * See if access to addr with a len type instruction causes a machine check.
1065 * len is length of access (1=byte, 2=short, 4=long)
1066 */
1067LEAF(badaddr)
	# Probe a0 with an access of a1 bytes (1, 2, or anything else = 4)
	# under the baderr fault handler; returns 0 on success, nonzero
	# (-1, via baderr) if the access faulted.
1068	lw	v1, _C_LABEL(curpcb)
1069	la	v0, _C_LABEL(baderr)
1070	bne	a1, 1, 2f			# not a byte probe?
1071	sw	v0, U_PCB_ONFAULT(v1)		# BDSLOT: arm fault handler
1072	b	5f
1073	lbu	v0, (a0)			# BDSLOT: byte probe
10742:
1075	bne	a1, 2, 4f			# not a short probe?
1076	nop					# BDSLOT
1077	b	5f
1078	lhu	v0, (a0)			# BDSLOT: short probe
10794:
1080	lw	v0, (a0)			# word probe
10815:
1082	sw	zero, U_PCB_ONFAULT(v1)		# disarm fault handler
1083	j	ra
1084	move	v0, zero			# BDSLOT: made it w/o errors
1085END(badaddr)
1086
1087/*
1088 * Error routine for {f,s}uswintr.  The fault handler in trap.c
1089 * checks for pcb_onfault set to this fault handler and
1090 * "bails out" before calling the VM fault handler.
1091 * (We can not call VM code from interrupt level.)
1092 */
1093LEAF(fswintrberr)
	# Entered from fuswintr/suswintr (directly, or via pcb_onfault with
	# v1 = curpcb and a2 = saved old handler); restores the old handler
	# and returns -1.
1094	nop
1095	sw	a2, U_PCB_ONFAULT(v1)
1096	j	ra
1097	li	v0, -1				# BDSLOT: return -1
1098END(fswintrberr)
1099
1100LEAF(fswberr)
1101XLEAF(baderr)
	# Fault handler for the fu*/su* primitives and badaddr; entered with
	# v1 = curpcb; disarms pcb_onfault and returns -1.
1102	sw	zero, U_PCB_ONFAULT(v1)
1103	j	ra
1104	li	v0, -1				# BDSLOT: return -1
1105END(fswberr)
1106
1107#if defined(NS) || defined(ISO) || defined(CCITT) || defined(CODA)
1108/*
1109 * Insert 'p' after 'q'.
1110 *	_insque(p, q)
1111 *		caddr_t p, q;
1112 */
LEAF(_insque)
	lw	v0, 0(a1)		# v0 = q->next
	sw	a1, 4(a0)		# p->prev = q
	sw	v0, 0(a0)		# p->next = q->next
	sw	a0, 4(v0)		# q->next->prev = p
	j	ra
	sw	a0, 0(a1)		# BDSLOT: q->next = p
END(_insque)
1121
1122/*
1123 * Remove item 'p' from queue.
1124 *	_remque(p)
1125 *		caddr_t p;
1126 */
LEAF(_remque)
	lw	v0, 0(a0)		# v0 = p->next
	lw	v1, 4(a0)		# v1 = p->prev
	nop				# load delay slot
	sw	v0, 0(v1)		# p->prev->next = p->next
	j	ra
	sw	v1, 4(v0)		# BDSLOT: p->next->prev = p->prev
END(_remque)
1135#endif
1136
1137
1138/*
1139 * u_int32_t mips_cp0_cause_read(void)
1140 *
1141 *	Return the current value of the CP0 Cause register.
1142 *
1143 *	Note: Not profiled, skews CPU-clock measurement (mips_mcclock.c)
1144 *	to uselessness.
1145 */
LEAF_NOPROFILE(mips_cp0_cause_read)
	mfc0	v0, MIPS_COP_0_CAUSE	# v0 = CP0 Cause
	j	ra
	nop				# BDSLOT
END(mips_cp0_cause_read)
1151
1152/*
1153 * void mips_cp0_cause_write(u_int32_t)
1154 *
1155 *	Set the value of the CP0 Cause register.
1156 */
LEAF(mips_cp0_cause_write)
	mtc0	a0, MIPS_COP_0_CAUSE
	COP0_SYNC
	nop				# CP0 write-hazard padding
	nop
	j	ra
	nop				# BDSLOT
END(mips_cp0_cause_write)
1165
1166
1167/*
1168 * u_int32_t mips_cp0_status_read(void)
1169 *
1170 *	Return the current value of the CP0 Status register.
1171 */
LEAF(mips_cp0_status_read)
	mfc0	v0, MIPS_COP_0_STATUS	# v0 = CP0 Status
	j	ra
	nop				# BDSLOT
END(mips_cp0_status_read)
1177
1178/*
1179 * void mips_cp0_status_write(u_int32_t)
1180 *
1181 *	Set the value of the CP0 Status register.
1182 *
 *	Note: This is almost certainly not the way you want to write a
 *	"permanent" value to the CP0 Status register, since it gets
1185 *	saved in trap frames and restores.
1186 */
LEAF(mips_cp0_status_write)
	mtc0	a0, MIPS_COP_0_STATUS
	COP0_SYNC
	nop				# CP0 write-hazard padding
	nop
	j	ra
	nop				# BDSLOT
END(mips_cp0_status_write)
1195
1196
1197#if !defined(NOFPU) || defined(SOFTFLOAT)
1198/*----------------------------------------------------------------------------
1199 *
1200 * MachFPInterrupt --
1201 * MachFPTrap --
1202 *
1203 *	Handle a floating point interrupt (r3k) or trap (r4k).
 *	the handlers are identical, only the reporting mechanisms differ.
1205 *
1206 *	MachFPInterrupt(status, cause, pc, frame)
1207 *		unsigned status;
1208 *		unsigned cause;
1209 *		unsigned pc;
1210 *		int *frame;
1211 *
1212 *	MachFPTrap(status, cause, pc, frame)
1213 *		unsigned status;
1214 *		unsigned cause;
1215 *		unsigned pc;
1216 *		int *frame;
1217 *
1218 * Results:
1219 *	None.
1220 *
1221 * Side effects:
1222 *	None.
1223 *
1224 *----------------------------------------------------------------------------
1225 */
NESTED(MachFPInterrupt, CALLFRAME_SIZ, ra)
XNESTED(MachFPTrap)
	.mask	0x80000000, -4
	subu	sp, sp, CALLFRAME_SIZ		# allocate call frame
#ifndef SOFTFLOAT
	mfc0	t0, MIPS_COP_0_STATUS
#endif
	sw	ra, CALLFRAME_RA(sp)		# save return address
#ifndef SOFTFLOAT
	or	t0, t0, MIPS_SR_COP_1_BIT	# enable coprocessor 1
	mtc0	t0, MIPS_COP_0_STATUS
	COP0_SYNC
	nop
	nop
	nop				# 1st extra nop for r4k
	nop				# 2nd extra nop for r4k

	cfc1	t0, MIPS_FPU_CSR	# stall til FP done
	cfc1	t0, MIPS_FPU_CSR	# now get status
	nop
	sll	t2, t0, (31 - 17)	# unimplemented operation?
	bgez	t2, 3f			# no, normal trap
	nop
#endif
/*
 * We got an unimplemented operation trap so
 * fetch the instruction and emulate the instruction.
 */
	bgez	a1, 1f			# Check the branch delay bit.
	nop
/*
 * The instruction is in the branch delay slot; it lives one word
 * past the trapping PC in a2.
 */
	b	2f
	lw	a0, 4(a2)			# BDSLOT: a0 = coproc instruction
/*
 * Not in a branch delay slot: fetch the instruction at the trap PC
 * and continue to MachEmulateFP().
 */
1:
	lw	a0, 0(a2)			# a0 = coproc instruction
2:
	move	a2, a1				# a2 = cause register

/*
 * Check to see if the instruction to be emulated is a floating-point
 * instruction.
 */
	srl	t0, a0, MIPS_OPCODE_SHIFT
	beq	t0, MIPS_OPCODE_C1, 4f
	nop

#ifdef SOFTFLOAT
	REG_PROLOGUE
	REG_S	zero, FRAME_ZERO(a3)		# ensure zero has value 0
	REG_EPILOGUE

	/* Dispatch FP load/store opcodes to their soft-float emulators. */
	beq	t0, MIPS_OPCODE_LWC1, 5f
	nop
	beq	t0, MIPS_OPCODE_LDC1, 6f
	nop
	beq	t0, MIPS_OPCODE_SWC1, 7f
	nop
	beq	t0, MIPS_OPCODE_SDC1, 8f
	nop
#endif

/*
 * Not an FP instruction: rewrite the exception code in the saved cause
 * to "reserved instruction" and send SIGILL to the current process.
 */
	li	t0, 0xFFFFFF00			# mask off old exception code
	and	a1, a1, t0
	ori	a1, a1, T_RES_INST << MIPS_CR_EXC_CODE_SHIFT
	REG_PROLOGUE
	REG_S	a1, FRAME_CAUSE(a3)
	REG_EPILOGUE

	move	a2, a0				# code = instruction
	lw	a0, _C_LABEL(curproc)		# get current process
	jal	_C_LABEL(trapsignal)
	li	a1, SIGILL			# BDSLOT

	b	FPReturn
	nop

/*
 * Send a SIGFPE to the current process if it tripped any of
 * the VZOUI bits.
 */
3:
	REG_PROLOGUE
	REG_S	a1, FRAME_CAUSE(a3)
	REG_EPILOGUE

	move	a2, a0				# code = instruction
	lw	a0, _C_LABEL(curproc)		# get current process
	jal	_C_LABEL(trapsignal)
	li	a1, SIGFPE			# BDSLOT

	b	FPReturn
	nop

/*
 * Finally, we can call MachEmulateFP() where a0 is the instruction to emulate.
 */
4:
	jal	_C_LABEL(MachEmulateFP)
	move	a1, a3				# BDSLOT: a1 = trap frame

/*
 * Turn off the floating point coprocessor and return.
 */
FPReturn:
#ifndef SOFTFLOAT
	mfc0	t0, MIPS_COP_0_STATUS
#endif
	lw	ra, CALLFRAME_RA(sp)		# restore return address
#ifndef SOFTFLOAT
	and	t0, t0, ~MIPS_SR_COP_1_BIT	# disable coprocessor 1
	mtc0	t0, MIPS_COP_0_STATUS
	COP0_SYNC
#else
	nop
#endif
	j	ra
	addu	sp, sp, CALLFRAME_SIZ		# BDSLOT: pop call frame

#ifdef SOFTFLOAT
5:	# lwc1
	jal	_C_LABEL(MachEmulateLWC1)
	move	a1, a3				# BDSLOT: a1 = trap frame
	b	FPReturn
	nop

6:	# ldc1
	jal	_C_LABEL(MachEmulateLDC1)
	move	a1, a3				# BDSLOT: a1 = trap frame
	b	FPReturn
	nop

7:	# swc1
	jal	_C_LABEL(MachEmulateSWC1)
	move	a1, a3				# BDSLOT: a1 = trap frame
	b	FPReturn
	nop

8:	# sdc1
	jal	_C_LABEL(MachEmulateSDC1)
	move	a1, a3				# BDSLOT: a1 = trap frame
	b	FPReturn
	nop

#endif
END(MachFPInterrupt)
1380#endif /* !defined(NOFPU) || defined(SOFTFLOAT) */
1381
/*
 * void mips_pagecopy(void *dst /* a0 */, void *src /* a1 */)
 * Copy one page (NBPG bytes); both pointers assumed page-aligned --
 * TODO confirm alignment contract with callers.
 * Loads are grouped ahead of the stores so each load's delay is
 * covered by independent work.
 */
LEAF(mips_pagecopy)
#if !defined(_MIPS_BSD_API) || _MIPS_BSD_API == _MIPS_BSD_API_LP32
	li	a2, NBPG >> 5			# iterations of 32 bytes each

1:	lw	t0, 0(a1)
	lw	t4, 16(a1)
	subu	a2, 1				# decrement early (fills load slot)
	lw	t1, 4(a1)
	lw	t2, 8(a1)
	lw	t3, 12(a1)
	lw	t5, 20(a1)
	lw	t6, 24(a1)
	lw	t7, 28(a1)

	sw	t0, 0(a0)
	sw	t4, 16(a0)
	addu	a1, 32				# advance source pointer
	sw	t1, 4(a0)
	sw	t2, 8(a0)
	sw	t3, 12(a0)
	sw	t5, 20(a0)
	sw	t6, 24(a0)
	sw	t7, 28(a0)
	bgtz	a2,1b
	addu	a0, 32				# BDSLOT: advance dest pointer
#else
	li	a2, NBPG >> 6			# iterations of 64 bytes each

	REG_PROLOGUE
1:	ld	t0, 0(a1)
	ld	t4, 32(a1)
	ld	t2, 16(a1)
	ld	t6, 48(a1)
	subu	a2, 1				# decrement early (fills load slot)
	ld	t1, 8(a1)
	ld	t3, 24(a1)
	ld	t5, 40(a1)
	ld	t7, 56(a1)

	sd	t0, 0(a0)
	sd	t4, 32(a0)
	sd	t2, 16(a0)
	sd	t6, 48(a0)
	addu	a1, 64				# advance source pointer
	sd	t1, 8(a0)
	sd	t3, 24(a0)
	sd	t5, 40(a0)
	sd	t7, 56(a0)
	bgtz	a2,1b
	addu	a0, 64				# BDSLOT: advance dest pointer
	REG_EPILOGUE
#endif

	j	ra
	nop				# BDSLOT
END(mips_pagecopy)
1438
/*
 * void mips_pagezero(void *dst /* a0 */)
 * Zero one page (NBPG bytes) starting at dst.
 */
LEAF(mips_pagezero)
#if !defined(_MIPS_BSD_API) || _MIPS_BSD_API == _MIPS_BSD_API_LP32
	li	a1, NBPG >> 5			# iterations of 32 bytes each

1:	sw	zero, 0(a0)
	sw	zero, 16(a0)			# try to miss cache first
	subu	a1, 1
	sw	zero, 4(a0)
	sw	zero, 8(a0)
	sw	zero, 12(a0)
	sw	zero, 20(a0)
	sw	zero, 24(a0)
	sw	zero, 28(a0)
	bgtz	a1,1b
	addu	a0, 32				# BDSLOT: advance pointer
#else
	li	a1, NBPG >> 6			# iterations of 64 bytes each

	REG_PROLOGUE
1:	sd	zero, 0(a0)			# try to miss cache first
	sd	zero, 32(a0)
	subu	a1, 1
	sd	zero, 16(a0)
	sd	zero, 48(a0)
	sd	zero, 8(a0)			# fill in cache lines
	sd	zero, 40(a0)
	sd	zero, 24(a0)
	sd	zero, 56(a0)
	REG_EPILOGUE
	bgtz	a1,1b
	addu	a0, 64				# BDSLOT: advance pointer
#endif

	j	ra
	nop				# BDSLOT
END(mips_pagezero)
1475
1476
1477#ifndef DDB_TRACE
1478
1479#if defined(DEBUG) || defined(DDB) || defined(KGDB) || defined(geo)
1480/*
 * Stacktrace support hooks which use type punning to access
1482 * the caller's registers.
1483 */
1484
1485
1486/*
1487 * stacktrace() -- print a stack backtrace to the console.
1488 *	implicitly accesses caller's a0-a3.
1489 */
NESTED(stacktrace, CALLFRAME_SIZ+24, ra)
XNESTED(logstacktrace)
	subu	sp, sp, CALLFRAME_SIZ+24	# four arg-passing slots

	move	t0, ra				# save caller's PC
	addu	t1, sp, CALLFRAME_SIZ+24	# compute caller's SP
	move	t2, s8				# non-virtual frame pointer

	la	v0, _C_LABEL(printf)		# output routine for the subr

	sw	ra, 36(sp)			# save return address

	/* a0-a3 are still caller's a0-a3, pass in-place as given. */
	sw	t0, 16(sp)			# push caller's PC
	sw	t1, 20(sp)			# push caller's SP
	sw	t2, 24(sp)			# push caller's FP, in case
	sw	zero, 28(sp)			# caller's RA on stack
	jal	_C_LABEL(stacktrace_subr)
	sw	v0, 32(sp)			# BDSLOT: push printf

	lw	ra, 36(sp)			# restore our return address
	addu	sp, sp, CALLFRAME_SIZ+24	# pop frame
	j	ra
	nop				# BDSLOT
END(stacktrace)
1515#endif	/* DEBUG || DDB */
1516#endif	/* DDB_TRACE */
1517
	.sdata
	.globl	_C_LABEL(esym)
	# NOTE(review): presumably points at the end of the kernel symbol
	# table, filled in by machine-dependent startup -- confirm there.
_C_LABEL(esym):
	.word 0

	.globl	_C_LABEL(cpu_id)
	.globl	_C_LABEL(fpu_id)
	# CPU and FPU identification words; zero until set elsewhere.
_C_LABEL(cpu_id):
	.word	0
_C_LABEL(fpu_id):
	.word	0

#ifdef MIPS_DYNAMIC_STATUS_MASK
	# Mask applied to CP0 Status values; all-ones = no restriction.
	.globl	_C_LABEL(mips_dynamic_status_mask)
_C_LABEL(mips_dynamic_status_mask):
	.word	0xffffffff
#endif
1535