xref: /netbsd/sys/arch/alpha/alpha/locore.s (revision bf9ec67e)
1/* $NetBSD: locore.s,v 1.100 2002/05/13 21:38:09 thorpej Exp $ */
2
3/*-
4 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the NetBSD
22 *	Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 *    contributors may be used to endorse or promote products derived
25 *    from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*
41 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Author: Chris G. Demetriou
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59 *  School of Computer Science
60 *  Carnegie Mellon University
61 *  Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67.stabs	__FILE__,100,0,0,kernel_text
68
69#include "opt_ddb.h"
70#include "opt_kgdb.h"
71#include "opt_multiprocessor.h"
72#include "opt_lockdebug.h"
73
74#include <machine/asm.h>
75
76__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.100 2002/05/13 21:38:09 thorpej Exp $");
77
78#include "assym.h"
79
80.stabs	__FILE__,132,0,0,kernel_text
81
82#if defined(MULTIPROCESSOR)
83
84/*
85 * Get various per-cpu values.  A pointer to our cpu_info structure
86 * is stored in SysValue.  These macros clobber v0, t0, t8..t11.
87 *
88 * All return values are in v0.
89 */
/* v0 = pointer to this CPU's cpu_info (read from SysValue via rdval). */
90#define	GET_CPUINFO		call_pal PAL_OSF1_rdval
91
/* v0 = address of this CPU's curproc slot (not the curproc value itself). */
92#define	GET_CURPROC							\
93	call_pal PAL_OSF1_rdval					;	\
94	addq	v0, CPU_INFO_CURPROC, v0
95
/* v0 = address of this CPU's fpcurproc slot. */
96#define	GET_FPCURPROC							\
97	call_pal PAL_OSF1_rdval					;	\
98	addq	v0, CPU_INFO_FPCURPROC, v0
99
/* v0 = address of this CPU's curpcb slot. */
100#define	GET_CURPCB							\
101	call_pal PAL_OSF1_rdval					;	\
102	addq	v0, CPU_INFO_CURPCB, v0
103
/* reg = physical address of this CPU's idle-thread PCB (v0 also clobbered). */
104#define	GET_IDLE_PCB(reg)						\
105	call_pal PAL_OSF1_rdval					;	\
106	ldq	reg, CPU_INFO_IDLE_PCB_PADDR(v0)
107
108#else	/* if not MULTIPROCESSOR... */
109
/* Uniprocessor: a single statically-allocated cpu_info is used instead. */
110IMPORT(cpu_info_primary, CPU_INFO_SIZEOF)
111
/* Same contracts as above, but computed with lda; no PALcall needed. */
112#define	GET_CPUINFO		lda v0, cpu_info_primary
113
114#define	GET_CURPROC		lda v0, cpu_info_primary + CPU_INFO_CURPROC
115
116#define	GET_FPCURPROC		lda v0, cpu_info_primary + CPU_INFO_FPCURPROC
117
118#define	GET_CURPCB		lda v0, cpu_info_primary + CPU_INFO_CURPCB
119
120#define	GET_IDLE_PCB(reg)						\
121	lda	reg, cpu_info_primary				;	\
122	ldq	reg, CPU_INFO_IDLE_PCB_PADDR(reg)
123#endif
124
125/*
126 * Perform actions necessary to switch to a new context.  The
127 * hwpcb should be in a0.  Clobbers v0, t0, t8..t11, a0.
128 */
/*
 * a0 holds the hardware PCB address for swpctx; callers here load it from
 * p_md.md_pcbpaddr ("phys addr of PCB"), so it is presumably a physical
 * address — confirm against the PALcode swpctx specification.
 */
129#define	SWITCH_CONTEXT							\
130	/* Make a note of the context we're running on. */		\
131	GET_CURPCB						;	\
132	stq	a0, 0(v0)					;	\
133									\
134	/* Swap in the new context. */					\
135	call_pal PAL_OSF1_swpctx
137
138	/* don't reorder instructions; paranoia. */
139	.set noreorder
140	.text
141
	/* bfalse: branch to dst when reg == 0 (readable alias for beq). */
142	.macro	bfalse	reg, dst
143	beq	\reg, \dst
144	.endm

	/* btrue: branch to dst when reg != 0 (readable alias for bne). */
146	.macro	btrue	reg, dst
147	bne	\reg, \dst
148	.endm
149
150/*
151 * This is for kvm_mkdb, and should be the address of the beginning
152 * of the kernel text segment (not necessarily the same as kernbase).
153 */
154	EXPORT(kernel_text)
155.loc	1 __LINE__
156kernel_text:
157
158/*
159 * bootstack: a temporary stack, for booting.
160 *
161 * Extends from 'start' down.
162 */
/* This label marks the top of the boot stack; the stack grows downward
 * from here (locorestart does "lda sp,bootstack"). */
163bootstack:
164
165/*
166 * locorestart: Kernel start. This is no longer the actual entry
167 * point, although jumping to here (the first kernel address) will
168 * in fact work just fine.
169 *
170 * Arguments:
171 *	a0 is the first free page frame number (PFN)
172 *	a1 is the page table base register (PTBR)
173 *	a2 is the bootinfo magic number
174 *	a3 is the pointer to the bootinfo structure
175 *
176 * All arguments are passed to alpha_init().
177 */
178NESTED_NOPROFILE(locorestart,1,0,ra,0,0)
	/* Establish pv = our own address so LDGP can compute the GP. */
179	br	pv,1f
1801:	LDGP(pv)
181
182	/* Switch to the boot stack. */
183	lda	sp,bootstack
184
185	/* Load KGP with current GP. */
186	mov	a0, s0			/* save pfn */
187	mov	gp, a0
188	call_pal PAL_OSF1_wrkgp		/* clobbers a0, t0, t8-t11 */
189	mov	s0, a0			/* restore pfn */
190
191	/*
192	 * Call alpha_init() to do pre-main initialization.
193	 * alpha_init() gets the arguments we were called with,
194	 * which are already in a0, a1, a2, a3, and a4.
195	 */
196	CALL(alpha_init)
197
198	/* Set up the virtual page table pointer. */
199	ldiq	a0, VPTBASE
200	call_pal PAL_OSF1_wrvptptr	/* clobbers a0, t0, t8-t11 */
201
202	/*
203	 * Switch to proc0's PCB.
204	 */
205	lda	a0, proc0
206	ldq	a0, P_MD_PCBPADDR(a0)		/* phys addr of PCB */
207	SWITCH_CONTEXT
208
209	/*
210	 * We've switched to a new page table base, so invalidate the TLB
211	 * and I-stream.  This happens automatically everywhere but here.
212	 */
213	ldiq	a0, -2				/* TBIA */
214	call_pal PAL_OSF1_tbi
215	call_pal PAL_imb
216
217	/*
218	 * All ready to go!  Call main()!
219	 */
220	CALL(main)
221
222	/* This should never happen. */
223	PANIC("main() returned",Lmain_returned_pmsg)
224	END(locorestart)
225
226/**************************************************************************/
227
228/*
229 * Pull in the PROM interface routines; these are needed for
230 * prom printf (while bootstrapping), and for determining the
231 * boot device, etc.
232 */
233#include <alpha/alpha/prom_disp.s>
234
235/**************************************************************************/
236
237/*
238 * Pull in the PALcode function stubs.
239 */
240#include <alpha/alpha/pal.s>
241
242/**************************************************************************/
243
244/**************************************************************************/
245
246#if defined(MULTIPROCESSOR)
247/*
248 * Pull in the multiprocessor glue.
249 */
250#include <alpha/alpha/multiproc.s>
251#endif /* MULTIPROCESSOR */
252
253/**************************************************************************/
254
255/**************************************************************************/
256
257#if defined(DDB) || defined(KGDB)
258/*
259 * Pull in debugger glue.
260 */
261#include <alpha/alpha/debug.s>
262#endif /* DDB || KGDB */
263
264/**************************************************************************/
265
266/**************************************************************************/
267
268	.text
269.stabs	__FILE__,132,0,0,backtolocore1	/* done with includes */
270.loc	1 __LINE__
271backtolocore1:
272/**************************************************************************/
273
274/*
275 * Signal "trampoline" code. Invoked from RTE setup by sendsig().
276 *
277 * On entry, stack & registers look like:
278 *
279 *      a0	signal number
280 *      a1	signal specific code
281 *      a2	pointer to signal context frame (scp)
282 *      pv	address of handler
283 *      sp+0	saved hardware state
284 *                      .
285 *                      .
286 *      scp+0	beginning of signal context frame
287 */
288
289NESTED_NOPROFILE(sigcode,0,0,ra,0,0)
290	lda	sp, -16(sp)		/* save the sigcontext pointer */
291	stq	a2, 0(sp)
292	jsr	ra, (t12)		/* call the signal handler (t12==pv) */
293	ldq	a0, 0(sp)		/* get the sigcontext pointer */
294	lda	sp, 16(sp)
295	CALLSYS_NOERROR(__sigreturn14)	/* and call sigreturn() with it. */
296	mov	v0, a0			/* if that failed, get error code */
297	CALLSYS_NOERROR(exit)		/* and call exit() with it. */
/*
 * esigcode marks the end of the trampoline; presumably sendsig() copies
 * [sigcode, esigcode) out to user space — confirm against sendsig().
 */
298XNESTED(esigcode,0)
299	END(sigcode)
300
301/**************************************************************************/
302
303/*
304 * exception_return: return from trap, exception, or syscall
305 */
306
307IMPORT(ssir, 8)
308
309LEAF(exception_return, 1)			/* XXX should be NESTED */
310	br	pv, 1f
3111:	LDGP(pv)
312
	/* s1 = saved PS for the whole routine; consulted at 2:/3: re-checks. */
313	ldq	s1, (FRAME_PS * 8)(sp)		/* get the saved PS */
314	and	s1, ALPHA_PSL_IPL_MASK, t0	/* look at the saved IPL */
315	bne	t0, 4f				/* != 0: can't do AST or SIR */
316
317	/* see if we can do an SIR */
3182:	ldq	t1, ssir			/* SIR pending? */
319	bne	t1, 5f				/* yes */
320	/* no */
321
322	/* check for AST */
3233:	and	s1, ALPHA_PSL_USERMODE, t0	/* are we returning to user? */
324	beq	t0, 4f				/* no: just return */
325	/* yes */
326
327	/* GET_CPUINFO clobbers v0, t0, t8...t11. */
328	GET_CPUINFO
329	ldq	t1, CPU_INFO_CURPROC(v0)
330	ldl	t2, P_MD_ASTPENDING(t1)		/* AST pending? */
331	bne	t2, 6f				/* yes */
332	/* no: return & deal with FP */
333
334	/*
335	 * We are going back to usermode.  Enable the FPU based on whether
336	 * the current proc is fpcurproc.
337	 */
338	ldq	t2, CPU_INFO_FPCURPROC(v0)
339	cmpeq	t1, t2, t1
340	mov	zero, a0
341	cmovne	t1, 1, a0			/* a0 = (curproc == fpcurproc) */
342	call_pal PAL_OSF1_wrfen
343
344	/* restore the registers, and return */
3454:	bsr	ra, exception_restore_regs	/* jmp/CALL trashes pv/t12 */
346	ldq	ra,(FRAME_RA*8)(sp)
347	.set noat
348	ldq	at_reg,(FRAME_AT*8)(sp)
349
350	lda	sp,(FRAME_SW_SIZE*8)(sp)	/* pop software frame */
351	call_pal PAL_OSF1_rti
352	.set at
353	/* NOTREACHED */
354
355	/* We've got a SIR */
3565:	ldiq	a0, ALPHA_PSL_IPL_SOFT
357	call_pal PAL_OSF1_swpipl
358	mov	v0, s2				/* remember old IPL */
359	CALL(softintr_dispatch)
360
361	/* SIR handled; restore IPL and check again */
362	mov	s2, a0
363	call_pal PAL_OSF1_swpipl
364	br	2b
365
366	/* We've got an AST */
3676:	stl	zero, P_MD_ASTPENDING(t1)	/* no AST pending */
368
369	ldiq	a0, ALPHA_PSL_IPL_0		/* drop IPL to zero */
370	call_pal PAL_OSF1_swpipl
371	mov	v0, s2				/* remember old IPL */
372
373	mov	sp, a0				/* only arg is frame */
374	CALL(ast)
375
376	/* AST handled; restore IPL and check again */
377	mov	s2, a0
378	call_pal PAL_OSF1_swpipl
379	br	3b
380
381	END(exception_return)
382
/*
 * Save the "software" half of the trap frame: v0, a3-a5, s0-s6, t0-t12.
 * Called via bsr with ra live; the caller is responsible for saving
 * ra/at and adjusting sp (see e.g. XentUna).
 */
383LEAF(exception_save_regs, 0)
384	stq	v0,(FRAME_V0*8)(sp)
385	stq	a3,(FRAME_A3*8)(sp)
386	stq	a4,(FRAME_A4*8)(sp)
387	stq	a5,(FRAME_A5*8)(sp)
388	stq	s0,(FRAME_S0*8)(sp)
389	stq	s1,(FRAME_S1*8)(sp)
390	stq	s2,(FRAME_S2*8)(sp)
391	stq	s3,(FRAME_S3*8)(sp)
392	stq	s4,(FRAME_S4*8)(sp)
393	stq	s5,(FRAME_S5*8)(sp)
394	stq	s6,(FRAME_S6*8)(sp)
395	stq	t0,(FRAME_T0*8)(sp)
396	stq	t1,(FRAME_T1*8)(sp)
397	stq	t2,(FRAME_T2*8)(sp)
398	stq	t3,(FRAME_T3*8)(sp)
399	stq	t4,(FRAME_T4*8)(sp)
400	stq	t5,(FRAME_T5*8)(sp)
401	stq	t6,(FRAME_T6*8)(sp)
402	stq	t7,(FRAME_T7*8)(sp)
403	stq	t8,(FRAME_T8*8)(sp)
404	stq	t9,(FRAME_T9*8)(sp)
405	stq	t10,(FRAME_T10*8)(sp)
406	stq	t11,(FRAME_T11*8)(sp)
407	stq	t12,(FRAME_T12*8)(sp)
408	RET
409	END(exception_save_regs)
410
/*
 * Mirror of exception_save_regs: reload v0, a3-a5, s0-s6, t0-t12 from
 * the frame.  ra/at/sp are restored by the caller (exception_return).
 */
411LEAF(exception_restore_regs, 0)
412	ldq	v0,(FRAME_V0*8)(sp)
413	ldq	a3,(FRAME_A3*8)(sp)
414	ldq	a4,(FRAME_A4*8)(sp)
415	ldq	a5,(FRAME_A5*8)(sp)
416	ldq	s0,(FRAME_S0*8)(sp)
417	ldq	s1,(FRAME_S1*8)(sp)
418	ldq	s2,(FRAME_S2*8)(sp)
419	ldq	s3,(FRAME_S3*8)(sp)
420	ldq	s4,(FRAME_S4*8)(sp)
421	ldq	s5,(FRAME_S5*8)(sp)
422	ldq	s6,(FRAME_S6*8)(sp)
423	ldq	t0,(FRAME_T0*8)(sp)
424	ldq	t1,(FRAME_T1*8)(sp)
425	ldq	t2,(FRAME_T2*8)(sp)
426	ldq	t3,(FRAME_T3*8)(sp)
427	ldq	t4,(FRAME_T4*8)(sp)
428	ldq	t5,(FRAME_T5*8)(sp)
429	ldq	t6,(FRAME_T6*8)(sp)
430	ldq	t7,(FRAME_T7*8)(sp)
431	ldq	t8,(FRAME_T8*8)(sp)
432	ldq	t9,(FRAME_T9*8)(sp)
433	ldq	t10,(FRAME_T10*8)(sp)
434	ldq	t11,(FRAME_T11*8)(sp)
435	ldq	t12,(FRAME_T12*8)(sp)
436	RET
437	END(exception_restore_regs)
438
439/**************************************************************************/
440
441/*
442 * XentArith:
443 * System arithmetic trap entry point.
444 */
445
446	PALVECT(XentArith)		/* setup frame, save registers */
447
448	/* a0, a1, & a2 already set up */
449	ldiq	a3, ALPHA_KENTRY_ARITH	/* a3 = trap-entry type for trap() */
450	mov	sp, a4			; .loc 1 __LINE__
451	CALL(trap)
452
	/* trap() returns here; unwind via the common exit path. */
453	jmp	zero, exception_return
454	END(XentArith)
455
456/**************************************************************************/
457
458/*
459 * XentIF:
460 * System instruction fault trap entry point.
461 */
462
463	PALVECT(XentIF)			/* setup frame, save registers */
464
465	/* a0, a1, & a2 already set up */
466	ldiq	a3, ALPHA_KENTRY_IF	/* a3 = trap-entry type for trap() */
467	mov	sp, a4			; .loc 1 __LINE__
468	CALL(trap)
469	jmp	zero, exception_return	/* common trap exit */
470	END(XentIF)
471
472/**************************************************************************/
473
474/*
475 * XentInt:
476 * System interrupt entry point.
477 */
478
479	PALVECT(XentInt)		/* setup frame, save registers */
480
481	/* a0, a1, & a2 already set up */
482	mov	sp, a3			; .loc 1 __LINE__
483	CALL(interrupt)			/* interrupts go to interrupt(), not trap() */
484	jmp	zero, exception_return	/* common trap exit */
485	END(XentInt)
486
487/**************************************************************************/
488
489/*
490 * XentMM:
491 * System memory management fault entry point.
492 */
493
494	PALVECT(XentMM)			/* setup frame, save registers */
495
496	/* a0, a1, & a2 already set up */
497	ldiq	a3, ALPHA_KENTRY_MM	/* a3 = trap-entry type for trap() */
498	mov	sp, a4			; .loc 1 __LINE__
499	CALL(trap)
500
501	jmp	zero, exception_return	/* common trap exit */
502	END(XentMM)
503
504/**************************************************************************/
505
506/*
507 * XentSys:
508 * System call entry point.
509 */
510
511	ESETUP(XentSys)			; .loc 1 __LINE__
512
	/* Save the registers a syscall may need restored/restarted. */
513	stq	v0,(FRAME_V0*8)(sp)		/* in case we need to restart */
514	stq	s0,(FRAME_S0*8)(sp)
515	stq	s1,(FRAME_S1*8)(sp)
516	stq	s2,(FRAME_S2*8)(sp)
517	stq	s3,(FRAME_S3*8)(sp)
518	stq	s4,(FRAME_S4*8)(sp)
519	stq	s5,(FRAME_S5*8)(sp)
520	stq	s6,(FRAME_S6*8)(sp)
521	stq	a0,(FRAME_A0*8)(sp)
522	stq	a1,(FRAME_A1*8)(sp)
523	stq	a2,(FRAME_A2*8)(sp)
524	stq	a3,(FRAME_A3*8)(sp)
525	stq	a4,(FRAME_A4*8)(sp)
526	stq	a5,(FRAME_A5*8)(sp)
527	stq	ra,(FRAME_RA*8)(sp)
528
529	/* syscall number, passed in v0, is first arg, frame pointer second */
530	mov	v0,a1
531	GET_CURPROC
532	ldq	a0,0(v0)		/* a0 = curproc */
533	mov	sp,a2			; .loc 1 __LINE__
	/* Dispatch through the process's syscall handler (p_md.md_syscall). */
534	ldq	t12,P_MD_SYSCALL(a0)
535	CALL((t12))
536
537	jmp	zero, exception_return	/* common trap exit */
538	END(XentSys)
539
540/**************************************************************************/
541
542/*
543 * XentUna:
544 * System unaligned access entry point.
545 */
546
547LEAF(XentUna, 3)				/* XXX should be NESTED */
	/* Unlike the PALVECT entries, this one builds the software frame
	 * by hand: push the frame, save at/ra, then the bulk registers. */
548	.set noat
549	lda	sp,-(FRAME_SW_SIZE*8)(sp)
550	stq	at_reg,(FRAME_AT*8)(sp)
551	.set at
552	stq	ra,(FRAME_RA*8)(sp)
553	bsr	ra, exception_save_regs		/* jmp/CALL trashes pv/t12 */
554
555	/* a0, a1, & a2 already set up */
556	ldiq	a3, ALPHA_KENTRY_UNA	/* a3 = trap-entry type for trap() */
557	mov	sp, a4			; .loc 1 __LINE__
558	CALL(trap)
559
560	jmp	zero, exception_return	/* common trap exit */
561	END(XentUna)
562
563/**************************************************************************/
564
565/*
566 * savefpstate: Save a process's floating point state.
567 *
568 * Arguments:
569 *	a0	'struct fpstate *' to save into
570 */
571
/* Clobbers t1 and ft0; assumes the FPU is enabled for this thread. */
572LEAF(savefpstate, 1)
573	LDGP(pv)
574	/* save all of the FP registers */
575	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
576	stt	$f0,   (0 * 8)(t1)	/* save first register, using hw name */
577	stt	$f1,   (1 * 8)(t1)	/* etc. */
578	stt	$f2,   (2 * 8)(t1)
579	stt	$f3,   (3 * 8)(t1)
580	stt	$f4,   (4 * 8)(t1)
581	stt	$f5,   (5 * 8)(t1)
582	stt	$f6,   (6 * 8)(t1)
583	stt	$f7,   (7 * 8)(t1)
584	stt	$f8,   (8 * 8)(t1)
585	stt	$f9,   (9 * 8)(t1)
586	stt	$f10, (10 * 8)(t1)
587	stt	$f11, (11 * 8)(t1)
588	stt	$f12, (12 * 8)(t1)
589	stt	$f13, (13 * 8)(t1)
590	stt	$f14, (14 * 8)(t1)
591	stt	$f15, (15 * 8)(t1)
592	stt	$f16, (16 * 8)(t1)
593	stt	$f17, (17 * 8)(t1)
594	stt	$f18, (18 * 8)(t1)
595	stt	$f19, (19 * 8)(t1)
596	stt	$f20, (20 * 8)(t1)
597	stt	$f21, (21 * 8)(t1)
598	stt	$f22, (22 * 8)(t1)
599	stt	$f23, (23 * 8)(t1)
600	stt	$f24, (24 * 8)(t1)
601	stt	$f25, (25 * 8)(t1)
602	stt	$f26, (26 * 8)(t1)
603	stt	$f27, (27 * 8)(t1)
604	.set noat
605	stt	$f28, (28 * 8)(t1)
606	.set at
607	stt	$f29, (29 * 8)(t1)
608	stt	$f30, (30 * 8)(t1)
609
610	/*
611	 * Then save the FPCR; note that the necessary 'trapb's are taken
612	 * care of on kernel entry and exit.
613	 */
614	mf_fpcr	ft0
615	stt	ft0, FPREG_FPR_CR(a0)	/* store to FPCR save area */
616
617	RET
618	END(savefpstate)
619
620/**************************************************************************/
621
622/*
623 * restorefpstate: Restore a process's floating point state.
624 *
625 * Arguments:
626 *	a0	'struct fpstate *' to restore from
627 */
628
/* Clobbers t1 and ft0; inverse of savefpstate (FPCR first, then regs). */
629LEAF(restorefpstate, 1)
630	LDGP(pv)
631	/*
632	 * Restore the FPCR; note that the necessary 'trapb's are taken care of
633	 * on kernel entry and exit.
634	 */
635	ldt	ft0, FPREG_FPR_CR(a0)	/* load from FPCR save area */
636	mt_fpcr	ft0
637
638	/* Restore all of the FP registers. */
639	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
640	ldt	$f0,   (0 * 8)(t1)	/* restore first reg., using hw name */
641	ldt	$f1,   (1 * 8)(t1)	/* etc. */
642	ldt	$f2,   (2 * 8)(t1)
643	ldt	$f3,   (3 * 8)(t1)
644	ldt	$f4,   (4 * 8)(t1)
645	ldt	$f5,   (5 * 8)(t1)
646	ldt	$f6,   (6 * 8)(t1)
647	ldt	$f7,   (7 * 8)(t1)
648	ldt	$f8,   (8 * 8)(t1)
649	ldt	$f9,   (9 * 8)(t1)
650	ldt	$f10, (10 * 8)(t1)
651	ldt	$f11, (11 * 8)(t1)
652	ldt	$f12, (12 * 8)(t1)
653	ldt	$f13, (13 * 8)(t1)
654	ldt	$f14, (14 * 8)(t1)
655	ldt	$f15, (15 * 8)(t1)
656	ldt	$f16, (16 * 8)(t1)
657	ldt	$f17, (17 * 8)(t1)
658	ldt	$f18, (18 * 8)(t1)
659	ldt	$f19, (19 * 8)(t1)
660	ldt	$f20, (20 * 8)(t1)
661	ldt	$f21, (21 * 8)(t1)
662	ldt	$f22, (22 * 8)(t1)
663	ldt	$f23, (23 * 8)(t1)
664	ldt	$f24, (24 * 8)(t1)
665	ldt	$f25, (25 * 8)(t1)
666	ldt	$f26, (26 * 8)(t1)
667	ldt	$f27, (27 * 8)(t1)
668	ldt	$f28, (28 * 8)(t1)
669	ldt	$f29, (29 * 8)(t1)
670	ldt	$f30, (30 * 8)(t1)
671
672	RET
673	END(restorefpstate)
674
675/**************************************************************************/
676
677/*
678 * savectx: save process context, i.e. callee-saved registers
679 *
680 * Note that savectx() only works for processes other than curproc,
681 * since cpu_switch will copy over the info saved here.  (It _can_
682 * sanely be used for curproc iff cpu_switch won't be called again, e.g.
683 * if called from boot().)
684 *
685 * Arguments:
686 *	a0	'struct user *' of the process that needs its context saved
687 *
688 * Return:
689 *	v0	0.  (note that for child processes, it seems
690 *		like savectx() returns 1, because the return address
691 *		in the PCB is set to the return address from savectx().)
692 */
693
694LEAF(savectx, 1)
695	br	pv, 1f
6961:	LDGP(pv)
	/* Callee-saved context only: sp, s0-s6, ra, and PS (for IPL). */
697	stq	sp, U_PCB_HWPCB_KSP(a0)		/* store sp */
698	stq	s0, U_PCB_CONTEXT+(0 * 8)(a0)	/* store s0 - s6 */
699	stq	s1, U_PCB_CONTEXT+(1 * 8)(a0)
700	stq	s2, U_PCB_CONTEXT+(2 * 8)(a0)
701	stq	s3, U_PCB_CONTEXT+(3 * 8)(a0)
702	stq	s4, U_PCB_CONTEXT+(4 * 8)(a0)
703	stq	s5, U_PCB_CONTEXT+(5 * 8)(a0)
704	stq	s6, U_PCB_CONTEXT+(6 * 8)(a0)
705	stq	ra, U_PCB_CONTEXT+(7 * 8)(a0)	/* store ra */
706	call_pal PAL_OSF1_rdps			/* NOTE: doesn't kill a0 */
707	stq	v0, U_PCB_CONTEXT+(8 * 8)(a0)	/* store ps, for ipl */
708
709	mov	zero, v0			/* caller always sees 0 */
710	RET
711	END(savectx)
712
713/**************************************************************************/
714
715IMPORT(sched_whichqs, 4)
716
717/*
718 * When no processes are on the runq, cpu_switch branches to idle
719 * to wait for something to come ready.
720 * Note: this is really a part of cpu_switch() but defined here for kernel
721 * profiling.
722 */
/*
 * Entered from cpu_switch() with sched_lock held (MP/LOCKDEBUG) and
 * interrupts blocked; s0/s1 carry the old proc/U-area from cpu_switch.
 */
723LEAF(idle, 0)
724	br	pv, 1f
7251:	LDGP(pv)
726	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
727	GET_CURPROC
728	stq	zero, 0(v0)			/* curproc <- NULL for stats */
729#if defined(MULTIPROCESSOR)
730	/*
731	 * Switch to the idle PCB unless we're already running on it
732	 * (if s0 == NULL, we're already on it...)
733	 */
734	beq	s0, 1f				/* skip if s0 == NULL */
735	mov	s0, a0
736	CALL(pmap_deactivate)			/* pmap_deactivate(oldproc) */
737	GET_IDLE_PCB(a0)
738	SWITCH_CONTEXT
739	mov	zero, s0			/* no outgoing proc */
7401:
741#endif
742#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
743	CALL(sched_unlock_idle)			/* release sched_lock */
744#endif
745	mov	zero, a0			/* enable all interrupts */
746	call_pal PAL_OSF1_swpipl
747	ldl	t0, sched_whichqs		/* look for non-empty queue */
748	bne	t0, 4f
	/* Idle loop: opportunistically zero free pages until work appears. */
7492:	lda	t0, uvm
750	ldl	t0, UVM_PAGE_IDLE_ZERO(t0)	/* should we zero some pages? */
751	beq	t0, 3f				/* nope. */
752	CALL(uvm_pageidlezero)
7533:	ldl	t0, sched_whichqs		/* look for non-empty queue */
754	beq	t0, 2b
7554:	ldiq	a0, ALPHA_PSL_IPL_HIGH		/* disable all interrupts */
756	call_pal PAL_OSF1_swpipl
757#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
758	CALL(sched_lock_idle)			/* acquire sched_lock */
759#endif
760	jmp	zero, cpu_switch_queuescan	/* jump back into the fire */
761	END(idle)
762
763/*
764 * cpu_switch()
765 * Find the highest priority process and resume it.
766 */
767LEAF(cpu_switch, 0)
768	LDGP(pv)
769	/*
770	 * do an inline savectx(), to save old context
771	 * Note: GET_CURPROC clobbers v0, t0, t8...t11.
772	 */
773	GET_CURPROC
774	ldq	a0, 0(v0)			/* a0 = old curproc */
775	ldq	a1, P_ADDR(a0)			/* a1 = old proc's U-area */
776	/* NOTE: ksp is stored by the swpctx */
777	stq	s0, U_PCB_CONTEXT+(0 * 8)(a1)	/* store s0 - s6 */
778	stq	s1, U_PCB_CONTEXT+(1 * 8)(a1)
779	stq	s2, U_PCB_CONTEXT+(2 * 8)(a1)
780	stq	s3, U_PCB_CONTEXT+(3 * 8)(a1)
781	stq	s4, U_PCB_CONTEXT+(4 * 8)(a1)
782	stq	s5, U_PCB_CONTEXT+(5 * 8)(a1)
783	stq	s6, U_PCB_CONTEXT+(6 * 8)(a1)
784	stq	ra, U_PCB_CONTEXT+(7 * 8)(a1)	/* store ra */
785	call_pal PAL_OSF1_rdps			/* NOTE: doesn't kill a0 */
786	stq	v0, U_PCB_CONTEXT+(8 * 8)(a1)	/* store ps, for ipl */
787
788	mov	a0, s0				/* save old curproc */
789	mov	a1, s1				/* save old U-area */
790
/*
 * Queue-scan entry point; also jumped to from idle() and switch_exit(),
 * both of which arrive with sched_lock held (MP/LOCKDEBUG) and s0 set
 * to the outgoing proc (or NULL).
 */
791cpu_switch_queuescan:
792	br	pv, 1f
7931:	LDGP(pv)
794	ldl	t0, sched_whichqs		/* look for non-empty queue */
795	beq	t0, idle			/* and if none, go idle */
796	mov	t0, t3				/* t3 = saved whichqs */
797	mov	zero, t2			/* t2 = lowest bit set */
798	blbs	t0, 3f				/* if low bit set, done! */
799
	/* Find the index of the lowest set bit (highest-priority queue). */
8002:	srl	t0, 1, t0			/* try next bit */
801	addq	t2, 1, t2
802	blbc	t0, 2b				/* if clear, try again */
803
8043:	/*
805	 * Remove process from queue
806	 */
807	lda	t1, sched_qs			/* get queues */
808	sll	t2, 4, t0			/* queue head is 16 bytes */
809	addq	t1, t0, t0			/* t0 = qp = &qs[firstbit] */
810
811	ldq	t4, PH_LINK(t0)			/* t4 = p = highest pri proc */
812	bne	t4, 4f				/* make sure p != NULL */
813	PANIC("cpu_switch",Lcpu_switch_pmsg)	/* nothing in queue! */
814
8154:
816	ldq	t5, P_FORW(t4)			/* t5 = p->p_forw */
817	stq	t5, PH_LINK(t0)			/* qp->ph_link = p->p_forw */
818	stq	t0, P_BACK(t5)			/* p->p_forw->p_back = qp */
819	stq	zero, P_BACK(t4)		/* firewall: p->p_back = NULL */
820	cmpeq	t0, t5, t0			/* see if queue is empty */
821	beq	t0, 5f				/* nope, it's not! */
822
823	ldiq	t0, 1				/* compute bit in whichqs */
824	sll	t0, t2, t0
825	xor	t3, t0, t3			/* clear bit in whichqs */
826	stl	t3, sched_whichqs
827
8285:
829	mov	t4, s2				/* save new proc */
830	ldq	s3, P_MD_PCBPADDR(s2)		/* save new pcbpaddr */
831
832	/*
833	 * Check to see if we're switching to ourself.  If we are,
834	 * don't bother loading the new context.
835	 *
836	 * Note that even if we re-enter cpu_switch() from idle(),
837	 * s0 will still contain the old curproc value because any
838	 * users of that register between then and now must have
839	 * saved it.  Also note that switch_exit() ensures that
840	 * s0 is clear before jumping here to find a new process.
841	 */
842	cmpeq	s0, s2, t0			/* oldproc == newproc? */
843	bne	t0, 7f				/* Yes!  Skip! */
844
845	/*
846	 * Deactivate the old address space before activating the
847	 * new one.  We need to do this before activating the
848	 * new process's address space in the event that new
849	 * process is using the same vmspace as the old.  If we
850	 * do this after we activate, then we might end up
851	 * incorrectly marking the pmap inactive!
852	 *
853	 * Note that don't deactivate if we don't have to...
854	 * We know this if oldproc (s0) == NULL.  This is the
855	 * case if we've come from switch_exit() (pmap no longer
856	 * exists; vmspace has been freed), or if we switched to
857	 * the Idle PCB in the MULTIPROCESSOR case.
858	 */
859	beq	s0, 6f
860
861	mov	s0, a0				/* pmap_deactivate(oldproc) */
862	CALL(pmap_deactivate)
863
8646:	/*
865	 * Activate the new process's address space and perform
866	 * the actual context swap.
867	 */
868
869	mov	s2, a0				/* pmap_activate(p) */
870	CALL(pmap_activate)
871
872	mov	s3, a0				/* swap the context */
873	SWITCH_CONTEXT
874
8757:
876#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
877	/*
878	 * Done mucking with the run queues, and we have fully switched
879	 * to the new process.  Release the scheduler lock, but keep
880	 * interrupts out.
881	 */
882	CALL(sched_unlock_idle)
883#endif
884
885	/*
886	 * Now that the switch is done, update curproc and other
887	 * globals.  We must do this even if switching to ourselves
888	 * because we might have re-entered cpu_switch() from idle(),
889	 * in which case curproc would be NULL.
890	 *
891	 * Note: GET_CPUINFO clobbers v0, t0, t8...t11.
892	 */
893#ifdef __alpha_bwx__
894	ldiq	t0, SONPROC			/* p->p_stat = SONPROC */
895	stb	t0, P_STAT(s2)
896#else
	/* No byte/word extension: emulate the byte store with a
	 * load/insert/mask/store sequence on the containing quad. */
897	addq	s2, P_STAT, t3			/* p->p_stat = SONPROC */
898	ldq_u	t1, 0(t3)
899	ldiq	t0, SONPROC
900	insbl	t0, t3, t0
901	mskbl	t1, t3, t1
902	or	t0, t1, t0
903	stq_u	t0, 0(t3)
904#endif /* __alpha_bwx__ */
905
906	GET_CPUINFO
907	/* p->p_cpu initialized in fork1() for single-processor */
908#if defined(MULTIPROCESSOR)
909	stq	v0, P_CPU(s2)			/* p->p_cpu = curcpu() */
910#endif
911	stq	s2, CPU_INFO_CURPROC(v0)	/* curproc = p */
912	stq	zero, CPU_INFO_WANT_RESCHED(v0)	/* we've rescheduled */
913
914	/*
915	 * Now running on the new u struct.
916	 * Restore registers and return.
917	 */
918	ldq	t0, P_ADDR(s2)
919
920	/* NOTE: ksp is restored by the swpctx */
921	ldq	s0, U_PCB_CONTEXT+(0 * 8)(t0)		/* restore s0 - s6 */
922	ldq	s1, U_PCB_CONTEXT+(1 * 8)(t0)
923	ldq	s2, U_PCB_CONTEXT+(2 * 8)(t0)
924	ldq	s3, U_PCB_CONTEXT+(3 * 8)(t0)
925	ldq	s4, U_PCB_CONTEXT+(4 * 8)(t0)
926	ldq	s5, U_PCB_CONTEXT+(5 * 8)(t0)
927	ldq	s6, U_PCB_CONTEXT+(6 * 8)(t0)
928	ldq	ra, U_PCB_CONTEXT+(7 * 8)(t0)		/* restore ra */
929	ldq	a0, U_PCB_CONTEXT+(8 * 8)(t0)		/* restore ipl */
930	and	a0, ALPHA_PSL_IPL_MASK, a0
931	call_pal PAL_OSF1_swpipl
932
933	ldiq	v0, 1				/* possible ret to savectx() */
934	RET
935	END(cpu_switch)
936
937/*
938 * proc_trampoline()
939 *
940 * Arrange for a function to be invoked neatly, after a cpu_fork().
941 *
942 * Invokes the function specified by the s0 register with the return
943 * address specified by the s1 register and with one argument specified
944 * by the s2 register.
945 */
946LEAF_NOPROFILE(proc_trampoline, 0)
947#if defined(MULTIPROCESSOR)
948	CALL(proc_trampoline_mp)
949#endif
	/* Tail-call s0(s2) with the return address taken from s1. */
950	mov	s0, pv
951	mov	s1, ra
952	mov	s2, a0
953	jmp	zero, (pv)
954	END(proc_trampoline)
955
956/*
957 * switch_exit(struct proc *p)
958 * Make the named process exit.  Partially switch to our idle thread
959 * (we don't update curproc or restore registers), and jump into the middle
960 * of cpu_switch to switch into a new process.  The process reaper will
961 * free the dead process's VM resources.  MUST BE CALLED AT SPLHIGH.
962 */
963LEAF(switch_exit, 1)
964	LDGP(pv)
965
966	/* save the exiting proc pointer */
967	mov	a0, s2
968
969	/* Switch to our idle stack. */
970	GET_IDLE_PCB(a0)			/* clobbers v0, t0, t8-t11 */
971	SWITCH_CONTEXT
972
973	/*
974	 * Now running as idle thread, except for the value of 'curproc' and
975	 * the saved regs.
976	 */
977
978	/* Schedule the vmspace and stack to be freed. */
979	mov	s2, a0
980	CALL(exit2)
981
982#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
983	CALL(sched_lock_idle)			/* acquire sched_lock */
984#endif
985
986	/*
987	 * Now jump back into the middle of cpu_switch().  Note that
988	 * we must clear s0 to guarantee that the check for switching
989	 * to ourselves in cpu_switch() will fail.  This is safe since
990	 * s0 will be restored when a new process is resumed.
991	 */
992	mov	zero, s0
993	jmp	zero, cpu_switch_queuescan
994	END(switch_exit)
995
996/**************************************************************************/
997
998/*
999 * Copy a null-terminated string within the kernel's address space.
1000 * If lenp is not NULL, store the number of chars copied in *lenp
1001 *
1002 * int copystr(char *from, char *to, size_t len, size_t *lenp);
1003 */
1004LEAF(copystr, 4)
1005	LDGP(pv)
1006
1007	mov	a2, t0			/* t0 = i = len */
1008	bne	a2, 1f			/* if (len != 0), proceed */
1009	ldiq	t1, 1			/* else bail */
1010	br	zero, 2f
1011
	/* Byte copy via ldq_u/insbl/mskbl: neither pointer need be aligned. */
10121:	ldq_u	t1, 0(a0)		/* t1 = *from */
1013	extbl	t1, a0, t1
1014	ldq_u	t3, 0(a1)		/* set up t2 with quad around *to */
1015	insbl	t1, a1, t2
1016	mskbl	t3, a1, t3
1017	or	t3, t2, t3		/* add *from to quad around *to */
1018	stq_u	t3, 0(a1)		/* write out that quad */
1019
	/* NOTE(review): subl/len-- operates on the low 32 bits only; a len
	 * >= 2^32 would misbehave — presumably never passed in-kernel. */
1020	subl	a2, 1, a2		/* len-- */
1021	beq	t1, 2f			/* if (*from == 0), bail out */
1022	addq	a1, 1, a1		/* to++ */
1023	addq	a0, 1, a0		/* from++ */
1024	bne	a2, 1b			/* if (len != 0) copy more */
1025
10262:	beq	a3, 3f			/* if (lenp != NULL) */
1027	subl	t0, a2, t0		/* *lenp = (i - len) */
1028	stq	t0, 0(a3)
10293:	beq	t1, 4f			/* *from == '\0'; leave quietly */
1030
1031	ldiq	v0, ENAMETOOLONG	/* *from != '\0'; error. */
1032	RET
1033
10344:	mov	zero, v0		/* return 0. */
1035	RET
1036	END(copystr)
1037
/*
 * copystr from user space to kernel space: range-check the source,
 * arm pcb_onfault with copyerr, do the copy, then disarm the handler.
 */
1038NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0)
1039	LDGP(pv)
1040	lda	sp, -16(sp)			/* set up stack frame	     */
1041	stq	ra, (16-8)(sp)			/* save ra		     */
1042	stq	s0, (16-16)(sp)			/* save s0		     */
1043	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
1044	cmpult	a0, t0, t1			/* is in user space.	     */
1045	beq	t1, copyerr			/* if it's not, error out.   */
1046	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
1047	GET_CURPROC
1048	mov	v0, s0				/* s0 = &curproc, survives copystr */
1049	lda	v0, copyerr			/* set up fault handler.     */
1050	.set noat
1051	ldq	at_reg, 0(s0)
1052	ldq	at_reg, P_ADDR(at_reg)
1053	stq	v0, U_PCB_ONFAULT(at_reg)
1054	.set at
1055	CALL(copystr)				/* do the copy.		     */
1056	.set noat
1057	ldq	at_reg, 0(s0)			/* kill the fault handler.   */
1058	ldq	at_reg, P_ADDR(at_reg)
1059	stq	zero, U_PCB_ONFAULT(at_reg)
1060	.set at
1061	ldq	ra, (16-8)(sp)			/* restore ra.		     */
1062	ldq	s0, (16-16)(sp)			/* restore s0.		     */
1063	lda	sp, 16(sp)			/* kill stack frame.	     */
1064	RET					/* v0 left over from copystr */
1065	END(copyinstr)
1066
/*
 * copyoutstr(const void *from, void *to, size_t len, size_t *lenp)
 *
 * copystr() with the destination in user space: validates the dest
 * address and installs copyerr as the pcb fault handler so a bad user
 * address returns EFAULT instead of panicking.
 */
NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
	cmpult	a1, t0, t1			/* is in user space.	     */
	beq	t1, copyerr			/* if it's not, error out.   */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	mov	v0, s0				/* s0 = curproc ptr ptr;
						   survives the CALL below   */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, 0(s0)
	ldq	at_reg, P_ADDR(at_reg)
	stq	v0, U_PCB_ONFAULT(at_reg)	/* pcb_onfault = copyerr     */
	.set at
	CALL(copystr)				/* do the copy.		     */
	.set noat
	ldq	at_reg, 0(s0)			/* kill the fault handler.   */
	ldq	at_reg, P_ADDR(at_reg)
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	RET					/* v0 left over from copystr */
	END(copyoutstr)
1095
1096/*
1097 * kcopy(const void *src, void *dst, size_t len);
1098 *
1099 * Copy len bytes from src to dst, aborting if we encounter a fatal
1100 * page fault.
1101 *
1102 * kcopy() _must_ save and restore the old fault handler since it is
1103 * called by uiomove(), which may be in the path of servicing a non-fatal
1104 * page fault.
1105 */
NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0)
	LDGP(pv)
	lda	sp, -32(sp)			/* set up stack frame	     */
	stq	ra, (32-8)(sp)			/* save ra		     */
	stq	s0, (32-16)(sp)			/* save s0		     */
	stq	s1, (32-24)(sp)			/* save s1		     */
	/*
	 * Swap a0, a1 for the call to memcpy(): kcopy takes (src, dst)
	 * but memcpy takes (dst, src).  v0 is free scratch here.
	 */
	mov	a1, v0
	mov	a0, a1
	mov	v0, a0
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	s1, 0(v0)			/* s1 = curproc		     */
	lda	v0, kcopyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, P_ADDR(s1)
	ldq	s0, U_PCB_ONFAULT(at_reg)	/* save old handler: kcopy
						   may be called while a
						   non-fatal fault is being
						   serviced (see uiomove)    */
	stq	v0, U_PCB_ONFAULT(at_reg)
	.set at
	CALL(memcpy)				/* do the copy.		     */
	.set noat
	ldq	at_reg, P_ADDR(s1)		/* restore the old handler.  */
	stq	s0, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (32-8)(sp)			/* restore ra.		     */
	ldq	s0, (32-16)(sp)			/* restore s0.		     */
	ldq	s1, (32-24)(sp)			/* restore s1.		     */
	lda	sp, 32(sp)			/* kill stack frame.	     */
	mov	zero, v0			/* return 0. */
	RET
	END(kcopy)
1137
/*
 * Fault-handler target for kcopy().  Entered (via pcb_onfault) with
 * kcopy's 32-byte frame still on the stack and s0/s1 holding the old
 * fault handler and curproc; restores the handler, unwinds kcopy's
 * frame, and returns EFAULT to kcopy's caller.  The frame layout here
 * must stay in lock-step with kcopy above.
 */
LEAF(kcopyerr, 0)
	LDGP(pv)
	.set noat
	ldq	at_reg, P_ADDR(s1)		/* restore the old handler.  */
	stq	s0, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (32-8)(sp)			/* restore ra.		     */
	ldq	s0, (32-16)(sp)			/* restore s0.		     */
	ldq	s1, (32-24)(sp)			/* restore s1.		     */
	lda	sp, 32(sp)			/* kill stack frame.	     */
	ldiq	v0, EFAULT			/* return EFAULT.	     */
	RET
END(kcopyerr)
1151
/*
 * copyin(const void *uaddr, void *kaddr, size_t len)
 *
 * Copy len bytes from user space to kernel space, returning 0 on
 * success or EFAULT (via copyerr) on a bad user address.
 */
NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
	cmpult	a0, t0, t1			/* is in user space.	     */
	beq	t1, copyerr			/* if it's not, error out.   */
	/*
	 * Swap a0, a1 for the call to memcpy(): copyin takes (src, dst)
	 * but memcpy takes (dst, src).
	 */
	mov	a1, v0
	mov	a0, a1
	mov	v0, a0
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	s0, 0(v0)			/* s0 = curproc		     */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, P_ADDR(s0)
	stq	v0, U_PCB_ONFAULT(at_reg)
	.set at
	CALL(memcpy)				/* do the copy.		     */
	.set noat
	ldq	at_reg, P_ADDR(s0)		/* kill the fault handler.   */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	mov	zero, v0			/* return 0. */
	RET
	END(copyin)
1183
/*
 * copyout(const void *kaddr, void *uaddr, size_t len)
 *
 * Copy len bytes from kernel space to user space, returning 0 on
 * success or EFAULT (via copyerr) on a bad user address.
 */
NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
	cmpult	a1, t0, t1			/* is in user space.	     */
	beq	t1, copyerr			/* if it's not, error out.   */
	/*
	 * Swap a0, a1 for the call to memcpy(): copyout takes (src, dst)
	 * but memcpy takes (dst, src).
	 */
	mov	a1, v0
	mov	a0, a1
	mov	v0, a0
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	s0, 0(v0)			/* s0 = curproc		     */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, P_ADDR(s0)
	stq	v0, U_PCB_ONFAULT(at_reg)
	.set at
	CALL(memcpy)				/* do the copy.		     */
	.set noat
	ldq	at_reg, P_ADDR(s0)		/* kill the fault handler.   */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	mov	zero, v0			/* return 0. */
	RET
	END(copyout)
1215
/*
 * Common error exit for copyin/copyout/copyinstr/copyoutstr: entered
 * either by branch (bad address check) or via pcb_onfault, always with
 * the caller's 16-byte frame on the stack.  Unwinds that frame and
 * returns EFAULT to the copy routine's caller.
 */
LEAF(copyerr, 0)
	LDGP(pv)
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	ldiq	v0, EFAULT			/* return EFAULT.	     */
	RET
END(copyerr)
1224
1225/**************************************************************************/
1226
1227/*
1228 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
1229 * user text space.
1230 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
1231 * user data space.
1232 */
/*
 * fuword/fuiword: fetch a 32-bit word from user space (a0), returning
 * the value zero-extended, or -1 (via fswberr) on a bad address.
 */
LEAF(fuword, 1)
XLEAF(fuiword, 1)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	v0, 0(a0)			/* quad load; NOTE(review):
						   assumes a0 is 8-byte
						   aligned — confirm */
	zap	v0, 0xf0, v0			/* keep only the low 4 bytes */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	RET
	END(fuword)
1255
/*
 * fusword/fuisword: fetch a 16-bit word from user space.
 * NOTE: the actual fetch is unimplemented ("XXX FETCH IT"); only the
 * address check and fault-handler bookkeeping exist, and the value
 * returned in v0 is whatever GET_CURPROC left there.
 */
LEAF(fusword, 1)
XLEAF(fuisword, 1)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX FETCH IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	RET
	END(fusword)
1277
/*
 * fubyte/fuibyte: fetch a byte from user space.
 * NOTE: the actual fetch is unimplemented ("XXX FETCH IT"); only the
 * address check and fault-handler bookkeeping exist, and the value
 * returned in v0 is whatever GET_CURPROC left there.
 */
LEAF(fubyte, 1)
XLEAF(fuibyte, 1)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX FETCH IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	RET
	END(fubyte)
1299
/*
 * suword: store a1 at user address a0, returning 0 on success or -1
 * (via fswberr) on a bad address.  Note the store is a full quadword
 * (stq), despite the "word" name.
 */
LEAF(suword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	stq	a1, 0(a0)			/* do the store. */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	mov	zero, v0
	RET
	END(suword)
1321
#ifdef notdef
/*
 * suiword: store to user instruction space, then sync the instruction
 * stream with an imb PALcall.  Compiled out (notdef); the store itself
 * is unimplemented ("XXX STORE IT").
 */
LEAF(suiword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	call_pal PAL_OSF1_imb			/* sync instruction stream */
	mov	zero, v0
	RET
	END(suiword)
1345
/*
 * susword: store a 16-bit word to user space.  Compiled out (notdef);
 * the store itself is unimplemented ("XXX STORE IT").
 */
LEAF(susword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	mov	zero, v0
	RET
	END(susword)
1367
/*
 * suisword: store a 16-bit word to user instruction space, then sync
 * the instruction stream.  Compiled out (notdef); the store itself is
 * unimplemented ("XXX STORE IT").
 */
LEAF(suisword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	call_pal PAL_OSF1_imb			/* sync instruction stream */
	mov	zero, v0
	RET
	END(suisword)
#endif /* notdef */
1391
/*
 * subyte: store the low byte of a1 at user address a0, returning 0 on
 * success or -1 (via fswberr) on a bad address.  Uses the unaligned
 * quad load/mask/insert/store idiom since Alpha has no byte store
 * (pre-BWX).
 */
LEAF(subyte, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	zap	a1, 0xfe, a1			/* kill arg's high bytes */
	insbl	a1, a0, a1			/* move it to the right byte */
	ldq_u	t0, 0(a0)			/* load quad around byte */
	mskbl	t0, a0, t0			/* kill the target byte */
	or	t0, a1, a1			/* put the result together */
	stq_u	a1, 0(a0)			/* and store it. */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	mov	zero, v0
	RET
	END(subyte)
1418
/*
 * suibyte: like subyte, but for user instruction space — identical
 * store sequence plus an imb PALcall to sync the instruction stream.
 */
LEAF(suibyte, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	zap	a1, 0xfe, a1			/* kill arg's high bytes */
	insbl	a1, a0, a1			/* move it to the right byte */
	ldq_u	t0, 0(a0)			/* load quad around byte */
	mskbl	t0, a0, t0			/* kill the target byte */
	or	t0, a1, a1			/* put the result together */
	stq_u	a1, 0(a0)			/* and store it. */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	call_pal PAL_OSF1_imb			/* sync instruction stream */
	mov	zero, v0
	RET
	END(suibyte)
1446
/*
 * Common error exit for the fu*/su* routines above: entered by branch
 * or via pcb_onfault (all of them are leaf routines, so no frame to
 * unwind); returns -1.
 */
LEAF(fswberr, 0)
	LDGP(pv)
	ldiq	v0, -1
	RET
	END(fswberr)
1452
1453/**************************************************************************/
1454
#ifdef notdef
/*
 * fuswintr and suswintr are just like fusword and susword except that if
 * the page is not in memory or would cause a trap, then we return an error.
 * The important thing is to prevent sleep() and switch().
 */

/*
 * Compiled out (notdef); the fetch itself is unimplemented ("XXX FETCH
 * IT").  Records the faulting address in pcb_accessaddr so the trap
 * handler can recognize an interrupt-context access and bail rather
 * than fault the page in.
 */
LEAF(fuswintr, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswintrberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswintrberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	stq	a0, U_PCB_ACCESSADDR(at_reg)	/* remember target address */
	.set at
	/* XXX FETCH IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	RET
	END(fuswintr)
1483
/*
 * Compiled out (notdef); the store itself is unimplemented ("XXX STORE
 * IT").  See fuswintr above for the pcb_accessaddr protocol.
 */
LEAF(suswintr, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswintrberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswintrberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	stq	a0, U_PCB_ACCESSADDR(at_reg)	/* remember target address */
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)		/* kill the fault handler */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	mov	zero, v0
	RET
	END(suswintr)
#endif
1507
/*
 * fswintrberr: error exit (and, since the real routines are notdef'd
 * out, the stub implementation) for fuswintr/suswintr — always
 * returns -1 so interrupt-level user-space word access simply fails.
 */
LEAF(fswintrberr, 0)
XLEAF(fuswintr, 2)				/* XXX what is a 'word'? */
XLEAF(suswintr, 2)				/* XXX what is a 'word'? */
	LDGP(pv)
	ldiq	v0, -1
	RET
	END(fswintrberr)			/* was END(fswberr): .ent/.end
						   name mismatch (fswberr is
						   already ended above) */
1515
1516/**************************************************************************/
1517
1518/*
1519 * Some bogus data, to keep vmstat happy, for now.
1520 */
1521
	.section .rodata
/*
 * Empty interrupt name/counter tables: just the begin/end marker
 * symbols that vmstat(8) expects to find in the kernel image.
 */
EXPORT(intrnames)
	.quad	0
EXPORT(eintrnames)
EXPORT(intrcnt)
	.quad	0
EXPORT(eintrcnt)
	.text
1530
1531/**************************************************************************/
1532
1533/*
1534 * console 'restart' routine to be placed in HWRPB.
1535 */
LEAF(XentRestart, 1)			/* XXX should be NESTED */
	/* Build a full trap frame on the stack, saving every integer
	 * register we can (pv/at are consumed below for GP setup). */
	.set noat
	lda	sp,-(FRAME_SIZE*8)(sp)
	stq	at_reg,(FRAME_AT*8)(sp)
	.set at
	stq	v0,(FRAME_V0*8)(sp)
	stq	a0,(FRAME_A0*8)(sp)
	stq	a1,(FRAME_A1*8)(sp)
	stq	a2,(FRAME_A2*8)(sp)
	stq	a3,(FRAME_A3*8)(sp)
	stq	a4,(FRAME_A4*8)(sp)
	stq	a5,(FRAME_A5*8)(sp)
	stq	s0,(FRAME_S0*8)(sp)
	stq	s1,(FRAME_S1*8)(sp)
	stq	s2,(FRAME_S2*8)(sp)
	stq	s3,(FRAME_S3*8)(sp)
	stq	s4,(FRAME_S4*8)(sp)
	stq	s5,(FRAME_S5*8)(sp)
	stq	s6,(FRAME_S6*8)(sp)
	stq	t0,(FRAME_T0*8)(sp)
	stq	t1,(FRAME_T1*8)(sp)
	stq	t2,(FRAME_T2*8)(sp)
	stq	t3,(FRAME_T3*8)(sp)
	stq	t4,(FRAME_T4*8)(sp)
	stq	t5,(FRAME_T5*8)(sp)
	stq	t6,(FRAME_T6*8)(sp)
	stq	t7,(FRAME_T7*8)(sp)
	stq	t8,(FRAME_T8*8)(sp)
	stq	t9,(FRAME_T9*8)(sp)
	stq	t10,(FRAME_T10*8)(sp)
	stq	t11,(FRAME_T11*8)(sp)
	stq	t12,(FRAME_T12*8)(sp)
	stq	ra,(FRAME_RA*8)(sp)

	/* We were entered by the console, not by a call: discover our
	 * own address to establish the GP. */
	br	pv,1f
1:	LDGP(pv)

	mov	sp,a0			/* console_restart(frame) */
	CALL(console_restart)

	call_pal PAL_halt		/* console_restart should not return */
	END(XentRestart)
1578
1579/**************************************************************************/
1580
1581/*
1582 * Kernel setjmp and longjmp.  Rather minimalist.
1583 *
1584 *	longjmp(label_t *a)
1585 * will generate a "return (1)" from the last call to
1586 *	setjmp(label_t *a)
1587 * by restoring registers from the stack,
1588 */
1589
1590	.set	noreorder
1591
/*
 * setjmp(label_t *a): save ra, the callee-saved registers, and sp into
 * *a, stamp it with a magic cookie (checked by longjmp to catch use of
 * an uninitialized or trashed label_t), and return 0.
 */
LEAF(setjmp, 1)
	LDGP(pv)

	stq	ra, (0 * 8)(a0)			/* return address */
	stq	s0, (1 * 8)(a0)			/* callee-saved registers */
	stq	s1, (2 * 8)(a0)
	stq	s2, (3 * 8)(a0)
	stq	s3, (4 * 8)(a0)
	stq	s4, (5 * 8)(a0)
	stq	s5, (6 * 8)(a0)
	stq	s6, (7 * 8)(a0)
	stq	sp, (8 * 8)(a0)

	ldiq	t0, 0xbeeffedadeadbabe		/* set magic number */
	stq	t0, (9 * 8)(a0)

	mov	zero, v0			/* return zero */
	RET
END(setjmp)
1611
/*
 * longjmp(label_t *a): verify the magic cookie stored by setjmp, then
 * restore ra/callee-saved registers/sp from *a and "return 1" from the
 * original setjmp call.  A bad cookie panics via longjmp_botch.
 */
LEAF(longjmp, 1)
	LDGP(pv)

	ldiq	t0, 0xbeeffedadeadbabe		/* check magic number */
	ldq	t1, (9 * 8)(a0)
	cmpeq	t0, t1, t0
	beq	t0, longjmp_botch		/* if bad, punt */

	ldq	ra, (0 * 8)(a0)			/* return address */
	ldq	s0, (1 * 8)(a0)			/* callee-saved registers */
	ldq	s1, (2 * 8)(a0)
	ldq	s2, (3 * 8)(a0)
	ldq	s3, (4 * 8)(a0)
	ldq	s4, (5 * 8)(a0)
	ldq	s5, (6 * 8)(a0)
	ldq	s6, (7 * 8)(a0)
	ldq	sp, (8 * 8)(a0)

	ldiq	v0, 1				/* "return 1" from setjmp */
	RET

longjmp_botch:
	lda	a0, longjmp_botchmsg
	mov	ra, a1				/* report our caller's pc */
	CALL(panic)
	call_pal PAL_bugchk			/* in case panic returns */

	.data
longjmp_botchmsg:
	.asciz	"longjmp botch from %p"
	.text
END(longjmp)
1644
1645/*
1646 * void sts(int rn, u_int32_t *rval);
1647 * void stt(int rn, u_int64_t *rval);
1648 * void lds(int rn, u_int32_t *rval);
1649 * void ldt(int rn, u_int64_t *rval);
1650 */
1651
/*
 * make_freg_util name, op: generate alpha_<name>(int rn, ptr), which
 * performs <op> on floating register $f<rn> and the memory at the
 * pointer arg.  Since FP register numbers cannot be computed at run
 * time, a 32-entry jump table is generated: each entry is exactly two
 * instructions (8 bytes), so s8addq scales the register number into a
 * direct jump offset.
 */
.macro	make_freg_util name, op
	LEAF(alpha_\name, 2)
	and	a0, 0x1f, a0		/* clamp rn to 0..31 */
	s8addq	a0, pv, pv		/* pv += rn * 8 (one entry each) */
	addq	pv, 1f - alpha_\name, pv
	jmp	(pv)
1:
	rn = 0
	.rept	32			/* one <op>/RET pair per register */
	\op	$f0 + rn, 0(a1)
	RET
	rn = rn + 1
	.endr
	END(alpha_\name)
.endm
1667/*
1668LEAF(alpha_sts, 2)
1669LEAF(alpha_stt, 2)
1670LEAF(alpha_lds, 2)
1671LEAF(alpha_ldt, 2)
1672 */
1673	make_freg_util sts, sts
1674	make_freg_util stt, stt
1675	make_freg_util lds, lds
1676	make_freg_util ldt, ldt
1677
/*
 * alpha_read_fpcr(): return the FP control register in v0.  mf_fpcr
 * only targets an FP register, so the value is bounced through $f30
 * and the stack; $f30 is saved/restored around the operation.
 */
LEAF(alpha_read_fpcr, 0); f30save = 0; rettmp = 8; framesz = 16
	lda	sp, -framesz(sp)
	stt	$f30, f30save(sp)		/* preserve $f30 */
	mf_fpcr	$f30				/* $f30 = FPCR */
	stt	$f30, rettmp(sp)		/* bounce via memory to v0 */
	ldt	$f30, f30save(sp)
	ldq	v0, rettmp(sp)
	lda	sp, framesz(sp)
	RET
END(alpha_read_fpcr)
1688
/*
 * alpha_write_fpcr(a0): load a0 into the FP control register.  The
 * inverse bounce of alpha_read_fpcr: the value goes through the stack
 * into $f30 (saved/restored) and then mt_fpcr.
 */
LEAF(alpha_write_fpcr, 1); f30save = 0; fpcrtmp = 8; framesz = 16
	lda	sp, -framesz(sp)
	stq	a0, fpcrtmp(sp)			/* bounce via memory to $f30 */
	stt	$f30, f30save(sp)		/* preserve $f30 */
	ldt	$f30, fpcrtmp(sp)
	mt_fpcr	$f30				/* FPCR = a0 */
	ldt	$f30, f30save(sp)
	lda	sp, framesz(sp)
	RET
END(alpha_write_fpcr)
1699