xref: /netbsd/sys/arch/alpha/alpha/locore.s (revision c4a72b64)
1/* $NetBSD: locore.s,v 1.102 2002/09/18 02:35:08 thorpej Exp $ */
2
3/*-
4 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the NetBSD
22 *	Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 *    contributors may be used to endorse or promote products derived
25 *    from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*
41 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Author: Chris G. Demetriou
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59 *  School of Computer Science
60 *  Carnegie Mellon University
61 *  Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67.stabs	__FILE__,100,0,0,kernel_text
68
69#include "opt_ddb.h"
70#include "opt_kgdb.h"
71#include "opt_multiprocessor.h"
72#include "opt_lockdebug.h"
73
74#include <machine/asm.h>
75
76__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.102 2002/09/18 02:35:08 thorpej Exp $");
77
78#include "assym.h"
79
80.stabs	__FILE__,132,0,0,kernel_text
81
82#if defined(MULTIPROCESSOR)
83
84/*
85 * Get various per-cpu values.  A pointer to our cpu_info structure
86 * is stored in SysValue.  These macros clobber v0, t0, t8..t11.
87 *
88 * All return values are in v0.
89 */
90#define	GET_CPUINFO		call_pal PAL_OSF1_rdval
91
/*
 * Note: GET_CURPROC / GET_FPCURPROC / GET_CURPCB yield the *address*
 * of the field within cpu_info (v0 + offset), not the field's value;
 * GET_IDLE_PCB(reg) loads the value (the idle PCB's physical address)
 * directly into `reg'.
 */
92#define	GET_CURPROC							\
93	call_pal PAL_OSF1_rdval					;	\
94	addq	v0, CPU_INFO_CURPROC, v0
95
96#define	GET_FPCURPROC							\
97	call_pal PAL_OSF1_rdval					;	\
98	addq	v0, CPU_INFO_FPCURPROC, v0
99
100#define	GET_CURPCB							\
101	call_pal PAL_OSF1_rdval					;	\
102	addq	v0, CPU_INFO_CURPCB, v0
103
104#define	GET_IDLE_PCB(reg)						\
105	call_pal PAL_OSF1_rdval					;	\
106	ldq	reg, CPU_INFO_IDLE_PCB_PADDR(v0)
107
108#else	/* if not MULTIPROCESSOR... */
109
/*
 * Uniprocessor: there is a single, statically-allocated cpu_info
 * (cpu_info_primary), so the GET_* macros reduce to simple address
 * computations with no PALcode call.
 */
110IMPORT(cpu_info_primary, CPU_INFO_SIZEOF)
111
112#define	GET_CPUINFO		lda v0, cpu_info_primary
113
114#define	GET_CURPROC		lda v0, cpu_info_primary + CPU_INFO_CURPROC
115
116#define	GET_FPCURPROC		lda v0, cpu_info_primary + CPU_INFO_FPCURPROC
117
118#define	GET_CURPCB		lda v0, cpu_info_primary + CPU_INFO_CURPCB
119
120#define	GET_IDLE_PCB(reg)						\
121	lda	reg, cpu_info_primary				;	\
122	ldq	reg, CPU_INFO_IDLE_PCB_PADDR(reg)
123#endif
124
125/*
126 * Perform actions necessary to switch to a new context.  The
127 * hwpcb should be in a0.  Clobbers v0, t0, t8..t11, a0.
128 */
/*
 * The new PCB's physical address (a0) is recorded in the per-CPU
 * curpcb slot *before* the swpctx, so curpcb stays accurate once we
 * are running on the new context.
 */
129#define	SWITCH_CONTEXT							\
130	/* Make a note of the context we're running on. */		\
131	GET_CURPCB						;	\
132	stq	a0, 0(v0)					;	\
133									\
134	/* Swap in the new context. */					\
135	call_pal PAL_OSF1_swpctx
136
137
138	/* don't reorder instructions; paranoia. */
139	.set noreorder
140	.text

	/*
	 * bfalse/btrue: readability aliases for branch-if-zero and
	 * branch-if-nonzero on a register.
	 */
141
142	.macro	bfalse	reg, dst
143	beq	\reg, \dst
144	.endm
145
146	.macro	btrue	reg, dst
147	bne	\reg, \dst
148	.endm
149
150/*
151 * This is for kvm_mkdb, and should be the address of the beginning
152 * of the kernel text segment (not necessarily the same as kernbase).
153 */
154	EXPORT(kernel_text)
155.loc	1 __LINE__
156kernel_text:
157
158/*
159 * bootstack: a temporary stack, for booting.
160 *
161 * Extends from 'start' down.
162 */
163bootstack:
/*
 * Note: bootstack shares its address with the start of the kernel
 * text; the boot stack grows toward lower addresses from here (see
 * the `lda sp,bootstack' in locorestart below).
 */
164
165/*
166 * locorestart: Kernel start. This is no longer the actual entry
167 * point, although jumping to here (the first kernel address) will
168 * in fact work just fine.
169 *
170 * Arguments:
171 *	a0 is the first free page frame number (PFN)
172 *	a1 is the page table base register (PTBR)
173 *	a2 is the bootinfo magic number
174 *	a3 is the pointer to the bootinfo structure
175 *
176 * All arguments are passed to alpha_init().
177 */
178NESTED_NOPROFILE(locorestart,1,0,ra,0,0)
	/* br/LDGP: get the current PC into pv, then derive gp from it. */
179	br	pv,1f
1801:	LDGP(pv)
181
182	/* Switch to the boot stack. */
183	lda	sp,bootstack
184
185	/* Load KGP with current GP. */
186	mov	a0, s0			/* save pfn */
187	mov	gp, a0
188	call_pal PAL_OSF1_wrkgp		/* clobbers a0, t0, t8-t11 */
189	mov	s0, a0			/* restore pfn */
190
191	/*
192	 * Call alpha_init() to do pre-main initialization.
193	 * alpha_init() gets the arguments we were called with,
194	 * which are already in a0, a1, a2, a3, and a4.
195	 */
196	CALL(alpha_init)
197
198	/* Set up the virtual page table pointer. */
199	ldiq	a0, VPTBASE
200	call_pal PAL_OSF1_wrvptptr	/* clobbers a0, t0, t8-t11 */
201
202	/*
203	 * Switch to proc0's PCB.
204	 */
205	lda	a0, proc0
206	ldq	a0, P_MD_PCBPADDR(a0)		/* phys addr of PCB */
207	SWITCH_CONTEXT
208
209	/*
210	 * We've switched to a new page table base, so invalidate the TLB
211	 * and I-stream.  This happens automatically everywhere but here.
212	 */
213	ldiq	a0, -2				/* TBIA */
214	call_pal PAL_OSF1_tbi
215	call_pal PAL_imb
216
217	/*
218	 * All ready to go!  Call main()!
219	 */
220	CALL(main)
221
222	/* This should never happen. */
223	PANIC("main() returned",Lmain_returned_pmsg)
224	END(locorestart)
225
226/**************************************************************************/
227
228/*
229 * Pull in the PROM interface routines; these are needed for
230 * prom printf (while bootstrapping), and for determining the
231 * boot device, etc.
232 */
233#include <alpha/alpha/prom_disp.s>
234
235/**************************************************************************/
236
237/*
238 * Pull in the PALcode function stubs.
239 */
240#include <alpha/alpha/pal.s>
241
242/**************************************************************************/
243
244/**************************************************************************/
245
246#if defined(MULTIPROCESSOR)
247/*
248 * Pull in the multiprocessor glue.
249 */
250#include <alpha/alpha/multiproc.s>
251#endif /* MULTIPROCESSOR */
252
253/**************************************************************************/
254
255/**************************************************************************/
256
257#if defined(DDB) || defined(KGDB)
258/*
259 * Pull in debugger glue.
260 */
261#include <alpha/alpha/debug.s>
262#endif /* DDB || KGDB */
263
264/**************************************************************************/
265
266/**************************************************************************/
267
268	.text
269.stabs	__FILE__,132,0,0,backtolocore1	/* done with includes */
270.loc	1 __LINE__
271backtolocore1:
272/**************************************************************************/
273
274/*
275 * Signal "trampoline" code.
276 *
277 * The kernel arranges for the handler to be invoked directly.  This
278 * trampoline is used only to return from the signal.
279 *
280 * The stack pointer points to the saved sigcontext.
281 */
282
283NESTED_NOPROFILE(sigcode,0,0,ra,0,0)
	/* On entry sp points at the saved sigcontext (see comment above). */
284	mov	sp, a0			/* get pointer to sigcontext */
285	CALLSYS_NOERROR(__sigreturn14)	/* and call sigreturn() with it. */
286	mov	v0, a0			/* if that failed, get error code */
287	CALLSYS_NOERROR(exit)		/* and call exit() with it. */
	/* esigcode: label marking the end of the trampoline code. */
288XNESTED(esigcode,0)
289	END(sigcode)
290
291/**************************************************************************/
292
293/*
294 * exception_return: return from trap, exception, or syscall
295 */
296
297IMPORT(ssir, 8)
298
299LEAF(exception_return, 1)			/* XXX should be NESTED */
	/*
	 * Register usage below: s1 = PS saved in the trapframe,
	 * s2 = previous IPL preserved across the softintr_dispatch()
	 * and ast() calls.
	 */
300	br	pv, 1f
3011:	LDGP(pv)
302
303	ldq	s1, (FRAME_PS * 8)(sp)		/* get the saved PS */
304	and	s1, ALPHA_PSL_IPL_MASK, t0	/* look at the saved IPL */
305	bne	t0, 4f				/* != 0: can't do AST or SIR */
306
307	/* see if we can do an SIR */
3082:	ldq	t1, ssir			/* SIR pending? */
309	bne	t1, 5f				/* yes */
310	/* no */
311
312	/* check for AST */
3133:	and	s1, ALPHA_PSL_USERMODE, t0	/* are we returning to user? */
314	beq	t0, 4f				/* no: just return */
315	/* yes */
316
317	/* GET_CPUINFO clobbers v0, t0, t8...t11. */
318	GET_CPUINFO
319	ldq	t1, CPU_INFO_CURPROC(v0)
320	ldl	t2, P_MD_ASTPENDING(t1)		/* AST pending? */
321	bne	t2, 6f				/* yes */
322	/* no: return & deal with FP */
323
324	/*
325	 * We are going back to usermode.  Enable the FPU based on whether
326	 * the current proc is fpcurproc.
327	 */
328	ldq	t2, CPU_INFO_FPCURPROC(v0)
329	cmpeq	t1, t2, t1
330	mov	zero, a0
331	cmovne	t1, 1, a0
332	call_pal PAL_OSF1_wrfen
333
334	/* restore the registers, and return */
3354:	bsr	ra, exception_restore_regs	/* jmp/CALL trashes pv/t12 */
336	ldq	ra,(FRAME_RA*8)(sp)
337	.set noat
338	ldq	at_reg,(FRAME_AT*8)(sp)
339
340	lda	sp,(FRAME_SW_SIZE*8)(sp)
341	call_pal PAL_OSF1_rti
342	.set at
343	/* NOTREACHED */
344
345	/* We've got a SIR */
3465:	ldiq	a0, ALPHA_PSL_IPL_SOFT
347	call_pal PAL_OSF1_swpipl
348	mov	v0, s2				/* remember old IPL */
349	CALL(softintr_dispatch)
350
351	/* SIR handled; restore IPL and check again */
352	mov	s2, a0
353	call_pal PAL_OSF1_swpipl
354	br	2b
355
356	/* We've got an AST */
3576:	stl	zero, P_MD_ASTPENDING(t1)	/* no AST pending */
358
359	ldiq	a0, ALPHA_PSL_IPL_0		/* drop IPL to zero */
360	call_pal PAL_OSF1_swpipl
361	mov	v0, s2				/* remember old IPL */
362
363	mov	sp, a0				/* only arg is frame */
364	CALL(ast)
365
366	/* AST handled; restore IPL and check again */
367	mov	s2, a0
368	call_pal PAL_OSF1_swpipl
369	br	3b
370
371	END(exception_return)
372
373LEAF(exception_save_regs, 0)
	/*
	 * Spill v0, a3-a5, s0-s6 and t0-t12 into the trapframe at sp.
	 * Callers (e.g. XentUna) have already adjusted sp and saved
	 * ra/at themselves; we are entered via bsr and return via RET.
	 */
374	stq	v0,(FRAME_V0*8)(sp)
375	stq	a3,(FRAME_A3*8)(sp)
376	stq	a4,(FRAME_A4*8)(sp)
377	stq	a5,(FRAME_A5*8)(sp)
378	stq	s0,(FRAME_S0*8)(sp)
379	stq	s1,(FRAME_S1*8)(sp)
380	stq	s2,(FRAME_S2*8)(sp)
381	stq	s3,(FRAME_S3*8)(sp)
382	stq	s4,(FRAME_S4*8)(sp)
383	stq	s5,(FRAME_S5*8)(sp)
384	stq	s6,(FRAME_S6*8)(sp)
385	stq	t0,(FRAME_T0*8)(sp)
386	stq	t1,(FRAME_T1*8)(sp)
387	stq	t2,(FRAME_T2*8)(sp)
388	stq	t3,(FRAME_T3*8)(sp)
389	stq	t4,(FRAME_T4*8)(sp)
390	stq	t5,(FRAME_T5*8)(sp)
391	stq	t6,(FRAME_T6*8)(sp)
392	stq	t7,(FRAME_T7*8)(sp)
393	stq	t8,(FRAME_T8*8)(sp)
394	stq	t9,(FRAME_T9*8)(sp)
395	stq	t10,(FRAME_T10*8)(sp)
396	stq	t11,(FRAME_T11*8)(sp)
397	stq	t12,(FRAME_T12*8)(sp)
398	RET
399	END(exception_save_regs)
400
401LEAF(exception_restore_regs, 0)
	/*
	 * Reload v0, a3-a5, s0-s6 and t0-t12 from the trapframe at sp;
	 * exact counterpart of exception_save_regs.  ra/at/sp are the
	 * caller's responsibility (see exception_return).
	 */
402	ldq	v0,(FRAME_V0*8)(sp)
403	ldq	a3,(FRAME_A3*8)(sp)
404	ldq	a4,(FRAME_A4*8)(sp)
405	ldq	a5,(FRAME_A5*8)(sp)
406	ldq	s0,(FRAME_S0*8)(sp)
407	ldq	s1,(FRAME_S1*8)(sp)
408	ldq	s2,(FRAME_S2*8)(sp)
409	ldq	s3,(FRAME_S3*8)(sp)
410	ldq	s4,(FRAME_S4*8)(sp)
411	ldq	s5,(FRAME_S5*8)(sp)
412	ldq	s6,(FRAME_S6*8)(sp)
413	ldq	t0,(FRAME_T0*8)(sp)
414	ldq	t1,(FRAME_T1*8)(sp)
415	ldq	t2,(FRAME_T2*8)(sp)
416	ldq	t3,(FRAME_T3*8)(sp)
417	ldq	t4,(FRAME_T4*8)(sp)
418	ldq	t5,(FRAME_T5*8)(sp)
419	ldq	t6,(FRAME_T6*8)(sp)
420	ldq	t7,(FRAME_T7*8)(sp)
421	ldq	t8,(FRAME_T8*8)(sp)
422	ldq	t9,(FRAME_T9*8)(sp)
423	ldq	t10,(FRAME_T10*8)(sp)
424	ldq	t11,(FRAME_T11*8)(sp)
425	ldq	t12,(FRAME_T12*8)(sp)
426	RET
427	END(exception_restore_regs)
428
429/**************************************************************************/
430
431/*
432 * XentArith:
433 * System arithmetic trap entry point.
434 */
435
436	PALVECT(XentArith)		/* setup frame, save registers */
437
438	/* a0, a1, & a2 already set up */
	/* a3 = entry type code for trap(); a4 = trapframe pointer. */
439	ldiq	a3, ALPHA_KENTRY_ARITH
440	mov	sp, a4			; .loc 1 __LINE__
441	CALL(trap)
442
	/* trap() returns here; leave via the common exception path. */
443	jmp	zero, exception_return
444	END(XentArith)
445
446/**************************************************************************/
447
448/*
449 * XentIF:
450 * System instruction fault trap entry point.
451 */
452
453	PALVECT(XentIF)			/* setup frame, save registers */
454
455	/* a0, a1, & a2 already set up */
	/* a3 = entry type code for trap(); a4 = trapframe pointer. */
456	ldiq	a3, ALPHA_KENTRY_IF
457	mov	sp, a4			; .loc 1 __LINE__
458	CALL(trap)
	/* trap() returns here; leave via the common exception path. */
459	jmp	zero, exception_return
460	END(XentIF)
461
462/**************************************************************************/
463
464/*
465 * XentInt:
466 * System interrupt entry point.
467 */
468
469	PALVECT(XentInt)		/* setup frame, save registers */
470
471	/* a0, a1, & a2 already set up */
	/* a3 = trapframe pointer for interrupt(). */
472	mov	sp, a3			; .loc 1 __LINE__
473	CALL(interrupt)
	/* interrupt() returns here; leave via the common exception path. */
474	jmp	zero, exception_return
475	END(XentInt)
476
477/**************************************************************************/
478
479/*
480 * XentMM:
481 * System memory management fault entry point.
482 */
483
484	PALVECT(XentMM)			/* setup frame, save registers */
485
486	/* a0, a1, & a2 already set up */
	/* a3 = entry type code for trap(); a4 = trapframe pointer. */
487	ldiq	a3, ALPHA_KENTRY_MM
488	mov	sp, a4			; .loc 1 __LINE__
489	CALL(trap)
490
	/* trap() returns here; leave via the common exception path. */
491	jmp	zero, exception_return
492	END(XentMM)
493
494/**************************************************************************/
495
496/*
497 * XentSys:
498 * System call entry point.
499 */
500
501	ESETUP(XentSys)			; .loc 1 __LINE__
502
503	stq	v0,(FRAME_V0*8)(sp)		/* in case we need to restart */
504	stq	s0,(FRAME_S0*8)(sp)
505	stq	s1,(FRAME_S1*8)(sp)
506	stq	s2,(FRAME_S2*8)(sp)
507	stq	s3,(FRAME_S3*8)(sp)
508	stq	s4,(FRAME_S4*8)(sp)
509	stq	s5,(FRAME_S5*8)(sp)
510	stq	s6,(FRAME_S6*8)(sp)
511	stq	a0,(FRAME_A0*8)(sp)
512	stq	a1,(FRAME_A1*8)(sp)
513	stq	a2,(FRAME_A2*8)(sp)
514	stq	a3,(FRAME_A3*8)(sp)
515	stq	a4,(FRAME_A4*8)(sp)
516	stq	a5,(FRAME_A5*8)(sp)
517	stq	ra,(FRAME_RA*8)(sp)
518
519	/* syscall number, passed in v0, is first arg, frame pointer second */
	/*
	 * The indirect call below is p->p_md.md_syscall with:
	 *   a0 = curproc, a1 = syscall number (arrived in v0), a2 = frame.
	 */
520	mov	v0,a1
521	GET_CURPROC
522	ldq	a0,0(v0)
523	mov	sp,a2			; .loc 1 __LINE__
524	ldq	t12,P_MD_SYSCALL(a0)
525	CALL((t12))
526
	/* The syscall handler returns here; leave via the common path. */
527	jmp	zero, exception_return
528	END(XentSys)
529
530/**************************************************************************/
531
532/*
533 * XentUna:
534 * System unaligned access entry point.
535 */
536
537LEAF(XentUna, 3)				/* XXX should be NESTED */
	/*
	 * Unlike the PALVECT-based entries above, this builds the switch
	 * frame by hand: push the frame, save at/ra, then spill the rest
	 * through exception_save_regs.
	 */
538	.set noat
539	lda	sp,-(FRAME_SW_SIZE*8)(sp)
540	stq	at_reg,(FRAME_AT*8)(sp)
541	.set at
542	stq	ra,(FRAME_RA*8)(sp)
543	bsr	ra, exception_save_regs		/* jmp/CALL trashes pv/t12 */
544
545	/* a0, a1, & a2 already set up */
	/* a3 = entry type code for trap(); a4 = trapframe pointer. */
546	ldiq	a3, ALPHA_KENTRY_UNA
547	mov	sp, a4			; .loc 1 __LINE__
548	CALL(trap)
549
	/* trap() returns here; leave via the common exception path. */
550	jmp	zero, exception_return
551	END(XentUna)
552
553/**************************************************************************/
554
555/*
556 * savefpstate: Save a process's floating point state.
557 *
558 * Arguments:
559 *	a0	'struct fpstate *' to save into
560 */
561
562LEAF(savefpstate, 1)
563	LDGP(pv)
564	/* save all of the FP registers */
565	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
566	stt	$f0,   (0 * 8)(t1)	/* save first register, using hw name */
567	stt	$f1,   (1 * 8)(t1)	/* etc. */
568	stt	$f2,   (2 * 8)(t1)
569	stt	$f3,   (3 * 8)(t1)
570	stt	$f4,   (4 * 8)(t1)
571	stt	$f5,   (5 * 8)(t1)
572	stt	$f6,   (6 * 8)(t1)
573	stt	$f7,   (7 * 8)(t1)
574	stt	$f8,   (8 * 8)(t1)
575	stt	$f9,   (9 * 8)(t1)
576	stt	$f10, (10 * 8)(t1)
577	stt	$f11, (11 * 8)(t1)
578	stt	$f12, (12 * 8)(t1)
579	stt	$f13, (13 * 8)(t1)
580	stt	$f14, (14 * 8)(t1)
581	stt	$f15, (15 * 8)(t1)
582	stt	$f16, (16 * 8)(t1)
583	stt	$f17, (17 * 8)(t1)
584	stt	$f18, (18 * 8)(t1)
585	stt	$f19, (19 * 8)(t1)
586	stt	$f20, (20 * 8)(t1)
587	stt	$f21, (21 * 8)(t1)
588	stt	$f22, (22 * 8)(t1)
589	stt	$f23, (23 * 8)(t1)
590	stt	$f24, (24 * 8)(t1)
591	stt	$f25, (25 * 8)(t1)
592	stt	$f26, (26 * 8)(t1)
593	stt	$f27, (27 * 8)(t1)
594	.set noat
595	stt	$f28, (28 * 8)(t1)
596	.set at
597	stt	$f29, (29 * 8)(t1)
598	stt	$f30, (30 * 8)(t1)
	/* ($f31 is the architectural always-zero register; not saved.) */
599
600	/*
601	 * Then save the FPCR; note that the necessary 'trapb's are taken
602	 * care of on kernel entry and exit.
603	 */
604	mf_fpcr	ft0
605	stt	ft0, FPREG_FPR_CR(a0)	/* store to FPCR save area */
606
607	RET
608	END(savefpstate)
609
610/**************************************************************************/
611
612/*
613 * restorefpstate: Restore a process's floating point state.
614 *
615 * Arguments:
616 *	a0	'struct fpstate *' to restore from
617 */
618
619LEAF(restorefpstate, 1)
620	LDGP(pv)
621	/*
622	 * Restore the FPCR; note that the necessary 'trapb's are taken care of
623	 * on kernel entry and exit.
624	 */
625	ldt	ft0, FPREG_FPR_CR(a0)	/* load from FPCR save area */
626	mt_fpcr	ft0
627
628	/* Restore all of the FP registers. */
629	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
630	ldt	$f0,   (0 * 8)(t1)	/* restore first reg., using hw name */
631	ldt	$f1,   (1 * 8)(t1)	/* etc. */
632	ldt	$f2,   (2 * 8)(t1)
633	ldt	$f3,   (3 * 8)(t1)
634	ldt	$f4,   (4 * 8)(t1)
635	ldt	$f5,   (5 * 8)(t1)
636	ldt	$f6,   (6 * 8)(t1)
637	ldt	$f7,   (7 * 8)(t1)
638	ldt	$f8,   (8 * 8)(t1)
639	ldt	$f9,   (9 * 8)(t1)
640	ldt	$f10, (10 * 8)(t1)
641	ldt	$f11, (11 * 8)(t1)
642	ldt	$f12, (12 * 8)(t1)
643	ldt	$f13, (13 * 8)(t1)
644	ldt	$f14, (14 * 8)(t1)
645	ldt	$f15, (15 * 8)(t1)
646	ldt	$f16, (16 * 8)(t1)
647	ldt	$f17, (17 * 8)(t1)
648	ldt	$f18, (18 * 8)(t1)
649	ldt	$f19, (19 * 8)(t1)
650	ldt	$f20, (20 * 8)(t1)
651	ldt	$f21, (21 * 8)(t1)
652	ldt	$f22, (22 * 8)(t1)
653	ldt	$f23, (23 * 8)(t1)
654	ldt	$f24, (24 * 8)(t1)
655	ldt	$f25, (25 * 8)(t1)
656	ldt	$f26, (26 * 8)(t1)
657	ldt	$f27, (27 * 8)(t1)
658	ldt	$f28, (28 * 8)(t1)
659	ldt	$f29, (29 * 8)(t1)
660	ldt	$f30, (30 * 8)(t1)
	/* ($f31, the always-zero register, is not part of the saved state.) */
661
662	RET
663	END(restorefpstate)
664
665/**************************************************************************/
666
667/*
668 * savectx: save process context, i.e. callee-saved registers
669 *
670 * Note that savectx() only works for processes other than curproc,
671 * since cpu_switch will copy over the info saved here.  (It _can_
672 * sanely be used for curproc iff cpu_switch won't be called again, e.g.
673 * if called from boot().)
674 *
675 * Arguments:
676 *	a0	'struct user *' of the process that needs its context saved
677 *
678 * Return:
679 *	v0	0.  (note that for child processes, it seems
680 *		like savectx() returns 1, because the return address
681 *		in the PCB is set to the return address from savectx().)
682 */
683
684LEAF(savectx, 1)
	/*
	 * a0 = struct user *.  The pcb_context layout written here
	 * (s0-s6, ra, ps) matches the inline save in cpu_switch().
	 */
685	br	pv, 1f
6861:	LDGP(pv)
687	stq	sp, U_PCB_HWPCB_KSP(a0)		/* store sp */
688	stq	s0, U_PCB_CONTEXT+(0 * 8)(a0)	/* store s0 - s6 */
689	stq	s1, U_PCB_CONTEXT+(1 * 8)(a0)
690	stq	s2, U_PCB_CONTEXT+(2 * 8)(a0)
691	stq	s3, U_PCB_CONTEXT+(3 * 8)(a0)
692	stq	s4, U_PCB_CONTEXT+(4 * 8)(a0)
693	stq	s5, U_PCB_CONTEXT+(5 * 8)(a0)
694	stq	s6, U_PCB_CONTEXT+(6 * 8)(a0)
695	stq	ra, U_PCB_CONTEXT+(7 * 8)(a0)	/* store ra */
696	call_pal PAL_OSF1_rdps			/* NOTE: doesn't kill a0 */
697	stq	v0, U_PCB_CONTEXT+(8 * 8)(a0)	/* store ps, for ipl */
698
699	mov	zero, v0
700	RET
701	END(savectx)
702
703/**************************************************************************/
704
705IMPORT(sched_whichqs, 4)
706
707/*
708 * When no processes are on the runq, cpu_switch branches to idle
709 * to wait for something to come ready.
710 * Note: this is really a part of cpu_switch() but defined here for kernel
711 * profiling.
712 */
713LEAF(idle, 0)
	/*
	 * Entered from cpu_switch() with s0 = old proc (or 0; see the
	 * "switching to ourself" comment in cpu_switch).  Spins with
	 * interrupts enabled, optionally zeroing free pages, until
	 * sched_whichqs goes non-empty, then re-enters the queue scan.
	 */
714	br	pv, 1f
7151:	LDGP(pv)
716	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
717	GET_CURPROC
718	stq	zero, 0(v0)			/* curproc <- NULL for stats */
719#if defined(MULTIPROCESSOR)
720	/*
721	 * Switch to the idle PCB unless we're already running on it
722	 * (if s0 == NULL, we're already on it...)
723	 */
724	beq	s0, 1f				/* skip if s0 == NULL */
725	mov	s0, a0
726	CALL(pmap_deactivate)			/* pmap_deactivate(oldproc) */
727	GET_IDLE_PCB(a0)
728	SWITCH_CONTEXT
729	mov	zero, s0			/* no outgoing proc */
7301:
731#endif
732#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
733	CALL(sched_unlock_idle)			/* release sched_lock */
734#endif
735	mov	zero, a0			/* enable all interrupts */
736	call_pal PAL_OSF1_swpipl
737	ldl	t0, sched_whichqs		/* look for non-empty queue */
738	bne	t0, 4f
7392:	lda	t0, uvm
740	ldl	t0, UVM_PAGE_IDLE_ZERO(t0)	/* should we zero some pages? */
741	beq	t0, 3f				/* nope. */
742	CALL(uvm_pageidlezero)
7433:	ldl	t0, sched_whichqs		/* look for non-empty queue */
744	beq	t0, 2b
7454:	ldiq	a0, ALPHA_PSL_IPL_HIGH		/* disable all interrupts */
746	call_pal PAL_OSF1_swpipl
747#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
748	CALL(sched_lock_idle)			/* acquire sched_lock */
749#endif
750	jmp	zero, cpu_switch_queuescan	/* jump back into the fire */
751	END(idle)
752
753/*
754 * cpu_switch()
755 * Find the highest priority process and resume it.
756 */
757LEAF(cpu_switch, 0)
	/*
	 * Register roles throughout this function:
	 *   s0 = old proc (0 if none), s1 = old proc's U-area,
	 *   s2 = new proc, s3 = new proc's PCB physical address.
	 */
758	LDGP(pv)
759	/*
760	 * do an inline savectx(), to save old context
761	 * Note: GET_CURPROC clobbers v0, t0, t8...t11.
762	 */
763	GET_CURPROC
764	ldq	a0, 0(v0)
765	ldq	a1, P_ADDR(a0)
766	/* NOTE: ksp is stored by the swpctx */
767	stq	s0, U_PCB_CONTEXT+(0 * 8)(a1)	/* store s0 - s6 */
768	stq	s1, U_PCB_CONTEXT+(1 * 8)(a1)
769	stq	s2, U_PCB_CONTEXT+(2 * 8)(a1)
770	stq	s3, U_PCB_CONTEXT+(3 * 8)(a1)
771	stq	s4, U_PCB_CONTEXT+(4 * 8)(a1)
772	stq	s5, U_PCB_CONTEXT+(5 * 8)(a1)
773	stq	s6, U_PCB_CONTEXT+(6 * 8)(a1)
774	stq	ra, U_PCB_CONTEXT+(7 * 8)(a1)	/* store ra */
775	call_pal PAL_OSF1_rdps			/* NOTE: doesn't kill a0 */
776	stq	v0, U_PCB_CONTEXT+(8 * 8)(a1)	/* store ps, for ipl */
777
778	mov	a0, s0				/* save old curproc */
779	mov	a1, s1				/* save old U-area */
780
781cpu_switch_queuescan:
	/* Also entered from idle() and switch_exit(); sched_lock held. */
782	br	pv, 1f
7831:	LDGP(pv)
784	ldl	t0, sched_whichqs		/* look for non-empty queue */
785	beq	t0, idle			/* and if none, go idle */
786	mov	t0, t3				/* t3 = saved whichqs */
787	mov	zero, t2			/* t2 = lowest bit set */
788	blbs	t0, 3f				/* if low bit set, done! */

7902:	srl	t0, 1, t0			/* try next bit */
791	addq	t2, 1, t2
792	blbc	t0, 2b				/* if clear, try again */
793
7943:	/*
795	 * Remove process from queue
796	 */
797	lda	t1, sched_qs			/* get queues */
798	sll	t2, 4, t0			/* queue head is 16 bytes */
799	addq	t1, t0, t0			/* t0 = qp = &qs[firstbit] */
800
801	ldq	t4, PH_LINK(t0)			/* t4 = p = highest pri proc */
802	bne	t4, 4f				/* make sure p != NULL */
803	PANIC("cpu_switch",Lcpu_switch_pmsg)	/* nothing in queue! */
804
8054:
806	ldq	t5, P_FORW(t4)			/* t5 = p->p_forw */
807	stq	t5, PH_LINK(t0)			/* qp->ph_link = p->p_forw */
808	stq	t0, P_BACK(t5)			/* p->p_forw->p_back = qp */
809	stq	zero, P_BACK(t4)		/* firewall: p->p_back = NULL */
810	cmpeq	t0, t5, t0			/* see if queue is empty */
811	beq	t0, 5f				/* nope, it's not! */
812
813	ldiq	t0, 1				/* compute bit in whichqs */
814	sll	t0, t2, t0
815	xor	t3, t0, t3			/* clear bit in whichqs */
816	stl	t3, sched_whichqs
817
8185:
819	mov	t4, s2				/* save new proc */
820	ldq	s3, P_MD_PCBPADDR(s2)		/* save new pcbpaddr */
821
822	/*
823	 * Check to see if we're switching to ourself.  If we are,
824	 * don't bother loading the new context.
825	 *
826	 * Note that even if we re-enter cpu_switch() from idle(),
827	 * s0 will still contain the old curproc value because any
828	 * users of that register between then and now must have
829	 * saved it.  Also note that switch_exit() ensures that
830	 * s0 is clear before jumping here to find a new process.
831	 */
832	cmpeq	s0, s2, t0			/* oldproc == newproc? */
833	bne	t0, 7f				/* Yes!  Skip! */
834
835	/*
836	 * Deactivate the old address space before activating the
837	 * new one.  We need to do this before activating the
838	 * new process's address space in the event that new
839	 * process is using the same vmspace as the old.  If we
840	 * do this after we activate, then we might end up
841	 * incorrectly marking the pmap inactive!
842	 *
843	 * Note that we don't deactivate if we don't have to...
844	 * We know this if oldproc (s0) == NULL.  This is the
845	 * case if we've come from switch_exit() (pmap no longer
846	 * exists; vmspace has been freed), or if we switched to
847	 * the Idle PCB in the MULTIPROCESSOR case.
848	 */
849	beq	s0, 6f
850
851	mov	s0, a0				/* pmap_deactivate(oldproc) */
852	CALL(pmap_deactivate)
853
8546:	/*
855	 * Activate the new process's address space and perform
856	 * the actual context swap.
857	 */
858
859	mov	s2, a0				/* pmap_activate(p) */
860	CALL(pmap_activate)
861
862	mov	s3, a0				/* swap the context */
863	SWITCH_CONTEXT
864
8657:
866#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
867	/*
868	 * Done mucking with the run queues, and we have fully switched
869	 * to the new process.  Release the scheduler lock, but keep
870	 * interrupts out.
871	 */
872	CALL(sched_unlock_idle)
873#endif
874
875	/*
876	 * Now that the switch is done, update curproc and other
877	 * globals.  We must do this even if switching to ourselves
878	 * because we might have re-entered cpu_switch() from idle(),
879	 * in which case curproc would be NULL.
880	 *
881	 * Note: GET_CPUINFO clobbers v0, t0, t8...t11.
882	 */
883#ifdef __alpha_bwx__
884	ldiq	t0, SONPROC			/* p->p_stat = SONPROC */
885	stb	t0, P_STAT(s2)
886#else
	/* No byte stores: read-modify-write the enclosing quadword. */
887	addq	s2, P_STAT, t3			/* p->p_stat = SONPROC */
888	ldq_u	t1, 0(t3)
889	ldiq	t0, SONPROC
890	insbl	t0, t3, t0
891	mskbl	t1, t3, t1
892	or	t0, t1, t0
893	stq_u	t0, 0(t3)
894#endif /* __alpha_bwx__ */
895
896	GET_CPUINFO
897	/* p->p_cpu initialized in fork1() for single-processor */
898#if defined(MULTIPROCESSOR)
899	stq	v0, P_CPU(s2)			/* p->p_cpu = curcpu() */
900#endif
901	stq	s2, CPU_INFO_CURPROC(v0)	/* curproc = p */
902	stq	zero, CPU_INFO_WANT_RESCHED(v0)	/* we've rescheduled */
903
904	/*
905	 * Now running on the new u struct.
906	 */
907	ldq	s0, P_ADDR(s2)
908	ldq	a0, U_PCB_CONTEXT+(8 * 8)(s0)	/* restore ipl */
909	and	a0, ALPHA_PSL_IPL_MASK, a0
910	call_pal PAL_OSF1_swpipl
911
912	/*
913	 * Check for restartable atomic sequences (RAS).
914	 */
915	ldl	t0, P_NRAS(s2)			/* p->p_nras == 0? */
916	beq	t0, 1f				/* yes, skip */
917	ldq	s1, P_MD_TF(s2)			/* s1 = p->p_md.md_tf */
918	mov	s2, a0				/* first ras_lookup() arg */
919	ldq	a1, (FRAME_PC*8)(s1)		/* second ras_lookup() arg */
920	CALL(ras_lookup)			/* ras_lookup(p, PC) */
921	addq	v0, 1, t0			/* -1 means "not in ras" */
922	beq	t0, 1f
923	stq	v0, (FRAME_PC*8)(s1)
924
9251:
926	/*
927	 * Restore registers and return.
928	 * NOTE: ksp is restored by the swpctx.
929	 */
930	ldq	s1, U_PCB_CONTEXT+(1 * 8)(s0)		/* restore s1-s6 */
931	ldq	s2, U_PCB_CONTEXT+(2 * 8)(s0)
932	ldq	s3, U_PCB_CONTEXT+(3 * 8)(s0)
933	ldq	s4, U_PCB_CONTEXT+(4 * 8)(s0)
934	ldq	s5, U_PCB_CONTEXT+(5 * 8)(s0)
935	ldq	s6, U_PCB_CONTEXT+(6 * 8)(s0)
936	ldq	ra, U_PCB_CONTEXT+(7 * 8)(s0)		/* restore ra */
937	ldq	s0, U_PCB_CONTEXT+(0 * 8)(s0)		/* restore s0 */
938
939	RET
940	END(cpu_switch)
941
942/*
943 * proc_trampoline()
944 *
945 * Arrange for a function to be invoked neatly, after a cpu_fork().
946 *
947 * Invokes the function specified by the s0 register with the return
948 * address specified by the s1 register and with one argument specified
949 * by the s2 register.
950 */
951LEAF_NOPROFILE(proc_trampoline, 0)
952#if defined(MULTIPROCESSOR)
953	CALL(proc_trampoline_mp)
954#endif
	/* s0 = function to call, s1 = its return address, s2 = its argument. */
955	mov	s0, pv
956	mov	s1, ra
957	mov	s2, a0
	/* Tail-jump: the function returns through ra (s1), not to us. */
958	jmp	zero, (pv)
959	END(proc_trampoline)
960
961/*
962 * switch_exit(struct proc *p)
963 * Make the named process exit.  Partially switch to our idle thread
964 * (we don't update curproc or restore registers), and jump into the middle
965 * of cpu_switch to switch into a new process.  The process reaper will
966 * free the dead process's VM resources.  MUST BE CALLED AT SPLHIGH.
967 */
968LEAF(switch_exit, 1)
	/*
	 * a0 = the exiting proc; stashed in callee-saved s2 so it
	 * survives the context switch onto the idle PCB below.
	 */
969	LDGP(pv)
970
971	/* save the exiting proc pointer */
972	mov	a0, s2
973
974	/* Switch to our idle stack. */
975	GET_IDLE_PCB(a0)			/* clobbers v0, t0, t8-t11 */
976	SWITCH_CONTEXT
977
978	/*
979	 * Now running as idle thread, except for the value of 'curproc' and
980	 * the saved regs.
981	 */
982
983	/* Schedule the vmspace and stack to be freed. */
984	mov	s2, a0
985	CALL(exit2)
986
987#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
988	CALL(sched_lock_idle)			/* acquire sched_lock */
989#endif
990
991	/*
992	 * Now jump back into the middle of cpu_switch().  Note that
993	 * we must clear s0 to guarantee that the check for switching
994	 * to ourselves in cpu_switch() will fail.  This is safe since
995	 * s0 will be restored when a new process is resumed.
996	 */
997	mov	zero, s0
998	jmp	zero, cpu_switch_queuescan
999	END(switch_exit)
1000
1001/**************************************************************************/
1002
1003/*
1004 * Copy a null-terminated string within the kernel's address space.
1005 * If lenp is not NULL, store the number of chars copied in *lenp
1006 *
1007 * int copystr(char *from, char *to, size_t len, size_t *lenp);
1008 */
1009LEAF(copystr, 4)
	/*
	 * a0 = from, a1 = to, a2 = len, a3 = lenp (may be NULL).
	 * Returns 0 on success, ENAMETOOLONG if no NUL was seen within
	 * len bytes.  Bytes are stored via a quadword read-modify-write
	 * (ldq_u/insbl/mskbl/stq_u) rather than byte stores.
	 */
1010	LDGP(pv)
1011
1012	mov	a2, t0			/* t0 = i = len */
1013	bne	a2, 1f			/* if (len != 0), proceed */
1014	ldiq	t1, 1			/* else bail */
1015	br	zero, 2f
1016
10171:	ldq_u	t1, 0(a0)		/* t1 = *from */
1018	extbl	t1, a0, t1
1019	ldq_u	t3, 0(a1)		/* set up t2 with quad around *to */
	/* (ldq_u loads the quad into t3; insbl builds the new byte in t2) */
1020	insbl	t1, a1, t2
1021	mskbl	t3, a1, t3
1022	or	t3, t2, t3		/* add *from to quad around *to */
1023	stq_u	t3, 0(a1)		/* write out that quad */
1024
1025	subl	a2, 1, a2		/* len-- */
1026	beq	t1, 2f			/* if (*from == 0), bail out */
1027	addq	a1, 1, a1		/* to++ */
1028	addq	a0, 1, a0		/* from++ */
1029	bne	a2, 1b			/* if (len != 0) copy more */
1030
10312:	beq	a3, 3f			/* if (lenp != NULL) */
1032	subl	t0, a2, t0		/* *lenp = (i - len) */
1033	stq	t0, 0(a3)
10343:	beq	t1, 4f			/* *from == '\0'; leave quietly */
1035
1036	ldiq	v0, ENAMETOOLONG	/* *from != '\0'; error. */
1037	RET
1038
10394:	mov	zero, v0		/* return 0. */
1040	RET
1041	END(copystr)
1042
1043NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0)
	/*
	 * Like copystr(), but the source (a0) must lie below
	 * VM_MAX_ADDRESS (i.e. in user space).  Faults during the copy
	 * are caught by pointing curproc's pcb_onfault at copyerr for
	 * the duration; s0 holds the curproc slot address meanwhile.
	 */
1044	LDGP(pv)
1045	lda	sp, -16(sp)			/* set up stack frame	     */
1046	stq	ra, (16-8)(sp)			/* save ra		     */
1047	stq	s0, (16-16)(sp)			/* save s0		     */
1048	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
1049	cmpult	a0, t0, t1			/* is in user space.	     */
1050	beq	t1, copyerr			/* if it's not, error out.   */
1051	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
1052	GET_CURPROC
1053	mov	v0, s0
1054	lda	v0, copyerr			/* set up fault handler.     */
1055	.set noat
1056	ldq	at_reg, 0(s0)
1057	ldq	at_reg, P_ADDR(at_reg)
1058	stq	v0, U_PCB_ONFAULT(at_reg)
1059	.set at
1060	CALL(copystr)				/* do the copy.		     */
1061	.set noat
1062	ldq	at_reg, 0(s0)			/* kill the fault handler.   */
1063	ldq	at_reg, P_ADDR(at_reg)
1064	stq	zero, U_PCB_ONFAULT(at_reg)
1065	.set at
1066	ldq	ra, (16-8)(sp)			/* restore ra.		     */
1067	ldq	s0, (16-16)(sp)			/* restore s0.		     */
1068	lda	sp, 16(sp)			/* kill stack frame.	     */
1069	RET					/* v0 left over from copystr */
1070	END(copyinstr)
1071
/*
 * int copyoutstr(const char *kaddr, char *uaddr, size_t len, size_t *done)
 *
 * Copy a NUL-terminated string from kernel space to user space.
 * Mirror image of copyinstr(): the *destination* address (a1) is
 * range-checked, copyerr is installed as the pcb on-fault handler
 * around the copystr() call, and copystr()'s return value is passed
 * through.  A fault unwinds via copyerr, which shares this 16-byte
 * frame layout.
 */
NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
	cmpult	a1, t0, t1			/* is in user space.	     */
	beq	t1, copyerr			/* if it's not, error out.   */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	mov	v0, s0				/* keep result across call   */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, 0(s0)
	ldq	at_reg, P_ADDR(at_reg)
	stq	v0, U_PCB_ONFAULT(at_reg)
	.set at
	CALL(copystr)				/* do the copy.		     */
	.set noat
	ldq	at_reg, 0(s0)			/* kill the fault handler.   */
	ldq	at_reg, P_ADDR(at_reg)
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	RET					/* v0 left over from copystr */
	END(copyoutstr)
1100
1101/*
1102 * kcopy(const void *src, void *dst, size_t len);
1103 *
1104 * Copy len bytes from src to dst, aborting if we encounter a fatal
1105 * page fault.
1106 *
1107 * kcopy() _must_ save and restore the old fault handler since it is
1108 * called by uiomove(), which may be in the path of servicing a non-fatal
1109 * page fault.
1110 */
/*
 * Register roles across the memcpy() call:
 *	s0 = previous pcb onfault handler (restored on both paths)
 *	s1 = curproc
 * kcopyerr depends on s0/s1 and on this 32-byte frame layout.
 */
NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0)
	LDGP(pv)
	lda	sp, -32(sp)			/* set up stack frame	     */
	stq	ra, (32-8)(sp)			/* save ra		     */
	stq	s0, (32-16)(sp)			/* save s0		     */
	stq	s1, (32-24)(sp)			/* save s1		     */
	/* Swap a0, a1, for call to memcpy(). */
	mov	a1, v0
	mov	a0, a1
	mov	v0, a0
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	s1, 0(v0)			/* s1 = curproc		     */
	lda	v0, kcopyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, P_ADDR(s1)
	ldq	s0, U_PCB_ONFAULT(at_reg)	/* save old handler.	     */
	stq	v0, U_PCB_ONFAULT(at_reg)
	.set at
	CALL(memcpy)				/* do the copy.		     */
	.set noat
	ldq	at_reg, P_ADDR(s1)		/* restore the old handler.  */
	stq	s0, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (32-8)(sp)			/* restore ra.		     */
	ldq	s0, (32-16)(sp)			/* restore s0.		     */
	ldq	s1, (32-24)(sp)			/* restore s1.		     */
	lda	sp, 32(sp)			/* kill stack frame.	     */
	mov	zero, v0			/* return 0. */
	RET
	END(kcopy)
1142
/*
 * Fault handler for kcopy().  Entered (via the pcb onfault hook)
 * while kcopy()'s 32-byte frame is still live, with s0 = the old
 * onfault handler and s1 = curproc, exactly as kcopy() left them.
 * Restores the old handler, unwinds kcopy()'s frame and returns
 * EFAULT to kcopy()'s caller.
 */
LEAF(kcopyerr, 0)
	LDGP(pv)
	.set noat
	ldq	at_reg, P_ADDR(s1)		/* restore the old handler.  */
	stq	s0, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (32-8)(sp)			/* restore ra.		     */
	ldq	s0, (32-16)(sp)			/* restore s0.		     */
	ldq	s1, (32-24)(sp)			/* restore s1.		     */
	lda	sp, 32(sp)			/* kill stack frame.	     */
	ldiq	v0, EFAULT			/* return EFAULT.	     */
	RET
END(kcopyerr)
1156
/*
 * int copyin(const void *uaddr, void *kaddr, size_t len)
 *
 * Copy len bytes from user space to kernel space.  Range-checks the
 * user source address, installs copyerr as the pcb on-fault handler,
 * and lets memcpy() do the work (arguments swapped: memcpy takes
 * dst first).  Returns 0, or EFAULT via copyerr on a fault.
 */
NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
	cmpult	a0, t0, t1			/* is in user space.	     */
	beq	t1, copyerr			/* if it's not, error out.   */
	/* Swap a0, a1, for call to memcpy(). */
	mov	a1, v0
	mov	a0, a1
	mov	v0, a0
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	s0, 0(v0)			/* s0 = curproc		     */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, P_ADDR(s0)
	stq	v0, U_PCB_ONFAULT(at_reg)
	.set at
	CALL(memcpy)				/* do the copy.		     */
	.set noat
	ldq	at_reg, P_ADDR(s0)		/* kill the fault handler.   */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	mov	zero, v0			/* return 0. */
	RET
	END(copyin)
1188
/*
 * int copyout(const void *kaddr, void *uaddr, size_t len)
 *
 * Copy len bytes from kernel space to user space.  Mirror image of
 * copyin(): the *destination* address (a1) is range-checked, copyerr
 * is installed as the pcb on-fault handler, and memcpy() does the
 * work (arguments swapped: memcpy takes dst first).  Returns 0, or
 * EFAULT via copyerr on a fault.
 */
NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0)
	LDGP(pv)
	lda	sp, -16(sp)			/* set up stack frame	     */
	stq	ra, (16-8)(sp)			/* save ra		     */
	stq	s0, (16-16)(sp)			/* save s0		     */
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
	cmpult	a1, t0, t1			/* is in user space.	     */
	beq	t1, copyerr			/* if it's not, error out.   */
	/* Swap a0, a1, for call to memcpy(). */
	mov	a1, v0
	mov	a0, a1
	mov	v0, a0
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	s0, 0(v0)			/* s0 = curproc		     */
	lda	v0, copyerr			/* set up fault handler.     */
	.set noat
	ldq	at_reg, P_ADDR(s0)
	stq	v0, U_PCB_ONFAULT(at_reg)
	.set at
	CALL(memcpy)				/* do the copy.		     */
	.set noat
	ldq	at_reg, P_ADDR(s0)		/* kill the fault handler.   */
	stq	zero, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	mov	zero, v0			/* return 0. */
	RET
	END(copyout)
1220
/*
 * Common fault/error exit for copyin(), copyout(), copyinstr() and
 * copyoutstr().  Entered (by branch or via the pcb onfault hook)
 * while the caller's 16-byte frame is still live; unwinds it and
 * returns EFAULT to that function's caller.
 */
LEAF(copyerr, 0)
	LDGP(pv)
	ldq	ra, (16-8)(sp)			/* restore ra.		     */
	ldq	s0, (16-16)(sp)			/* restore s0.		     */
	lda	sp, 16(sp)			/* kill stack frame.	     */
	ldiq	v0, EFAULT			/* return EFAULT.	     */
	RET
END(copyerr)
1229
1230/**************************************************************************/
1231
1232/*
1233 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
1234 * user text space.
1235 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
1236 * user data space.
1237 */
/*
 * fuword/fuiword: fetch a 32-bit word from user space, with fswberr
 * installed as the fault handler so a bad access returns -1.  The
 * quadword containing the word is loaded and `zap ... 0xf0' clears
 * bytes 4-7, leaving the low 32 bits zero-extended in v0.
 * NOTE(review): ldq requires a quad-aligned address -- confirm that
 * callers only pass suitably aligned uaddrs.
 */
LEAF(fuword, 1)
XLEAF(fuiword, 1)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	ldq	v0, 0(a0)
	zap	v0, 0xf0, v0			/* keep only the low word */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	RET
	END(fuword)
1260
/*
 * fusword/fuisword: fetch a short from user space.
 * NOTE(review): the actual fetch was never implemented (see the XXX
 * below); v0 is returned holding whatever GET_CURPROC left there,
 * not the user short.
 */
LEAF(fusword, 1)
XLEAF(fuisword, 1)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX FETCH IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	RET
	END(fusword)
1282
/*
 * fubyte/fuibyte: fetch a byte from user space.
 * NOTE(review): the actual fetch was never implemented (see the XXX
 * below); v0 is returned holding whatever GET_CURPROC left there,
 * not the user byte.
 */
LEAF(fubyte, 1)
XLEAF(fuibyte, 1)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX FETCH IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	RET
	END(fubyte)
1304
/*
 * suword: store a1 (a full quadword, via stq) at user address a0,
 * with fswberr installed as the fault handler.  Returns 0 on
 * success, -1 via fswberr on a fault.
 * NOTE(review): stq requires a quad-aligned address -- confirm
 * callers only pass suitably aligned uaddrs.
 */
LEAF(suword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	stq	a1, 0(a0)			/* do the store. */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	mov	zero, v0
	RET
	END(suword)
1326
1327#ifdef notdef
/*
 * suiword: store a word to user instruction space.  Dead code (this
 * whole region is under #ifdef notdef) and unfinished: the store
 * itself is still XXX.  The trailing imb would sync the instruction
 * stream after a text modification.
 */
LEAF(suiword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	call_pal PAL_OSF1_imb			/* sync instruction stream */
	mov	zero, v0
	RET
	END(suiword)
1350
/*
 * susword: store a short to user data space.  Dead code (under
 * #ifdef notdef) and unfinished: the store itself is still XXX.
 */
LEAF(susword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	mov	zero, v0
	RET
	END(susword)
1372
/*
 * suisword: store a short to user instruction space.  Dead code
 * (under #ifdef notdef) and unfinished: the store itself is still
 * XXX.  The trailing imb would sync the instruction stream.
 */
LEAF(suisword, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	call_pal PAL_OSF1_imb			/* sync instruction stream */
	mov	zero, v0
	RET
	END(suisword)
1395#endif /* notdef */
1396
/*
 * subyte: store the low byte of a1 at user address a0, with fswberr
 * installed as the fault handler.  The byte store is performed by a
 * read-modify-write of the surrounding quadword (ldq_u/mskbl/stq_u),
 * since the byte is merged in with insbl.  Returns 0 on success,
 * -1 via fswberr on a fault.
 */
LEAF(subyte, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	zap	a1, 0xfe, a1			/* kill arg's high bytes */
	insbl	a1, a0, a1			/* move it to the right byte */
	ldq_u	t0, 0(a0)			/* load quad around byte */
	mskbl	t0, a0, t0			/* kill the target byte */
	or	t0, a1, a1			/* put the result together */
	stq_u	a1, 0(a0)			/* and store it. */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	mov	zero, v0
	RET
	END(subyte)
1423
/*
 * suibyte: like subyte, but for user *instruction* space -- after the
 * read-modify-write byte store it issues an imb PALcall to sync the
 * instruction stream with the modified memory.  Returns 0 on
 * success, -1 via fswberr on a fault.
 */
LEAF(suibyte, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	.set at
	zap	a1, 0xfe, a1			/* kill arg's high bytes */
	insbl	a1, a0, a1			/* move it to the right byte */
	ldq_u	t0, 0(a0)			/* load quad around byte */
	mskbl	t0, a0, t0			/* kill the target byte */
	or	t0, a1, a1			/* put the result together */
	stq_u	a1, 0(a0)			/* and store it. */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	call_pal PAL_OSF1_imb			/* sync instruction stream */
	mov	zero, v0
	RET
	END(suibyte)
1451
/*
 * Common error exit for the fetch/store (fu*/su*) functions above:
 * entered by branch or via the pcb onfault hook; returns -1.
 */
LEAF(fswberr, 0)
	LDGP(pv)
	ldiq	v0, -1
	RET
	END(fswberr)
1457
1458/**************************************************************************/
1459
1460#ifdef notdef
1461/*
1462 * fuswintr and suswintr are just like fusword and susword except that if
1463 * the page is not in memory or would cause a trap, then we return an error.
1464 * The important thing is to prevent sleep() and switch().
1465 */
1466
/*
 * fuswintr: interrupt-safe fusword variant (see the comment above):
 * also records the user address in pcb accessaddr so the fault path
 * can recognize it.  Dead code (under #ifdef notdef) and unfinished:
 * the fetch itself is still XXX.
 */
LEAF(fuswintr, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswintrberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswintrberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	stq	a0, U_PCB_ACCESSADDR(at_reg)	/* remember the user addr */
	.set at
	/* XXX FETCH IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	RET
	END(fuswintr)
1488
/*
 * suswintr: interrupt-safe susword variant; records the user address
 * in pcb accessaddr for the fault path.  Dead code (under #ifdef
 * notdef) and unfinished: the store itself is still XXX.
 */
LEAF(suswintr, 2)
	LDGP(pv)
	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
	cmpult	a0, t0, t1			/* is in user space. */
	beq	t1, fswintrberr			/* if it's not, error out. */
	/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
	GET_CURPROC
	ldq	t1, 0(v0)			/* t1 = curproc */
	lda	t0, fswintrberr			/* set up fault handler */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	t0, U_PCB_ONFAULT(at_reg)
	stq	a0, U_PCB_ACCESSADDR(at_reg)	/* remember the user addr */
	.set at
	/* XXX STORE IT */
	.set noat
	ldq	at_reg, P_ADDR(t1)
	stq	zero, U_PCB_ONFAULT(at_reg)	/* kill the fault handler */
	.set at
	mov	zero, v0
	RET
	END(suswintr)
1511#endif
1512
/*
 * fswintrberr, doubling as the stub fuswintr()/suswintr(): always
 * return -1 so callers treat the access as having faulted (the real
 * implementations above are compiled out under #ifdef notdef).
 *
 * Fix: END() must name the same symbol as the enclosing LEAF() --
 * the .ent/.end pair was mismatched (END(fswberr), copied from the
 * function above, instead of END(fswintrberr)).
 */
LEAF(fswintrberr, 0)
XLEAF(fuswintr, 2)				/* XXX what is a 'word'? */
XLEAF(suswintr, 2)				/* XXX what is a 'word'? */
	LDGP(pv)
	ldiq	v0, -1
	RET
	END(fswintrberr)
1520
1521/**************************************************************************/
1522
1523/*
1524 * Some bogus data, to keep vmstat happy, for now.
1525 */
1526
	.section .rodata
EXPORT(intrnames)
	.quad	0		/* empty name table... */
EXPORT(eintrnames)
EXPORT(intrcnt)
	.quad	0		/* ...and empty counter table */
EXPORT(eintrcnt)
	.text
1535
1536/**************************************************************************/
1537
1538/*
1539 * console 'restart' routine to be placed in HWRPB.
1540 */
/*
 * Entered by the console on a restart: save every integer register
 * into a trapframe on the stack, establish our GP, hand the frame
 * to console_restart(), and halt if it ever returns.
 */
LEAF(XentRestart, 1)			/* XXX should be NESTED */
	.set noat
	lda	sp,-(FRAME_SIZE*8)(sp)	/* allocate a full trapframe */
	stq	at_reg,(FRAME_AT*8)(sp)
	.set at
	stq	v0,(FRAME_V0*8)(sp)
	stq	a0,(FRAME_A0*8)(sp)
	stq	a1,(FRAME_A1*8)(sp)
	stq	a2,(FRAME_A2*8)(sp)
	stq	a3,(FRAME_A3*8)(sp)
	stq	a4,(FRAME_A4*8)(sp)
	stq	a5,(FRAME_A5*8)(sp)
	stq	s0,(FRAME_S0*8)(sp)
	stq	s1,(FRAME_S1*8)(sp)
	stq	s2,(FRAME_S2*8)(sp)
	stq	s3,(FRAME_S3*8)(sp)
	stq	s4,(FRAME_S4*8)(sp)
	stq	s5,(FRAME_S5*8)(sp)
	stq	s6,(FRAME_S6*8)(sp)
	stq	t0,(FRAME_T0*8)(sp)
	stq	t1,(FRAME_T1*8)(sp)
	stq	t2,(FRAME_T2*8)(sp)
	stq	t3,(FRAME_T3*8)(sp)
	stq	t4,(FRAME_T4*8)(sp)
	stq	t5,(FRAME_T5*8)(sp)
	stq	t6,(FRAME_T6*8)(sp)
	stq	t7,(FRAME_T7*8)(sp)
	stq	t8,(FRAME_T8*8)(sp)
	stq	t9,(FRAME_T9*8)(sp)
	stq	t10,(FRAME_T10*8)(sp)
	stq	t11,(FRAME_T11*8)(sp)
	stq	t12,(FRAME_T12*8)(sp)
	stq	ra,(FRAME_RA*8)(sp)

	br	pv,1f			/* pv = 1f, so LDGP can find GP */
1:	LDGP(pv)

	mov	sp,a0			/* arg = the saved frame */
	CALL(console_restart)

	call_pal PAL_halt		/* not expected to return */
	END(XentRestart)
1583
1584/**************************************************************************/
1585
1586/*
1587 * Kernel setjmp and longjmp.  Rather minimalist.
1588 *
1589 *	longjmp(label_t *a)
1590 * will generate a "return (1)" from the last call to
1591 *	setjmp(label_t *a)
1592 * by restoring registers from the stack,
1593 */
1594
1595	.set	noreorder
1596
/*
 * int setjmp(label_t *a): save ra, the callee-saved registers and sp
 * into *a, stamp the buffer with a magic cookie (checked by
 * longjmp() to catch stale/corrupt buffers), and return 0.
 */
LEAF(setjmp, 1)
	LDGP(pv)

	stq	ra, (0 * 8)(a0)			/* return address */
	stq	s0, (1 * 8)(a0)			/* callee-saved registers */
	stq	s1, (2 * 8)(a0)
	stq	s2, (3 * 8)(a0)
	stq	s3, (4 * 8)(a0)
	stq	s4, (5 * 8)(a0)
	stq	s5, (6 * 8)(a0)
	stq	s6, (7 * 8)(a0)
	stq	sp, (8 * 8)(a0)

	ldiq	t0, 0xbeeffedadeadbabe		/* set magic number */
	stq	t0, (9 * 8)(a0)

	mov	zero, v0			/* return zero */
	RET
END(setjmp)
1616
/*
 * void longjmp(label_t *a): verify the magic cookie written by
 * setjmp() (panic on mismatch), restore ra, the callee-saved
 * registers and sp from *a, and return 1 from the original
 * setjmp() call site.
 */
LEAF(longjmp, 1)
	LDGP(pv)

	ldiq	t0, 0xbeeffedadeadbabe		/* check magic number */
	ldq	t1, (9 * 8)(a0)
	cmpeq	t0, t1, t0
	beq	t0, longjmp_botch		/* if bad, punt */

	ldq	ra, (0 * 8)(a0)			/* return address */
	ldq	s0, (1 * 8)(a0)			/* callee-saved registers */
	ldq	s1, (2 * 8)(a0)
	ldq	s2, (3 * 8)(a0)
	ldq	s3, (4 * 8)(a0)
	ldq	s4, (5 * 8)(a0)
	ldq	s5, (6 * 8)(a0)
	ldq	s6, (7 * 8)(a0)
	ldq	sp, (8 * 8)(a0)

	ldiq	v0, 1				/* "return 1" at setjmp site */
	RET

longjmp_botch:
	lda	a0, longjmp_botchmsg
	mov	ra, a1				/* report our caller */
	CALL(panic)
	call_pal PAL_bugchk			/* in case panic returns */

	.data
longjmp_botchmsg:
	.asciz	"longjmp botch from %p"
	.text
END(longjmp)
1649
1650/*
1651 * void sts(int rn, u_int32_t *rval);
1652 * void stt(int rn, u_int64_t *rval);
1653 * void lds(int rn, u_int32_t *rval);
1654 * void ldt(int rn, u_int64_t *rval);
1655 */
1656
/*
 * Generate alpha_{sts,stt,lds,ldt}(): store/load FP register a0 (mod
 * 32) to/from *a1.  The FP register number must be encoded in the
 * instruction itself, so dispatch by computed jump: s8addq scales the
 * register number by 8, the size of each two-instruction (op + RET)
 * table entry emitted by the .rept below.
 */
.macro	make_freg_util name, op
	LEAF(alpha_\name, 2)
	and	a0, 0x1f, a0		/* clamp to register numbers 0..31 */
	s8addq	a0, pv, pv		/* pv += a0 * 8 (one entry each) */
	addq	pv, 1f - alpha_\name, pv	/* ... relative to the table */
	jmp	(pv)
1:
	rn = 0
	.rept	32
	\op	$f0 + rn, 0(a1)
	RET
	rn = rn + 1
	.endr
	END(alpha_\name)
.endm
/*
LEAF(alpha_sts, 2)
LEAF(alpha_stt, 2)
LEAF(alpha_lds, 2)
LEAF(alpha_ldt, 2)
 */
	make_freg_util sts, sts
	make_freg_util stt, stt
	make_freg_util lds, lds
	make_freg_util ldt, ldt
1682
/*
 * u_int64_t alpha_read_fpcr(void): return the FPCR in v0.  mf_fpcr
 * only targets an FP register, so bounce the value through $f30 and
 * a stack slot, preserving the caller's $f30.
 */
LEAF(alpha_read_fpcr, 0); f30save = 0; rettmp = 8; framesz = 16
	lda	sp, -framesz(sp)
	stt	$f30, f30save(sp)	/* preserve $f30 */
	mf_fpcr	$f30			/* $f30 = FPCR */
	stt	$f30, rettmp(sp)
	ldt	$f30, f30save(sp)	/* restore $f30 */
	ldq	v0, rettmp(sp)		/* move result to integer reg */
	lda	sp, framesz(sp)
	RET
END(alpha_read_fpcr)
1693
/*
 * void alpha_write_fpcr(u_int64_t val): set the FPCR from a0.
 * mt_fpcr only reads an FP register, so bounce a0 through a stack
 * slot into $f30, preserving the caller's $f30.
 */
LEAF(alpha_write_fpcr, 1); f30save = 0; fpcrtmp = 8; framesz = 16
	lda	sp, -framesz(sp)
	stq	a0, fpcrtmp(sp)		/* stage new value on the stack */
	stt	$f30, f30save(sp)	/* preserve $f30 */
	ldt	$f30, fpcrtmp(sp)	/* $f30 = new FPCR value */
	mt_fpcr	$f30			/* FPCR = $f30 */
	ldt	$f30, f30save(sp)	/* restore $f30 */
	lda	sp, framesz(sp)
	RET
END(alpha_write_fpcr)
1704