xref: /netbsd/sys/arch/alpha/alpha/locore.s (revision 6550d01e)
1/* $NetBSD: locore.s,v 1.120 2010/07/07 01:17:49 chs Exp $ */
2
3/*-
4 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
35 * All rights reserved.
36 *
37 * Author: Chris G. Demetriou
38 *
39 * Permission to use, copy, modify and distribute this software and
40 * its documentation is hereby granted, provided that both the copyright
41 * notice and this permission notice appear in all copies of the
42 * software, derivative works or modified versions, and any portions
43 * thereof, and that both notices appear in supporting documentation.
44 *
45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
46 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
48 *
49 * Carnegie Mellon requests users of this software to return to
50 *
51 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
52 *  School of Computer Science
53 *  Carnegie Mellon University
54 *  Pittsburgh PA 15213-3890
55 *
56 * any improvements or extensions that they make and grant Carnegie the
57 * rights to redistribute these changes.
58 */
59
60.stabs	__FILE__,100,0,0,kernel_text
61
62#include "opt_ddb.h"
63#include "opt_kgdb.h"
64#include "opt_multiprocessor.h"
65#include "opt_lockdebug.h"
66#include "opt_compat_netbsd.h"
67
68#include <machine/asm.h>
69
70__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.120 2010/07/07 01:17:49 chs Exp $");
71
72#include "assym.h"
73
74.stabs	__FILE__,132,0,0,kernel_text
75
76/*
77 * Perform actions necessary to switch to a new context.  The
78 * hwpcb should be in a0.  Clobbers v0, t0, t8..t11, a0.
79 */
80#define	SWITCH_CONTEXT							\
81	/* Make a note of the context we're running on. */		\
82	GET_CURPCB						;	\
83	stq	a0, 0(v0)					;	\
84									\
85	/* Swap in the new context. */					\
86	call_pal PAL_OSF1_swpctx
87
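/*
 * In C-like terms, SWITCH_CONTEXT is roughly the following sketch
 * (curpcb_pa stands for the per-CPU "current PCB physical address"
 * slot that GET_CURPCB returns a pointer to; the swpctx PAL call is
 * what actually swaps in the new KSP, PTBR and ASN):
 *
 *	*curpcb_pa = new_hwpcb_pa;	// note what we are now running on
 *	alpha_pal_swpctx(new_hwpcb_pa);	// PALcode context swap
 */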
88
89	/* don't reorder instructions; paranoia. */
90	.set noreorder
91	.text
92
93	.macro	bfalse	reg, dst
94	beq	\reg, \dst
95	.endm
96
97	.macro	btrue	reg, dst
98	bne	\reg, \dst
99	.endm
100
101/*
102 * This is for kvm_mkdb, and should be the address of the beginning
103 * of the kernel text segment (not necessarily the same as kernbase).
104 */
105	EXPORT(kernel_text)
106.loc	1 __LINE__
107kernel_text:
108
109/*
110 * bootstack: a temporary stack, for booting.
111 *
112 * Extends from 'start' down.
113 */
114bootstack:
115
116/*
117 * locorestart: Kernel start. This is no longer the actual entry
118 * point, although jumping to here (the first kernel address) will
119 * in fact work just fine.
120 *
121 * Arguments:
122 *	a0 is the first free page frame number (PFN)
123 *	a1 is the page table base register (PTBR)
124 *	a2 is the bootinfo magic number
125 *	a3 is the pointer to the bootinfo structure
126 *
127 * All arguments are passed to alpha_init().
128 */
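/*
 * The boot path below, in rough C-level pseudocode (a sketch of the
 * flow only; the real argument list of alpha_init() lives in machdep.c
 * and is simply the five register arguments a0..a4 passed through):
 *
 *	sp = bootstack;
 *	wrkgp(gp);			// kernel GP for PALcode entries
 *	alpha_init(a0, a1, a2, a3, a4);	// pre-main initialization
 *	wrvptptr(VPTBASE);		// virtual page table pointer
 *	swpctx(lwp0's PCB);		// onto lwp0's stack and mappings
 *	tbia(); imb();			// flush TLB and I-stream
 *	main();				// never returns
 */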
129NESTED_NOPROFILE(locorestart,1,0,ra,0,0)
130	br	pv,1f
1311:	LDGP(pv)
132
133	/* Switch to the boot stack. */
134	lda	sp,bootstack
135
136	/* Load KGP with current GP. */
137	mov	a0, s0			/* save pfn */
138	mov	gp, a0
139	call_pal PAL_OSF1_wrkgp		/* clobbers a0, t0, t8-t11 */
140	mov	s0, a0			/* restore pfn */
141
142	/*
143	 * Call alpha_init() to do pre-main initialization.
144	 * alpha_init() gets the arguments we were called with,
145	 * which are already in a0, a1, a2, a3, and a4.
146	 */
147	CALL(alpha_init)
148
149	/* Set up the virtual page table pointer. */
150	ldiq	a0, VPTBASE
151	call_pal PAL_OSF1_wrvptptr	/* clobbers a0, t0, t8-t11 */
152
153	/*
154	 * Switch to lwp0's PCB.
155	 */
156	lda	a0, lwp0
157	ldq	a0, L_MD_PCBPADDR(a0)		/* phys addr of PCB */
158	SWITCH_CONTEXT
159
160	/*
161	 * We've switched to a new page table base, so invalidate the TLB
162	 * and I-stream.  This happens automatically everywhere but here.
163	 */
164	ldiq	a0, -2				/* TBIA */
165	call_pal PAL_OSF1_tbi
166	call_pal PAL_imb
167
168	/*
169	 * All ready to go!  Call main()!
170	 */
171	CALL(main)
172
173	/* This should never happen. */
174	PANIC("main() returned",Lmain_returned_pmsg)
175	END(locorestart)
176
177/**************************************************************************/
178
179/*
180 * Pull in the PROM interface routines; these are needed for
181 * prom printf (while bootstrapping), and for determining the
182 * boot device, etc.
183 */
184#include <alpha/alpha/prom_disp.s>
185
186/**************************************************************************/
187
188/*
189 * Pull in the PALcode function stubs.
190 */
191#include <alpha/alpha/pal.s>
192
193/**************************************************************************/
194
195/**************************************************************************/
196
197#if defined(MULTIPROCESSOR)
198/*
199 * Pull in the multiprocessor glue.
200 */
201#include <alpha/alpha/multiproc.s>
202#endif /* MULTIPROCESSOR */
203
204/**************************************************************************/
205
206/**************************************************************************/
207
208#if defined(DDB) || defined(KGDB)
209/*
210 * Pull in debugger glue.
211 */
212#include <alpha/alpha/debug.s>
213#endif /* DDB || KGDB */
214
215/**************************************************************************/
216
217/**************************************************************************/
218
219	.text
220.stabs	__FILE__,132,0,0,backtolocore1	/* done with includes */
221.loc	1 __LINE__
222backtolocore1:
223/**************************************************************************/
224
225#ifdef COMPAT_16
226/*
227 * Signal "trampoline" code.
228 *
229 * The kernel arranges for the handler to be invoked directly.  This
230 * trampoline is used only to return from the signal.
231 *
232 * The stack pointer points to the saved sigcontext.
233 */
234
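/*
 * In C terms the trampoline is simply the following sketch (the
 * handler has already returned by the time this runs, and sp points
 * at the struct sigcontext the kernel pushed):
 *
 *	compat_16___sigreturn14(scp);	// normally does not return
 *	exit(error);			// only reached if sigreturn failed
 */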
235NESTED_NOPROFILE(sigcode,0,0,ra,0,0)
236	mov	sp, a0			/* get pointer to sigcontext */
237	CALLSYS_NOERROR(compat_16___sigreturn14)	/* and call sigreturn() with it. */
238	mov	v0, a0			/* if that failed, get error code */
239	CALLSYS_NOERROR(exit)		/* and call exit() with it. */
240XNESTED(esigcode,0)
241	END(sigcode)
242#endif /* COMPAT_16 */
243
244/**************************************************************************/
245
246/*
247 * exception_return: return from trap, exception, or syscall
248 */
249
250IMPORT(ssir, 8)
251
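/*
 * The return path below is roughly this loop, in C-level pseudocode
 * (a sketch; "astpending" stands for the L_MD_ASTPENDING field tested
 * below, not a literal variable):
 *
 *	if (saved_ipl == 0) {
 *		while (ssir)			// pending soft interrupts
 *			softintr_dispatch();	// run at IPL_SOFT
 *		if (returning to user mode) {
 *			while (astpending) {
 *				astpending = 0;
 *				ast(framep);	// run at IPL 0
 *			}
 *			wrfen(curlwp == fpcurlwp); // FPU on only if ours
 *		}
 *	}
 *	restore registers; rti;
 */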
252LEAF(exception_return, 1)			/* XXX should be NESTED */
253	br	pv, 1f
2541:	LDGP(pv)
255
256	ldq	s1, (FRAME_PS * 8)(sp)		/* get the saved PS */
257	and	s1, ALPHA_PSL_IPL_MASK, t0	/* look at the saved IPL */
258	bne	t0, 5f				/* != 0: can't do AST or SIR */
259
260	/* see if we can do an SIR */
2612:	ldq	t1, ssir			/* SIR pending? */
262	bne	t1, 6f				/* yes */
263	/* no */
264
265	and	s1, ALPHA_PSL_USERMODE, t0	/* are we returning to user? */
266	beq	t0, 5f				/* no: just return */
267	/* yes */
268
269	/* GET_CPUINFO clobbers v0, t0, t8...t11. */
2703:	GET_CPUINFO
271
272	/* check for AST */
273	ldq	t1, CPU_INFO_CURLWP(v0)
274	ldl	t3, L_MD_ASTPENDING(t1)		/* AST pending? */
275	bne	t3, 7f				/* yes */
276	/* no: headed back to user space */
277
278	/* Enable the FPU based on whether the current lwp is fpcurlwp. */
2794:	ldq	t2, CPU_INFO_FPCURLWP(v0)
280	cmpeq	t1, t2, t1
281	mov	zero, a0
282	cmovne	t1, 1, a0
283	call_pal PAL_OSF1_wrfen
284
285	/* restore the registers, and return */
2865:	bsr	ra, exception_restore_regs	/* jmp/CALL trashes pv/t12 */
287	ldq	ra,(FRAME_RA*8)(sp)
288	.set noat
289	ldq	at_reg,(FRAME_AT*8)(sp)
290
291	lda	sp,(FRAME_SW_SIZE*8)(sp)
292	call_pal PAL_OSF1_rti
293	.set at
294	/* NOTREACHED */
295
296	/* We've got a SIR */
2976:	ldiq	a0, ALPHA_PSL_IPL_SOFT
298	call_pal PAL_OSF1_swpipl
299	mov	v0, s2				/* remember old IPL */
300	CALL(softintr_dispatch)
301
302	/* SIR handled; restore IPL and check again */
303	mov	s2, a0
304	call_pal PAL_OSF1_swpipl
305	br	2b
306
307	/* We've got an AST */
3087:	stl	zero, L_MD_ASTPENDING(t1)	/* no AST pending */
309
310	ldiq	a0, ALPHA_PSL_IPL_0		/* drop IPL to zero */
311	call_pal PAL_OSF1_swpipl
312	mov	v0, s2				/* remember old IPL */
313
314	mov	sp, a0				/* only arg is frame */
315	CALL(ast)
316
317	/* AST handled; restore IPL and check again */
318	mov	s2, a0
319	call_pal PAL_OSF1_swpipl
320	br	3b
321
322	END(exception_return)
323
324LEAF(exception_save_regs, 0)
325	stq	v0,(FRAME_V0*8)(sp)
326	stq	a3,(FRAME_A3*8)(sp)
327	stq	a4,(FRAME_A4*8)(sp)
328	stq	a5,(FRAME_A5*8)(sp)
329	stq	s0,(FRAME_S0*8)(sp)
330	stq	s1,(FRAME_S1*8)(sp)
331	stq	s2,(FRAME_S2*8)(sp)
332	stq	s3,(FRAME_S3*8)(sp)
333	stq	s4,(FRAME_S4*8)(sp)
334	stq	s5,(FRAME_S5*8)(sp)
335	stq	s6,(FRAME_S6*8)(sp)
336	stq	t0,(FRAME_T0*8)(sp)
337	stq	t1,(FRAME_T1*8)(sp)
338	stq	t2,(FRAME_T2*8)(sp)
339	stq	t3,(FRAME_T3*8)(sp)
340	stq	t4,(FRAME_T4*8)(sp)
341	stq	t5,(FRAME_T5*8)(sp)
342	stq	t6,(FRAME_T6*8)(sp)
343	stq	t7,(FRAME_T7*8)(sp)
344	stq	t8,(FRAME_T8*8)(sp)
345	stq	t9,(FRAME_T9*8)(sp)
346	stq	t10,(FRAME_T10*8)(sp)
347	stq	t11,(FRAME_T11*8)(sp)
348	stq	t12,(FRAME_T12*8)(sp)
349	RET
350	END(exception_save_regs)
351
352LEAF(exception_restore_regs, 0)
353	ldq	v0,(FRAME_V0*8)(sp)
354	ldq	a3,(FRAME_A3*8)(sp)
355	ldq	a4,(FRAME_A4*8)(sp)
356	ldq	a5,(FRAME_A5*8)(sp)
357	ldq	s0,(FRAME_S0*8)(sp)
358	ldq	s1,(FRAME_S1*8)(sp)
359	ldq	s2,(FRAME_S2*8)(sp)
360	ldq	s3,(FRAME_S3*8)(sp)
361	ldq	s4,(FRAME_S4*8)(sp)
362	ldq	s5,(FRAME_S5*8)(sp)
363	ldq	s6,(FRAME_S6*8)(sp)
364	ldq	t0,(FRAME_T0*8)(sp)
365	ldq	t1,(FRAME_T1*8)(sp)
366	ldq	t2,(FRAME_T2*8)(sp)
367	ldq	t3,(FRAME_T3*8)(sp)
368	ldq	t4,(FRAME_T4*8)(sp)
369	ldq	t5,(FRAME_T5*8)(sp)
370	ldq	t6,(FRAME_T6*8)(sp)
371	ldq	t7,(FRAME_T7*8)(sp)
372	ldq	t8,(FRAME_T8*8)(sp)
373	ldq	t9,(FRAME_T9*8)(sp)
374	ldq	t10,(FRAME_T10*8)(sp)
375	ldq	t11,(FRAME_T11*8)(sp)
376	ldq	t12,(FRAME_T12*8)(sp)
377	RET
378	END(exception_restore_regs)
379
380/**************************************************************************/
381
382/*
383 * XentArith:
384 * System arithmetic trap entry point.
385 */
386
387	PALVECT(XentArith)		/* setup frame, save registers */
388
389	/* a0, a1, & a2 already set up */
390	ldiq	a3, ALPHA_KENTRY_ARITH
391	mov	sp, a4			; .loc 1 __LINE__
392	CALL(trap)
393
394	jmp	zero, exception_return
395	END(XentArith)
396
397/**************************************************************************/
398
399/*
400 * XentIF:
401 * System instruction fault trap entry point.
402 */
403
404	PALVECT(XentIF)			/* setup frame, save registers */
405
406	/* a0, a1, & a2 already set up */
407	ldiq	a3, ALPHA_KENTRY_IF
408	mov	sp, a4			; .loc 1 __LINE__
409	CALL(trap)
410	jmp	zero, exception_return
411	END(XentIF)
412
413/**************************************************************************/
414
415/*
416 * XentInt:
417 * System interrupt entry point.
418 */
419
420	PALVECT(XentInt)		/* setup frame, save registers */
421
422	/* a0, a1, & a2 already set up */
423	mov	sp, a3			; .loc 1 __LINE__
424	CALL(interrupt)
425	jmp	zero, exception_return
426	END(XentInt)
427
428/**************************************************************************/
429
430/*
431 * XentMM:
432 * System memory management fault entry point.
433 */
434
435	PALVECT(XentMM)			/* setup frame, save registers */
436
437	/* a0, a1, & a2 already set up */
438	ldiq	a3, ALPHA_KENTRY_MM
439	mov	sp, a4			; .loc 1 __LINE__
440	CALL(trap)
441
442	jmp	zero, exception_return
443	END(XentMM)
444
445/**************************************************************************/
446
447/*
448 * XentSys:
449 * System call entry point.
450 */
451
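/*
 * After saving state, the dispatch below amounts to the following
 * sketch (md_syscall names the function pointer that the P_MD_SYSCALL
 * offset reaches, normally syscall() or a compat flavour):
 *
 *	(*curlwp->l_proc->p_md.md_syscall)(curlwp, code, framep);
 *
 * where code is the system call number from v0 and framep is sp.
 */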
452	ESETUP(XentSys)			; .loc 1 __LINE__
453
454	stq	v0,(FRAME_V0*8)(sp)		/* in case we need to restart */
455	stq	s0,(FRAME_S0*8)(sp)
456	stq	s1,(FRAME_S1*8)(sp)
457	stq	s2,(FRAME_S2*8)(sp)
458	stq	s3,(FRAME_S3*8)(sp)
459	stq	s4,(FRAME_S4*8)(sp)
460	stq	s5,(FRAME_S5*8)(sp)
461	stq	s6,(FRAME_S6*8)(sp)
462	stq	a0,(FRAME_A0*8)(sp)
463	stq	a1,(FRAME_A1*8)(sp)
464	stq	a2,(FRAME_A2*8)(sp)
465	stq	a3,(FRAME_A3*8)(sp)
466	stq	a4,(FRAME_A4*8)(sp)
467	stq	a5,(FRAME_A5*8)(sp)
468	stq	ra,(FRAME_RA*8)(sp)
469
470	/* syscall number, passed in v0, is first arg, frame pointer second */
471	mov	v0,a1
472	GET_CURLWP
473	ldq	a0,0(v0)
474	mov	sp,a2			; .loc 1 __LINE__
475	ldq	t11,L_PROC(a0)
476	ldq	t12,P_MD_SYSCALL(t11)
477	CALL((t12))
478
479	jmp	zero, exception_return
480	END(XentSys)
481
482/**************************************************************************/
483
484/*
485 * XentUna:
486 * System unaligned access entry point.
487 */
488
489LEAF(XentUna, 3)				/* XXX should be NESTED */
490	.set noat
491	lda	sp,-(FRAME_SW_SIZE*8)(sp)
492	stq	at_reg,(FRAME_AT*8)(sp)
493	.set at
494	stq	ra,(FRAME_RA*8)(sp)
495	bsr	ra, exception_save_regs		/* jmp/CALL trashes pv/t12 */
496
497	/* a0, a1, & a2 already set up */
498	ldiq	a3, ALPHA_KENTRY_UNA
499	mov	sp, a4			; .loc 1 __LINE__
500	CALL(trap)
501
502	jmp	zero, exception_return
503	END(XentUna)
504
505/**************************************************************************/
506
507/*
508 * savefpstate: Save a process's floating point state.
509 *
510 * Arguments:
511 *	a0	'struct fpstate *' to save into
512 */
513
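/*
 * Equivalent C sketch (field names follow the FPREG_* offsets used
 * below; $f31 always reads as zero, so only $f0..$f30 are kept):
 *
 *	for (i = 0; i < 31; i++)
 *		fp->fpr_regs[i] = FPR[i];
 *	fp->fpr_cr = rdfpcr();
 */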
514LEAF(savefpstate, 1)
515	LDGP(pv)
516	/* save all of the FP registers */
517	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
518	stt	$f0,   (0 * 8)(t1)	/* save first register, using hw name */
519	stt	$f1,   (1 * 8)(t1)	/* etc. */
520	stt	$f2,   (2 * 8)(t1)
521	stt	$f3,   (3 * 8)(t1)
522	stt	$f4,   (4 * 8)(t1)
523	stt	$f5,   (5 * 8)(t1)
524	stt	$f6,   (6 * 8)(t1)
525	stt	$f7,   (7 * 8)(t1)
526	stt	$f8,   (8 * 8)(t1)
527	stt	$f9,   (9 * 8)(t1)
528	stt	$f10, (10 * 8)(t1)
529	stt	$f11, (11 * 8)(t1)
530	stt	$f12, (12 * 8)(t1)
531	stt	$f13, (13 * 8)(t1)
532	stt	$f14, (14 * 8)(t1)
533	stt	$f15, (15 * 8)(t1)
534	stt	$f16, (16 * 8)(t1)
535	stt	$f17, (17 * 8)(t1)
536	stt	$f18, (18 * 8)(t1)
537	stt	$f19, (19 * 8)(t1)
538	stt	$f20, (20 * 8)(t1)
539	stt	$f21, (21 * 8)(t1)
540	stt	$f22, (22 * 8)(t1)
541	stt	$f23, (23 * 8)(t1)
542	stt	$f24, (24 * 8)(t1)
543	stt	$f25, (25 * 8)(t1)
544	stt	$f26, (26 * 8)(t1)
545	stt	$f27, (27 * 8)(t1)
546	.set noat
547	stt	$f28, (28 * 8)(t1)
548	.set at
549	stt	$f29, (29 * 8)(t1)
550	stt	$f30, (30 * 8)(t1)
551
552	/*
553	 * Then save the FPCR; note that the necessary 'trapb's are taken
554	 * care of on kernel entry and exit.
555	 */
556	mf_fpcr	ft0
557	stt	ft0, FPREG_FPR_CR(a0)	/* store to FPCR save area */
558
559	RET
560	END(savefpstate)
561
562/**************************************************************************/
563
564/*
565 * restorefpstate: Restore a process's floating point state.
566 *
567 * Arguments:
568 *	a0	'struct fpstate *' to restore from
569 */
570
571LEAF(restorefpstate, 1)
572	LDGP(pv)
573	/*
574	 * Restore the FPCR; note that the necessary 'trapb's are taken care of
575	 * on kernel entry and exit.
576	 */
577	ldt	ft0, FPREG_FPR_CR(a0)	/* load from FPCR save area */
578	mt_fpcr	ft0
579
580	/* Restore all of the FP registers. */
581	lda	t1, FPREG_FPR_REGS(a0)	/* get address of FP reg. save area */
582	ldt	$f0,   (0 * 8)(t1)	/* restore first reg., using hw name */
583	ldt	$f1,   (1 * 8)(t1)	/* etc. */
584	ldt	$f2,   (2 * 8)(t1)
585	ldt	$f3,   (3 * 8)(t1)
586	ldt	$f4,   (4 * 8)(t1)
587	ldt	$f5,   (5 * 8)(t1)
588	ldt	$f6,   (6 * 8)(t1)
589	ldt	$f7,   (7 * 8)(t1)
590	ldt	$f8,   (8 * 8)(t1)
591	ldt	$f9,   (9 * 8)(t1)
592	ldt	$f10, (10 * 8)(t1)
593	ldt	$f11, (11 * 8)(t1)
594	ldt	$f12, (12 * 8)(t1)
595	ldt	$f13, (13 * 8)(t1)
596	ldt	$f14, (14 * 8)(t1)
597	ldt	$f15, (15 * 8)(t1)
598	ldt	$f16, (16 * 8)(t1)
599	ldt	$f17, (17 * 8)(t1)
600	ldt	$f18, (18 * 8)(t1)
601	ldt	$f19, (19 * 8)(t1)
602	ldt	$f20, (20 * 8)(t1)
603	ldt	$f21, (21 * 8)(t1)
604	ldt	$f22, (22 * 8)(t1)
605	ldt	$f23, (23 * 8)(t1)
606	ldt	$f24, (24 * 8)(t1)
607	ldt	$f25, (25 * 8)(t1)
608	ldt	$f26, (26 * 8)(t1)
609	ldt	$f27, (27 * 8)(t1)
610	ldt	$f28, (28 * 8)(t1)
611	ldt	$f29, (29 * 8)(t1)
612	ldt	$f30, (30 * 8)(t1)
613
614	RET
615	END(restorefpstate)
616
617/**************************************************************************/
618
619/*
620 * savectx: save process context, i.e. callee-saved registers
621 *
622 * Note that savectx() only works for processes other than curlwp,
623 * since cpu_switchto will copy over the info saved here.  (It _can_
624 * sanely be used for curlwp iff cpu_switchto won't be called again, e.g.
625 * if called from boot().)
626 *
627 * Arguments:
628 *	a0	'struct pcb *' of the process that needs its context saved
629 *
630 * Return:
631 *	v0	0.  (note that for child processes, it seems
632 *		like savectx() returns 1, because the return address
633 *		in the PCB is set to the return address from savectx().)
634 */
635
636LEAF(savectx, 1)
637	br	pv, 1f
6381:	LDGP(pv)
639	stq	sp, PCB_HWPCB_KSP(a0)		/* store sp */
640	stq	s0, PCB_CONTEXT+(0 * 8)(a0)	/* store s0 - s6 */
641	stq	s1, PCB_CONTEXT+(1 * 8)(a0)
642	stq	s2, PCB_CONTEXT+(2 * 8)(a0)
643	stq	s3, PCB_CONTEXT+(3 * 8)(a0)
644	stq	s4, PCB_CONTEXT+(4 * 8)(a0)
645	stq	s5, PCB_CONTEXT+(5 * 8)(a0)
646	stq	s6, PCB_CONTEXT+(6 * 8)(a0)
647	stq	ra, PCB_CONTEXT+(7 * 8)(a0)	/* store ra */
648	call_pal PAL_OSF1_rdps			/* NOTE: doesn't kill a0 */
649	stq	v0, PCB_CONTEXT+(8 * 8)(a0)	/* store ps, for ipl */
650
651	mov	zero, v0
652	RET
653	END(savectx)
654
655/**************************************************************************/
656
657
658/*
659 * struct lwp *cpu_switchto(struct lwp *current, struct lwp *next)
660 * Switch to the specified next LWP
661 * Arguments:
662 *	a0	'struct lwp *' of the LWP to switch from
663 *	a1	'struct lwp *' of the LWP to switch to
664 */
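/*
 * Roughly, in C-level pseudocode (a sketch only; "hw PCB" is the
 * hardware PCB whose physical address lives at L_MD_PCBPADDR):
 *
 *	if (old != NULL)
 *		save s0-s6, ra into old's PCB;	// inline savectx();
 *						// swpctx saves KSP itself
 *	swpctx(new's hw PCB);			// new stack, PTBR, ASN
 *	curlwp = new;
 *	if (new's proc has RAS entries and its trapframe PC is in one)
 *		reset the trapframe PC to the RAS restart address;
 *	restore s0-s6, ra from new's PCB;
 *	return old;
 */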
665LEAF(cpu_switchto, 0)
666	LDGP(pv)
667
668	beq	a0, 1f
669
670	/*
671	 * do an inline savectx(), to save old context
672	 */
673	ldq	a2, L_PCB(a0)
674	/* NOTE: ksp is stored by the swpctx */
675	stq	s0, PCB_CONTEXT+(0 * 8)(a2)	/* store s0 - s6 */
676	stq	s1, PCB_CONTEXT+(1 * 8)(a2)
677	stq	s2, PCB_CONTEXT+(2 * 8)(a2)
678	stq	s3, PCB_CONTEXT+(3 * 8)(a2)
679	stq	s4, PCB_CONTEXT+(4 * 8)(a2)
680	stq	s5, PCB_CONTEXT+(5 * 8)(a2)
681	stq	s6, PCB_CONTEXT+(6 * 8)(a2)
682	stq	ra, PCB_CONTEXT+(7 * 8)(a2)	/* store ra */
683
6841:
685	mov	a0, s4				/* save old curlwp */
686	mov	a1, s2				/* save new lwp */
687	ldq	a0, L_MD_PCBPADDR(s2)		/* save new pcbpaddr */
688
689	SWITCH_CONTEXT				/* swap the context */
690
691	GET_CPUINFO
692	stq	s2, CPU_INFO_CURLWP(v0)		/* curlwp = l */
693
694	/*
695	 * Now running on the new PCB.
696	 */
697	ldq	s0, L_PCB(s2)
698
699	/*
700	 * Check for restartable atomic sequences (RAS).
701	 */
702	ldq	a0, L_PROC(s2)			/* first ras_lookup() arg */
703	ldq	t0, P_RASLIST(a0)		/* any RAS entries? */
704	beq	t0, 1f				/* no, skip */
705	ldq	s1, L_MD_TF(s2)			/* s1 = l->l_md.md_tf */
706	ldq	a1, (FRAME_PC*8)(s1)		/* second ras_lookup() arg */
707	CALL(ras_lookup)			/* ras_lookup(p, PC) */
708	addq	v0, 1, t0			/* -1 means "not in ras" */
709	beq	t0, 1f
710	stq	v0, (FRAME_PC*8)(s1)
711
7121:
713	mov	s4, v0				/* return the old lwp */
714	/*
715	 * Restore registers and return.
716	 * NOTE: ksp is restored by the swpctx.
717	 */
718	ldq	s1, PCB_CONTEXT+(1 * 8)(s0)		/* restore s1-s6 */
719	ldq	s2, PCB_CONTEXT+(2 * 8)(s0)
720	ldq	s3, PCB_CONTEXT+(3 * 8)(s0)
721	ldq	s4, PCB_CONTEXT+(4 * 8)(s0)
722	ldq	s5, PCB_CONTEXT+(5 * 8)(s0)
723	ldq	s6, PCB_CONTEXT+(6 * 8)(s0)
724	ldq	ra, PCB_CONTEXT+(7 * 8)(s0)		/* restore ra */
725	ldq	s0, PCB_CONTEXT+(0 * 8)(s0)		/* restore s0 */
726
727	RET
728	END(cpu_switchto)
729
730/*
731 * lwp_trampoline()
732 *
733 * Arrange for a function to be invoked neatly, after a cpu_lwp_fork().
734 *
735 * Invokes the function specified by the s0 register with the return
736 * address specified by the s1 register and with one argument specified
737 * by the s2 register.
738 */
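/*
 * That is, a newly forked LWP first runs, in effect:
 *
 *	lwp_startup(prev, new_lwp);	// v0 = previous lwp, s3 = new lwp
 *	(*func)(arg);			// func from s0, arg from s2,
 *					// returning to the address in s1
 */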
739LEAF_NOPROFILE(lwp_trampoline, 0)
740	mov	v0, a0
741	mov	s3, a1
742	CALL(lwp_startup)
743	mov	s0, pv
744	mov	s1, ra
745	mov	s2, a0
746	jmp	zero, (pv)
747	END(lwp_trampoline)
748
749/*
750 * Simplified version of above: don't call lwp_startup()
751 */
752LEAF_NOPROFILE(setfunc_trampoline, 0)
753	mov	s0, pv
754	mov	s1, ra
755	mov	s2, a0
756	jmp	zero, (pv)
757	END(setfunc_trampoline)
758
759/**************************************************************************/
760
761/*
762 * Copy a null-terminated string within the kernel's address space.
763 * If lenp is not NULL, store the number of chars copied in *lenp
764 *
765 * int copystr(char *from, char *to, size_t len, size_t *lenp);
766 */
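/*
 * Reference C sketch of the same semantics (the NUL, when found, is
 * copied and counted in *lenp):
 *
 *	int
 *	copystr(const char *from, char *to, size_t len, size_t *lenp)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			if ((to[i] = from[i]) == '\0') {
 *				if (lenp != NULL)
 *					*lenp = i + 1;
 *				return 0;
 *			}
 *		}
 *		if (lenp != NULL)
 *			*lenp = i;
 *		return ENAMETOOLONG;
 *	}
 */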
767LEAF(copystr, 4)
768	LDGP(pv)
769
770	mov	a2, t0			/* t0 = i = len */
771	bne	a2, 1f			/* if (len != 0), proceed */
772	ldiq	t1, 1			/* else bail */
773	br	zero, 2f
774
7751:	ldq_u	t1, 0(a0)		/* t1 = *from */
776	extbl	t1, a0, t1
777	ldq_u	t3, 0(a1)		/* set up t2 with quad around *to */
778	insbl	t1, a1, t2
779	mskbl	t3, a1, t3
780	or	t3, t2, t3		/* add *from to quad around *to */
781	stq_u	t3, 0(a1)		/* write out that quad */
782
783	subl	a2, 1, a2		/* len-- */
784	beq	t1, 2f			/* if (*from == 0), bail out */
785	addq	a1, 1, a1		/* to++ */
786	addq	a0, 1, a0		/* from++ */
787	bne	a2, 1b			/* if (len != 0) copy more */
788
7892:	beq	a3, 3f			/* if (lenp != NULL) */
790	subl	t0, a2, t0		/* *lenp = (i - len) */
791	stq	t0, 0(a3)
7923:	beq	t1, 4f			/* *from == '\0'; leave quietly */
793
794	ldiq	v0, ENAMETOOLONG	/* *from != '\0'; error. */
795	RET
796
7974:	mov	zero, v0		/* return 0. */
798	RET
799	END(copystr)
800
801NESTED(copyinstr, 4, 16, ra, IM_RA|IM_S0, 0)
802	LDGP(pv)
803	lda	sp, -16(sp)			/* set up stack frame	     */
804	stq	ra, (16-8)(sp)			/* save ra		     */
805	stq	s0, (16-16)(sp)			/* save s0		     */
806	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
807	cmpult	a0, t0, t1			/* is in user space.	     */
808	beq	t1, copyerr_efault		/* if it's not, error out.   */
809	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
810	GET_CURLWP
811	mov	v0, s0
812	lda	v0, copyerr			/* set up fault handler.     */
813	.set noat
814	ldq	at_reg, 0(s0)
815	ldq	at_reg, L_PCB(at_reg)
816	stq	v0, PCB_ONFAULT(at_reg)
817	.set at
818	CALL(copystr)				/* do the copy.		     */
819	.set noat
820	ldq	at_reg, 0(s0)			/* kill the fault handler.   */
821	ldq	at_reg, L_PCB(at_reg)
822	stq	zero, PCB_ONFAULT(at_reg)
823	.set at
824	ldq	ra, (16-8)(sp)			/* restore ra.		     */
825	ldq	s0, (16-16)(sp)			/* restore s0.		     */
826	lda	sp, 16(sp)			/* kill stack frame.	     */
827	RET					/* v0 left over from copystr */
828	END(copyinstr)
829
830NESTED(copyoutstr, 4, 16, ra, IM_RA|IM_S0, 0)
831	LDGP(pv)
832	lda	sp, -16(sp)			/* set up stack frame	     */
833	stq	ra, (16-8)(sp)			/* save ra		     */
834	stq	s0, (16-16)(sp)			/* save s0		     */
835	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
836	cmpult	a1, t0, t1			/* is in user space.	     */
837	beq	t1, copyerr_efault		/* if it's not, error out.   */
838	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
839	GET_CURLWP
840	mov	v0, s0
841	lda	v0, copyerr			/* set up fault handler.     */
842	.set noat
843	ldq	at_reg, 0(s0)
844	ldq	at_reg, L_PCB(at_reg)
845	stq	v0, PCB_ONFAULT(at_reg)
846	.set at
847	CALL(copystr)				/* do the copy.		     */
848	.set noat
849	ldq	at_reg, 0(s0)			/* kill the fault handler.   */
850	ldq	at_reg, L_PCB(at_reg)
851	stq	zero, PCB_ONFAULT(at_reg)
852	.set at
853	ldq	ra, (16-8)(sp)			/* restore ra.		     */
854	ldq	s0, (16-16)(sp)			/* restore s0.		     */
855	lda	sp, 16(sp)			/* kill stack frame.	     */
856	RET					/* v0 left over from copystr */
857	END(copyoutstr)
858
859/*
860 * kcopy(const void *src, void *dst, size_t len);
861 *
862 * Copy len bytes from src to dst, aborting if we encounter a fatal
863 * page fault.
864 *
865 * kcopy() _must_ save and restore the old fault handler since it is
866 * called by uiomove(), which may be in the path of servicing a non-fatal
867 * page fault.
868 */
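/*
 * The fault-handling protocol used here, and in copyin()/copyout()
 * below, is the following sketch (pcb_onfault stands for the curlwp
 * PCB slot that the PCB_ONFAULT offset reaches; on a fault, trap()
 * resumes at that address with the error left in v0):
 *
 *	saved = pcb_onfault;
 *	pcb_onfault = kcopyerr;		// fault => unwind into kcopyerr
 *	memcpy(dst, src, len);
 *	pcb_onfault = saved;		// restore for a nested caller
 *	return 0;			// kcopyerr path returns EFAULT
 */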
869NESTED(kcopy, 3, 32, ra, IM_RA|IM_S0|IM_S1, 0)
870	LDGP(pv)
871	lda	sp, -32(sp)			/* set up stack frame	     */
872	stq	ra, (32-8)(sp)			/* save ra		     */
873	stq	s0, (32-16)(sp)			/* save s0		     */
874	stq	s1, (32-24)(sp)			/* save s1		     */
875	/* Swap a0, a1, for call to memcpy(). */
876	mov	a1, v0
877	mov	a0, a1
878	mov	v0, a0
879	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
880	GET_CURLWP
881	ldq	s1, 0(v0)			/* s1 = curlwp		     */
882	lda	v0, kcopyerr			/* set up fault handler.     */
883	.set noat
884	ldq	at_reg, L_PCB(s1)
885	ldq	s0, PCB_ONFAULT(at_reg)	/* save old handler.	     */
886	stq	v0, PCB_ONFAULT(at_reg)
887	.set at
888	CALL(memcpy)				/* do the copy.		     */
889	.set noat
890	ldq	at_reg, L_PCB(s1)		/* restore the old handler.  */
891	stq	s0, PCB_ONFAULT(at_reg)
892	.set at
893	ldq	ra, (32-8)(sp)			/* restore ra.		     */
894	ldq	s0, (32-16)(sp)			/* restore s0.		     */
895	ldq	s1, (32-24)(sp)			/* restore s1.		     */
896	lda	sp, 32(sp)			/* kill stack frame.	     */
897	mov	zero, v0			/* return 0. */
898	RET
899	END(kcopy)
900
901LEAF(kcopyerr, 0)
902	LDGP(pv)
903	.set noat
904	ldq	at_reg, L_PCB(s1)		/* restore the old handler.  */
905	stq	s0, PCB_ONFAULT(at_reg)
906	.set at
907	ldq	ra, (32-8)(sp)			/* restore ra.		     */
908	ldq	s0, (32-16)(sp)			/* restore s0.		     */
909	ldq	s1, (32-24)(sp)			/* restore s1.		     */
910	lda	sp, 32(sp)			/* kill stack frame.	     */
911	RET
912END(kcopyerr)
913
914NESTED(copyin, 3, 16, ra, IM_RA|IM_S0, 0)
915	LDGP(pv)
916	lda	sp, -16(sp)			/* set up stack frame	     */
917	stq	ra, (16-8)(sp)			/* save ra		     */
918	stq	s0, (16-16)(sp)			/* save s0		     */
919	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
920	cmpult	a0, t0, t1			/* is in user space.	     */
921	beq	t1, copyerr_efault		/* if it's not, error out.   */
922	/* Swap a0, a1, for call to memcpy(). */
923	mov	a1, v0
924	mov	a0, a1
925	mov	v0, a0
926	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
927	GET_CURLWP
928	ldq	s0, 0(v0)			/* s0 = curlwp		     */
929	lda	v0, copyerr			/* set up fault handler.     */
930	.set noat
931	ldq	at_reg, L_PCB(s0)
932	stq	v0, PCB_ONFAULT(at_reg)
933	.set at
934	CALL(memcpy)				/* do the copy.		     */
935	.set noat
936	ldq	at_reg, L_PCB(s0)		/* kill the fault handler.   */
937	stq	zero, PCB_ONFAULT(at_reg)
938	.set at
939	ldq	ra, (16-8)(sp)			/* restore ra.		     */
940	ldq	s0, (16-16)(sp)			/* restore s0.		     */
941	lda	sp, 16(sp)			/* kill stack frame.	     */
942	mov	zero, v0			/* return 0. */
943	RET
944	END(copyin)
945
946NESTED(copyout, 3, 16, ra, IM_RA|IM_S0, 0)
947	LDGP(pv)
948	lda	sp, -16(sp)			/* set up stack frame	     */
949	stq	ra, (16-8)(sp)			/* save ra		     */
950	stq	s0, (16-16)(sp)			/* save s0		     */
951	ldiq	t0, VM_MAX_ADDRESS		/* make sure that dest addr  */
952	cmpult	a1, t0, t1			/* is in user space.	     */
953	beq	t1, copyerr_efault		/* if it's not, error out.   */
954	/* Swap a0, a1, for call to memcpy(). */
955	mov	a1, v0
956	mov	a0, a1
957	mov	v0, a0
958	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
959	GET_CURLWP
960	ldq	s0, 0(v0)			/* s0 = curlwp		     */
961	lda	v0, copyerr			/* set up fault handler.     */
962	.set noat
963	ldq	at_reg, L_PCB(s0)
964	stq	v0, PCB_ONFAULT(at_reg)
965	.set at
966	CALL(memcpy)				/* do the copy.		     */
967	.set noat
968	ldq	at_reg, L_PCB(s0)		/* kill the fault handler.   */
969	stq	zero, PCB_ONFAULT(at_reg)
970	.set at
971	ldq	ra, (16-8)(sp)			/* restore ra.		     */
972	ldq	s0, (16-16)(sp)			/* restore s0.		     */
973	lda	sp, 16(sp)			/* kill stack frame.	     */
974	mov	zero, v0			/* return 0. */
975	RET
976	END(copyout)
977
978LEAF(copyerr_efault, 0)
979	ldiq	v0, EFAULT			/* return EFAULT.	     */
980XLEAF(copyerr, 0)
981	LDGP(pv)
982	ldq	ra, (16-8)(sp)			/* restore ra.		     */
983	ldq	s0, (16-16)(sp)			/* restore s0.		     */
984	lda	sp, 16(sp)			/* kill stack frame.	     */
985	RET
986END(copyerr)
987
988/**************************************************************************/
989
990/*
991 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
992 * user text space.
993 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
994 * user data space.
995 */
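/*
 * Each of these follows the same pcb_onfault protocol as kcopy()
 * above; e.g. fuword() is, as a C sketch (the zap below makes the
 * 32-bit result zero-extended):
 *
 *	long fuword(const void *uaddr)
 *	{
 *		long v;
 *
 *		if ((unsigned long)uaddr >= VM_MAX_ADDRESS)
 *			return -1;
 *		pcb_onfault = fswberr;		// fault => return -1
 *		v = *(const volatile unsigned int *)uaddr;
 *		pcb_onfault = NULL;
 *		return v;
 *	}
 */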
996LEAF(fuword, 1)
997XLEAF(fuiword, 1)
998	LDGP(pv)
999	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1000	cmpult	a0, t0, t1			/* is in user space. */
1001	beq	t1, fswberr			/* if it's not, error out. */
1002	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1003	GET_CURLWP
1004	ldq	t1, 0(v0)
1005	lda	t0, fswberr
1006	.set noat
1007	ldq	at_reg, L_PCB(t1)
1008	stq	t0, PCB_ONFAULT(at_reg)
1009	.set at
1010	ldq	v0, 0(a0)
1011	zap	v0, 0xf0, v0
1012	.set noat
1013	ldq	at_reg, L_PCB(t1)
1014	stq	zero, PCB_ONFAULT(at_reg)
1015	.set at
1016	RET
1017	END(fuword)
1018
1019LEAF(fusword, 1)
1020XLEAF(fuisword, 1)
1021	LDGP(pv)
1022	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1023	cmpult	a0, t0, t1			/* is in user space. */
1024	beq	t1, fswberr			/* if it's not, error out. */
1025	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1026	GET_CURLWP
1027	ldq	t1, 0(v0)
1028	lda	t0, fswberr
1029	.set noat
1030	ldq	at_reg, L_PCB(t1)
1031	stq	t0, PCB_ONFAULT(at_reg)
1032	.set at
1033	/* XXX FETCH IT */
1034	.set noat
1035	ldq	at_reg, L_PCB(t1)
1036	stq	zero, PCB_ONFAULT(at_reg)
1037	.set at
1038	RET
1039	END(fusword)
1040
1041LEAF(fubyte, 1)
1042XLEAF(fuibyte, 1)
1043	LDGP(pv)
1044	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1045	cmpult	a0, t0, t1			/* is in user space. */
1046	beq	t1, fswberr			/* if it's not, error out. */
1047	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1048	GET_CURLWP
1049	ldq	t1, 0(v0)
1050	lda	t0, fswberr
1051	.set noat
1052	ldq	at_reg, L_PCB(t1)
1053	stq	t0, PCB_ONFAULT(at_reg)
1054	.set at
1055	/* XXX FETCH IT */
1056	.set noat
1057	ldq	at_reg, L_PCB(t1)
1058	stq	zero, PCB_ONFAULT(at_reg)
1059	.set at
1060	RET
1061	END(fubyte)
1062
1063LEAF(suword, 2)
1064	LDGP(pv)
1065	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1066	cmpult	a0, t0, t1			/* is in user space. */
1067	beq	t1, fswberr			/* if it's not, error out. */
1068	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1069	GET_CURLWP
1070	ldq	t1, 0(v0)
1071	lda	t0, fswberr
1072	.set noat
1073	ldq	at_reg, L_PCB(t1)
1074	stq	t0, PCB_ONFAULT(at_reg)
1075	.set at
1076	stq	a1, 0(a0)			/* do the store. */
1077	.set noat
1078	ldq	at_reg, L_PCB(t1)
1079	stq	zero, PCB_ONFAULT(at_reg)
1080	.set at
1081	mov	zero, v0
1082	RET
1083	END(suword)
1084
1085#ifdef notdef
1086LEAF(suiword, 2)
1087	LDGP(pv)
1088	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1089	cmpult	a0, t0, t1			/* is in user space. */
1090	beq	t1, fswberr			/* if it's not, error out. */
1091	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1092	GET_CURLWP
1093	ldq	t1, 0(v0)
1094	lda	t0, fswberr
1095	.set noat
1096	ldq	at_reg, L_PCB(t1)
1097	stq	t0, PCB_ONFAULT(at_reg)
1098	.set at
1099	/* XXX STORE IT */
1100	.set noat
1101	ldq	at_reg, L_PCB(t1)
1102	stq	zero, PCB_ONFAULT(at_reg)
1103	.set at
1104	call_pal PAL_OSF1_imb			/* sync instruction stream */
1105	mov	zero, v0
1106	RET
1107	END(suiword)
1108
1109LEAF(susword, 2)
1110	LDGP(pv)
1111	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1112	cmpult	a0, t0, t1			/* is in user space. */
1113	beq	t1, fswberr			/* if it's not, error out. */
1114	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1115	GET_CURLWP
1116	ldq	t1, 0(v0)
1117	lda	t0, fswberr
1118	.set noat
1119	ldq	at_reg, L_PCB(t1)
1120	stq	t0, PCB_ONFAULT(at_reg)
1121	.set at
1122	/* XXX STORE IT */
1123	.set noat
1124	ldq	at_reg, L_PCB(t1)
1125	stq	zero, PCB_ONFAULT(at_reg)
1126	.set at
1127	mov	zero, v0
1128	RET
1129	END(susword)
1130
1131LEAF(suisword, 2)
1132	LDGP(pv)
1133	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1134	cmpult	a0, t0, t1			/* is in user space. */
1135	beq	t1, fswberr			/* if it's not, error out. */
1136	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1137	GET_CURLWP
1138	ldq	t1, 0(v0)
1139	lda	t0, fswberr
1140	.set noat
1141	ldq	at_reg, L_PCB(t1)
1142	stq	t0, PCB_ONFAULT(at_reg)
1143	.set at
1144	/* XXX STORE IT */
1145	.set noat
1146	ldq	at_reg, L_PCB(t1)
1147	stq	zero, PCB_ONFAULT(at_reg)
1148	.set at
1149	call_pal PAL_OSF1_imb			/* sync instruction stream */
1150	mov	zero, v0
1151	RET
1152	END(suisword)
1153#endif /* notdef */
1154
1155LEAF(subyte, 2)
1156	LDGP(pv)
1157	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1158	cmpult	a0, t0, t1			/* is in user space. */
1159	beq	t1, fswberr			/* if it's not, error out. */
1160	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1161	GET_CURLWP
1162	ldq	t1, 0(v0)
1163	lda	t0, fswberr
1164	.set noat
1165	ldq	at_reg, L_PCB(t1)
1166	stq	t0, PCB_ONFAULT(at_reg)
1167	.set at
1168	zap	a1, 0xfe, a1			/* kill arg's high bytes */
1169	insbl	a1, a0, a1			/* move it to the right byte */
1170	ldq_u	t0, 0(a0)			/* load quad around byte */
1171	mskbl	t0, a0, t0			/* kill the target byte */
1172	or	t0, a1, a1			/* put the result together */
1173	stq_u	a1, 0(a0)			/* and store it. */
1174	.set noat
1175	ldq	at_reg, L_PCB(t1)
1176	stq	zero, PCB_ONFAULT(at_reg)
1177	.set at
1178	mov	zero, v0
1179	RET
1180	END(subyte)
1181
1182LEAF(suibyte, 2)
1183	LDGP(pv)
1184	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1185	cmpult	a0, t0, t1			/* is in user space. */
1186	beq	t1, fswberr			/* if it's not, error out. */
1187	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1188	GET_CURLWP
1189	ldq	t1, 0(v0)
1190	lda	t0, fswberr
1191	.set noat
1192	ldq	at_reg, L_PCB(t1)
1193	stq	t0, PCB_ONFAULT(at_reg)
1194	.set at
1195	zap	a1, 0xfe, a1			/* kill arg's high bytes */
1196	insbl	a1, a0, a1			/* move it to the right byte */
1197	ldq_u	t0, 0(a0)			/* load quad around byte */
1198	mskbl	t0, a0, t0			/* kill the target byte */
1199	or	t0, a1, a1			/* put the result together */
1200	stq_u	a1, 0(a0)			/* and store it. */
1201	.set noat
1202	ldq	at_reg, L_PCB(t1)
1203	stq	zero, PCB_ONFAULT(at_reg)
1204	.set at
1205	call_pal PAL_OSF1_imb			/* sync instruction stream */
1206	mov	zero, v0
1207	RET
1208	END(suibyte)
1209
1210LEAF(fswberr, 0)
1211	LDGP(pv)
1212	ldiq	v0, -1
1213	RET
1214	END(fswberr)
1215
1216/**************************************************************************/
1217
1218#ifdef notdef
1219/*
1220 * fuswintr and suswintr are just like fusword and susword except that if
1221 * the page is not in memory or would cause a trap, then we return an error.
1222 * The important thing is to prevent sleep() and switch().
1223 */
1224
1225LEAF(fuswintr, 2)
1226	LDGP(pv)
1227	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1228	cmpult	a0, t0, t1			/* is in user space. */
1229	beq	t1, fswintrberr			/* if it's not, error out. */
1230	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1231	GET_CURLWP
1232	ldq	t1, 0(v0)
1233	lda	t0, fswintrberr
1234	.set noat
1235	ldq	at_reg, L_PCB(t1)
1236	stq	t0, PCB_ONFAULT(at_reg)
1237	stq	a0, PCB_ACCESSADDR(at_reg)
1238	.set at
1239	/* XXX FETCH IT */
1240	.set noat
1241	ldq	at_reg, L_PCB(t1)
1242	stq	zero, PCB_ONFAULT(at_reg)
1243	.set at
1244	RET
1245	END(fuswintr)
1246
1247LEAF(suswintr, 2)
1248	LDGP(pv)
1249	ldiq	t0, VM_MAX_ADDRESS		/* make sure that addr */
1250	cmpult	a0, t0, t1			/* is in user space. */
1251	beq	t1, fswintrberr			/* if it's not, error out. */
1252	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1253	GET_CURLWP
1254	ldq	t1, 0(v0)
1255	lda	t0, fswintrberr
1256	.set noat
1257	ldq	at_reg, L_PCB(t1)
1258	stq	t0, PCB_ONFAULT(at_reg)
1259	stq	a0, PCB_ACCESSADDR(at_reg)
1260	.set at
1261	/* XXX STORE IT */
1262	.set noat
1263	ldq	at_reg, L_PCB(t1)
1264	stq	zero, PCB_ONFAULT(at_reg)
1265	.set at
1266	mov	zero, v0
1267	RET
1268	END(suswintr)
1269#endif
1270
1271LEAF(fswintrberr, 0)
1272XLEAF(fuswintr, 2)				/* XXX what is a 'word'? */
1273XLEAF(suswintr, 2)				/* XXX what is a 'word'? */
1274	LDGP(pv)
1275	ldiq	v0, -1
1276	RET
1277	END(fswintrberr)
1278
1279/*
1280 * int ucas_32(volatile int32_t *uptr, int32_t old, int32_t new, int32_t *ret);
1281 */
1282
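/*
 * Semantically (user-address checks and fault handling aside), this is
 * a single compare-and-swap attempt, retried only if the store-
 * conditional fails; load_locked/store_conditional below are just
 * placeholders for the ldl_l/stl_c pair:
 *
 *	int ucas_32(volatile int32_t *uptr, int32_t old, int32_t new,
 *	    int32_t *ret)
 *	{
 *		do {
 *			*ret = load_locked(uptr);	// ldl_l
 *			if (*ret != old)
 *				break;
 *		} while (!store_conditional(uptr, new)); // stl_c
 *		return 0;			// EFAULT via copyerr on fault
 *	}
 */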
1283NESTED(ucas_32, 4, 16, ra, IM_S0 | IM_RA, 0)
1284	LDGP(pv)
1285	lda	sp, -16(sp)			/* set up stack frame	     */
1286	stq	ra, (16-8)(sp)			/* save ra		     */
1287	stq	s0, (16-16)(sp)			/* save s0		     */
1288	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
1289	cmpult	a0, t0, t1			/* is in user space.	     */
1290	beq	t1, copyerr_efault		/* if it's not, error out.   */
1291	and	a0, 3, t1			/* check if addr is aligned. */
1292	bne	t1, copyerr_efault		/* if it's not, error out.   */
1293	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1294	GET_CURLWP
1295	ldq	s0, 0(v0)			/* s0 = curlwp		     */
1296	lda	v0, copyerr			/* set up fault handler.     */
1297	.set noat
1298	ldq	at_reg, L_PCB(s0)
1299	stq	v0, PCB_ONFAULT(at_reg)
1300	.set at
1301
13023:
1303	ldl_l	t0, 0(a0)			/* t0 = *uptr */
1304	cmpeq	t0, a1, t1			/* does t0 = old? */
1305	beq	t1, 1f				/* if not, skip */
1306	mov	a2, t1
1307	stl_c	t1, 0(a0)			/* *uptr ~= new */
1308	beq	t1, 2f				/* did it work? */
13091:
1310	stl	t0, 0(a3)			/* *ret = t0 */
1311	mov	zero, v0
1312
1313	.set noat
1314	ldq	at_reg, L_PCB(s0)		/* kill the fault handler.   */
1315	stq	zero, PCB_ONFAULT(at_reg)
1316	.set at
1317	ldq	ra, (16-8)(sp)			/* restore ra.		     */
1318	ldq	s0, (16-16)(sp)			/* restore s0.		     */
1319	lda	sp, 16(sp)			/* kill stack frame.	     */
1320	RET					/* v0 left over from copystr */
1321
13222:
1323	br	3b
1324END(ucas_32)
1325
1326STRONG_ALIAS(ucas_int,ucas_32)
1327
1328/*
1329 * int ucas_64(volatile int64_t *uptr, int64_t old, int64_t new, int64_t *ret);
1330 */
1331
1332NESTED(ucas_64, 4, 16, ra, IM_S0 | IM_RA, 0)
1333	LDGP(pv)
1334	lda	sp, -16(sp)			/* set up stack frame	     */
1335	stq	ra, (16-8)(sp)			/* save ra		     */
1336	stq	s0, (16-16)(sp)			/* save s0		     */
1337	ldiq	t0, VM_MAX_ADDRESS		/* make sure that src addr   */
1338	cmpult	a0, t0, t1			/* is in user space.	     */
1339	beq	t1, copyerr_efault		/* if it's not, error out.   */
1340	and	a0, 7, t1			/* check if addr is quadword aligned. */
1341	bne	t1, copyerr_efault		/* if it's not, error out.   */
1342	/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
1343	GET_CURLWP
1344	ldq	s0, 0(v0)			/* s0 = curlwp		     */
1345	lda	v0, copyerr			/* set up fault handler.     */
1346	.set noat
1347	ldq	at_reg, L_PCB(s0)
1348	stq	v0, PCB_ONFAULT(at_reg)
1349	.set at
1350
13513:
1352	ldq_l	t0, 0(a0)			/* t0 = *uptr */
1353	cmpeq	t0, a1, t1			/* does t0 = old? */
1354	beq	t1, 1f				/* if not, skip */
1355	mov	a2, t1
1356	stq_c	t1, 0(a0)			/* *uptr ~= new */
1357	beq	t1, 2f				/* did it work? */
13581:
1359	stq	t0, 0(a3)			/* *ret = t0 */
1360	mov	zero, v0
1361
1362	.set noat
1363	ldq	at_reg, L_PCB(s0)		/* kill the fault handler.   */
1364	stq	zero, PCB_ONFAULT(at_reg)
1365	.set at
1366	ldq	ra, (16-8)(sp)			/* restore ra.		     */
1367	ldq	s0, (16-16)(sp)			/* restore s0.		     */
1368	lda	sp, 16(sp)			/* kill stack frame.	     */
1369	RET					/* v0 left over from copystr */
1370
13712:
1372	br	3b
1373END(ucas_64)
1374
1375STRONG_ALIAS(ucas_ptr,ucas_64)
1376
1377/**************************************************************************/
1378
1379/*
1380 * console 'restart' routine to be placed in HWRPB.
1381 */
1382LEAF(XentRestart, 1)			/* XXX should be NESTED */
1383	.set noat
1384	lda	sp,-(FRAME_SIZE*8)(sp)
1385	stq	at_reg,(FRAME_AT*8)(sp)
1386	.set at
1387	stq	v0,(FRAME_V0*8)(sp)
1388	stq	a0,(FRAME_A0*8)(sp)
1389	stq	a1,(FRAME_A1*8)(sp)
1390	stq	a2,(FRAME_A2*8)(sp)
1391	stq	a3,(FRAME_A3*8)(sp)
1392	stq	a4,(FRAME_A4*8)(sp)
1393	stq	a5,(FRAME_A5*8)(sp)
1394	stq	s0,(FRAME_S0*8)(sp)
1395	stq	s1,(FRAME_S1*8)(sp)
1396	stq	s2,(FRAME_S2*8)(sp)
1397	stq	s3,(FRAME_S3*8)(sp)
1398	stq	s4,(FRAME_S4*8)(sp)
1399	stq	s5,(FRAME_S5*8)(sp)
1400	stq	s6,(FRAME_S6*8)(sp)
1401	stq	t0,(FRAME_T0*8)(sp)
1402	stq	t1,(FRAME_T1*8)(sp)
1403	stq	t2,(FRAME_T2*8)(sp)
1404	stq	t3,(FRAME_T3*8)(sp)
1405	stq	t4,(FRAME_T4*8)(sp)
1406	stq	t5,(FRAME_T5*8)(sp)
1407	stq	t6,(FRAME_T6*8)(sp)
1408	stq	t7,(FRAME_T7*8)(sp)
1409	stq	t8,(FRAME_T8*8)(sp)
1410	stq	t9,(FRAME_T9*8)(sp)
1411	stq	t10,(FRAME_T10*8)(sp)
1412	stq	t11,(FRAME_T11*8)(sp)
1413	stq	t12,(FRAME_T12*8)(sp)
1414	stq	ra,(FRAME_RA*8)(sp)
1415
1416	br	pv,1f
14171:	LDGP(pv)
1418
1419	mov	sp,a0
1420	CALL(console_restart)
1421
1422	call_pal PAL_halt
1423	END(XentRestart)
1424
1425/**************************************************************************/
1426
1427/*
1428 * Kernel setjmp and longjmp.  Rather minimalist.
1429 *
1430 *	longjmp(label_t *a)
1431 * will generate a "return (1)" from the last call to
1432 *	setjmp(label_t *a)
1433 * by restoring registers from the stack.
1434 */
1435
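/*
 * Typical in-kernel use, as a sketch:
 *
 *	label_t jb;
 *
 *	if (setjmp(&jb) == 0) {
 *		... code that may longjmp(&jb) ...
 *	} else {
 *		... resumed here; setjmp appears to return 1 ...
 *	}
 *
 * The label_t holds, in order: ra, s0-s6, sp and a magic check word
 * that longjmp() verifies before restoring anything.
 */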
1436	.set	noreorder
1437
1438LEAF(setjmp, 1)
1439	LDGP(pv)
1440
1441	stq	ra, (0 * 8)(a0)			/* return address */
1442	stq	s0, (1 * 8)(a0)			/* callee-saved registers */
1443	stq	s1, (2 * 8)(a0)
1444	stq	s2, (3 * 8)(a0)
1445	stq	s3, (4 * 8)(a0)
1446	stq	s4, (5 * 8)(a0)
1447	stq	s5, (6 * 8)(a0)
1448	stq	s6, (7 * 8)(a0)
1449	stq	sp, (8 * 8)(a0)
1450
1451	ldiq	t0, 0xbeeffedadeadbabe		/* set magic number */
1452	stq	t0, (9 * 8)(a0)
1453
1454	mov	zero, v0			/* return zero */
1455	RET
1456END(setjmp)
1457
1458LEAF(longjmp, 1)
1459	LDGP(pv)
1460
1461	ldiq	t0, 0xbeeffedadeadbabe		/* check magic number */
1462	ldq	t1, (9 * 8)(a0)
1463	cmpeq	t0, t1, t0
1464	beq	t0, longjmp_botch		/* if bad, punt */
1465
1466	ldq	ra, (0 * 8)(a0)			/* return address */
1467	ldq	s0, (1 * 8)(a0)			/* callee-saved registers */
1468	ldq	s1, (2 * 8)(a0)
1469	ldq	s2, (3 * 8)(a0)
1470	ldq	s3, (4 * 8)(a0)
1471	ldq	s4, (5 * 8)(a0)
1472	ldq	s5, (6 * 8)(a0)
1473	ldq	s6, (7 * 8)(a0)
1474	ldq	sp, (8 * 8)(a0)
1475
1476	ldiq	v0, 1
1477	RET
1478
1479longjmp_botch:
1480	lda	a0, longjmp_botchmsg
1481	mov	ra, a1
1482	CALL(panic)
1483	call_pal PAL_bugchk
1484
1485	.data
1486longjmp_botchmsg:
1487	.asciz	"longjmp botch from %p"
1488	.text
1489END(longjmp)
1490
1491/*
1492 * void sts(int rn, u_int32_t *rval);
1493 * void stt(int rn, u_int64_t *rval);
1494 * void lds(int rn, u_int32_t *rval);
1495 * void ldt(int rn, u_int64_t *rval);
1496 */
1497
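/*
 * An FP register number cannot be selected at run time in a single
 * instruction, so each routine below jumps into a 32-entry table of
 * two-instruction (8-byte) slots indexed by rn; conceptually:
 *
 *	void alpha_stt(int rn, uint64_t *rval)
 *	{
 *		switch (rn & 0x1f) {
 *		case 0:  stt($f0,  rval); return;
 *		...
 *		case 31: stt($f31, rval); return;
 *		}
 *	}
 */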
1498.macro	make_freg_util name, op
1499	LEAF(alpha_\name, 2)
1500	and	a0, 0x1f, a0
1501	s8addq	a0, pv, pv
1502	addq	pv, 1f - alpha_\name, pv
1503	jmp	(pv)
15041:
1505	rn = 0
1506	.rept	32
1507	\op	$f0 + rn, 0(a1)
1508	RET
1509	rn = rn + 1
1510	.endr
1511	END(alpha_\name)
1512.endm
1513/*
1514LEAF(alpha_sts, 2)
1515LEAF(alpha_stt, 2)
1516LEAF(alpha_lds, 2)
1517LEAF(alpha_ldt, 2)
1518 */
1519	make_freg_util sts, sts
1520	make_freg_util stt, stt
1521	make_freg_util lds, lds
1522	make_freg_util ldt, ldt
1523
1524LEAF(alpha_read_fpcr, 0); f30save = 0; rettmp = 8; framesz = 16
1525	lda	sp, -framesz(sp)
1526	stt	$f30, f30save(sp)
1527	mf_fpcr	$f30
1528	stt	$f30, rettmp(sp)
1529	ldt	$f30, f30save(sp)
1530	ldq	v0, rettmp(sp)
1531	lda	sp, framesz(sp)
1532	RET
1533END(alpha_read_fpcr)
1534
1535LEAF(alpha_write_fpcr, 1); f30save = 0; fpcrtmp = 8; framesz = 16
1536	lda	sp, -framesz(sp)
1537	stq	a0, fpcrtmp(sp)
1538	stt	$f30, f30save(sp)
1539	ldt	$f30, fpcrtmp(sp)
1540	mt_fpcr	$f30
1541	ldt	$f30, f30save(sp)
1542	lda	sp, framesz(sp)
1543	RET
1544END(alpha_write_fpcr)
1545