1/*
2 * Copyright (c) 1992 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This software was developed by the Computer Systems Engineering group
6 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
7 * contributed to Berkeley.
8 *
9 * All advertising materials mentioning features or use of this software
10 * must display the following acknowledgement:
11 *	This product includes software developed by the University of
12 *	California, Lawrence Berkeley Laboratories.
13 *
14 * %sccs.include.redist.c%
15 *
16 *	@(#)locore.s	7.2 (Berkeley) 07/21/92
17 *
18 * from: $Header: locore.s,v 1.45 92/07/12 08:19:55 torek Exp $
19 */
20
21#define	LOCORE
22#include "assym.s"
23#include "ctlreg.h"
24#include "intreg.h"
25#include "timerreg.h"
26#ifdef notyet
27#include "zsaddr.h"
28#include "../dev/zsreg.h"
29#endif
30#include "machine/psl.h"
31#include "machine/signal.h"
32#include "machine/trap.h"
33
34/*
35 * GNU assembler does not understand `.empty' directive; Sun assembler
36 * gripes about labels without it.  To allow cross-compilation using
37 * the Sun assembler, and because .empty directives are useful documentation,
38 * we use this trick.  While we are at it we fix the .align mismatch.
39 *
40 * XXX	new GNU assemblers match the Sun assembler in interpretation of
41 * XXX	.align -- will have to fix this for them
42 */
43#ifdef SUN_AS
44#define	EMPTY	.empty
45#define	ALIGN	.align 4
46#else
47#define	EMPTY	/* .empty */
48#define	ALIGN	.align 2
49#endif
50
51/*
52 * CCFSZ (C Compiler Frame SiZe) is the size of a stack frame required if
53 * a function is to call C code.  It should be just 64, but Sun defined
54 * their frame with space to hold arguments 0 through 5 (plus some junk),
55 * and varargs routines (such as printf) demand this, and gcc uses this
56 * area at times anyway.
57 */
58#define	CCFSZ	96
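/*
 * For the record, the 96 breaks down per the standard SPARC frame layout
 * (figures here are the usual v8 convention, not anything defined locally):
 *	64 bytes  %sp+0..63   save area for this window's locals and ins
 *	 4 bytes  %sp+64      pointer slot for aggregate (struct) returns
 *	24 bytes  %sp+68..91  homes for outgoing arguments 0 through 5
 * for 92 bytes total, rounded up to 96 to keep %sp doubleword aligned.
 */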
59
60/*
61 * A handy macro for maintaining instrumentation counters.
62 * Note that this clobbers %o0 and %o1.  Normal usage is
63 * something like:
64 *	foointr:
65 *		TRAP_SETUP(...)		! makes %o registers safe
66 *		INCR(_cnt+V_FOO)	! count a foo
67 */
68#define INCR(what) \
69	sethi	%hi(what), %o0; \
70	ld	[%o0 + %lo(what)], %o1; \
71	inc	%o1; \
72	st	%o1, [%o0 + %lo(what)]
73
74/*
75 * Another handy macro: load one register window, given `base' address.
76 * This can be either a simple register (e.g., %sp) or include an initial
77 * offset (e.g., %g6 + PCB_RW).
78 */
79#define	LOADWIN(addr) \
80	ldd	[addr], %l0; \
81	ldd	[addr + 8], %l2; \
82	ldd	[addr + 16], %l4; \
83	ldd	[addr + 24], %l6; \
84	ldd	[addr + 32], %i0; \
85	ldd	[addr + 40], %i2; \
86	ldd	[addr + 48], %i4; \
87	ldd	[addr + 56], %i6
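/*
 * The 64-byte window image that LOADWIN reads (and that the window-saving
 * code below writes) has the usual layout; a C sketch, for orientation only
 * (see the machine headers for the real `struct rwindow'):
 *
 *	struct rwindow {
 *		int	rw_local[8];	// %l0..%l7, offsets 0..28
 *		int	rw_in[8];	// %i0..%i7, offsets 32..60
 *	};				// sizeof = 64
 */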
88
89/*
90 * To return from trap we need the two-instruction sequence
91 * `jmp %l1; rett %l2', which is defined here for convenience.
92 */
93#define	RETT	jmp %l1; rett %l2
94
95	.data
96/*
97 * The interrupt stack.
98 *
99 * This is the very first thing in the data segment, and therefore has
100 * the lowest kernel stack address.  We count on this in the interrupt
101 * trap-frame setup code, since we may need to switch from the kernel
102 * stack to the interrupt stack (iff we are not already on the interrupt
103 * stack).  One sethi+cmp is all we need since this is so carefully
104 * arranged.
105 */
106	.globl	_intstack
107	.globl	_eintstack
108_intstack:
109	.skip	4 * NBPG		! 16k = 128 128-byte stack frames
110_eintstack:
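/*
 * A sketch, in C, of the cheap test this arrangement buys us (this is what
 * INTR_SETUP below does; illustrative pseudocode, not kernel source):
 *
 *	char *
 *	pick_intr_sp(char *fp, int stackspace)	// stackspace is negative
 *	{
 *		extern char eintstack[];
 *
 *		if (fp >= eintstack)		// on some process's kernel stack
 *			return (eintstack + stackspace);  // switch to intstack
 *		return (fp + stackspace);	// already on the interrupt stack
 *	}
 */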
111
112/*
113 * When a process exits and its u. area goes away, we set cpcb to point
114 * to this `u.', leaving us with something to use for an interrupt stack,
115 * and letting all the register save code have a pcb_uw to examine.
116 * This is also carefully arranged (to come just before u0, so that
117 * process 0's kernel stack can quietly overrun into it during bootup, if
118 * we feel like doing that).
119 */
120	.globl	_idle_u
121_idle_u:
122	.skip	UPAGES * NBPG
123
124/*
125 * Process 0's u.
126 *
127 * This must be aligned on an 8 byte boundary.
128 */
129	.globl	_u0
130_u0:	.skip	UPAGES * NBPG
131estack0:
132
133#ifdef KGDB
134/*
135 * Another item that must be aligned, easiest to put it here.
136 */
137KGDB_STACK_SIZE = 2048
138	.globl	_kgdb_stack
139_kgdb_stack:
140	.skip	KGDB_STACK_SIZE		! hope this is enough
141#endif
142
143/*
144 * _cpcb points to the current pcb (and hence u. area).
145 * Initially this is the special one.
146 */
147	.globl	_cpcb
148_cpcb:	.word	_u0
149
150	.text
151
152/*
153 * The first thing in the real text segment is the trap vector table,
154 * which must be aligned on a 4096 byte boundary.  The text segment
155 * starts beyond page 0 of KERNBASE so that there is a red zone
156 * between user and kernel space.  Since the boot ROM loads us at
157 * 0x4000, it is far easier to start at KERNBASE+0x4000 than to
158 * buck the trend.  This is four pages in; we can stuff something
159 * into the three pages left beneath us later ... like, oh, say, the
160 * message buffer (1 page).
161 */
162	.globl	_msgbuf
163msgbufsize = NBPG			! 1 page for msg buffer
164_msgbuf	= KERNBASE + NBPG
165
166/*
167 * The remaining two physical pages are currently unused.  We need to
168 * map the interrupt enable register very early on in the boot process,
169 * so that we can handle NMIs (parity errors) halfway sensibly during
170 * boot.  We use virtual address f8002000 (`page 2') for this, wasting
171 * 4096 bytes of physical memory.
172 */
173IE_reg_addr = _msgbuf + msgbufsize	! this page not used; points to IEreg
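/*
 * Arithmetic check, assuming KERNBASE = 0xf8000000 and NBPG = 4096 (which
 * is what the `page 2' and 16k figures above imply):
 *	_msgbuf     = KERNBASE + NBPG = 0xf8001000
 *	IE_reg_addr = _msgbuf + NBPG  = 0xf8002000
 */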
174
175/*
176 * Each trap has room for four instructions, of which one perforce must
177 * be a branch.  On entry the hardware has copied pc and npc to %l1 and
178 * %l2 respectively.  We use two more to read the psr into %l0, and to
179 * put the trap type value into %l3 (with a few exceptions below).
180 * We could read the trap type field of %tbr later in the code instead,
181 * but there is no need, and that would require more instructions
182 * (read+mask, vs 1 `mov' here).
183 *
184 * I used to generate these numbers by address arithmetic, but gas's
185 * expression evaluator has about as much sense as your average slug
186 * (oddly enough, the code looks about as slimy too).  Thus, all the
187 * trap numbers are given as arguments to the trap macros.  This means
188 * there is one line per trap.  Sigh.
189 *
190 * Note that only the local registers may be used, since the trap
191 * window is potentially the last window.  Its `in' registers are
192 * the previous window's outs (as usual), but more important, its
193 * `out' registers may be in use as the `topmost' window's `in' registers.
194 * The global registers are of course verboten (well, until we save
195 * them away).
196 *
197 * Hardware interrupt vectors can be `linked'---the linkage is to regular
198 * C code---or rewired to fast in-window handlers.  The latter are good
199 * for unbuffered hardware like the Zilog serial chip and the AMD audio
200 * chip, where many interrupts can be handled trivially with pseudo-DMA or
201 * similar.  Only one `fast' interrupt can be used per level, however, and
202 * direct and `fast' interrupts are incompatible.  Routines in intr.c
203 * handle setting these, with optional paranoia.
204 */
205
206	/* regular vectored traps */
207#define	VTRAP(type, label) \
208	mov (type), %l3; b label; mov %psr, %l0; nop
209
210	/* hardware interrupts (can be linked or made `fast') */
211#define	HARDINT(lev) \
212	mov (lev), %l3; b _sparc_interrupt; mov %psr, %l0; nop
213
214	/* software interrupts (may not be made direct, sorry---but you
215	   should not be using them trivially anyway) */
216#define	SOFTINT(lev, bit) \
217	mov (lev), %l3; mov (bit), %l4; b softintr; mov %psr, %l0
218
219	/* traps that just call trap() */
220#define	TRAP(type)	VTRAP(type, slowtrap)
221
222	/* architecturally undefined traps (cause panic) */
223#define	UTRAP(type)	VTRAP(type, slowtrap)
224
225	/* software undefined traps (may be replaced) */
226#define	STRAP(type)	VTRAP(type, slowtrap)
227
228/* breakpoint acts differently under kgdb */
229#ifdef KGDB
230#define	BPT		VTRAP(T_BREAKPOINT, bpt)
231#define	BPT_KGDB_EXEC	VTRAP(T_KGDB_EXEC, bpt)
232#else
233#define	BPT		TRAP(T_BREAKPOINT)
234#define	BPT_KGDB_EXEC	TRAP(T_KGDB_EXEC)
235#endif
236
237/* special high-speed 1-instruction-shaved-off traps (get nothing in %l3) */
238#ifdef COMPAT_SUNOS
239#define	SUN_SYSCALL	b sun_syscall; mov %psr, %l0; nop; nop
240#else
241#define	SUN_SYSCALL	TRAP(T_SUN_SYSCALL)
242#endif
243#define	SYSCALL		b syscall; mov %psr, %l0; nop; nop
244#define	WINDOW_OF	b window_of; mov %psr, %l0; nop; nop
245#define	WINDOW_UF	b window_uf; mov %psr, %l0; nop; nop
246#ifdef notyet
247#define	ZS_INTERRUPT	b zshard; mov %psr, %l0; nop; nop
248#else
249#define	ZS_INTERRUPT	HARDINT(12)
250#endif
251
252	.globl	start
253	.globl	_trapbase
254start:
255_trapbase:
256/* trap 0 is special since we cannot receive it */
257	b dostart; nop; nop; nop	! 00 = reset (fake)
258	VTRAP(T_TEXTFAULT, memfault)	! 01 = instr. fetch fault
259	TRAP(T_ILLINST)			! 02 = illegal instruction
260	TRAP(T_PRIVINST)		! 03 = privileged instruction
261	TRAP(T_FPDISABLED)		! 04 = fp instr, but EF bit off in psr
262	WINDOW_OF			! 05 = window overflow
263	WINDOW_UF			! 06 = window underflow
264	TRAP(T_ALIGN)			! 07 = address alignment error
265	VTRAP(T_FPE, fp_exception)	! 08 = fp exception
266	VTRAP(T_DATAFAULT, memfault)	! 09 = data fetch fault
267	TRAP(T_TAGOF)			! 0a = tag overflow
268	UTRAP(0x0b)
269	UTRAP(0x0c)
270	UTRAP(0x0d)
271	UTRAP(0x0e)
272	UTRAP(0x0f)
273	UTRAP(0x10)
274	SOFTINT(1, IE_L1)		! 11 = level 1 interrupt
275	HARDINT(2)			! 12 = level 2 interrupt
276	HARDINT(3)			! 13 = level 3 interrupt
277	SOFTINT(4, IE_L4)		! 14 = level 4 interrupt
278	HARDINT(5)			! 15 = level 5 interrupt
279	SOFTINT(6, IE_L6)		! 16 = level 6 interrupt
280	HARDINT(7)			! 17 = level 7 interrupt
281	HARDINT(8)			! 18 = level 8 interrupt
282	HARDINT(9)			! 19 = level 9 interrupt
283	HARDINT(10)			! 1a = level 10 interrupt
284	HARDINT(11)			! 1b = level 11 interrupt
285	ZS_INTERRUPT			! 1c = level 12 (zs) interrupt
286	HARDINT(13)			! 1d = level 13 interrupt
287	HARDINT(14)			! 1e = level 14 interrupt
288	VTRAP(15, nmi)			! 1f = nonmaskable interrupt
289	UTRAP(0x20)
290	UTRAP(0x21)
291	UTRAP(0x22)
292	UTRAP(0x23)
293	UTRAP(0x24)
294	UTRAP(0x25)
295	UTRAP(0x26)
296	UTRAP(0x27)
297	UTRAP(0x28)
298	UTRAP(0x29)
299	UTRAP(0x2a)
300	UTRAP(0x2b)
301	UTRAP(0x2c)
302	UTRAP(0x2d)
303	UTRAP(0x2e)
304	UTRAP(0x2f)
305	UTRAP(0x30)
306	UTRAP(0x31)
307	UTRAP(0x32)
308	UTRAP(0x33)
309	UTRAP(0x34)
310	UTRAP(0x35)
311	TRAP(T_CPDISABLED)	! 36 = coprocessor instr, EC bit off in psr
312	UTRAP(0x37)
313	UTRAP(0x38)
314	UTRAP(0x39)
315	UTRAP(0x3a)
316	UTRAP(0x3b)
317	UTRAP(0x3c)
318	UTRAP(0x3d)
319	UTRAP(0x3e)
320	UTRAP(0x3f)
321	TRAP(T_CPEXCEPTION)	! 40 = coprocessor exception
322	UTRAP(0x41)
323	UTRAP(0x42)
324	UTRAP(0x43)
325	UTRAP(0x44)
326	UTRAP(0x45)
327	UTRAP(0x46)
328	UTRAP(0x47)
329	UTRAP(0x48)
330	UTRAP(0x49)
331	UTRAP(0x4a)
332	UTRAP(0x4b)
333	UTRAP(0x4c)
334	UTRAP(0x4d)
335	UTRAP(0x4e)
336	UTRAP(0x4f)
337	UTRAP(0x50)
338	UTRAP(0x51)
339	UTRAP(0x52)
340	UTRAP(0x53)
341	UTRAP(0x54)
342	UTRAP(0x55)
343	UTRAP(0x56)
344	UTRAP(0x57)
345	UTRAP(0x58)
346	UTRAP(0x59)
347	UTRAP(0x5a)
348	UTRAP(0x5b)
349	UTRAP(0x5c)
350	UTRAP(0x5d)
351	UTRAP(0x5e)
352	UTRAP(0x5f)
353	UTRAP(0x60)
354	UTRAP(0x61)
355	UTRAP(0x62)
356	UTRAP(0x63)
357	UTRAP(0x64)
358	UTRAP(0x65)
359	UTRAP(0x66)
360	UTRAP(0x67)
361	UTRAP(0x68)
362	UTRAP(0x69)
363	UTRAP(0x6a)
364	UTRAP(0x6b)
365	UTRAP(0x6c)
366	UTRAP(0x6d)
367	UTRAP(0x6e)
368	UTRAP(0x6f)
369	UTRAP(0x70)
370	UTRAP(0x71)
371	UTRAP(0x72)
372	UTRAP(0x73)
373	UTRAP(0x74)
374	UTRAP(0x75)
375	UTRAP(0x76)
376	UTRAP(0x77)
377	UTRAP(0x78)
378	UTRAP(0x79)
379	UTRAP(0x7a)
380	UTRAP(0x7b)
381	UTRAP(0x7c)
382	UTRAP(0x7d)
383	UTRAP(0x7e)
384	UTRAP(0x7f)
385	SUN_SYSCALL		! 80 = sun syscall
386	BPT			! 81 = pseudo breakpoint instruction
387	TRAP(T_DIV0)		! 82 = divide by zero
388	TRAP(T_FLUSHWIN)	! 83 = flush windows
389	TRAP(T_CLEANWIN)	! 84 = provide clean windows
390	TRAP(T_RANGECHECK)	! 85 = ???
391	TRAP(T_FIXALIGN)	! 86 = fix up unaligned accesses
392	TRAP(T_INTOF)		! 87 = integer overflow
393	BPT_KGDB_EXEC		! 88 = enter kernel gdb on kernel startup
394	SYSCALL			! 89 = bsd syscall
395	STRAP(0x8a)
396	STRAP(0x8b)
397	STRAP(0x8c)
398	STRAP(0x8d)
399	STRAP(0x8e)
400	STRAP(0x8f)
401	STRAP(0x90)
402	STRAP(0x91)
403	STRAP(0x92)
404	STRAP(0x93)
405	STRAP(0x94)
406	STRAP(0x95)
407	STRAP(0x96)
408	STRAP(0x97)
409	STRAP(0x98)
410	STRAP(0x99)
411	STRAP(0x9a)
412	STRAP(0x9b)
413	STRAP(0x9c)
414	STRAP(0x9d)
415	STRAP(0x9e)
416	STRAP(0x9f)
417	STRAP(0xa0)
418	STRAP(0xa1)
419	STRAP(0xa2)
420	STRAP(0xa3)
421	STRAP(0xa4)
422	STRAP(0xa5)
423	STRAP(0xa6)
424	STRAP(0xa7)
425	STRAP(0xa8)
426	STRAP(0xa9)
427	STRAP(0xaa)
428	STRAP(0xab)
429	STRAP(0xac)
430	STRAP(0xad)
431	STRAP(0xae)
432	STRAP(0xaf)
433	STRAP(0xb0)
434	STRAP(0xb1)
435	STRAP(0xb2)
436	STRAP(0xb3)
437	STRAP(0xb4)
438	STRAP(0xb5)
439	STRAP(0xb6)
440	STRAP(0xb7)
441	STRAP(0xb8)
442	STRAP(0xb9)
443	STRAP(0xba)
444	STRAP(0xbb)
445	STRAP(0xbc)
446	STRAP(0xbd)
447	STRAP(0xbe)
448	STRAP(0xbf)
449	STRAP(0xc0)
450	STRAP(0xc1)
451	STRAP(0xc2)
452	STRAP(0xc3)
453	STRAP(0xc4)
454	STRAP(0xc5)
455	STRAP(0xc6)
456	STRAP(0xc7)
457	STRAP(0xc8)
458	STRAP(0xc9)
459	STRAP(0xca)
460	STRAP(0xcb)
461	STRAP(0xcc)
462	STRAP(0xcd)
463	STRAP(0xce)
464	STRAP(0xcf)
465	STRAP(0xd0)
466	STRAP(0xd1)
467	STRAP(0xd2)
468	STRAP(0xd3)
469	STRAP(0xd4)
470	STRAP(0xd5)
471	STRAP(0xd6)
472	STRAP(0xd7)
473	STRAP(0xd8)
474	STRAP(0xd9)
475	STRAP(0xda)
476	STRAP(0xdb)
477	STRAP(0xdc)
478	STRAP(0xdd)
479	STRAP(0xde)
480	STRAP(0xdf)
481	STRAP(0xe0)
482	STRAP(0xe1)
483	STRAP(0xe2)
484	STRAP(0xe3)
485	STRAP(0xe4)
486	STRAP(0xe5)
487	STRAP(0xe6)
488	STRAP(0xe7)
489	STRAP(0xe8)
490	STRAP(0xe9)
491	STRAP(0xea)
492	STRAP(0xeb)
493	STRAP(0xec)
494	STRAP(0xed)
495	STRAP(0xee)
496	STRAP(0xef)
497	STRAP(0xf0)
498	STRAP(0xf1)
499	STRAP(0xf2)
500	STRAP(0xf3)
501	STRAP(0xf4)
502	STRAP(0xf5)
503	STRAP(0xf6)
504	STRAP(0xf7)
505	STRAP(0xf8)
506	STRAP(0xf9)
507	STRAP(0xfa)
508	STRAP(0xfb)
509	STRAP(0xfc)
510	STRAP(0xfd)
511	STRAP(0xfe)
512	STRAP(0xff)
513
514	/* the message buffer is always mapped */
515_msgbufmapped:
516	.word	1
517
518#ifdef DEBUG
519/*
520 * A hardware red zone is impossible.  We simulate one in software by
521 * keeping a `red zone' pointer; if %sp becomes less than this, we panic.
522 * This is expensive and is only enabled when debugging.
523 */
524#define	REDSIZE	(8*96)		/* some room for bouncing */
525#define	REDSTACK 2048		/* size of `panic: stack overflow' region */
526	.data
527_redzone:
528	.word	_idle_u + REDSIZE
529_redstack:
530	.skip	REDSTACK
531	.text
532Lpanic_red:
533	.asciz	"stack overflow"
534	ALIGN
535
536	/* set stack pointer redzone to base+minstack; alters base */
537#define	SET_SP_REDZONE(base, tmp) \
538	add	base, REDSIZE, base; \
539	sethi	%hi(_redzone), tmp; \
540	st	base, [tmp + %lo(_redzone)]
541
542	/* variant with a constant */
543#define	SET_SP_REDZONE_CONST(const, tmp1, tmp2) \
544	set	(const) + REDSIZE, tmp1; \
545	sethi	%hi(_redzone), tmp2; \
546	st	tmp1, [tmp2 + %lo(_redzone)]
547
548	/* check stack pointer against redzone (uses two temps) */
549#define	CHECK_SP_REDZONE(t1, t2) \
550	sethi	%hi(_redzone), t1; \
551	ld	[t1 + %lo(_redzone)], t2; \
552	cmp	%sp, t2;	/* if sp >= t2, not in red zone */ \
553	bgeu	7f; nop;	/* and can continue normally */ \
554	/* move to panic stack */ \
555	st	%g0, [t1 + %lo(_redzone)]; \
556	set	_redstack + REDSTACK - 96, %sp; \
557	/* prevent panic() from lowering ipl */ \
558	sethi	%hi(_panicstr), t2; \
559	set	Lpanic_red, t1; \
560	st	t1, [t2 + %lo(_panicstr)]; \
561	rd	%psr, t1;		/* t1 = splhigh() */ \
562	or	t1, PSR_PIL, t2; \
563	wr	t2, 0, %psr; \
564	wr	t2, PSR_ET, %psr;	/* turn on traps */ \
565	nop; nop; nop; \
566	save	%sp, -96, %sp;		/* preserve current window */ \
567	sethi	%hi(Lpanic_red), %o0; \
568	call	_panic; or %o0, %lo(Lpanic_red), %o0; \
5697:
570
571#else
572
573#define	SET_SP_REDZONE(base, tmp)
574#define	SET_SP_REDZONE_CONST(const, t1, t2)
575#define	CHECK_SP_REDZONE(t1, t2)
576#endif
577
578/*
579 * The window code must verify user stack addresses before using them.
580 * A user stack pointer is invalid if:
581 *	- it is not on an 8 byte boundary;
582 *	- its pages (a register window, being 64 bytes, can occupy
583 *	  two pages) are not readable or writable.
584 * We define three separate macros here for testing user stack addresses.
585 *
586 * PTE_OF_ADDR locates a PTE, branching to a `bad address'
587 *	handler if the stack pointer points into the hole in the
588 *	address space (i.e., top 3 bits are not either all 1 or all 0);
589 * CMP_PTE_USER_READ compares the located PTE against `user read' mode;
590 * CMP_PTE_USER_WRITE compares the located PTE against `user write' mode.
591 * The compares give `equal' if read or write is OK.
592 *
593 * Note that the user stack pointer usually points into high addresses
594 * (top 3 bits all 1), so that is what we check first.
595 *
596 * The code below also assumes that PTE_OF_ADDR is safe in a delay
597 * slot; it is, as it merely sets its `pte' register to a temporary value.
598 */
599	/* input: addr, output: pte; aux: bad address label */
600#define	PTE_OF_ADDR(addr, pte, bad) \
601	sra	addr, PG_VSHIFT, pte; \
602	cmp	pte, -1; \
603	be,a	1f; andn addr, 4095, pte; \
604	tst	pte; \
605	bne	bad; EMPTY; \
606	andn	addr, 4095, pte; \
6071:
608
609	/* input: pte; output: condition codes */
610#define	CMP_PTE_USER_READ(pte) \
611	lda	[pte] ASI_PTE, pte; \
612	srl	pte, PG_PROTSHIFT, pte; \
613	andn	pte, (PG_W >> PG_PROTSHIFT), pte; \
614	cmp	pte, PG_PROTUREAD
615
616	/* input: pte; output: condition codes */
617#define	CMP_PTE_USER_WRITE(pte) \
618	lda	[pte] ASI_PTE, pte; \
619	srl	pte, PG_PROTSHIFT, pte; \
620	cmp	pte, PG_PROTUWRITE
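/*
 * A minimal C rendering of the `hole' test PTE_OF_ADDR applies before the
 * ASI probe (illustrative only; PG_VSHIFT as in the pte header):
 *
 *	int
 *	addr_in_hole(unsigned va)
 *	{
 *		int top = (int)va >> PG_VSHIFT;	// arithmetic shift
 *		return (top != 0 && top != -1);	// high bits not all 0s or 1s
 *	}
 *
 * Only when this returns false is it safe to fetch the PTE with
 * `lda [pte] ASI_PTE' as the CMP_PTE_USER_* macros do.
 */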
621
622/*
623 * The calculations in PTE_OF_ADDR and CMP_PTE_USER_* are rather slow:
624 * in particular, according to Gordon Irlam of the University of Adelaide
625 * in Australia, these consume at least 18 cycles on an SS1 and 37 on an
626 * SS2.  Hence, we try to avoid them in the common case.
627 *
628 * A chunk of 64 bytes is on a single page if and only if:
629 *
630 *	((base + 64 - 1) & ~4095) == (base & ~4095)
631 *
632 * Equivalently (and faster to test), the low order bits (base & 4095) must
633 * be small enough so that the sum (base + 63) does not carry out into the
634 * upper page-address bits, i.e.,
635 *
636 *	(base & 4095) < (4096 - 63)
637 *
638 * so we allow testing that here.  This macro is also assumed to be safe
639 * in a delay slot (modulo overwriting its temporary).
640 */
641#define	SLT_IF_1PAGE_RW(addr, tmp) \
642	and	addr, 4095, tmp; \
643	cmp	tmp, (4096 - 63)
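/*
 * The equivalence claimed above, as a self-contained C predicate (a sketch
 * for checking the arithmetic, not kernel source):
 *
 *	int
 *	window_fits_one_page(unsigned base)	// 64-byte window at `base'
 *	{
 *		// exact form:
 *		return ((base + 64 - 1) & ~4095u) == (base & ~4095u);
 *		// equivalently, and what SLT_IF_1PAGE_RW tests:
 *		//	(base & 4095) < (4096 - 63)
 *	}
 */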
644
645/*
646 * Every trap that enables traps must set up stack space.
647 * If the trap is from user mode, this involves switching to the kernel
648 * stack for the current process, and we must also set cpcb->pcb_uw
649 * so that the window overflow handler can tell user windows from kernel
650 * windows.
651 *
652 * The number of user windows is:
653 *
654 *	cpcb->pcb_uw = (cpcb->pcb_wim - 1 - CWP) % nwindows
655 *
656 * (where pcb_wim = log2(current %wim) and CWP = low 5 bits of %psr).
657 * We compute this expression by table lookup in uwtab[CWP - pcb_wim],
658 * which has been set up as:
659 *
660 *	for i in [-nwin+1 .. nwin-1]
661 *		uwtab[i] = (nwin - 1 - i) % nwin;
662 *
663 * (If you do not believe this works, try it for yourself.)
664 *
665 * We also keep one or two more tables:
666 *
667 *	for i in 0..nwin-1
668 *		wmask[i] = 1 << ((i + 1) % nwindows);
669 *
670 * wmask[CWP] tells whether a `rett' would return into the invalid window.
671 */
672	.data
673	.skip	32			! alignment byte & negative indices
674uwtab:	.skip	32			! u_char uwtab[-31..31];
675wmask:	.skip	32			! u_char wmask[0..31];
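/*
 * A C sketch of how these tables end up filled (the real initialization is
 * done at startup, outside this excerpt, once the window count is known;
 * the function and names here are illustrative only):
 *
 *	void
 *	init_window_tables(int nwin)	// nwin = number of register windows
 *	{
 *		static signed char uwstore[64];
 *		signed char *uwtab = uwstore + 32;	// indices -31..31
 *		static unsigned char wmask[32];
 *		int i;
 *
 *		for (i = -nwin + 1; i <= nwin - 1; i++)
 *			uwtab[i] = (nwin - 1 - i) % nwin;
 *		for (i = 0; i < nwin; i++)
 *			wmask[i] = 1 << ((i + 1) % nwin);
 *	}
 *
 * so that uwtab[CWP - pcb_wim] is the user window count and
 * (wmask[CWP] & %wim) != 0 exactly when a `rett' would hit the invalid window.
 */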
676
677	.text
678/*
679 * Things begin to grow uglier....
680 *
681 * Each trap handler may (always) be running in the trap window.
682 * If this is the case, it cannot enable further traps until it writes
683 * the register windows into the stack (or, if the stack is no good,
684 * the current pcb).
685 *
686 * ASSUMPTIONS: TRAP_SETUP() is called with:
687 *	%l0 = %psr
688 *	%l1 = return pc
689 *	%l2 = return npc
690 *	%l3 = (some value that must not be altered)
691 * which means we have 4 registers to work with.
692 *
693 * The `stackspace' argument is the number of stack bytes to allocate
694 * for register-saving, and must be at least -64 (and typically more,
695 * for global registers and %y).
696 *
697 * Trapframes should use -CCFSZ-80.  (80 = sizeof(struct trapframe);
698 * see trap.h.  This basically means EVERYONE.  Interrupt frames could
699 * get away with less, but currently do not.)
700 *
701 * The basic outline here is:
702 *
703 *	if (trap came from kernel mode) {
704 *		if (we are in the trap window)
705 *			save it away;
706 *		%sp = %fp - stackspace;
707 *	} else {
708 *		compute the number of user windows;
709 *		if (we are in the trap window)
710 *			save it away;
711 *		%sp = (top of kernel stack) - stackspace;
712 *	}
713 *
714 * Again, the number of user windows is:
715 *
716 *	cpcb->pcb_uw = (cpcb->pcb_wim - 1 - CWP) % nwindows
717 *
718 * (where pcb_wim = log2(current %wim) and CWP is the low 5 bits of %psr),
719 * and this is computed as `uwtab[CWP - pcb_wim]'.
720 *
721 * NOTE: if you change this code, you will have to look carefully
722 * at the window overflow and underflow handlers and make sure they
723 * have similar changes made as needed.
724 */
725#define	CALL_CLEAN_TRAP_WINDOW \
726	sethi	%hi(clean_trap_window), %l7; \
727	jmpl	%l7 + %lo(clean_trap_window), %l4; \
728	 mov	%g7, %l7	/* save %g7 in %l7 for clean_trap_window */
729
730#define	TRAP_SETUP(stackspace) \
731	rd	%wim, %l4; \
732	mov	1, %l5; \
733	sll	%l5, %l0, %l5; \
734	btst	PSR_PS, %l0; \
735	bz	1f; \
736	 btst	%l5, %l4; \
737	/* came from kernel mode; cond codes indicate trap window */ \
738	bz,a	3f; \
739	 add	%fp, stackspace, %sp;	/* want to just set %sp */ \
740	CALL_CLEAN_TRAP_WINDOW;		/* but maybe need to clean first */ \
741	b	3f; \
742	 add	%fp, stackspace, %sp; \
7431: \
744	/* came from user mode: compute pcb_nw */ \
745	sethi	%hi(_cpcb), %l6; \
746	ld	[%l6 + %lo(_cpcb)], %l6; \
747	ld	[%l6 + PCB_WIM], %l5; \
748	and	%l0, 31, %l4; \
749	sub	%l4, %l5, %l5; \
750	set	uwtab, %l4; \
751	ldub	[%l4 + %l5], %l5; \
752	st	%l5, [%l6 + PCB_UW]; \
753	/* cond codes still indicate whether in trap window */ \
754	bz,a	2f; \
755	 sethi	%hi(UPAGES*NBPG+(stackspace)), %l5; \
756	/* yes, in trap window; must clean it */ \
757	CALL_CLEAN_TRAP_WINDOW; \
758	sethi	%hi(_cpcb), %l6; \
759	ld	[%l6 + %lo(_cpcb)], %l6; \
760	sethi	%hi(UPAGES*NBPG+(stackspace)), %l5; \
7612: \
762	/* trap window is (now) clean: set %sp */ \
763	or	%l5, %lo(UPAGES*NBPG+(stackspace)), %l5; \
764	add	%l6, %l5, %sp; \
765	SET_SP_REDZONE(%l6, %l5); \
7663: \
767	CHECK_SP_REDZONE(%l6, %l5)
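/*
 * The CCFSZ+0 .. CCFSZ+76 stores and loads in the handlers below assume an
 * 80-byte trap frame laid out as follows; a sketch only, see trap.h (as the
 * comment above says) for the authoritative definition:
 *
 *	struct trapframe {
 *		int	tf_psr;		// CCFSZ+0
 *		int	tf_pc;		// +4
 *		int	tf_npc;		// +8
 *		int	tf_y;		// +12
 *		int	tf_global[8];	// +16..+44; slot [0] reused as tf_wim by kgdb
 *		int	tf_out[8];	// +48..+76
 *	};				// sizeof = 80
 */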
768
769/*
770 * Interrupt setup is almost exactly like trap setup, but we need to
771 * go to the interrupt stack if (a) we came from user mode or (b) we
772 * came from kernel mode on the kernel stack.
773 */
774#define	INTR_SETUP(stackspace) \
775	rd	%wim, %l4; \
776	mov	1, %l5; \
777	sll	%l5, %l0, %l5; \
778	btst	PSR_PS, %l0; \
779	bz	1f; \
780	 btst	%l5, %l4; \
781	/* came from kernel mode; cond codes still indicate trap window */ \
782	bz,a	0f; \
783	 sethi	%hi(_eintstack), %l7; \
784	CALL_CLEAN_TRAP_WINDOW; \
785	sethi	%hi(_eintstack), %l7; \
7860:	/* now if %fp >= eintstack, we were on the kernel stack */ \
787	cmp	%fp, %l7; \
788	bge,a	3f; \
789	 add	%l7, stackspace, %sp;	/* so switch to intstack */ \
790	b	4f; \
791	 add	%fp, stackspace, %sp;	/* else stay on intstack */ \
7921: \
793	/* came from user mode: compute pcb_nw */ \
794	sethi	%hi(_cpcb), %l6; \
795	ld	[%l6 + %lo(_cpcb)], %l6; \
796	ld	[%l6 + PCB_WIM], %l5; \
797	and	%l0, 31, %l4; \
798	sub	%l4, %l5, %l5; \
799	set	uwtab, %l4; \
800	ldub	[%l4 + %l5], %l5; \
801	st	%l5, [%l6 + PCB_UW]; \
802	/* cond codes still indicate whether in trap window */ \
803	bz,a	2f; \
804	 sethi	%hi(_eintstack), %l7; \
805	/* yes, in trap window; must save regs */ \
806	CALL_CLEAN_TRAP_WINDOW; \
807	sethi	%hi(_eintstack), %l7; \
8082: \
809	add	%l7, stackspace, %sp; \
8103: \
811	SET_SP_REDZONE_CONST(_intstack, %l6, %l5); \
8124: \
813	CHECK_SP_REDZONE(%l6, %l5)
814
815/*
816 * Handler for making the trap window shiny clean.
817 *
818 * On entry:
819 *	cpcb->pcb_nw = number of user windows
820 *	%l0 = %psr
821 *	%l1 must not be clobbered
822 *	%l2 must not be clobbered
823 *	%l3 must not be clobbered
824 *	%l4 = address for `return'
825 *	%l7 = saved %g7 (we put this in a delay slot above, to save work)
826 *
827 * On return:
828 *	%wim has changed, along with cpcb->pcb_wim
829 *	%g7 has been restored
830 *
831 * Normally, we push only one window.
832 */
833clean_trap_window:
834	mov	%g5, %l5		! save %g5
835	mov	%g6, %l6		! ... and %g6
836/*	mov	%g7, %l7		! ... and %g7 (already done for us) */
837	sethi	%hi(_cpcb), %g6		! get current pcb
838	ld	[%g6 + %lo(_cpcb)], %g6
839
840	/* Figure out whether it is a user window (cpcb->pcb_uw > 0). */
841	ld	[%g6 + PCB_UW], %g7
842	deccc	%g7
843	bge	ctw_user
844	 save	%g0, %g0, %g0		! in any case, enter window to save
845
846	/* The window to be pushed is a kernel window. */
847	std	%l0, [%sp + (0*8)]
848ctw_merge:
849	std	%l2, [%sp + (1*8)]
850	std	%l4, [%sp + (2*8)]
851	std	%l6, [%sp + (3*8)]
852	std	%i0, [%sp + (4*8)]
853	std	%i2, [%sp + (5*8)]
854	std	%i4, [%sp + (6*8)]
855	std	%i6, [%sp + (7*8)]
856
857	/* Set up new window invalid mask, and update cpcb->pcb_wim. */
858	rd	%psr, %g7		! g7 = (junk << 5) + new_cwp
859	mov	1, %g5			! g5 = 1 << new_cwp;
860	sll	%g5, %g7, %g5
861	wr	%g5, 0, %wim		! setwim(g5);
862	and	%g7, 31, %g7		! cpcb->pcb_wim = g7 & 31;
863	st	%g7, [%g6 + PCB_WIM]
864	nop
865	restore				! back to trap window
866
867	mov	%l5, %g5		! restore g5
868	mov	%l6, %g6		! ... and g6
869	jmp	%l4 + 8			! return to caller
870	 mov	%l7, %g7		! ... and g7
871	/* NOTREACHED */
872
873ctw_user:
874	/*
875	 * The window to be pushed is a user window.
876	 * We must verify the stack pointer (alignment & permissions).
877	 * See comments above definition of PTE_OF_ADDR.
878	 */
879	st	%g7, [%g6 + PCB_UW]	! cpcb->pcb_uw--;
880	btst	7, %sp			! if not aligned,
881	bne	ctw_invalid		! choke on it
882	 EMPTY
883	PTE_OF_ADDR(%sp, %g7, ctw_invalid)
884	CMP_PTE_USER_WRITE(%g7)		! likewise if not writable
885	bne	ctw_invalid
886	 EMPTY
887	SLT_IF_1PAGE_RW(%sp, %g7)
888	bl,a	ctw_merge		! all ok if only 1
889	 std	%l0, [%sp]
890	add	%sp, 7*8, %g5		! check last addr too
891	PTE_OF_ADDR(%g5, %g7, ctw_invalid)
892	CMP_PTE_USER_WRITE(%g7)
893	be,a	ctw_merge		! all ok: store <l0,l1> and merge
894	 std	%l0, [%sp]
895
896	/*
897	 * The window we wanted to push could not be pushed.
898	 * Instead, save ALL user windows into the pcb.
899	 * We will notice later that we did this, when we
900	 * get ready to return from our trap or syscall.
901	 *
902	 * The code here is run rarely and need not be optimal.
903	 */
904ctw_invalid:
905	/*
906	 * Reread cpcb->pcb_uw.  We decremented this earlier,
907	 * so it is off by one.
908	 */
909	ld	[%g6 + PCB_UW], %g7	! (number of user windows) - 1
910	add	%g6, PCB_RW, %g5
911
912	/* save g7+1 windows, starting with the current one */
9131:					! do {
914	std	%l0, [%g5 + (0*8)]	!	rw->rw_local[0] = l0;
915	std	%l2, [%g5 + (1*8)]	!	...
916	std	%l4, [%g5 + (2*8)]
917	std	%l6, [%g5 + (3*8)]
918	std	%i0, [%g5 + (4*8)]
919	std	%i2, [%g5 + (5*8)]
920	std	%i4, [%g5 + (6*8)]
921	std	%i6, [%g5 + (7*8)]
922	deccc	%g7			!	if (n > 0) save(), rw++;
923	bge,a	1b			! } while (--n >= 0);
924	 save	%g5, 64, %g5
925
926	/* stash sp for bottommost window */
927	st	%sp, [%g5 + 64 + (7*8)]
928
929	/* set up new wim */
930	rd	%psr, %g7		! g7 = (junk << 5) + new_cwp;
931	mov	1, %g5			! g5 = 1 << new_cwp;
932	sll	%g5, %g7, %g5
933	wr	%g5, 0, %wim		! wim = g5;
934	and	%g7, 31, %g7
935	st	%g7, [%g6 + PCB_WIM]	! cpcb->pcb_wim = new_cwp;
936
937	/* fix up pcb fields */
938	ld	[%g6 + PCB_UW], %g7	! n = cpcb->pcb_uw;
939	add	%g7, 1, %g5
940	st	%g5, [%g6 + PCB_NSAVED]	! cpcb->pcb_nsaved = n + 1;
941	st	%g0, [%g6 + PCB_UW]	! cpcb->pcb_uw = 0;
942
943	/* return to trap window */
9441:	deccc	%g7			! do {
945	bge	1b			!	restore();
946	 restore			! } while (--n >= 0);
947
948	mov	%l5, %g5		! restore g5, g6, & g7, and return
949	mov	%l6, %g6
950	jmp	%l4 + 8
951	 mov	%l7, %g7
952	/* NOTREACHED */
953
954
955/*
956 * Each memory access (text or data) fault, from user or kernel mode,
957 * comes here.  We read the error register and figure out what has
958 * happened.
959 *
960 * This cannot be done from C code since we must not enable traps (and
961 * hence may not use the `save' instruction) until we have decided that
962 * the error is or is not an asynchronous one that showed up after a
963 * synchronous error, but which must be handled before the sync err.
964 *
965 * Most memory faults are user mode text or data faults, which can cause
966 * signal delivery or ptracing, for which we must build a full trapframe.
967 * It does not seem worthwhile to work to avoid this in the other cases,
968 * so we store all the %g registers on the stack immediately.
969 *
970 * On entry:
971 *	%l0 = %psr
972 *	%l1 = return pc
973 *	%l2 = return npc
974 *	%l3 = T_TEXTFAULT or T_DATAFAULT
975 *
976 * Internal:
977 *	%l4 = %y, until we call mem_access_fault (then onto trapframe)
978 *	%l5 = IE_reg_addr, if async mem error
979 *
980 * We know about the layout of the error registers here.
981 *	addr	reg
982 *	----	---
983 *	a	AC_SYNC_ERR
984 *	a+4	AC_SYNC_VA
985 *	a+8	AC_ASYNC_ERR
986 *	a+12	AC_ASYNC_VA
987 */
988memfault:
989	TRAP_SETUP(-CCFSZ-80)
990
991	INCR(_cnt+V_FAULTS)		! cnt.v_faults++ (clobbers %o0,%o1)
992
993	st	%g1, [%sp + CCFSZ + 20]	! save g1
994	rd	%y, %l4			! save y
995
996#if AC_SYNC_ERR + 4 != AC_SYNC_VA || \
997    AC_SYNC_ERR + 8 != AC_ASYNC_ERR || AC_SYNC_ERR + 12 != AC_ASYNC_VA
998	help help help		! I, I, I wanna be a lifeguard
999#endif
1000	set	AC_SYNC_ERR, %o0
1001	std	%g2, [%sp + CCFSZ + 24]	! save g2, g3
1002	lda	[%o0] ASI_CONTROL, %o1	! sync err reg
1003	inc	4, %o0
1004	std	%g4, [%sp + CCFSZ + 32]	! (sneak g4,g5 in here)
1005	lda	[%o0] ASI_CONTROL, %o2	! sync virt addr
1006	btst	SER_MEMERR, %o1		! memory error?
1007	std	%g6, [%sp + CCFSZ + 40]
1008	bz,a	normal_mem_fault	! no, just a regular fault
1009 	 wr	%l0, PSR_ET, %psr	! (and reenable traps)
1010
1011	/*
1012	 * We got a synchronous memory error.  It could be one that
1013	 * happened because there were two stores in a row, and the
1014	 * first went into the write buffer, and the second caused this
1015	 * synchronous trap; so there could now be a pending async error.
1016	 * This is in fact the case iff the two va's differ.
1017	 */
1018	inc	4, %o0
1019	lda	[%o0] ASI_CONTROL, %o3	! async err reg
1020	inc	4, %o0
1021	lda	[%o0] ASI_CONTROL, %o4	! async virt addr
1022	cmp	%o2, %o4
1023	be,a	1f			! no, not an async err
1024	 wr	%l0, PSR_ET, %psr	! (and reenable traps)
1025
1026	/*
1027	 * Handle the async error; ignore the sync error for now
1028	 * (we may end up getting it again, but so what?).
1029	 * This code is essentially the same as that at `nmi' below,
1030	 * but the register usage is different and we cannot merge.
1031	 */
1032	sethi	%hi(IE_reg_addr), %l5	! ienab_bic(IE_ALLIE);
1033	ldub	[%l5 + %lo(IE_reg_addr)], %o0
1034	andn	%o0, IE_ALLIE, %o0
1035	stb	%o0, [%l5 + %lo(IE_reg_addr)]
1036
1037	/*
1038	 * Now reenable traps and call C code.
1039	 * %o1 through %o4 still hold the error reg contents.
1040	 * If memerr() returns, return from the trap.
1041	 */
1042	wr	%l0, PSR_ET, %psr
1043	call	_memerr			! memerr(0, ser, sva, aer, ava)
1044	 clr	%o0
1045
1046	ld	[%sp + CCFSZ + 20], %g1	! restore g1 through g7
1047	wr	%l0, 0, %psr		! and disable traps, 3 instr delay
1048	ldd	[%sp + CCFSZ + 24], %g2
1049	ldd	[%sp + CCFSZ + 32], %g4
1050	ldd	[%sp + CCFSZ + 40], %g6
1051	/* now safe to set IE_ALLIE again */
1052	ldub	[%l5 + %lo(IE_reg_addr)], %o1
1053	or	%o1, IE_ALLIE, %o1
1054	stb	%o1, [%l5 + %lo(IE_reg_addr)]
1055	b	return_from_trap
1056	 wr	%l4, 0, %y		! restore y
1057
1058	/*
1059	 * Trap was a synchronous memory error.
1060	 * %o1 through %o4 still hold the error reg contents.
1061	 */
10621:
1063	call	_memerr			! memerr(1, ser, sva, aer, ava)
1064	 mov	1, %o0
1065
1066	ld	[%sp + CCFSZ + 20], %g1	! restore g1 through g7
1067	ldd	[%sp + CCFSZ + 24], %g2
1068	ldd	[%sp + CCFSZ + 32], %g4
1069	ldd	[%sp + CCFSZ + 40], %g6
1070	wr	%l4, 0, %y		! restore y
1071	b	return_from_trap
1072	 wr	%l0, 0, %psr
1073	/* NOTREACHED */
1074
1075normal_mem_fault:
1076	/*
1077	 * Trap was some other error; call C code to deal with it.
1078	 * Must finish trap frame (psr,pc,npc,%y,%o0..%o7) in case
1079	 * we decide to deliver a signal or ptrace the process.
1080	 * %g1..%g7 were already set up above.
1081	 */
1082	std	%l0, [%sp + CCFSZ + 0]	! set tf.tf_psr, tf.tf_pc
1083	mov	%l3, %o0		! (argument: type)
1084	st	%l2, [%sp + CCFSZ + 8]	! set tf.tf_npc
1085	st	%l4, [%sp + CCFSZ + 12]	! set tf.tf_y
1086	mov	%l1, %o3		! (argument: pc)
1087	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_out[0], etc
1088	std	%i2, [%sp + CCFSZ + 56]
1089	mov	%l0, %o4		! (argument: psr)
1090	std	%i4, [%sp + CCFSZ + 64]
1091	std	%i6, [%sp + CCFSZ + 72]
1092	call	_mem_access_fault	! mem_access_fault(type, ser, sva,
1093					!		pc, psr, &tf);
1094	 add	%sp, CCFSZ, %o5		! (argument: &tf)
1095
1096	ldd	[%sp + CCFSZ + 0], %l0	! load new values
1097	ldd	[%sp + CCFSZ + 8], %l2
1098	wr	%l3, 0, %y
1099	ld	[%sp + CCFSZ + 20], %g1
1100	ldd	[%sp + CCFSZ + 24], %g2
1101	ldd	[%sp + CCFSZ + 32], %g4
1102	ldd	[%sp + CCFSZ + 40], %g6
1103	ldd	[%sp + CCFSZ + 48], %i0
1104	ldd	[%sp + CCFSZ + 56], %i2
1105	ldd	[%sp + CCFSZ + 64], %i4
1106	ldd	[%sp + CCFSZ + 72], %i6
1107
1108	b	return_from_trap	! go return
1109	 wr	%l0, 0, %psr		! (but first disable traps again)
1110
1111
1112/*
1113 * fp_exception has to check to see if we are trying to save
1114 * the FP state, and if so, continue to save the FP state.
1115 *
1116 * We do not even bother checking to see if we were in kernel mode,
1117 * since users have no access to the special_fp_store instruction.
1118 *
1119 * This whole idea was stolen from Sprite.
1120 */
1121fp_exception:
1122	set	special_fp_store, %l4	! see if we came from the special one
1123	cmp	%l1, %l4		! pc == special_fp_store?
1124	bne	slowtrap		! no, go handle per usual
1125	 EMPTY
1126	sethi	%hi(savefpcont), %l4	! yes, "return" to the special code
1127	or	%lo(savefpcont), %l4, %l4
1128	jmp	%l4
1129	 rett	%l4 + 4
1130
1131/*
1132 * slowtrap() builds a trap frame and calls trap().
1133 * This is called `slowtrap' because it *is*....
1134 * We have to build a full frame for ptrace(), for instance.
1135 *
1136 * Registers:
1137 *	%l0 = %psr
1138 *	%l1 = return pc
1139 *	%l2 = return npc
1140 *	%l3 = trap code
1141 */
1142slowtrap:
1143	TRAP_SETUP(-CCFSZ-80)
1144	/*
1145	 * Phew, ready to enable traps and call C code.
1146	 */
1147	mov	%l3, %o0		! put type in %o0 for later
1148Lslowtrap_reenter:
1149	wr	%l0, PSR_ET, %psr	! traps on again
1150	std	%l0, [%sp + CCFSZ]	! tf.tf_psr = psr; tf.tf_pc = ret_pc;
1151	rd	%y, %l3
1152	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc = return_npc; tf.tf_y = %y;
1153	st	%g1, [%sp + CCFSZ + 20]
1154	std	%g2, [%sp + CCFSZ + 24]
1155	std	%g4, [%sp + CCFSZ + 32]
1156	std	%g6, [%sp + CCFSZ + 40]
1157	std	%i0, [%sp + CCFSZ + 48]
1158	mov	%l0, %o1		! (psr)
1159	std	%i2, [%sp + CCFSZ + 56]
1160	mov	%l1, %o2		! (pc)
1161	std	%i4, [%sp + CCFSZ + 64]
1162	add	%sp, CCFSZ, %o3		! (&tf)
1163	call	_trap			! trap(type, psr, pc, &tf)
1164	 std	%i6, [%sp + CCFSZ + 72]
1165
1166	ldd	[%sp + CCFSZ], %l0	! load new values
1167	ldd	[%sp + CCFSZ + 8], %l2
1168	wr	%l3, 0, %y
1169	ld	[%sp + CCFSZ + 20], %g1
1170	ldd	[%sp + CCFSZ + 24], %g2
1171	ldd	[%sp + CCFSZ + 32], %g4
1172	ldd	[%sp + CCFSZ + 40], %g6
1173	ldd	[%sp + CCFSZ + 48], %i0
1174	ldd	[%sp + CCFSZ + 56], %i2
1175	ldd	[%sp + CCFSZ + 64], %i4
1176	ldd	[%sp + CCFSZ + 72], %i6
1177	b	return_from_trap
1178	 wr	%l0, 0, %psr
1179
1180/*
1181 * Do a `software' trap by re-entering the trap code, possibly first
1182 * switching from interrupt stack to kernel stack.  This is used for
1183 * scheduling and signal ASTs (which generally occur from softclock or
1184 * tty or net interrupts) and register window saves (which might occur
1185 * from anywhere).
1186 *
1187 * The current window is the trap window, and it is by definition clean.
1188 * We enter with the trap type in %o0.  All we have to do is jump to
1189 * Lslowtrap_reenter above, but maybe after switching stacks....
1190 */
1191softtrap:
1192	sethi	%hi(_eintstack), %l7
1193	cmp	%sp, %l7
1194	bge	Lslowtrap_reenter
1195	 EMPTY
1196	sethi	%hi(_cpcb), %l6
1197	ld	[%l6 + %lo(_cpcb)], %l6
1198	set	UPAGES*NBPG - CCFSZ - 80, %l5
1199	add	%l6, %l5, %l7
1200	SET_SP_REDZONE(%l6, %l5)
1201	b	Lslowtrap_reenter
1202	 mov	%l7, %sp
1203
1204#ifdef KGDB
1205/*
1206 * bpt is entered on all breakpoint traps.
1207 * If this is a kernel breakpoint, we do not want to call trap().
1208 * Among other reasons, this way we can set breakpoints in trap().
1209 */
1210bpt:
1211	btst	PSR_PS, %l0		! breakpoint from kernel?
1212	bz	slowtrap		! no, go do regular trap
1213	 nop
1214
1215	/*
1216	 * Build a trap frame for kgdb_trap_glue to copy.
1217	 * Enable traps but set ipl high so that we will not
1218	 * see interrupts from within breakpoints.
1219	 */
1220	TRAP_SETUP(-CCFSZ-80)
1221	or	%l0, PSR_PIL, %l4	! splhigh()
1222	wr	%l4, 0, %psr		! the manual claims that this
1223	wr	%l4, PSR_ET, %psr	! song and dance is necessary
1224	std	%l0, [%sp + CCFSZ + 0]	! tf.tf_psr, tf.tf_pc
1225	mov	%l3, %o0		! trap type arg for kgdb_trap_glue
1226	rd	%y, %l3
1227	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc, tf.tf_y
1228	rd	%wim, %l3
1229	st	%l3, [%sp + CCFSZ + 16]	! tf.tf_wim (a kgdb-only r/o field)
1230	st	%g1, [%sp + CCFSZ + 20]	! tf.tf_global[1]
1231	std	%g2, [%sp + CCFSZ + 24]	! etc
1232	std	%g4, [%sp + CCFSZ + 32]
1233	std	%g6, [%sp + CCFSZ + 40]
1234	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_in[0..1]
1235	std	%i2, [%sp + CCFSZ + 56]	! etc
1236	std	%i4, [%sp + CCFSZ + 64]
1237	std	%i6, [%sp + CCFSZ + 72]
1238
1239	/*
1240	 * Now call kgdb_trap_glue(); if it returns, call trap().
1241	 */
1242	mov	%o0, %l3		! gotta save trap type
1243	call	_kgdb_trap_glue		! kgdb_trap_glue(type, &trapframe)
1244	 add	%sp, CCFSZ, %o1		! (&trapframe)
1245
1246	/*
1247	 * Use slowtrap to call trap---but first erase our tracks
1248	 * (put the registers back the way they were).
1249	 */
1250	mov	%l3, %o0		! slowtrap will need trap type
1251	ld	[%sp + CCFSZ + 12], %l3
1252	wr	%l3, 0, %y
1253	ld	[%sp + CCFSZ + 20], %g1
1254	ldd	[%sp + CCFSZ + 24], %g2
1255	ldd	[%sp + CCFSZ + 32], %g4
1256	b	Lslowtrap_reenter
1257	 ldd	[%sp + CCFSZ + 40], %g6
1258
1259/*
1260 * Enter kernel breakpoint.  Write all the windows (not including the
1261 * current window) into the stack, so that backtrace works.  Copy the
1262 * supplied trap frame to the kgdb stack and switch stacks.
1263 *
1264 * kgdb_trap_glue(type, tf0)
1265 *	int type;
1266 *	struct trapframe *tf0;
1267 */
1268	.globl	_kgdb_trap_glue
1269_kgdb_trap_glue:
1270	save	%sp, -CCFSZ, %sp
1271
1272	call	_write_all_windows
1273	 mov	%sp, %l4		! %l4 = current %sp
1274
1275	/* copy trapframe to top of kgdb stack */
1276	set	_kgdb_stack + KGDB_STACK_SIZE - 80, %l0
1277					! %l0 = tfcopy -> end_of_kgdb_stack
1278	mov	80, %l1
12791:	ldd	[%i1], %l2
1280	inc	8, %i1
1281	deccc	8, %l1
1282	std	%l2, [%l0]
1283	bg	1b
1284	 inc	8, %l0
1285
1286#ifdef DEBUG
1287	/* save old red zone and then turn it off */
1288	sethi	%hi(_redzone), %l7
1289	ld	[%l7 + %lo(_redzone)], %l6
1290	st	%g0, [%l7 + %lo(_redzone)]
1291#endif
1292	/* switch to kgdb stack */
1293	add	%l0, -CCFSZ-80, %sp
1294
1295	/* if (kgdb_trap(type, tfcopy)) kgdb_rett(tfcopy); */
1296	mov	%i0, %o0
1297	call	_kgdb_trap
1298	add	%l0, -80, %o1
1299	tst	%o0
1300	bnz,a	kgdb_rett
1301	 add	%l0, -80, %g1
1302
1303	/*
1304	 * kgdb_trap() did not handle the trap at all so the stack is
1305	 * still intact.  A simple `restore' will put everything back,
1306	 * after we reset the stack pointer.
1307	 */
1308	mov	%l4, %sp
1309#ifdef DEBUG
1310	st	%l6, [%l7 + %lo(_redzone)]	! restore red zone
1311#endif
1312	ret
1313	restore
1314
1315/*
1316 * Return from kgdb trap.  This is sort of special.
1317 *
1318 * We know that kgdb_trap_glue wrote the window above it, so that we will
1319 * be able to (and are sure to have to) load it up.  We also know that we
1320 * came from kernel land and can assume that the %fp (%i6) we load here
1321 * is proper.  We must also be sure not to lower ipl (it is at splhigh())
1322 * until we have traps disabled, due to the SPARC taking traps at the
1323 * new ipl before noticing that PSR_ET has been turned off.  We are on
1324 * the kgdb stack, so this could be disastrous.
1325 *
1326 * Note that the trapframe argument in %g1 points into the current stack
1327 * frame (current window).  We abandon this window when we move %g1->tf_psr
1328 * into %psr, but we will not have loaded the new %sp yet, so again traps
1329 * must be disabled.
1330 */
1331kgdb_rett:
1332	rd	%psr, %g4		! turn off traps
1333	wr	%g4, PSR_ET, %psr
1334	/* use the three-instruction delay to do something useful */
1335	ld	[%g1], %g2		! pick up new %psr
1336	ld	[%g1 + 12], %g3		! set %y
1337	wr	%g3, 0, %y
1338#ifdef DEBUG
1339	st	%l6, [%l7 + %lo(_redzone)] ! and restore red zone
1340#endif
1341	wr	%g0, 0, %wim		! enable window changes
1342	nop; nop; nop
1343	/* now safe to set the new psr (changes CWP, leaves traps disabled) */
1344	wr	%g2, 0, %psr		! set rett psr (including cond codes)
1345	/* 3 instruction delay before we can use the new window */
1346/*1*/	ldd	[%g1 + 24], %g2		! set new %g2, %g3
1347/*2*/	ldd	[%g1 + 32], %g4		! set new %g4, %g5
1348/*3*/	ldd	[%g1 + 40], %g6		! set new %g6, %g7
1349
1350	/* now we can use the new window */
1351	mov	%g1, %l4
1352	ld	[%l4 + 4], %l1		! get new pc
1353	ld	[%l4 + 8], %l2		! get new npc
1354	ld	[%l4 + 20], %g1		! set new %g1
1355
1356	/* set up returnee's out registers, including its %sp */
1357	ldd	[%l4 + 48], %i0
1358	ldd	[%l4 + 56], %i2
1359	ldd	[%l4 + 64], %i4
1360	ldd	[%l4 + 72], %i6
1361
1362	/* load returnee's window, making the window above it be invalid */
1363	restore
1364	restore	%g0, 1, %l1		! move to inval window and set %l1 = 1
1365	rd	%psr, %l0
1366	sll	%l1, %l0, %l1
1367	wr	%l1, 0, %wim		! %wim = 1 << (%psr & 31)
1368	sethi	%hi(_cpcb), %l1
1369	ld	[%l1 + %lo(_cpcb)], %l1
1370	and	%l0, 31, %l0		! CWP = %psr & 31;
1371	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = CWP;
1372	save	%g0, %g0, %g0		! back to window to reload
1373	LOADWIN(%sp)
1374	save	%g0, %g0, %g0		! back to trap window
1375	/* note, we have not altered condition codes; safe to just rett */
1376	RETT
1377#endif
1378
1379/*
1380 * syscall() builds a trap frame and calls syscall().
1381 * sun_syscall is same but delivers sun system call number
1382 * XXX	should not have to save&reload ALL the registers just for
1383 *	ptrace...
1384 */
1385#ifdef COMPAT_SUNOS
1386sun_syscall:
1387	TRAP_SETUP(-CCFSZ-80)
1388	b	sys_merge
1389	 mov	1, %o3			! third arg to syscall: sun compat
1390syscall:
1391	TRAP_SETUP(-CCFSZ-80)
1392	clr	%o3			! third arg to syscall: native bsd
1393sys_merge:
1394#else
1395syscall:
1396	TRAP_SETUP(-CCFSZ-80)
1397#endif
1398	wr	%l0, PSR_ET, %psr
1399	std	%l0, [%sp + CCFSZ + 0]	! tf_psr, tf_pc
1400	rd	%y, %l3
1401	std	%l2, [%sp + CCFSZ + 8]	! tf_npc, tf_y
1402	st	%g1, [%sp + CCFSZ + 20]	! tf_g[1]
1403	std	%g2, [%sp + CCFSZ + 24]	! tf_g[2], tf_g[3]
1404	std	%g4, [%sp + CCFSZ + 32]	! etc
1405	std	%g6, [%sp + CCFSZ + 40]
1406	mov	%g1, %o0		! (code)
1407	std	%i0, [%sp + CCFSZ + 48]
1408	add	%sp, CCFSZ, %o1		! (&tf)
1409	std	%i2, [%sp + CCFSZ + 56]
1410	mov	%l1, %o2		! (pc)
1411	std	%i4, [%sp + CCFSZ + 64]
1412	call	_syscall		! syscall(code, &tf, pc, suncompat)
1413	 std	%i6, [%sp + CCFSZ + 72]
1414	! now load em all up again, sigh
1415	ldd	[%sp + CCFSZ + 0], %l0	! new %psr, new pc
1416	ldd	[%sp + CCFSZ + 8], %l2	! new npc, new %y
1417	wr	%l3, 0, %y
1418	ld	[%sp + CCFSZ + 20], %g1
1419	ldd	[%sp + CCFSZ + 24], %g2
1420	ldd	[%sp + CCFSZ + 32], %g4
1421	ldd	[%sp + CCFSZ + 40], %g6
1422	ldd	[%sp + CCFSZ + 48], %i0
1423	ldd	[%sp + CCFSZ + 56], %i2
1424	ldd	[%sp + CCFSZ + 64], %i4
1425	ldd	[%sp + CCFSZ + 72], %i6
1426	b	return_from_trap
1427	 wr	%l0, 0, %psr
1428
1429/*
1430 * Interrupts.  Software interrupts must be cleared from the software
1431 * interrupt enable register.  Rather than calling ienab_bic for each,
1432 * we do them in-line before enabling traps.
1433 *
1434 * After preliminary setup work, the interrupt is passed to each
1435 * registered handler in turn.  These are expected to return nonzero if
1436 * they took care of the interrupt.  If a handler claims the interrupt,
1437 * we exit (hardware interrupts are latched in the requestor so we'll
1438 * just take another interrupt in the unlikely event of simultaneous
1439 * interrupts from two different devices at the same level).  If we go
1440 * through all the registered handlers and no one claims it, we report a
1441 * stray interrupt.  This is more or less done as:
1442 *
1443 *	for (ih = intrhand[intlev]; ih; ih = ih->ih_next)
1444 *		if ((*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : &frame))
1445 *			return;
1446 *	strayintr(&frame);
1447 *
1448 * Software interrupts are almost the same with three exceptions:
1449 * (1) we clear the interrupt from the software interrupt enable
1450 *     register before calling any handler (we have to clear it first
1451 *     to avoid an interrupt-losing race),
1452 * (2) we always call all the registered handlers (there is no way
1453 *     to tell if the single bit in the software interrupt register
1454 *     represents one or many requests)
1455 * (3) we never announce a stray interrupt (because of (1), another
1456 *     interrupt request can come in while we're in the handler.  If
1457 *     the handler deals with everything for both the original & the
1458 *     new request, we'll erroneously report a stray interrupt when
1459 *     we take the software interrupt for the new request.)
1460 *
1461 * Inputs:
1462 *	%l0 = %psr
1463 *	%l1 = return pc
1464 *	%l2 = return npc
1465 *	%l3 = interrupt level
1466 *	(software interrupt only) %l4 = bits to clear in interrupt register
1467 *
1468 * Internal:
1469 *	%l4, %l5: local variables
1470 *	%l6 = %y
1471 *	%l7 = %g1
1472 *	%g2..%g7 go to stack
1473 *
1474 * An interrupt frame is built in the space for a full trapframe;
1475 * this contains the psr, pc, npc, and interrupt level.
1476 */
1477	.comm	_intrhand, 15 * 8	! intrhand[0..14]; 0 => error
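/*
 * The dispatch loops below walk a singly linked list whose 0/4/8 offsets
 * correspond to an element like this; a sketch only, see intr.c and the
 * machine headers for the real declaration:
 *
 *	struct intrhand {
 *		int	(*ih_fun)(void *);	// +0: nonzero return = claimed
 *		void	*ih_arg;		// +4: NULL means pass &frame
 *		struct	intrhand *ih_next;	// +8
 *	};
 */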
1478softintr:
1479	sethi	%hi(IE_reg_addr), %l6
1480	ldub	[%l6 + %lo(IE_reg_addr)], %l5
1481	andn	%l5, %l4, %l5
1482	stb	%l5, [%l6 + %lo(IE_reg_addr)]
1483	INTR_SETUP(-CCFSZ-80)
1484	std	%g2, [%sp + CCFSZ + 24]	! save registers
1485	INCR(_cnt+V_INTR)		! cnt.v_intr++; (clobbers %o0,%o1)
1486	mov	%g1, %l7
1487	rd	%y, %l6
1488	std	%g4, [%sp + CCFSZ + 32]
1489	andn	%l0, PSR_PIL, %l4	! %l4 = psr & ~PSR_PIL |
1490	sll	%l3, 8, %l5		!	intlev << IPLSHIFT
1491	std	%g6, [%sp + CCFSZ + 40]
1492	or	%l5, %l4, %l4		!			;
1493	wr	%l4, 0, %psr		! the manual claims this
1494	wr	%l4, PSR_ET, %psr	! song and dance is necessary
1495	std	%l0, [%sp + CCFSZ + 0]	! set up intrframe/clockframe
1496	sll	%l3, 2, %l5
1497	set	_intrcnt, %l4		! intrcnt[intlev]++;
1498	ld	[%l4 + %l5], %o0
1499	std	%l2, [%sp + CCFSZ + 8]
1500	inc	%o0
1501	st	%o0, [%l4 + %l5]
1502	set	_intrhand, %l4		! %l4 = intrhand[intlev];
1503	ld	[%l4 + %l5], %l4
1504	b	3f
1505	 st	%fp, [%sp + CCFSZ + 16]
1506
15071:	ld	[%l4], %o1
1508	ld	[%l4 + 4], %o0
1509	tst	%o0
1510	bz,a	2f
1511	 add	%sp, CCFSZ, %o0
15122:	jmpl	%o1, %o7		!	(void)(*ih->ih_fun)(...)
1513	 ld	[%l4 + 8], %l4		!	and ih = ih->ih_next
15143:	tst	%l4			! while ih != NULL
1515	bnz	1b
1516	 nop
1517	mov	%l7, %g1
1518	wr	%l6, 0, %y
1519	ldd	[%sp + CCFSZ + 24], %g2
1520	ldd	[%sp + CCFSZ + 32], %g4
1521	ldd	[%sp + CCFSZ + 40], %g6
1522	b	return_from_trap
1523	 wr	%l0, 0, %psr
1524
1525	/*
1526	 * _sparc_interrupt is exported for paranoia checking (see intr.c).
1527	 */
1528	.globl	_sparc_interrupt
1529_sparc_interrupt:
1530	INTR_SETUP(-CCFSZ-80)
1531	std	%g2, [%sp + CCFSZ + 24]	! save registers
1532	INCR(_cnt+V_INTR)		! cnt.v_intr++; (clobbers %o0,%o1)
1533	mov	%g1, %l7
1534	rd	%y, %l6
1535	std	%g4, [%sp + CCFSZ + 32]
1536	andn	%l0, PSR_PIL, %l4	! %l4 = psr & ~PSR_PIL |
1537	sll	%l3, 8, %l5		!	intlev << IPLSHIFT
1538	std	%g6, [%sp + CCFSZ + 40]
1539	or	%l5, %l4, %l4		!			;
1540	wr	%l4, 0, %psr		! the manual claims this
1541	wr	%l4, PSR_ET, %psr	! song and dance is necessary
1542	std	%l0, [%sp + CCFSZ + 0]	! set up intrframe/clockframe
1543	sll	%l3, 2, %l5
1544	set	_intrcnt, %l4		! intrcnt[intlev]++;
1545	ld	[%l4 + %l5], %o0
1546	std	%l2, [%sp + CCFSZ + 8]	! set up intrframe/clockframe
1547	inc	%o0
1548	st	%o0, [%l4 + %l5]
1549	set	_intrhand, %l4		! %l4 = intrhand[intlev];
1550	ld	[%l4 + %l5], %l4
1551	b	3f
1552	 st	%fp, [%sp + CCFSZ + 16]
1553
15541:	ld	[%l4], %o1
1555	ld	[%l4 + 4], %o0
1556	tst	%o0
1557	bz,a	2f
1558	 add	%sp, CCFSZ, %o0
15592:	jmpl	%o1, %o7		!	handled = (*ih->ih_fun)(...)
1560	 ld	[%l4 + 8], %l4		!	and ih = ih->ih_next
1561	tst	%o0
1562	bnz	4f			! if (handled) break
1563	 nop
15643:	tst	%l4
1565	bnz	1b			! while (ih)
1566	 nop
1567	call	_strayintr		!	strayintr(&intrframe)
1568	 add	%sp, CCFSZ, %o0
1569	/* all done: restore registers and go return */
15704:	mov	%l7, %g1
1571	wr	%l6, 0, %y
1572	ldd	[%sp + CCFSZ + 24], %g2
1573	ldd	[%sp + CCFSZ + 32], %g4
1574	ldd	[%sp + CCFSZ + 40], %g6
1575	b	return_from_trap
1576	 wr	%l0, 0, %psr
1577
1578#ifdef notyet
1579/*
1580 * Level 12 (ZS serial) interrupt.  Handle it quickly, schedule a
1581 * software interrupt, and get out.  Do the software interrupt directly
1582 * if we would just take it on the way out.
1583 *
1584 * Input:
1585 *	%l0 = %psr
1586 *	%l1 = return pc
1587 *	%l2 = return npc
1588 * Internal:
1589 *	%l3 = zs device
1590 *	%l4, %l5 = temporary
1591 *	%l6 = rr3 (or temporary data) + 0x100 => need soft int
1592 *	%l7 = zs soft status
1593 */
1594zshard:
1595#endif /* notyet */
1596
1597/*
1598 * Level 15 interrupt.  An async memory error has occurred;
1599 * take care of it (typically by panicking, but hey...).
1600 *	%l0 = %psr
1601 *	%l1 = return pc
1602 *	%l2 = return npc
1603 *	%l3 = 15 * 4 (why? just because!)
1604 *
1605 * Internal:
1606 *	%l4 = %y
1607 *	%l5 = %g1
1608 *	%l6 = %g6
1609 *	%l7 = %g7
1610 *  g2, g3, g4, g5 go to stack
1611 *
1612 * This code is almost the same as that in mem_access_fault,
1613 * except that we already know the problem is not a `normal' fault,
1614 * and that we must be extra-careful with interrupt enables.
1615 */
1616nmi:
1617	INTR_SETUP(-CCFSZ-80)
1618	INCR(_cnt+V_INTR)		! cnt.v_intr++; (clobbers %o0,%o1)
1619	/*
1620	 * Level 15 interrupts are nonmaskable, so with traps off,
1621	 * disable all interrupts to prevent recursion.
1622	 */
1623	sethi	%hi(IE_reg_addr), %o0
1624	ldub	[%o0 + %lo(IE_reg_addr)], %o1
1625	andn	%o1, IE_ALLIE, %o1
1626	stb	%o1, [%o0 + %lo(IE_reg_addr)]
1627	wr	%l0, PSR_ET, %psr	! okay, turn traps on again
1628
1629	std	%g2, [%sp + CCFSZ + 0]	! save g2, g3
1630	rd	%y, %l4			! save y
1631
1632	! must read the sync error register too.
1633	set	AC_SYNC_ERR, %o0
1634	lda	[%o0] ASI_CONTROL, %o1	! sync err reg
1635	inc	4, %o0
1636	lda	[%o0] ASI_CONTROL, %o2	! sync virt addr
1637	std	%g4, [%sp + CCFSZ + 8]	! save g4,g5
1638	mov	%g1, %l5		! save g1,g6,g7
1639	mov	%g6, %l6
1640	mov	%g7, %l7
1641	inc	4, %o0
1642	lda	[%o0] ASI_CONTROL, %o3	! async err reg
1643	inc	4, %o0
1644	lda	[%o0] ASI_CONTROL, %o4	! async virt addr
1645
1646	! and call C code
1647	call	_memerr			! memerr(0, ser, sva, aer, ava)
1648	clr	%o0
1649
1650	mov	%l5, %g1		! restore g1 through g7
1651	ldd	[%sp + CCFSZ + 0], %g2
1652	ldd	[%sp + CCFSZ + 8], %g4
1653	wr	%l0, 0, %psr		! re-disable traps
1654	mov	%l6, %g6
1655	mov	%l7, %g7
1656
1657	! set IE_ALLIE again (safe, we disabled traps again above)
1658	sethi	%hi(IE_reg_addr), %o0
1659	ldub	[%o0 + %lo(IE_reg_addr)], %o1
1660	or	%o1, IE_ALLIE, %o1
1661	stb	%o1, [%o0 + %lo(IE_reg_addr)]
1662	b	return_from_trap
1663	 wr	%l4, 0, %y		! restore y
1664
1665
1666/*
1667 * Window overflow trap handler.
1668 *	%l0 = %psr
1669 *	%l1 = return pc
1670 *	%l2 = return npc
1671 */
1672window_of:
1673#ifdef TRIVIAL_WINDOW_OVERFLOW_HANDLER
1674	/* a trivial version that assumes %sp is ok */
1675	/* (for testing only!) */
1676	save	%g0, %g0, %g0
1677	std	%l0, [%sp + (0*8)]
1678	rd	%psr, %l0
1679	mov	1, %l1
1680	sll	%l1, %l0, %l0
1681	wr	%l0, 0, %wim
1682	std	%l2, [%sp + (1*8)]
1683	std	%l4, [%sp + (2*8)]
1684	std	%l6, [%sp + (3*8)]
1685	std	%i0, [%sp + (4*8)]
1686	std	%i2, [%sp + (5*8)]
1687	std	%i4, [%sp + (6*8)]
1688	std	%i6, [%sp + (7*8)]
1689	restore
1690	RETT
1691#else
1692	/*
1693	 * This is similar to TRAP_SETUP, but we do not want to spend
1694	 * a lot of time, so we have separate paths for kernel and user.
1695	 * We also know for sure that the window has overflowed.
1696	 */
1697	btst	PSR_PS, %l0
1698	bz	winof_user
1699	 sethi	%hi(clean_trap_window), %l7
1700
1701	/*
1702	 * Overflow from kernel mode.  Call clean_trap_window to
1703	 * do the dirty work, then just return, since we know prev
1704 * window is valid.  clean_trap_window might dump all *user*
1705	 * windows into the pcb, but we do not care: there is at
1706	 * least one kernel window (a trap or interrupt frame!)
1707	 * above us.
1708	 */
1709	jmpl	%l7 + %lo(clean_trap_window), %l4
1710	 mov	%g7, %l7		! for clean_trap_window
1711
1712	wr	%l0, 0, %psr		! put back the @%*! cond. codes
1713	nop				! (let them settle in)
1714	RETT
1715
1716winof_user:
1717	/*
1718	 * Overflow from user mode.
1719	 * If clean_trap_window dumps the registers into the pcb,
1720	 * rft_user will need to call trap(), so we need space for
1721	 * a trap frame.  We also have to compute pcb_nw.
1722	 *
1723	 * SHOULD EXPAND IN LINE TO AVOID BUILDING TRAP FRAME ON
1724	 * `EASY' SAVES
1725	 */
1726	sethi	%hi(_cpcb), %l6
1727	ld	[%l6 + %lo(_cpcb)], %l6
1728	ld	[%l6 + PCB_WIM], %l5
1729	and	%l0, 31, %l3
1730	sub	%l3, %l5, %l5 		/* l5 = CWP - pcb_wim */
1731	set	uwtab, %l4
1732	ldub	[%l4 + %l5], %l5	/* l5 = uwtab[l5] */
1733	st	%l5, [%l6 + PCB_UW]
1734	jmpl	%l7 + %lo(clean_trap_window), %l4
1735	 mov	%g7, %l7		! for clean_trap_window
1736	sethi	%hi(_cpcb), %l6
1737	ld	[%l6 + %lo(_cpcb)], %l6
1738	set	UPAGES*NBPG-CCFSZ-80, %l5
1739	add	%l6, %l5, %sp		/* over to kernel stack */
1740	CHECK_SP_REDZONE(%l6, %l5)
1741
1742	/*
1743	 * Copy return_from_trap far enough to allow us
1744	 * to jump directly to rft_user_or_recover_pcb_windows
1745	 * (since we know that is where we are headed).
1746	 */
1747!	and	%l0, 31, %l3		! still set (clean_trap_window
1748					! leaves this register alone)
1749	set	wmask, %l6
1750	ldub	[%l6 + %l3], %l5	! %l5 = 1 << ((CWP + 1) % nwindows)
1751	b	rft_user_or_recover_pcb_windows
1752	 rd	%wim, %l4		! (read %wim first)
1753#endif /* end `real' version of window overflow trap handler */
1754
1755/*
1756 * Window underflow trap handler.
1757 *	%l0 = %psr
1758 *	%l1 = return pc
1759 *	%l2 = return npc
1760 *
1761 * A picture:
1762 *
1763 *	  T R I X
1764 *	0 0 0 1 0 0 0	(%wim)
1765 * [bit numbers increase towards the right;
1766 * `restore' moves right & `save' moves left]
1767 *
1768 * T is the current (Trap) window, R is the window that attempted
1769 * a `Restore' instruction, I is the Invalid window, and X is the
1770 * window we want to make invalid before we return.
1771 *
1772 * Since window R is valid, we cannot use rft_user to restore stuff
1773 * for us.  We have to duplicate its logic.  YUCK.
1774 *
1775 * Incidentally, TRIX are for kids.  Silly rabbit!
1776 */
1777window_uf:
1778#ifdef TRIVIAL_WINDOW_UNDERFLOW_HANDLER
1779	wr	%g0, 0, %wim		! allow us to enter I
1780	restore				! to R
1781	nop
1782	nop
1783	restore				! to I
1784	restore	%g0, 1, %l1		! to X
1785	rd	%psr, %l0
1786	sll	%l1, %l0, %l0
1787	wr	%l0, 0, %wim
1788	save	%g0, %g0, %g0		! back to I
1789	LOADWIN(%sp)
1790	save	%g0, %g0, %g0		! back to R
1791	save	%g0, %g0, %g0		! back to T
1792	RETT
1793#else
1794	wr	%g0, 0, %wim		! allow us to enter I
1795	btst	PSR_PS, %l0
1796	restore				! enter window R
1797	bz	winuf_user
1798	 restore			! enter window I
1799
1800	/*
1801	 * Underflow from kernel mode.  Just recover the
1802	 * registers and go (except that we have to update
1803	 * the blasted user pcb fields).
1804	 */
1805	restore	%g0, 1, %l1		! enter window X, then set %l1 to 1
1806	rd	%psr, %l0		! cwp = %psr & 31;
1807	and	%l0, 31, %l0
1808	sll	%l1, %l0, %l1		! wim = 1 << cwp;
1809	wr	%l1, 0, %wim		! setwim(wim);
1810	sethi	%hi(_cpcb), %l1
1811	ld	[%l1 + %lo(_cpcb)], %l1
1812	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = cwp;
1813	save	%g0, %g0, %g0		! back to window I
1814	LOADWIN(%sp)
1815	save	%g0, %g0, %g0		! back to R
1816	save	%g0, %g0, %g0		! and then to T
1817	wr	%l0, 0, %psr		! fix those cond codes....
1818	nop				! (let them settle in)
1819	RETT
1820
1821winuf_user:
1822	/*
1823	 * Underflow from user mode.
1824	 *
1825	 * We cannot use rft_user (as noted above) because
1826	 * we must re-execute the `restore' instruction.
1827	 * Since it could be, e.g., `restore %l0,0,%l0',
1828	 * it is not okay to touch R's registers either.
1829	 *
1830	 * We are now in window I.
1831	 */
1832	btst	7, %sp			! if unaligned, it is invalid
1833	bne	winuf_invalid
1834	 EMPTY
1835
1836	PTE_OF_ADDR(%sp, %l7, winuf_invalid)
1837	CMP_PTE_USER_READ(%l7)		! if first page not readable,
1838	bne	winuf_invalid		! it is invalid
1839	 EMPTY
1840	SLT_IF_1PAGE_RW(%sp, %l7)	! first page is readable
1841	bl,a	winuf_ok		! if only one page, enter window X
1842	 restore %g0, 1, %l1		! and goto ok, & set %l1 to 1
1843	add	%sp, 7*8, %l5
1844	PTE_OF_ADDR(%l5, %l7, winuf_invalid)
1845	CMP_PTE_USER_READ(%l7)		! check second page too
1846	be,a	winuf_ok		! enter window X and goto ok
1847	 restore %g0, 1, %l1		! (and then set %l1 to 1)
1848
1849winuf_invalid:
1850	/*
1851	 * We were unable to restore the window because %sp
1852	 * is invalid or paged out.  Return to the trap window
1853	 * and call trap(T_WINUF).  This will save R to the user
1854	 * stack, then load both R and I into the pcb rw[] area,
1855	 * and return with pcb_nsaved set to -1 for success, 0 for
1856	 * failure.  `Failure' indicates that someone goofed with the
1857	 * trap registers (e.g., signals), so that we need to return
1858	 * from the trap as from a syscall (probably to a signal handler)
1859	 * and let it retry the restore instruction later.  Note that
1860	 * window R will have been pushed out to user space, and thus
1861	 * be the invalid window, by the time we get back here.  (We
1862	 * continue to label it R anyway.)  We must also set %wim again,
1863	 * and set pcb_uw to 1, before enabling traps.  (Window R is the
1864	 * only window, and it is a user window).
1865	 */
1866	save	%g0, %g0, %g0		! back to R
1867#if 0		/* this gives `as' mild heartburn */
1868	save	%g0, 1, %l4		! back to T, then %l4 = 1
1869#else
1870	save	%g0, %g0, %g0		! back to T
1871	mov	1, %l4			! and set %l4 = 1
1872#endif
1873	sethi	%hi(_cpcb), %l6
1874	ld	[%l6 + %lo(_cpcb)], %l6
1875	st	%l4, [%l6 + PCB_UW]	! pcb_uw = 1
1876	ld	[%l6 + PCB_WIM], %l5	! get log2(%wim)
1877	sll	%l4, %l5, %l4		! %l4 = old %wim
1878	wr	%l4, 0, %wim		! window I is now invalid again
1879	set	UPAGES*NBPG-CCFSZ-80, %l5
1880	add	%l6, %l5, %sp		! get onto kernel stack
1881	CHECK_SP_REDZONE(%l6, %l5)
1882
1883	/*
1884	 * Okay, call trap(T_WINUF, psr, pc, &tf).
1885	 * See `slowtrap' above for operation.
1886	 */
1887	wr	%l0, PSR_ET, %psr
1888	std	%l0, [%sp + CCFSZ + 0]	! tf.tf_psr, tf.tf_pc
1889	rd	%y, %l3
1890	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc, tf.tf_y
1891	mov	T_WINUF, %o0
1892	st	%g1, [%sp + CCFSZ + 20]	! tf.tf_global[1]
1893	mov	%l0, %o1
1894	std	%g2, [%sp + CCFSZ + 24]	! etc
1895	mov	%l1, %o2
1896	std	%g4, [%sp + CCFSZ + 32]
1897	add	%sp, CCFSZ, %o3
1898	std	%g6, [%sp + CCFSZ + 40]
1899	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_out[0], etc
1900	std	%i2, [%sp + CCFSZ + 56]
1901	std	%i4, [%sp + CCFSZ + 64]
1902	call	_trap			! trap(T_WINUF, pc, psr, &tf)
1903	 std	%i6, [%sp + CCFSZ + 72]	! tf.tf_out[6]
1904
1905	ldd	[%sp + CCFSZ + 0], %l0	! new psr, pc
1906	ldd	[%sp + CCFSZ + 8], %l2	! new npc, %y
1907	wr	%l3, 0, %y
1908	ld	[%sp + CCFSZ + 20], %g1
1909	ldd	[%sp + CCFSZ + 24], %g2
1910	ldd	[%sp + CCFSZ + 32], %g4
1911	ldd	[%sp + CCFSZ + 40], %g6
1912	ldd	[%sp + CCFSZ + 48], %i0	! %o0 for window R, etc
1913	ldd	[%sp + CCFSZ + 56], %i2
1914	ldd	[%sp + CCFSZ + 64], %i4
1915	wr	%l0, 0, %psr		! disable traps: test must be atomic
1916	ldd	[%sp + CCFSZ + 72], %i6
1917	sethi	%hi(_cpcb), %l6
1918	ld	[%l6 + %lo(_cpcb)], %l6
1919	ld	[%l6 + PCB_NSAVED], %l7	! if nsaved is -1, we have our regs
1920	tst	%l7
1921	bl,a	1f			! got them
1922	 wr	%g0, 0, %wim		! allow us to enter windows R, I
1923	b,a	return_from_trap
1924
1925	/*
1926	 * Got 'em.  Load 'em up.
1927	 */
19281:
1929	mov	%g6, %l3		! save %g6; set %g6 = cpcb
1930	mov	%l6, %g6
1931	st	%g0, [%g6 + PCB_NSAVED]	! and clear magic flag
1932	restore				! from T to R
1933	restore				! from R to I
1934	restore	%g0, 1, %l1		! from I to X, then %l1 = 1
1935	rd	%psr, %l0		! cwp = %psr;
1936	sll	%l1, %l0, %l1
1937	wr	%l1, 0, %wim		! make window X invalid
1938	and	%l0, 31, %l0
1939	st	%l0, [%g6 + PCB_WIM]	! cpcb->pcb_wim = cwp;
1940	nop				! unnecessary? old wim was 0...
1941	save	%g0, %g0, %g0		! back to I
1942	LOADWIN(%g6 + PCB_RW + 64)	! load from rw[1]
1943	save	%g0, %g0, %g0		! back to R
1944	LOADWIN(%g6 + PCB_RW)		! load from rw[0]
1945	save	%g0, %g0, %g0		! back to T
1946	wr	%l0, 0, %psr		! restore condition codes
1947	mov	%l3, %g6		! fix %g6
1948	RETT
1949
1950	/*
1951	 * Restoring from user stack, but everything has checked out
1952	 * as good.  We are now in window X, and %l1 = 1.  Window R
1953	 * is still valid and holds user values.
1954	 */
1955winuf_ok:
1956	rd	%psr, %l0
1957	sll	%l1, %l0, %l1
1958	wr	%l1, 0, %wim		! make this one invalid
1959	sethi	%hi(_cpcb), %l2
1960	ld	[%l2 + %lo(_cpcb)], %l2
1961	and	%l0, 31, %l0
1962	st	%l0, [%l2 + PCB_WIM]	! cpcb->pcb_wim = cwp;
1963	save	%g0, %g0, %g0		! back to I
1964	LOADWIN(%sp)
1965	save	%g0, %g0, %g0		! back to R
1966	save	%g0, %g0, %g0		! back to T
1967	wr	%l0, 0, %psr		! restore condition codes
1968	nop				! it takes three to tangle
1969	RETT
1970#endif /* end `real' version of window underflow trap handler */
1971
1972/*
1973 * Various return-from-trap routines (see return_from_trap).
1974 */
1975
1976/*
1977 * Return from trap, to kernel.
1978 *	%l0 = %psr
1979 *	%l1 = return pc
1980 *	%l2 = return npc
1981 *	%l4 = %wim
1982 *	%l5 = bit for previous window
1983 */
1984rft_kernel:
1985	btst	%l5, %l4		! if (wim & l5)
1986	bnz	1f			!	goto reload;
1987	 wr	%l0, 0, %psr		! but first put !@#*% cond codes back
1988
1989	/* previous window is valid; just rett */
1990	nop				! wait for cond codes to settle in
1991	RETT
1992
1993	/*
1994	 * Previous window is invalid.
1995	 * Update %wim and then reload l0..i7 from frame.
1996	 *
1997	 *	  T I X
1998	 *	0 0 1 0 0   (%wim)
1999	 * [see picture in window_uf handler]
2000	 *
2001	 * T is the current (Trap) window, I is the Invalid window,
2002	 * and X is the window we want to make invalid.  Window X
2003	 * currently has no useful values.
2004	 */
20051:
2006	wr	%g0, 0, %wim		! allow us to enter window I
2007	nop; nop; nop			! (it takes a while)
2008	restore				! enter window I
2009	restore	%g0, 1, %l1		! enter window X, then %l1 = 1
2010	rd	%psr, %l0		! CWP = %psr & 31;
2011	and	%l0, 31, %l0
2012	sll	%l1, %l0, %l1		! wim = 1 << CWP;
2013	wr	%l1, 0, %wim		! setwim(wim);
2014	sethi	%hi(_cpcb), %l1
2015	ld	[%l1 + %lo(_cpcb)], %l1
2016	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = l0 & 31;
2017	save	%g0, %g0, %g0		! back to window I
2018	LOADWIN(%sp)
2019	save	%g0, %g0, %g0		! back to window T
2020	/*
2021	 * Note that the condition codes are still set from
2022	 * the code at rft_kernel; we can simply return.
2023	 */
2024	RETT
2025
2026/*
2027 * Return from trap, to user.  Checks for scheduling trap (`ast') first;
2028 * will re-enter trap() if set.  Note that we may have to switch from
2029 * the interrupt stack to the kernel stack in this case.
2030 *	%l0 = %psr
2031 *	%l1 = return pc
2032 *	%l2 = return npc
2033 *	%l4 = %wim
2034 *	%l5 = bit for previous window
2035 *	%l6 = cpcb
2036 * If returning to a valid window, just set psr and return.
2037 */
2038rft_user:
2039!	sethi	%hi(_want_ast), %l7	! (done below)
2040	ld	[%l7 + %lo(_want_ast)], %l7
2041	tst	%l7			! want AST trap?
2042	bne,a	softtrap		! yes, re-enter trap with type T_AST
2043	 mov	T_AST, %o0
2044
2045	btst	%l5, %l4		! if (wim & l5)
2046	bnz	1f			!	goto reload;
2047	 wr	%l0, 0, %psr		! restore cond codes
2048	nop				! (three instruction delay)
2049	RETT
2050
2051	/*
2052	 * Previous window is invalid.
2053	 * Before we try to load it, we must verify its stack pointer.
2054	 * This is much like the underflow handler, but a bit easier
2055	 * since we can use our own local registers.
2056	 */
20571:
2058	btst	7, %fp			! if unaligned, address is invalid
2059	bne	rft_invalid
2060	 EMPTY
2061
2062	PTE_OF_ADDR(%fp, %l7, rft_invalid)
2063	CMP_PTE_USER_READ(%l7)		! try first page
2064	bne	rft_invalid		! no good
2065	 EMPTY
2066	SLT_IF_1PAGE_RW(%fp, %l7)
2067	bl,a	rft_user_ok		! only 1 page: ok
2068	 wr	%g0, 0, %wim
2069	add	%fp, 7*8, %l5
2070	PTE_OF_ADDR(%l5, %l7, rft_invalid)
2071	CMP_PTE_USER_READ(%l7)		! check 2nd page too
2072	be,a	rft_user_ok
2073	 wr	%g0, 0, %wim
2074
2075	/*
2076	 * The window we wanted to pull could not be pulled.  Instead,
2077	 * re-enter trap with type T_RWRET.  This will pull the window
2078	 * into cpcb->pcb_rw[0] and set cpcb->pcb_nsaved to -1, which we
2079	 * will detect when we try to return again.
2080	 */
2081rft_invalid:
2082	b	softtrap
2083	 mov	T_RWRET, %o0
2084
2085	/*
2086	 * The window we want to pull can be pulled directly.
2087	 */
2088rft_user_ok:
2089!	wr	%g0, 0, %wim		! allow us to get into it
2090	wr	%l0, 0, %psr		! fix up the cond codes now
2091	nop; nop; nop
2092	restore				! enter window I
2093	restore	%g0, 1, %l1		! enter window X, then %l1 = 1
2094	rd	%psr, %l0		! l0 = (junk << 5) + CWP;
2095	sll	%l1, %l0, %l1		! %wim = 1 << CWP;
2096	wr	%l1, 0, %wim
2097	sethi	%hi(_cpcb), %l1
2098	ld	[%l1 + %lo(_cpcb)], %l1
2099	and	%l0, 31, %l0
2100	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = l0 & 31;
2101	save	%g0, %g0, %g0		! back to window I
2102	LOADWIN(%sp)			! suck hard
2103	save	%g0, %g0, %g0		! back to window T
2104	RETT
2105
2106/*
2107 * Return from trap.  Entered after a
2108 *	wr	%l0, 0, %psr
2109 * which disables traps so that we can rett; registers are:
2110 *
2111 *	%l0 = %psr
2112 *	%l1 = return pc
2113 *	%l2 = return npc
2114 *
2115 * (%l3..%l7 anything).
2116 *
2117 * If we are returning to user code, we must:
2118 *  1.  Check for register windows in the pcb that belong on the stack.
2119 *	If there are any, reenter trap with type T_WINOF.
2120 *  2.  Make sure the register windows will not underflow.  This is
2121 *	much easier in kernel mode....
2122 */
2123return_from_trap:
2124!	wr	%l0, 0, %psr		! disable traps so we can rett
2125! (someone else did this already)
2126	and	%l0, 31, %l5
2127	set	wmask, %l6
2128	ldub	[%l6 + %l5], %l5	! %l5 = 1 << ((CWP + 1) % nwindows)
2129	btst	PSR_PS, %l0		! returning to userland?
2130	bnz	rft_kernel		! no, go return to kernel
2131	 rd	%wim, %l4		! (read %wim in any case)
2132
2133rft_user_or_recover_pcb_windows:
2134	/*
2135	 * (entered with %l4=%wim, %l5=wmask[cwp]; %l0..%l2 as usual)
2136	 *
2137	 * check cpcb->pcb_nsaved:
2138	 * if 0, do a `normal' return to user (see rft_user);
2139	 * if > 0, cpcb->pcb_rw[] holds registers to be copied to stack;
2140	 * if -1, cpcb->pcb_rw[0] holds user registers for rett window
2141	 * from an earlier T_RWRET pseudo-trap.
2142	 */
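	/*
	 * Roughly, in C (a sketch of the three cases, using the
	 * conventions described above):
	 *
	 *	if (cpcb->pcb_nsaved == 0)
	 *		goto rft_user;		// ordinary return
	 *	else if (cpcb->pcb_nsaved > 0)
	 *		trap(T_WINOF);		// push pcb_rw[] to user stack
	 *	else	// pcb_nsaved == -1
	 *		reload previous window from pcb_rw[0], then rett;
	 */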
2143	sethi	%hi(_cpcb), %l6
2144	ld	[%l6 + %lo(_cpcb)], %l6
2145	ld	[%l6 + PCB_NSAVED], %l7
2146	tst	%l7
2147	bz,a	rft_user
2148	 sethi	%hi(_want_ast), %l7	! first instr of rft_user
2149
2150	bg,a	softtrap		! if (pcb_nsaved > 0)
2151	 mov	T_WINOF, %o0		!	trap(T_WINOF);
2152
2153	/*
2154	 * To get here, we must have tried to return from a previous
2155	 * trap and discovered that it would cause a window underflow.
2156	 * We then must have tried to pull the registers out of the
2157	 * user stack (from the address in %fp==%i6) and discovered
2158	 * that it was either unaligned or not loaded in memory, and
2159	 * therefore we ran a trap(T_RWRET), which loaded one set of
2160	 * registers into cpcb->pcb_rw[0] (if it had killed the
2161	 * process due to a bad stack, we would not be here).
2162	 *
2163	 * We want to load pcb_rw[0] into the previous window, which
2164	 * we know is currently invalid.  In other words, we want
2165	 * %wim to be 1 << ((cwp + 2) % nwindows).
2166	 */
2167	wr	%g0, 0, %wim		! enable restores
2168	mov	%g6, %l3		! save g6 in l3
2169	mov	%l6, %g6		! set g6 = &u
2170	st	%g0, [%g6 + PCB_NSAVED]	! clear cpcb->pcb_nsaved
2171	restore				! enter window I
2172	restore	%g0, 1, %l1		! enter window X, then %l1 = 1
2173	rd	%psr, %l0
2174	sll	%l1, %l0, %l1		! %wim = 1 << CWP;
2175	wr	%l1, 0, %wim
2176	and	%l0, 31, %l0
2177	st	%l0, [%g6 + PCB_WIM]	! cpcb->pcb_wim = CWP;
2178	nop				! unnecessary? old wim was 0...
2179	save	%g0, %g0, %g0		! back to window I
2180	LOADWIN(%g6 + PCB_RW)
2181	save	%g0, %g0, %g0		! back to window T (trap window)
2182	wr	%l0, 0, %psr		! cond codes, cond codes everywhere
2183	mov	%l3, %g6		! restore g6
2184	RETT
2185
2186! exported end marker for kernel gdb
2187	.globl	_endtrapcode
2188_endtrapcode:
2189
2190/*
2191 * init_tables(nwin) int nwin;
2192 *
2193 * Set up the uwtab and wmask tables.
2194 * We know nwin > 1.
2195 */
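/*
 * Roughly, the tables end up meaning (a C sketch of the loops below,
 * with nwin == nwindows):
 *
 *	uwtab[d] == (nwin - 1 - d) % nwin	for d in [1-nwin .. nwin-1]
 *		(indexed by d = CWP - pcb_wim; stored into pcb_uw)
 *	wmask[c] == 1 << ((c + 1) % nwin)	for c in [0 .. nwin-1]
 *		(indexed by CWP; the %wim bit for the previous window)
 */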
2196init_tables:
2197	/*
2198	 * for (i = -nwin, j = nwin - 2; ++i < 0; j--)
2199	 *	uwtab[i] = j;
2200	 * (loop runs at least once)
2201	 */
2202	set	uwtab, %o3
2203	sub	%g0, %o0, %o1		! i = -nwin + 1
2204	inc	%o1
2205	add	%o0, -2, %o2		! j = nwin - 2;
22060:
2207	stb	%o2, [%o3 + %o1]	! uwtab[i] = j;
22081:
2209	inccc	%o1			! ++i < 0?
2210	bl	0b			! yes, continue loop
2211	 dec	%o2			! in any case, j--
2212
2213	/*
2214	 * (i now equals 0)
2215	 * for (j = nwin - 1; i < nwin; i++, j--)
2216	 *	uwtab[i] = j;
2217	 * (loop runs at least twice)
2218	 */
2219	sub	%o0, 1, %o2		! j = nwin - 1
22200:
2221	stb	%o2, [%o3 + %o1]	! uwtab[i] = j
2222	inc	%o1			! i++
22231:
2224	cmp	%o1, %o0		! i < nwin?
2225	bl	0b			! yes, continue
2226	 dec	%o2			! in any case, j--
2227
2228	/*
2229	 * We observe that, for i in 0..nwin-2, (i+1)%nwin == i+1;
2230	 * for i==nwin-1, (i+1)%nwin == 0.
2231	 * To avoid adding 1, we run i from 1 to nwin and set
2232	 * wmask[i-1].
2233	 *
2234	 * for (i = j = 1; i < nwin; i++) {
2235	 *	j <<= 1;	(j now == 1 << i)
2236	 *	wmask[i - 1] = j;
2237	 * }
2238	 * (loop runs at least once)
2239	 */
2240	set	wmask - 1, %o3
2241	mov	1, %o1			! i = 1;
2242	mov	2, %o2			! j = 2;
22430:
2244	stb	%o2, [%o3 + %o1]	! (wmask - 1)[i] = j;
2245	inc	%o1			! i++
2246	cmp	%o1, %o0		! i < nwin?
2247	bl,a	0b			! yes, continue
2248	 sll	%o2, 1, %o2		! (and j <<= 1)
2249
2250	/*
2251	 * Now i==nwin, so we want wmask[i-1] = 1.
2252	 */
2253	mov	1, %o2			! j = 1;
2254	retl
2255	 stb	%o2, [%o3 + %o1]	! (wmask - 1)[i] = j;
2256
2257
2258dostart:
2259	/*
2260	 * Startup.
2261	 *
2262	 * We have been loaded in low RAM, at some address which
2263	 * is page aligned (0x4000 actually) rather than where we
2264	 * want to run (0xf8004000).  Until we get everything set,
2265	 * we have to be sure to use only pc-relative addressing.
2266	 */
2267	wr	%g0, 0, %wim		! make sure we can set psr
2268	mov	%o0, %g7		! save prom vector pointer
2269	nop; nop
2270	wr	%g0, PSR_S|PSR_PS|PSR_PIL, %psr	! set initial psr
2271	set	AC_CONTEXT, %g1		! paranoia: set context to kernel
2272	stba	%g0, [%g1] ASI_CONTROL
2273	wr	%g0, 2, %wim		! set initial %wim (w1 invalid)
2274	mov	1, %g1			! set pcb_wim (log2(%wim) = 1)
2275	sethi	%hi(_u0 + PCB_WIM), %g2
2276	st	%g1, [%g2 + %lo(_u0 + PCB_WIM)]
2277
2278	/*
2279	 * Step 1: double map low RAM (addresses [0.._end-start-1])
2280	 * to KERNBASE (addresses [KERNBASE.._end-1]).  None of these
2281	 * are `bad' aliases (since they are all on segment boundaries)
2282	 * so we do not have to worry about cache aliasing.
2283	 *
2284	 * We map in another couple of segments just to have some
2285	 * more memory (512K, actually) guaranteed available for
2286	 * bootstrap code (pmap_bootstrap needs memory to hold MMU
2287	 * and context data structures).
2288	 */
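	/*
	 * Roughly, in C (a sketch only; getsegmap/setsegmap stand in
	 * for the ASI_SEGMAP load/store below and are not real names
	 * from this file):
	 *
	 *	for (lowva = 0, highva = KERNBASE;
	 *	    highva < (int)&end + (2 << 18);
	 *	    lowva += 1 << 18, highva += 1 << 18)
	 *		setsegmap(highva, getsegmap(lowva));
	 */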
2289	clr	%l0			! lowva
2290	set	KERNBASE, %l1		! highva
2291	set	_end + (2 << 18), %l2	! last va that must be remapped
2292	set	1 << 18, %l3		! segment size in bytes
22930:
2294	lduba	[%l0] ASI_SEGMAP, %l4	! segmap[highva] = segmap[lowva];
2295	stba	%l4, [%l1] ASI_SEGMAP
2296	add	%l3, %l1, %l1		! highva += segsiz;
2297	cmp	%l1, %l2		! done?
2298	bl	0b			! no, loop
2299	 add	%l3, %l0, %l0		! (and lowva += segsz)
2300
2301	/*
2302	 * Now map the interrupt enable register and clear any interrupts,
2303	 * enabling NMIs.  Note that we will not take NMIs until we change
2304	 * %tbr.
2305	 */
2306	set	IE_reg_addr, %l0
2307	set	IE_REG_PTE, %l1
2308	sta	%l1, [%l0] ASI_PTE
2309	mov	IE_ALLIE, %l1
2310	nop; nop			! paranoia
2311	stb	%l1, [%l0]
2312
2313	/*
2314	 * All set, fix pc and npc.  Once we are where we should be,
2315	 * we can give ourselves a stack and enable traps.
2316	 */
2317	set	1f, %l0
2318	jmp	%l0
2319	 nop
23201:
2321	set	USRSTACK - CCFSZ, %fp	! as if called from user code
2322	set	estack0 - CCFSZ - 80, %sp ! via syscall(boot_me_up) or somesuch
2323	rd	%psr, %l0
2324	wr	%l0, PSR_ET, %psr
2325
2326	/*
2327	 * Step 2: clear BSS.  This may just be paranoia; the boot
2328	 * loader might already do it for us; but what the hell.
2329	 */
2330	set	_edata, %o0		! bzero(edata, end - edata)
2331	set	_end, %o1
2332	call	_bzero
2333	 sub	%o1, %o0, %o1
2334
2335	/*
2336	 * Stash the prom vector now, after bzero, as _promvec lives in bss
2337	 * (which we just zeroed).
2338	 * This depends on the fact that bzero does not use %g7.
2339	 */
2340	sethi	%hi(_promvec), %l0
2341	st	%g7, [%l0 + %lo(_promvec)]
2342
2343	/*
2344	 * Step 3: compute number of windows and set up tables.
2345	 * We could do some of this later.
2346	 */
2347	save	%sp, -64, %sp
2348	rd	%psr, %g1
2349	restore
2350	and	%g1, 31, %g1		! want just the CWP bits
2351	add	%g1, 1, %o0		! compute nwindows
2352	sethi	%hi(_nwindows), %o1	! may as well tell everyone
2353	call	init_tables
2354	 st	%o0, [%o1 + %lo(_nwindows)]
2355
2356	/*
2357	 * Step 4: change the trap base register, now that our trap handlers
2358	 * will function (they need the tables we just set up).
2359	 */
2360	set	_trapbase, %l0
2361	wr	%l0, 0, %tbr
2362	nop				! paranoia
2363
2364	/*
2365	 * Bootstrap, call main, and `return' from a fake trap into `icode'.
2366	 */
2367	call	_bootstrap
2368	 nop
2369	call	_main
2370	 nop
2371
2372	/*
2373	 * Return from main means we are process 1 and need to
2374	 * jump to the `icode' (which appears below) which has
2375	 * been copied in to address 0x2000 in the new process.
2376	 *
2377	 * Our stack & frame pointers are already set up to make it
2378	 * look like we are currently running in a trap taken from
2379	 * user mode (there is room for a full blown trap frame),
2380	 * so all we need do is set up %l0, %l1, and %l2 and branch
2381	 * to return_from_trap.
2382	 */
2383	mov	PSR_S, %l0		! user psr
2384#define XADDR 0x2000 /* XXX */
2385	set	XADDR, %l1		! pc
2386	add	%l1, 4, %l2		! npc
2387	b	return_from_trap
2388	 wr	%l0, 0, %psr		! just like syscall()
2389
2390/*
2391 * Icode is copied out to process 1 to exec init.
2392 * If the exec fails, process 1 exits.
2393 *
2394 * We depend here on the fact that we are copied to address XADDR
2395 * (but so does the code above).
2396 */
2397	.globl	_icode, _szicode
2398_icode:
2399	mov	SYS_execve, %g1
2400	set	init - _icode + XADDR, %o0	! actual location of pathname
2401	set	argv - _icode + XADDR, %o1	! ... and of argv
2402	clr	%o2				! no environment
2403	t	ST_SYSCALL
2404	mov	SYS_exit, %g1
2405	t	ST_SYSCALL
2406
2407init:	.ascii	"/sbin/"
2408init1:	.asciz	"init"
2409	ALIGN
2410argv:	.word	init1 - _icode + XADDR
2411	.word	initflags - _icode + XADDR
2412	.word	0
2413initflags:
2414_szicode:
2415	.long	_szicode - _icode
2416#undef XADDR
2417
2418/*
2419 * The following code is copied to the top of the user stack when each
2420 * process is exec'ed, and signals are `trampolined' off it.
2421 *
2422 * When this code is run, the stack looks like:
2423 *	[%sp]		64 bytes to which registers can be dumped
2424 *	[%sp + 64]	signal number (goes in %o0)
2425 *	[%sp + 64 + 4]	signal code (goes in %o1)
2426 *	[%sp + 64 + 8]	placeholder
2427 *	[%sp + 64 + 12]	argument for %o3, currently unsupported (always 0)
2428 *	[%sp + 64 + 16]	first word of saved state (sigcontext)
2429 *	    .
2430 *	    .
2431 *	    .
2432 *	[%sp + NNN]	last word of saved state
2433 * (followed by previous stack contents or top of signal stack).
2434 * The address of the function to call is in %g1; the old %g1 and %o0
2435 * have already been saved in the sigcontext.  We are running
2436 * in the window that was active when the signal occurred.
2437 *
2438 * Note that [%sp + 64 + 8] == %sp + 64 + 16.  The copy at %sp+64+8
2439 * will eventually be removed, with a hole left in its place, if things
2440 * work out.
2441 */
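/*
 * Viewed as a C struct, the layout above is roughly (field names are
 * illustrative only, not taken from a header):
 *
 *	struct sigframe {
 *		int	sf_dump[16];		// %sp+0:   64-byte dump area
 *		int	sf_signo;		// %sp+64:  -> %o0
 *		int	sf_code;		// %sp+68:  -> %o1
 *		struct	sigcontext *sf_scp;	// %sp+72:  == &sf_sc (see note)
 *		int	sf_arg3;		// %sp+76:  always 0 -> %o3
 *		struct	sigcontext sf_sc;	// %sp+80:  saved state
 *	};
 */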
2442	.globl	_sigcode
2443	.globl	_esigcode
2444_sigcode:
2445	/*
2446	 * Save the %i,%l,%o registers (renaming %o as %i) and set up to
2447	 * call C code (CCFSZ).  At the same time, make room on the stack
2448	 * for 32 %f registers + %fsr.  This comes out to 33*4 or 132 bytes,
2449	 * but this must be aligned to a multiple of 8, or 136 bytes.
2450	 */
2451	save	%sp, -CCFSZ - 136, %sp
2452	mov	%g2, %l2		! save globals in %l registers
2453	mov	%g3, %l3
2454	mov	%g4, %l4
2455	mov	%g5, %l5
2456	mov	%g6, %l6
2457	mov	%g7, %l7
2458	/*
2459	 * Saving the fpu registers is expensive, so do it iff the psr
2460	 * stored in the sigcontext shows that the fpu is enabled.
2461	 */
2462	ld	[%fp + 64 + 16 + SC_PSR_OFFSET], %l0
2463	sethi	%hi(PSR_EF), %l1	! FPU enable bit is too high for andcc
2464	andcc	%l0, %l1, %l0		! %l0 = fpu enable bit
2465	be	1f			! if not set, skip the saves
2466	 rd	%y, %l1			! in any case, save %y
2467
2468	! fpu is enabled, oh well
2469	st	%fsr, [%sp + CCFSZ + 0]
2470	std	%f0, [%sp + CCFSZ + 8]
2471	std	%f2, [%sp + CCFSZ + 16]
2472	std	%f4, [%sp + CCFSZ + 24]
2473	std	%f6, [%sp + CCFSZ + 32]
2474	std	%f8, [%sp + CCFSZ + 40]
2475	std	%f10, [%sp + CCFSZ + 48]
2476	std	%f12, [%sp + CCFSZ + 56]
2477	std	%f14, [%sp + CCFSZ + 64]
2478	std	%f16, [%sp + CCFSZ + 72]
2479	std	%f18, [%sp + CCFSZ + 80]
2480	std	%f20, [%sp + CCFSZ + 88]
2481	std	%f22, [%sp + CCFSZ + 96]
2482	std	%f24, [%sp + CCFSZ + 104]
2483	std	%f26, [%sp + CCFSZ + 112]
2484	std	%f28, [%sp + CCFSZ + 120]
2485	std	%f30, [%sp + CCFSZ + 128]
2486
24871:
2488	ldd	[%fp + 64], %o0		! sig, code
2489	ld	[%fp + 76], %o3		! arg3
2490	call	%g1			! (*sa->sa_handler)(sig,code,scp,arg3)
2491	 add	%fp, 64 + 16, %o2	! scp
2492
2493	/*
2494	 * Now that the handler has returned, re-establish all the state
2495	 * we just saved above, then do a sigreturn.
2496	 */
2497	tst	%l0			! reload fpu registers?
2498	be	1f			! if not, skip the loads
2499	 wr	%l1, %g0, %y		! in any case, restore %y
2500
2501	ld	[%sp + CCFSZ + 0], %fsr
2502	ldd	[%sp + CCFSZ + 8], %f0
2503	ldd	[%sp + CCFSZ + 16], %f2
2504	ldd	[%sp + CCFSZ + 24], %f4
2505	ldd	[%sp + CCFSZ + 32], %f6
2506	ldd	[%sp + CCFSZ + 40], %f8
2507	ldd	[%sp + CCFSZ + 48], %f10
2508	ldd	[%sp + CCFSZ + 56], %f12
2509	ldd	[%sp + CCFSZ + 64], %f14
2510	ldd	[%sp + CCFSZ + 72], %f16
2511	ldd	[%sp + CCFSZ + 80], %f18
2512	ldd	[%sp + CCFSZ + 88], %f20
2513	ldd	[%sp + CCFSZ + 96], %f22
2514	ldd	[%sp + CCFSZ + 104], %f24
2515	ldd	[%sp + CCFSZ + 112], %f26
2516	ldd	[%sp + CCFSZ + 120], %f28
2517	ldd	[%sp + CCFSZ + 128], %f30
2518
25191:
2520	mov	%l2, %g2
2521	mov	%l3, %g3
2522	mov	%l4, %g4
2523	mov	%l5, %g5
2524	mov	%l6, %g6
2525	mov	%l7, %g7
2526
2527	restore	%g0, SYS_sigreturn, %g1	! get registers back & set syscall #
2528	add	%sp, 64 + 16, %o0	! compute scp
2529	t	ST_SYSCALL		! sigreturn(scp)
2530	! sigreturn does not return unless it fails
2531	mov	SYS_exit, %g1		! exit(errno)
2532	t	ST_SYSCALL
2533_esigcode:
2534
2535/*
2536 * Primitives
2537 */
2538
2539#ifdef GPROF
2540	.globl	mcount
2541#define	ENTRY(x) \
2542	.globl _##x; _##x: \
2543	save	%sp, -CCFSZ, %sp; \
2544	call	mcount; \
2545	nop; \
2546	restore
2547#else
2548#define	ENTRY(x)	.globl _##x; _##x:
2549#endif
2550#define	ALTENTRY(x)	.globl _##x; _##x:
2551
2552/*
2553 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
2554 *
2555 * Copy a null terminated string from the user address space into
2556 * the kernel address space.
2557 */
2558ENTRY(copyinstr)
2559	! %o0 = fromaddr, %o1 = toaddr, %o2 = maxlen, %o3 = &lencopied
2560#ifdef DIAGNOSTIC
2561	tst	%o2			! kernel should never give maxlen <= 0
2562	ble	1f
2563	 EMPTY
2564#endif
2565	set	KERNBASE, %o4
2566	cmp	%o0, %o4		! fromaddr < KERNBASE?
2567	blu,a	Lcsdocopy		! yes, go do it
2568	sethi	%hi(_cpcb), %o4		! (first instr of copy)
2569
2570	b	Lcsdone			! no, return EFAULT
2571	 mov	EFAULT, %o0
2572
25731:
2574	sethi	%hi(2f), %o0
2575	call	_panic
2576	 or	%lo(2f), %o0, %o0
25772:	.asciz	"copyinstr"
2578	ALIGN
2579
2580/*
2581 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
2582 *
2583 * Copy a null terminated string from the kernel
2584 * address space to the user address space.
2585 */
2586ENTRY(copyoutstr)
2587	! %o0 = fromaddr, %o1 = toaddr, %o2 = maxlen, %o3 = &lencopied
2588#ifdef DIAGNOSTIC
2589	tst	%o2
2590	ble	1f
2591	 EMPTY
2592#endif
2593	set	KERNBASE, %o4
2594	cmp	%o1, %o4		! toaddr < KERNBASE?
2595	blu,a	Lcsdocopy		! yes, go do it
2596	 sethi	%hi(_cpcb), %o4		! (first instr of copy)
2597
2598	b	Lcsdone			! no, return EFAULT
2599	 mov	EFAULT, %o0
2600
26011:
2602	sethi	%hi(2f), %o0
2603	call	_panic
2604	 or	%lo(2f), %o0, %o0
26052:	.asciz	"copyoutstr"
2606	ALIGN
2607
2608Lcsdocopy:
2609!	sethi	%hi(_cpcb), %o4		! (done earlier)
2610	ld	[%o4 + %lo(_cpcb)], %o4	! catch faults
2611	set	Lcsfault, %o5
2612	st	%o5, [%o4 + PCB_ONFAULT]
2613
2614	mov	%o1, %o5		!	save = toaddr;
2615! XXX should do this in bigger chunks when possible
26160:					! loop:
2617	ldsb	[%o0], %g1		!	c = *fromaddr;
2618	tst	%g1
2619	stb	%g1, [%o1]		!	*toaddr++ = c;
2620	be	1f			!	if (c == 0)
2621	 inc	%o1			!		goto ok;
2622	deccc	%o2			!	if (--len > 0) {
2623	bg	0b			!		fromaddr++;
2624	 inc	%o0			!		goto loop;
2625					!	}
2626					!
2627	b	Lcsdone			!	error = ENAMETOOLONG;
2628	 mov	ENAMETOOLONG, %o0	!	goto done;
26291:					! ok:
2630	clr	%o0			!    error = 0;
2631Lcsdone:				! done:
2632	sub	%o1, %o5, %o1		!	len = to - save;
2633	tst	%o3			!	if (lencopied)
2634	bnz,a	3f
2635	 st	%o1, [%o3]		!		*lencopied = len;
26363:
2637	retl				! cpcb->pcb_onfault = 0;
2638	 st	%g0, [%o4 + PCB_ONFAULT]! return (error);
2639
2640Lcsfault:
2641	b	Lcsdone			! error = EFAULT;
2642	 mov	EFAULT, %o0		! goto ret;
2643
2644/*
2645 * copystr(fromaddr, toaddr, maxlength, &lencopied)
2646 *
2647 * Copy a null terminated string from one point to another in
2648 * the kernel address space.  (This is a leaf procedure, but
2649 * it does not seem that way to the C compiler.)
2650 */
2651ENTRY(copystr)
2652#ifdef DIAGNOSTIC
2653	tst	%o2			! 	if (maxlength <= 0)
2654	ble	4f			!		panic(...);
2655	 EMPTY
2656#endif
2657	mov	%o1, %o5		!	to0 = to;
26580:					! loop:
2659	ldsb	[%o0], %o4		!	c = *from;
2660	tst	%o4
2661	stb	%o4, [%o1]		!	*to++ = c;
2662	be	1f			!	if (c == 0)
2663	 inc	%o1			!		goto ok;
2664	deccc	%o2			!	if (--len > 0) {
2665	bg,a	0b			!		from++;
2666	 inc	%o0			!		goto loop;
2667	b	2f			!	}
2668	 mov	ENAMETOOLONG, %o0	!	ret = ENAMETOOLONG; goto done;
26691:					! ok:
2670	clr	%o0			!	ret = 0;
26712:
2672	sub	%o1, %o5, %o1		!	len = to - to0;
2673	tst	%o3			!	if (lencopied)
2674	bnz,a	3f
2675	 st	%o1, [%o3]		!		*lencopied = len;
26763:
2677	retl
2678	 nop
2679#ifdef DIAGNOSTIC
26804:
2681	sethi	%hi(5f), %o0
2682	call	_panic
2683	 or	%lo(5f), %o0, %o0
26845:
2685	.asciz	"copystr"
2686	ALIGN
2687#endif
2688
2689/*
2690 * Copyin(src, dst, len)
2691 *
2692 * Copy specified amount of data from user space into the kernel.
2693 */
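/*
 * Roughly, in C (a sketch of the code below, not the original source):
 *
 *	if ((unsigned)src >= KERNBASE)
 *		return (EFAULT);
 *	cpcb->pcb_onfault = Lcopyfault;
 *	bcopy(src, dst, len);
 *	cpcb->pcb_onfault = 0;
 *	return (0);			// Lcopyfault returns EFAULT instead
 */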
2694ENTRY(copyin)
2695	set	KERNBASE, %o3
2696	cmp	%o0, %o3		! src < KERNBASE?
2697	blu,a	Ldocopy			! yes, can try it
2698	 sethi	%hi(_cpcb), %o3
2699
2700	/* source address points into kernel space: return EFAULT */
2701	retl
2702	 mov	EFAULT, %o0
2703
2704/*
2705 * Copyout(src, dst, len)
2706 *
2707 * Copy specified amount of data from kernel to user space.
2708 * Just like copyin, except that the `dst' addresses are user space
2709 * rather than the `src' addresses.
2710 */
2711ENTRY(copyout)
2712	set	KERNBASE, %o3
2713	cmp	%o1, %o3		! dst < KERNBASE?
2714	blu,a	Ldocopy
2715	 sethi	%hi(_cpcb), %o3
2716
2717	/* destination address points into kernel space: return EFAULT */
2718	retl
2719	 mov	EFAULT, %o0
2720
2721	/*
2722	 * ******NOTE****** this depends on bcopy() not using %g7
2723	 */
2724Ldocopy:
2725!	sethi	%hi(_cpcb), %o3
2726	ld	[%o3 + %lo(_cpcb)], %o3
2727	set	Lcopyfault, %o4
2728	mov	%o7, %g7		! save return address
2729	call	_bcopy			! bcopy(src, dst, len)
2730	 st	%o4, [%o3 + PCB_ONFAULT]
2731
2732	sethi	%hi(_cpcb), %o3
2733	ld	[%o3 + %lo(_cpcb)], %o3
2734	st	%g0, [%o3 + PCB_ONFAULT]
2735	jmp	%g7 + 8
2736	 clr	%o0			! return 0
2737
2738! Copyin or copyout fault.  Clear cpcb->pcb_onfault and return EFAULT.
2739! Note that although we were in bcopy, there is no state to clean up;
2740! the only special thing is that we have to return to [g7 + 8] rather than
2741! [o7 + 8].
2742Lcopyfault:
2743	sethi	%hi(_cpcb), %o3
2744	ld	[%o3 + %lo(_cpcb)], %o3
2745	st	%g0, [%o3 + PCB_ONFAULT]
2746	jmp	%g7 + 8
2747	 mov	EFAULT, %o0
2748
2749
2750/*
2751 * Write all user windows presently in the CPU back to the user's stack.
2752 * We just do `save' instructions until pcb_uw == 0.
2753 *
2754 *	p = cpcb;
2755 *	nsaves = 0;
2756 *	while (p->pcb_uw > 0)
2757 *		save(), nsaves++;
2758 *	while (--nsaves >= 0)
2759 *		restore();
2760 */
2761ENTRY(write_user_windows)
2762	sethi	%hi(_cpcb), %g6
2763	ld	[%g6 + %lo(_cpcb)], %g6
2764	b	2f
2765	 clr	%g5
27661:
2767	save	%sp, -64, %sp
27682:
2769	ld	[%g6 + PCB_UW], %g7
2770	tst	%g7
2771	bg,a	1b
2772	 inc	%g5
27733:
2774	deccc	%g5
2775	bge,a	3b
2776	 restore
2777	retl
2778	 nop
2779
2780
2781	.comm	_want_resched,4
2782/*
2783 * Masterpaddr is the p->p_addr of the last process on the processor.
2784 * XXX masterpaddr is almost the same as cpcb
2785 * XXX should delete this entirely
2786 */
2787	.comm	_masterpaddr, 4
2788
2789/*
2790 * Switch statistics (for later tweaking):
2791 *	nswitchdiff = p1 => p2 (i.e., chose different process)
2792 *	nswitchexit = number of calls to swtchexit()
2793 *	_cnt.v_swtch = total calls to swtch+swtchexit
2794 */
2795	.comm	_nswitchdiff, 4
2796	.comm	_nswitchexit, 4
2797
2798/*
2799 * REGISTER USAGE IN swtch AND swtchexit:
2800 * This is split into two phases, more or less
2801 * `before we locate a new proc' and `after'.
2802 * Some values are the same in both phases.
2803 * Note that the %o0-registers are not preserved across
2804 * the psr change when entering a new process, since this
2805 * usually changes the CWP field (hence heavy usage of %g's).
2806 *
2807 *	%g1 = oldpsr (excluding ipl bits)
2808 *	%g2 = %hi(_whichqs); newpsr
2809 *	%g3 = p
2810 *	%g4 = lastproc
2811 *	%g5 = <free>; newpcb
2812 *	%g6 = %hi(_cpcb)
2813 *	%g7 = %hi(_curproc)
2814 *	%o0 = tmp 1
2815 *	%o1 = tmp 2
2816 *	%o2 = tmp 3
2817 *	%o3 = tmp 4; whichqs; vm
2818	 *	%o4 = tmp 5; which; sswap
2819	 *	%o5 = tmp 6; q; <free>
2820 */
2821
2822/*
2823 * swtchexit is called only from cpu_exit() before the current process
2824 * has freed its kernel stack; we must free it.  (curproc is already NULL.)
2825 *
2826 * We lay the process to rest by changing to the `idle' kernel stack,
2827 * and note that the `last loaded process' is nonexistent.
2828 */
2829ENTRY(swtchexit)
2830	mov	%o0, %g2		! save the
2831	mov	%o1, %g3		! ... three parameters
2832	mov	%o2, %g4		! ... to kmem_free
2833
2834	/*
2835	 * Change pcb to idle u. area, i.e., set %sp to top of stack
2836	 * and %psr to PSR_S|PSR_ET, and set cpcb to point to _idle_u.
2837	 * Once we have left the old stack, we can call kmem_free to
2838	 * destroy it.  Call it any sooner and the register windows
2839	 * go bye-bye.
2840	 */
2841	set	_idle_u, %g5
2842	sethi	%hi(_cpcb), %g6
2843	mov	1, %g7
2844	wr	%g0, PSR_S, %psr	! change to window 0, traps off
2845	wr	%g0, 2, %wim		! and make window 1 the trap window
2846	st	%g5, [%g6 + %lo(_cpcb)]	! cpcb = &idle_u
2847	st	%g7, [%g5 + PCB_WIM]	! idle_u.pcb_wim = log2(2) = 1
2848	set	_idle_u + UPAGES * NBPG - CCFSZ, %sp	! set new %sp
2849#ifdef DEBUG
2850	set	_idle_u, %l6
2851	SET_SP_REDZONE(%l6, %l5)
2852#endif
2853	wr	%g0, PSR_S|PSR_ET, %psr	! and then enable traps
2854	mov	%g2, %o0		! now ready to call kmem_free
2855	mov	%g3, %o1
2856	call	_kmem_free
2857	 mov	%g4, %o2
2858
2859	/*
2860	 * Now fall through to `the last swtch'.  %g6 was set to
2861	 * %hi(_cpcb), but may have been clobbered in kmem_free,
2862	 * so all the registers described below will be set here.
2863	 *
2864	 * REGISTER USAGE AT THIS POINT:
2865	 *	%g1 = oldpsr (excluding ipl bits)
2866	 *	%g2 = %hi(_whichqs)
2867	 *	%g4 = lastproc
2868	 *	%g6 = %hi(_cpcb)
2869	 *	%g7 = %hi(_curproc)
2870	 *	%o0 = tmp 1
2871	 *	%o1 = tmp 2
2872	 *	%o3 = whichqs
2873	 */
2874
2875	INCR(_nswitchexit)		! nswitchexit++;
2876	INCR(_cnt+V_SWTCH)		! cnt.v_swtch++;
2877
2878	mov	PSR_S|PSR_ET, %g1	! oldpsr = PSR_S | PSR_ET;
2879	sethi	%hi(_whichqs), %g2
2880	clr	%g4			! lastproc = NULL;
2881	sethi	%hi(_cpcb), %g6
2882	sethi	%hi(_curproc), %g7
2883	/* FALLTHROUGH */
2884
2885/*
2886 * When no processes are on the runq, swtch
2887 * idles here waiting for something to come ready.
2888 * The registers are set up as noted above.
2889 */
2890	.globl	idle
2891idle:
2892	st	%g0, [%g7 + %lo(_curproc)] ! curproc = NULL;
2893	wr	%g1, 0, %psr		! (void) spl0();
28941:					! spin reading _whichqs until nonzero
2895	ld	[%g2 + %lo(_whichqs)], %o3
2896	tst	%o3
2897	bnz,a	Lsw_scan
2898	 wr	%g1, PIL_CLOCK << 8, %psr	! (void) splclock();
2899	b,a	1b
2900
2901Lsw_panic_rq:
2902	sethi	%hi(1f), %o0
2903	call	_panic
2904	 or	%lo(1f), %o0, %o0
2905Lsw_panic_wchan:
2906	sethi	%hi(2f), %o0
2907	call	_panic
2908	 or	%lo(2f), %o0, %o0
2909Lsw_panic_srun:
2910	sethi	%hi(3f), %o0
2911	call	_panic
2912	 or	%lo(3f), %o0, %o0
29131:	.asciz	"swtch rq"
29142:	.asciz	"swtch wchan"
29153:	.asciz	"swtch SRUN"
2916	ALIGN
2917
2918/*
2919 * cpu_swtch() picks a process to run and runs it, saving the current
2920 * one away.  On the assumption that (since most workstations are
2921 * single user machines) the chances are quite good that the new
2922 * process will turn out to be the current process, we defer saving
2923 * it here until we have found someone to load.  If that someone
2924 * is the current process we avoid both store and load.
2925 *
2926 * cpu_swtch() is always entered at splstatclock or splhigh.
2927 *
2928 * IT MIGHT BE WORTH SAVING BEFORE ENTERING idle TO AVOID HAVING TO
2929 * SAVE LATER WHEN SOMEONE ELSE IS READY ... MUST MEASURE!
2930 */
2931	.globl	_runtime
2932	.globl	_time
2933ENTRY(cpu_swtch)
2934	/*
2935	 * REGISTER USAGE AT THIS POINT:
2936	 *	%g1 = oldpsr (excluding ipl bits)
2937	 *	%g2 = %hi(_whichqs)
2938	 *	%g3 = p
2939	 *	%g4 = lastproc
2940	 *	%g5 = tmp 0
2941	 *	%g6 = %hi(_cpcb)
2942	 *	%g7 = %hi(_curproc)
2943	 *	%o0 = tmp 1
2944	 *	%o1 = tmp 2
2945	 *	%o2 = tmp 3
2946	 *	%o3 = tmp 4, then at Lsw_scan, whichqs
2947	 *	%o4 = tmp 5, then at Lsw_scan, which
2948	 *	%o5 = tmp 6, then at Lsw_scan, q
2949	 */
2950	sethi	%hi(_whichqs), %g2	! set up addr regs
2951	sethi	%hi(_cpcb), %g6
2952	ld	[%g6 + %lo(_cpcb)], %o0
2953	std	%o6, [%o0 + PCB_SP]	! cpcb->pcb_<sp,pc> = <sp,pc>;
2954	rd	%psr, %g1		! oldpsr = %psr;
2955	sethi	%hi(_curproc), %g7
2956	ld	[%g7 + %lo(_curproc)], %g4	! lastproc = curproc;
2957	st	%g1, [%o0 + PCB_PSR]	! cpcb->pcb_psr = oldpsr;
2958	andn	%g1, PSR_PIL, %g1	! oldpsr &= ~PSR_PIL;
2959
2960	/*
2961	 * In all the fiddling we did to get this far, the thing we are
2962	 * waiting for might have come ready, so let interrupts in briefly
2963	 * before checking for other processes.  Note that we still have
2964	 * curproc set---we have to fix this or we can get in trouble with
2965	 * the run queues below.
2966	 */
2967	st	%g0, [%g7 + %lo(_curproc)]	! curproc = NULL;
2968	wr	%g1, 0, %psr			! (void) spl0();
2969	nop; nop; nop				! paranoia
2970	wr	%g1, PIL_CLOCK << 8, %psr	! (void) splclock();
2971
2972Lsw_scan:
2973	nop; nop; nop				! paranoia
2974	/*
2975	 * We're about to run a (possibly) new process.  Set runtime
2976	 * to indicate its start time.
2977	 */
2978	sethi	%hi(_time), %o0
2979	ldd	[%o0 + %lo(_time)], %o2
2980	sethi	%hi(_runtime), %o0
2981	std	%o2, [%o0 + %lo(_runtime)]
2982
2983	ld	[%g2 + %lo(_whichqs)], %o3
2984
2985	/*
2986	 * Optimized inline expansion of `which = ffs(whichqs) - 1';
2987	 * branches to idle if ffs(whichqs) was 0.
2988	 */
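	/*
	 * In C this is just (sketch):
	 *
	 *	if ((which = ffs(whichqs) - 1) < 0)
	 *		goto idle;
	 *
	 * with ffs() open-coded one byte at a time via ffstab
	 * (note ffstab[0] == -24, so a zero whichqs falls out as 0
	 * after the final `+ 24' and we branch to idle).
	 */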
2989	set	ffstab, %o2
2990	andcc	%o3, 0xff, %o1		! byte 0 zero?
2991	bz,a	1f			! yes, try byte 1
2992	 srl	%o3, 8, %o0
2993	b	2f			! ffs = ffstab[byte0]; which = ffs - 1;
2994	 ldsb	[%o2 + %o1], %o0
29951:	andcc	%o0, 0xff, %o1		! byte 1 zero?
2996	bz,a	1f			! yes, try byte 2
2997	 srl	%o0, 8, %o0
2998	ldsb	[%o2 + %o1], %o0	! which = ffstab[byte1] + 7;
2999	b	3f
3000	 add	%o0, 7, %o4
30011:	andcc	%o0, 0xff, %o1		! byte 2 zero?
3002	bz,a	1f			! yes, try byte 3
3003	 srl	%o0, 8, %o0
3004	ldsb	[%o2 + %o1], %o0	! which = ffstab[byte2] + 15;
3005	b	3f
3006	 add	%o0, 15, %o4
30071:	ldsb	[%o2 + %o0], %o0	! ffs = ffstab[byte3] + 24
3008	addcc	%o0, 24, %o0		! (note that ffstab[0] == -24)
3009	bz	idle			! if answer was 0, go idle
3010	 EMPTY
30112:	sub	%o0, 1, %o4		! which = ffs(whichqs) - 1
30123:	/* end optimized inline expansion */
3013
3014	/*
3015	 * We found a nonempty run queue.  Take its first process.
3016	 */
3017	set	_qs, %o5		! q = &qs[which];
3018	sll	%o4, 3, %o0
3019	add	%o0, %o5, %o5
3020	ld	[%o5], %g3		! p = q->ph_link;
3021	cmp	%g3, %o5		! if (p == q)
3022	be	Lsw_panic_rq		!	panic("swtch rq");
3023	 EMPTY
3024	ld	[%g3], %o0		! tmp0 = p->p_link;
3025	st	%o0, [%o5]		! q->ph_link = tmp0;
3026	st	%o5, [%o0 + 4]		! tmp0->p_rlink = q;
3027	cmp	%o0, %o5		! if (tmp0 == q)
3028	bne	1f
3029	 EMPTY
3030	mov	1, %o1			!	whichqs &= ~(1 << which);
3031	sll	%o1, %o4, %o1
3032	andn	%o3, %o1, %o3
3033	st	%o3, [%g2 + %lo(_whichqs)]
30341:
3035	/*
3036	 * PHASE TWO: NEW REGISTER USAGE:
3037	 *	%g1 = oldpsr (excluding ipl bits)
3038	 *	%g2 = newpsr
3039	 *	%g3 = p
3040	 *	%g4 = lastproc
3041	 *	%g5 = newpcb
3042	 *	%g6 = %hi(_cpcb)
3043	 *	%g7 = %hi(_curproc)
3044	 *	%o0 = tmp 1
3045	 *	%o1 = tmp 2
3046	 *	%o2 = tmp 3
3047	 *	%o3 = vm
3048	 *	%o4 = sswap
3049	 *	%o5 = <free>
3050	 */
3051
3052	/* firewalls */
3053	ld	[%g3 + P_WCHAN], %o0	! if (p->p_wchan)
3054	tst	%o0
3055	bne	Lsw_panic_wchan		!	panic("swtch wchan");
3056	 EMPTY
3057	ldsb	[%g3 + P_STAT], %o0	! if (p->p_stat != SRUN)
3058	cmp	%o0, SRUN
3059	bne	Lsw_panic_srun		!	panic("swtch SRUN");
3060	 EMPTY
3061
3062	/*
3063	 * Committed to running process p.
3064	 * It may be the same as the one we were running before.
3065	 */
3066	sethi	%hi(_want_resched), %o0
3067	st	%g0, [%o0 + %lo(_want_resched)]	! want_resched = 0;
3068	ld	[%g3 + P_ADDR], %g5		! newpcb = p->p_addr;
3069	st	%g0, [%g3 + 4]			! p->p_rlink = NULL;
3070	ld	[%g5 + PCB_PSR], %g2		! newpsr = newpcb->pcb_psr;
3071	st	%g3, [%g7 + %lo(_curproc)]	! curproc = p;
3072
3073	cmp	%g3, %g4		! p == lastproc?
3074	be,a	Lsw_sameproc		! yes, go return 0
3075	 wr	%g2, 0, %psr		! (after restoring ipl)
3076
3077	/*
3078	 * Not the old process.  Save the old process, if any;
3079	 * then load p.
3080	 */
3081	tst	%g4
3082	be,a	Lsw_load		! if no old process, go load
3083	 wr	%g1, (PIL_CLOCK << 8) | PSR_ET, %psr
3084
3085	INCR(_nswitchdiff)		! clobbers %o0,%o1
3086	/*
3087	 * save: write back all windows (including the current one).
3088	 * XXX	crude; knows nwindows <= 8
3089	 */
3090#define	SAVE save %sp, -64, %sp
3091	SAVE; SAVE; SAVE; SAVE; SAVE; SAVE; SAVE	/* 7 of each: */
3092	restore; restore; restore; restore; restore; restore; restore
3093
3094	/*
3095	 * Load the new process.  To load, we must change stacks and
3096	 * alter cpcb and %wim, hence we must disable traps.  %psr is
3097	 * currently equal to oldpsr (%g1) ^ (PIL_CLOCK << 8);
3098	 * this means that PSR_ET is on.  Likewise, PSR_ET is on
3099	 * in newpsr (%g2), although we do not know newpsr's ipl.
3100	 *
3101	 * We also must load up the `in' and `local' registers.
3102	 */
3103	wr	%g1, (PIL_CLOCK << 8) | PSR_ET, %psr
3104Lsw_load:
3105!	wr	%g1, (PIL_CLOCK << 8) | PSR_ET, %psr	! done above
3106	/* compute new wim */
3107	ld	[%g5 + PCB_WIM], %o0
3108	mov	1, %o1
3109	sll	%o1, %o0, %o0
3110	wr	%o0, 0, %wim		! %wim = 1 << newpcb->pcb_wim;
3111	/* now must not change %psr for 3 more instrs */
3112/*1*/	set	PSR_EF|PSR_EC, %o0
3113/*2*/	andn	%g2, %o0, %g2		! newpsr &= ~(PSR_EF|PSR_EC);
3114/*3*/	nop
3115	/* set new psr, but with traps disabled */
3116	wr	%g2, PSR_ET, %psr	! %psr = newpsr ^ PSR_ET;
3117	/* set new cpcb */
3118	st	%g5, [%g6 + %lo(_cpcb)]	! cpcb = newpcb;
3119	/* XXX update masterpaddr too */
3120	sethi	%hi(_masterpaddr), %g7
3121	st	%g5, [%g7 + %lo(_masterpaddr)]
3122	ldd	[%g5 + PCB_SP], %o6	! <sp,pc> = newpcb->pcb_<sp,pc>
3123	/* load window */
3124	ldd	[%sp + (0*8)], %l0
3125	ldd	[%sp + (1*8)], %l2
3126	ldd	[%sp + (2*8)], %l4
3127	ldd	[%sp + (3*8)], %l6
3128	ldd	[%sp + (4*8)], %i0
3129	ldd	[%sp + (5*8)], %i2
3130	ldd	[%sp + (6*8)], %i4
3131	ldd	[%sp + (7*8)], %i6
3132#ifdef DEBUG
3133	mov	%g5, %o0
3134	SET_SP_REDZONE(%o0, %o1)
3135	CHECK_SP_REDZONE(%o0, %o1)
3136#endif
3137	/* finally, enable traps */
3138	wr	%g2, 0, %psr		! psr = newpsr;
3139
3140	/*
3141	 * Now running p.  Make sure it has a context so that it
3142	 * can talk about user space stuff.  (Its pcb_uw is currently
3143	 * zero so it is safe to have interrupts going here.)
3144	 */
3145	ld	[%g3 + P_VMSPACE], %o3	! vm = p->p_vmspace;
3146	ld	[%o3 + VM_PMAP_CTX], %o0! if (vm->vm_pmap.pm_ctx != NULL)
3147	tst	%o0
3148	bnz,a	Lsw_havectx		!	goto havecontext;
3149	 ld	[%o3 + VM_PMAP_CTXNUM], %o0
3150
3151	/* p does not have a context: call ctx_alloc to get one */
3152	save	%sp, -CCFSZ, %sp
3153	call	_ctx_alloc		! ctx_alloc(&vm->vm_pmap);
3154	 add	%i3, VM_PMAP, %o0
3155	ret
3156	 restore
3157
3158	/* p does have a context: just switch to it */
3159Lsw_havectx:
3160!	ld	[%o3 + VM_PMAP_CTXNUM], %o0	! (done in delay slot)
3161	set	AC_CONTEXT, %o1
3162	stba	%o0, [%o1] ASI_CONTROL	! setcontext(vm->vm_pmap.pm_ctxnum);
3163	retl
3164	 nop
3165
3166Lsw_sameproc:
3167	/*
3168	 * We are resuming the process that was running at the
3169	 * call to swtch().  Just set psr ipl and return.
3170	 */
3171!	wr	%g2, 0, %psr		! %psr = newpsr; (done earlier)
3172	nop
3173	retl
3174	 nop
3175
3176
3177/*
3178 * Snapshot the current process so that stack frames are up to date.
3179 * This is called from two places:
3180 *  - just before a crash dump, for the stack update;
3181 *  - in cpu_fork(), before copying the kernel stack.
3182 * In the latter case the pcb and stack will be copied to the child,
3183 * and the child will be made runnable.  Eventually swtch() will run
3184 * it.  When it does, we want its pcb_pc set so that we can appear
3185 * to return 1 from cpu_fork(), so we store the current sp and psr
3186 * in the given pcb, and set its pcb_pc to our return-1 code (offset
3187 * by -8 due to call/ret conventions).  This is not useful in the crash
3188 * dump code but it is easiest to do it anyway.
3189 */
3190ENTRY(snapshot)
3191	st	%o6, [%o0 + PCB_SP]	! save sp
3192	set	1f - 8, %o1		! set child-return pc
3193	st	%o1, [%o0 + PCB_PC]
3194	rd	%psr, %o1		! save psr
3195	st	%o1, [%o0 + PCB_PSR]
3196
3197	/*
3198	 * Just like swtch(); same XXX comments apply.
3199	 * 7 of each.  Minor tweak: the 7th restore is
3200	 * done after a ret.
3201	 */
3202	SAVE; SAVE; SAVE; SAVE; SAVE; SAVE; SAVE
3203	restore; restore; restore; restore; restore; restore; ret; restore
3204
32051:	/* this is reached only after a child gets chosen in swtch() */
3206	mov	1, %i0			! return 1 from cpu_fork
3207	ret
3208	 restore
3209
3210/*
3211 * {fu,su}{,i}{byte,word}
3212 */
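/*
 * All of these follow the same pcb_onfault pattern; roughly, in C
 * (a sketch only):
 *
 *	cpcb->pcb_onfault = Lfserr;	// a fault here vectors to Lfserr
 *	v = *(int *)addr;		// fu*: load; su*: store instead
 *	cpcb->pcb_onfault = 0;
 *	return (v);			// su* return 0; Lfserr returns -1
 */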
3213ALTENTRY(fuiword)
3214ENTRY(fuword)
3215	set	KERNBASE, %o2
3216	cmp	%o0, %o2		! if addr >= KERNBASE...
3217	bgeu	Lfsbadaddr
3218	EMPTY
3219	btst	3, %o0			! or has low bits set...
3220	bnz	Lfsbadaddr		!	go return -1
3221	EMPTY
3222	sethi	%hi(_cpcb), %o2		! cpcb->pcb_onfault = Lfserr;
3223	ld	[%o2 + %lo(_cpcb)], %o2
3224	set	Lfserr, %o3
3225	st	%o3, [%o2 + PCB_ONFAULT]
3226	ld	[%o0], %o0		! fetch the word
3227	retl				! phew, made it, return the word
3228	st	%g0, [%o2 + PCB_ONFAULT]! but first clear onfault
3229
3230Lfserr:
3231	st	%g0, [%o2 + PCB_ONFAULT]! error in r/w, clear pcb_onfault
3232Lfsbadaddr:
3233	retl				! and return error indicator
3234	mov	-1, %o0
3235
3236	/*
3237	 * This is just like Lfserr, but it's a global label that allows
3238	 * mem_access_fault() to check to see that we don't want to try to
3239	 * page in the fault.  It's used by fuswintr() etc.
3240	 */
3241	.globl	_Lfsbail
3242_Lfsbail:
3243	st	%g0, [%o2 + PCB_ONFAULT]! error in r/w, clear pcb_onfault
3244	retl				! and return error indicator
3245	mov	-1, %o0
3246
3247	/*
3248	 * Like fusword but callable from interrupt context.
3249	 * Fails if data isn't resident.
3250	 */
3251ENTRY(fuswintr)
3252	set	KERNBASE, %o2
3253	cmp	%o0, %o2		! if addr >= KERNBASE
3254	bgeu	Lfsbadaddr		!	return error
3255	EMPTY
3256	sethi	%hi(_cpcb), %o2		! cpcb->pcb_onfault = _Lfsbail;
3257	ld	[%o2 + %lo(_cpcb)], %o2
3258	set	_Lfsbail, %o3
3259	st	%o3, [%o2 + PCB_ONFAULT]
3260	lduh	[%o0], %o0		! fetch the halfword
3261	retl				! made it
3262	st	%g0, [%o2 + PCB_ONFAULT]! but first clear onfault
3263
3264ENTRY(fusword)
3265	set	KERNBASE, %o2
3266	cmp	%o0, %o2		! if addr >= KERNBASE
3267	bgeu	Lfsbadaddr		!	return error
3268	EMPTY
3269	sethi	%hi(_cpcb), %o2		! cpcb->pcb_onfault = Lfserr;
3270	ld	[%o2 + %lo(_cpcb)], %o2
3271	set	Lfserr, %o3
3272	st	%o3, [%o2 + PCB_ONFAULT]
3273	lduh	[%o0], %o0		! fetch the halfword
3274	retl				! made it
3275	st	%g0, [%o2 + PCB_ONFAULT]! but first clear onfault
3276
3277ALTENTRY(fuibyte)
3278ENTRY(fubyte)
3279	set	KERNBASE, %o2
3280	cmp	%o0, %o2		! if addr >= KERNBASE
3281	bgeu	Lfsbadaddr		!	return error
3282	EMPTY
3283	sethi	%hi(_cpcb), %o2		! cpcb->pcb_onfault = Lfserr;
3284	ld	[%o2 + %lo(_cpcb)], %o2
3285	set	Lfserr, %o3
3286	st	%o3, [%o2 + PCB_ONFAULT]
3287	ldub	[%o0], %o0		! fetch the byte
3288	retl				! made it
3289	st	%g0, [%o2 + PCB_ONFAULT]! but first clear onfault
3290
3291ALTENTRY(suiword)
3292ENTRY(suword)
3293	set	KERNBASE, %o2
3294	cmp	%o0, %o2		! if addr >= KERNBASE ...
3295	bgeu	Lfsbadaddr
3296	EMPTY
3297	btst	3, %o0			! or has low bits set ...
3298	bnz	Lfsbadaddr		!	go return error
3299	EMPTY
3300	sethi	%hi(_cpcb), %o2		! cpcb->pcb_onfault = Lfserr;
3301	ld	[%o2 + %lo(_cpcb)], %o2
3302	set	Lfserr, %o3
3303	st	%o3, [%o2 + PCB_ONFAULT]
3304	st	%o1, [%o0]		! store the word
3305	st	%g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
3306	retl				! and return 0
3307	clr	%o0
3308
3309ENTRY(suswintr)
3310	set	KERNBASE, %o2
3311	cmp	%o0, %o2		! if addr >= KERNBASE
3312	bgeu	Lfsbadaddr		!	go return error
3313	EMPTY
3314	sethi	%hi(_cpcb), %o2		! cpcb->pcb_onfault = _Lfsbail;
3315	ld	[%o2 + %lo(_cpcb)], %o2
3316	set	_Lfsbail, %o3
3317	st	%o3, [%o2 + PCB_ONFAULT]
3318	sth	%o1, [%o0]		! store the halfword
3319	st	%g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
3320	retl				! and return 0
3321	clr	%o0
3322
3323ENTRY(susword)
3324	set	KERNBASE, %o2
3325	cmp	%o0, %o2		! if addr >= KERNBASE
3326	bgeu	Lfsbadaddr		!	go return error
3327	EMPTY
3328	sethi	%hi(_cpcb), %o2		! cpcb->pcb_onfault = Lfserr;
3329	ld	[%o2 + %lo(_cpcb)], %o2
3330	set	Lfserr, %o3
3331	st	%o3, [%o2 + PCB_ONFAULT]
3332	sth	%o1, [%o0]		! store the halfword
3333	st	%g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
3334	retl				! and return 0
3335	clr	%o0
3336
3337ALTENTRY(suibyte)
3338ENTRY(subyte)
3339	set	KERNBASE, %o2
3340	cmp	%o0, %o2		! if addr >= KERNBASE
3341	bgeu	Lfsbadaddr		!	go return error
3342	EMPTY
3343	sethi	%hi(_cpcb), %o2		! cpcb->pcb_onfault = Lfserr;
3344	ld	[%o2 + %lo(_cpcb)], %o2
3345	set	Lfserr, %o3
3346	st	%o3, [%o2 + PCB_ONFAULT]
3347	stb	%o1, [%o0]		! store the byte
3348	st	%g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
3349	retl				! and return 0
3350	clr	%o0
3351
3352/* probeget and probeset are meant to be used during autoconfiguration */
3353
3354/*
3355 * probeget(addr, size) caddr_t addr; int size;
3356 *
3357 * Read or write a (byte,word,longword) from the given address.
3358 * Like {fu,su}{byte,halfword,word} but our caller is supposed
3359 * to know what he is doing... the address can be anywhere.
3360 *
3361 * We optimize for space, rather than time, here.
3362 */
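/*
 * A typical (hypothetical) use while probing for a device:
 *
 *	if (probeget(addr, 1) == -1)
 *		return (0);		// nothing responded at addr
 */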
3363ENTRY(probeget)
3364	! %o0 = addr, %o1 = (1,2,4)
3365	set	KERNBASE, %o2
3366	cmp	%o0, %o2		! if addr < KERNBASE
3367	blu	Lfsbadaddr		!	go return error
3368	 EMPTY
3369	sethi	%hi(_cpcb), %o2
3370	ld	[%o2 + %lo(_cpcb)], %o2	! cpcb->pcb_onfault = Lfserr;
3371	set	Lfserr, %o5
3372	st	%o5, [%o2 + PCB_ONFAULT]
3373	btst	1, %o1
3374	bnz,a	0f			! if (len & 1)
3375	 ldub	[%o0], %o0		!	value = *(char *)addr;
33760:	btst	2, %o1
3377	bnz,a	0f			! if (len & 2)
3378	 lduh	[%o0], %o0		!	value = *(short *)addr;
33790:	btst	4, %o1
3380	bnz,a	0f			! if (len & 4)
3381	 ld	[%o0], %o0		!	value = *(int *)addr;
33820:	retl				! made it, clear onfault and return
3383	 st	%g0, [%o2 + PCB_ONFAULT]
3384
3385/*
3386 * probeset(addr, size, val) caddr_t addr; int size, val;
3387 *
3388 * As above, but we return 0 on success.
3389 */
3390ENTRY(probeset)
3391	! %o0 = addr, %o1 = (1,2,4), %o2 = val
3392	set	KERNBASE, %o2
3393	cmp	%o0, %o2		! if addr < KERNBASE
3394	blu	Lfsbadaddr		!	go return error
3395	 EMPTY
3396	sethi	%hi(_cpcb), %o2
3397	ld	[%o2 + %lo(_cpcb)], %o2	! cpcb->pcb_onfault = Lfserr;
3398	set	Lfserr, %o5
3399	st	%o5, [%o2 + PCB_ONFAULT]
3400	btst	1, %o1
3401	bnz,a	0f			! if (len & 1)
3402	 stb	%o2, [%o0]		!	*(char *)addr = value;
34030:	btst	2, %o1
3404	bnz,a	0f			! if (len & 2)
3405	 sth	%o2, [%o0]		!	*(short *)addr = value;
34060:	btst	4, %o1
3407	bnz,a	0f			! if (len & 4)
3408	 st	%o2, [%o0]		!	*(int *)addr = value;
34090:	clr	%o0			! made it, clear onfault and return 0
3410	retl
3411	 st	%g0, [%o2 + PCB_ONFAULT]
3412
3413/*
3414 * Insert entry into doubly-linked queue.
3415 * We could just do this in C, but gcc does not do leaves well (yet).
3416 */
3417ENTRY(_insque)
3418	! %o0 = e = what to insert; %o1 = after = entry to insert after
3419	st	%o1, [%o0 + 4]		! e->prev = after;
3420	ld	[%o1], %o2		! tmp = after->next;
3421	st	%o2, [%o0]		! e->next = tmp;
3422	st	%o0, [%o1]		! after->next = e;
3423	retl
3424	st	%o0, [%o2 + 4]		! tmp->prev = e;
3425
3426
3427/*
3428 * Remove entry from doubly-linked queue.
3429 */
3430ENTRY(_remque)
3431	! %o0 = e = what to remove
3432	ld	[%o0], %o1		! n = e->next;
3433	ld	[%o0 + 4], %o2		! p = e->prev;
3434	st	%o2, [%o1 + 4]		! n->prev = p;
3435	retl
3436	st	%o1, [%o2]		! p->next = n;
3437
3438/*
3439 * copywords(src, dst, nbytes)
3440 *
3441 * Copy `nbytes' bytes from src to dst, both of which are word-aligned;
3442 * nbytes is a multiple of four.  It may, however, be zero, in which case
3443 * nothing is to be copied.
3444 */
3445ENTRY(copywords)
3446	! %o0 = src, %o1 = dst, %o2 = nbytes
3447	b	1f
3448	deccc	4, %o2
34490:
3450	st	%o3, [%o1 + %o2]
3451	deccc	4, %o2			! while ((n -= 4) >= 0)
34521:
3453	bge,a	0b			!    *(int *)(dst+n) = *(int *)(src+n);
3454	ld	[%o0 + %o2], %o3
3455	retl
3456	nop
3457
3458/*
3459 * qcopy(src, dst, nbytes)
3460 *
3461 * (q for `quad' or `quick', as opposed to b for byte/block copy)
3462 *
3463 * Just like copywords, but everything is multiples of 8.
3464 */
3465ENTRY(qcopy)
3466	b	1f
3467	deccc	8, %o2
34680:
3469	std	%o4, [%o1 + %o2]
3470	deccc	8, %o2
34711:
3472	bge,a	0b
3473	ldd	[%o0 + %o2], %o4
3474	retl
3475	nop
3476
3477/*
3478 * qzero(addr, nbytes)
3479 *
3480 * Zeroes `nbytes' bytes of a quad-aligned virtual address,
3481 * where nbytes is itself a multiple of 8.
3482 */
3483ENTRY(qzero)
3484	! %o0 = addr, %o1 = len (in bytes)
3485	clr	%g1
34860:
3487	deccc	8, %o1			! while ((n -= 8) >= 0)
3488	bge,a	0b
3489	std	%g0, [%o0 + %o1]	!	*(quad *)(addr + n) = 0;
3490	retl
3491	nop
3492
3493/*
3494 * bzero(addr, len)
3495 *
3496 * We should unroll the loop, but at the moment this would
3497 * gain nothing since the `std' instructions are what limits us.
3498 */
3499ALTENTRY(blkclr)
3500ENTRY(bzero)
3501	! %o0 = addr, %o1 = len
3502
3503	! Optimize a common case: addr and len are both multiples of 8.
3504	or	%o0, %o1, %o2
3505	btst	7, %o2			! ((addr | len) & 7) != 0?
3506	bnz	1f			! if so, cannot optimize
3507	clr	%g1			! in any case, we want g1=0
3508
3509	/* `Good' operands, can just store doubles. */
35100:
3511	deccc	8, %o1			! while ((len -= 8) >= 0)
3512	bge,a	0b
3513	std	%g0, [%o0 + %o1]	!	*(quad *)(addr + len) = 0;
3514	retl
3515	nop
3516
3517	/*
3518	 * Either the address is unaligned, or the count is not a
3519	 * multiple of 8, or both.  We will have to align the address
3520	 * in order to use anything `better' than stb.
3521	 */
35221:
3523	cmp	%o1, 15			! len >= 15?
3524	bge,a	Lstd			! yes, use std
3525	btst	1, %o0			! (but first check alignment)
3526
3527	! not enough to bother: do byte-at-a-time loop.
35282:
3529	deccc	%o1			! while (--len >= 0)
3530	bge,a	2b
3531	stb	%g0, [%o0 + %o1]	!	addr[len] = 0;
3532	retl
3533	nop
3534
3535Lstd:
3536	/*
3537	 * There are at least 15 bytes to zero.
3538	 * We may have to zero some initial stuff to align
3539	 * the address.
3540	 */
3541	bz,a	1f			! if (addr & 1) {
3542	btst	2, %o0
3543	stb	%g0, [%o0]		!	*addr = 0;
3544	inc	%o0			!	addr++;
3545	dec	%o1			!	len--;
3546	btst	2, %o0			! }
35471:
3548	bz,a	1f			! if (addr & 2) {
3549	btst	4, %o0
3550	sth	%g0, [%o0]		!	*(short *)addr = 0;
3551	inc	2, %o0			!	addr += 2;
3552	dec	2, %o1			!	len -= 2;
3553	btst	4, %o0			! }
35541:
3555	bz	1f			! if (addr & 4) {
3556	dec	8, %o1
3557	st	%g0, [%o0]		!	*(int *)addr = 0;
3558	inc	4, %o0			!	addr += 4;
3559	dec	4, %o1			!	len -= 4;
3560					! }
3561	/*
3562	 * Address is double word aligned; len is 8 less than
3563	 * the number of bytes remaining (i.e., len is 0 if
3564	 * the remaining count is 8, 1 if it is 9, etc.).
3565	 */
35661:
3567	std	%g0, [%o0]		! do {
35682:					!	*(quad *)addr = 0;
3569	inc	8, %o0			!	addr += 8;
3570	deccc	8, %o1			! } while ((len -= 8) >= 0);
3571	bge,a	2b
3572	std	%g0, [%o0]
3573
3574	/*
3575	 * Len is in [-8..-1] where -8 => done, -7 => 1 byte to zero,
3576	 * -6 => two bytes, etc.  Mop up this remainder, if any.
3577	 */
3578	btst	4, %o1
3579	bz	1f			! if (len & 4) {
3580	btst	2, %o1
3581	st	%g0, [%o0]		!	*(int *)addr = 0;
3582	inc	4, %o0			!	addr += 4;
35831:
3584	bz	1f			! if (len & 2) {
3585	btst	1, %o1
3586	sth	%g0, [%o0]		!	*(short *)addr = 0;
3587	inc	2, %o0			!	addr += 2;
35881:
3589	bnz,a	1f			! if (len & 1)
3590	stb	%g0, [%o0]		!	*addr = 0;
35911:
3592	retl
3593	nop
3594
3595/*
3596 * kernel bcopy/memcpy
3597 * Assumes regions do not overlap; has no useful return value.
3598 *
3599 * Must not use %g7 (see copyin/copyout above).
3600 */
3601
3602#define	BCOPY_SMALL	32	/* if < 32, copy by bytes */
3603
3604ENTRY(memcpy)
3605	/*
3606	 * Swap args for bcopy.  Gcc generates calls to memcpy for
3607	 * structure assignments.
3608	 */
3609	mov	%o0, %o3
3610	mov	%o1, %o0
3611	mov	%o3, %o1
3612ENTRY(bcopy)
3613	cmp	%o2, BCOPY_SMALL
3614Lbcopy_start:
3615	bge,a	Lbcopy_fancy	! if >= this many, go be fancy.
3616	btst	7, %o0		! (part of being fancy)
3617
3618	/*
3619	 * Not much to copy, just do it a byte at a time.
3620	 */
3621	deccc	%o2		! while (--len >= 0)
3622	bl	1f
3623	EMPTY
36240:
3625	inc	%o0
3626	ldsb	[%o0 - 1], %o4	!	(++dst)[-1] = *src++;
3627	stb	%o4, [%o1]
3628	deccc	%o2
3629	bge	0b
3630	inc	%o1
36311:
3632	retl
3633	nop
3634	/* NOTREACHED */
3635
3636	/*
3637	 * Plenty of data to copy, so try to do it optimally.
3638	 */
3639Lbcopy_fancy:
3640	! check for common case first: everything lines up.
3641!	btst	7, %o0		! done already
3642	bne	1f
3643	EMPTY
3644	btst	7, %o1
3645	be,a	Lbcopy_doubles
3646	dec	8, %o2		! if all lined up, len -= 8, goto bcopy_doubles
3647
3648	! If the low bits match, we can make these line up.
36491:
3650	xor	%o0, %o1, %o3	! t = src ^ dst;
3651	btst	1, %o3		! if (t & 1) {
3652	be,a	1f
3653	btst	1, %o0		! [delay slot: if (src & 1)]
3654
3655	! low bits do not match, must copy by bytes.
36560:
3657	ldsb	[%o0], %o4	!	do {
3658	inc	%o0		!		(++dst)[-1] = *src++;
3659	inc	%o1
3660	deccc	%o2
3661	bnz	0b		!	} while (--len != 0);
3662	stb	%o4, [%o1 - 1]
3663	retl
3664	nop
3665	/* NOTREACHED */
3666
3667	! lowest bit matches, so we can copy by words, if nothing else
36681:
3669	be,a	1f		! if (src & 1) {
3670	btst	2, %o3		! [delay slot: if (t & 2)]
3671
3672	! although low bits match, both are 1: must copy 1 byte to align
3673	ldsb	[%o0], %o4	!	*dst++ = *src++;
3674	stb	%o4, [%o1]
3675	inc	%o0
3676	inc	%o1
3677	dec	%o2		!	len--;
3678	btst	2, %o3		! } [if (t & 2)]
36791:
3680	be,a	1f		! if (t & 2) {
3681	btst	2, %o0		! [delay slot: if (src & 2)]
3682	dec	2, %o2		!	len -= 2;
36830:
3684	ldsh	[%o0], %o4	!	do {
3685	sth	%o4, [%o1]	!		*(short *)dst = *(short *)src;
3686	inc	2, %o0		!		dst += 2, src += 2;
3687	deccc	2, %o2		!	} while ((len -= 2) >= 0);
3688	bge	0b
3689	inc	2, %o1
3690	b	Lbcopy_mopb	!	goto mop_up_byte;
3691	btst	1, %o2		! } [delay slot: if (len & 1)]
3692	/* NOTREACHED */
3693
3694	! low two bits match, so we can copy by longwords
36951:
3696	be,a	1f		! if (src & 2) {
3697	btst	4, %o3		! [delay slot: if (t & 4)]
3698
3699	! although low 2 bits match, they are 10: must copy one short to align
3700	ldsh	[%o0], %o4	!	*(short *)dst = *(short *)src;
3701	sth	%o4, [%o1]
3702	inc	2, %o0		!	src += 2;
3703	inc	2, %o1		!	dst += 2;
3704	dec	2, %o2		!	len -= 2;
3705	btst	4, %o3		! } [if (t & 4)]
37061:
3707	be,a	1f		! if (t & 4) {
3708	btst	4, %o0		! [delay slot: if (src & 4)]
3709	dec	4, %o2		!	len -= 4;
37100:
3711	ld	[%o0], %o4	!	do {
3712	st	%o4, [%o1]	!		*(int *)dst = *(int *)src;
3713	inc	4, %o0		!		dst += 4, src += 4;
3714	deccc	4, %o2		!	} while ((len -= 4) >= 0);
3715	bge	0b
3716	inc	4, %o1
3717	b	Lbcopy_mopw	!	goto mop_up_word_and_byte;
3718	btst	2, %o2		! } [delay slot: if (len & 2)]
3719	/* NOTREACHED */
3720
3721	! low three bits match, so we can copy by doublewords
37221:
3723	be	1f		! if (src & 4) {
3724	dec	8, %o2		! [delay slot: len -= 8]
3725	ld	[%o0], %o4	!	*(int *)dst = *(int *)src;
3726	st	%o4, [%o1]
3727	inc	4, %o0		!	dst += 4, src += 4, len -= 4;
3728	inc	4, %o1
3729	dec	4, %o2		! }
37301:
3731Lbcopy_doubles:
3732	ldd	[%o0], %o4	! do {
3733	std	%o4, [%o1]	!	*(double *)dst = *(double *)src;
3734	inc	8, %o0		!	dst += 8, src += 8;
3735	deccc	8, %o2		! } while ((len -= 8) >= 0);
3736	bge	Lbcopy_doubles
3737	inc	8, %o1
3738
3739	! check for a usual case again (save work)
3740	btst	7, %o2		! if ((len & 7) == 0)
3741	be	Lbcopy_done	!	goto bcopy_done;
3742
3743	btst	4, %o2		! if ((len & 4) == 0)
3744	be,a	Lbcopy_mopw	!	goto mop_up_word_and_byte;
3745	btst	2, %o2		! [delay slot: if (len & 2)]
3746	ld	[%o0], %o4	!	*(int *)dst = *(int *)src;
3747	st	%o4, [%o1]
3748	inc	4, %o0		!	src += 4;
3749	inc	4, %o1		!	dst += 4;
3750	btst	2, %o2		! } [if (len & 2)]
3751
37521:
3753	! mop up trailing word (if present) and byte (if present).
3754Lbcopy_mopw:
3755	be	Lbcopy_mopb	! no word, go mop up byte
3756	btst	1, %o2		! [delay slot: if (len & 1)]
3757	ldsh	[%o0], %o4	! *(short *)dst = *(short *)src;
3758	be	Lbcopy_done	! if ((len & 1) == 0) goto done;
3759	sth	%o4, [%o1]
3760	ldsb	[%o0 + 2], %o4	! dst[2] = src[2];
3761	retl
3762	stb	%o4, [%o1 + 2]
3763	/* NOTREACHED */
3764
3765	! mop up trailing byte (if present).
3766Lbcopy_mopb:
3767	bne,a	1f
3768	ldsb	[%o0], %o4
3769
3770Lbcopy_done:
3771	retl
3772	nop
3773
37741:
3775	retl
3776	stb	%o4,[%o1]
3777/*
3778 * ovbcopy(src, dst, len): like bcopy, but regions may overlap.
3779 */
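/*
 * In C, the overlap handling amounts to (a sketch; src, dst and len are
 * %o0, %o1 and %o2):
 *
 *	if (src >= dst)
 *		bcopy(src, dst, len);
 *	else {
 *		src += len, dst += len;
 *		while (--len >= 0)
 *			*--dst = *--src;
 *	}
 *
 * A forward copy is safe whenever src >= dst; otherwise we start at the
 * end and work backwards, a word, halfword or byte at a time as the
 * alignment of the two regions allows.
 */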
3780ENTRY(ovbcopy)
3781	cmp	%o0, %o1	! src < dst?
3782	bgeu	Lbcopy_start	! no, go copy forwards as via bcopy
3783	cmp	%o2, BCOPY_SMALL! (check length for doublecopy first)
3784
3785	/*
3786	 * Since src comes before dst, and the regions might overlap,
3787	 * we have to do the copy starting at the end and working backwards.
3788	 */
3789	add	%o2, %o0, %o0	! src += len
3790	add	%o2, %o1, %o1	! dst += len
3791	bge,a	Lback_fancy	! if len >= BCOPY_SMALL, go be fancy
3792	btst	3, %o0
3793
3794	/*
3795	 * Not much to copy, just do it a byte at a time.
3796	 */
3797	deccc	%o2		! while (--len >= 0)
3798	bl	1f
3799	EMPTY
38000:
3801	dec	%o0		!	*--dst = *--src;
3802	ldsb	[%o0], %o4
3803	dec	%o1
3804	deccc	%o2
3805	bge	0b
3806	stb	%o4, [%o1]
38071:
3808	retl
3809	nop
3810
3811	/*
3812	 * Plenty to copy, try to be optimal.
3813	 * We only bother with word/halfword/byte copies here.
3814	 */
3815Lback_fancy:
3816!	btst	3, %o0		! done already
3817	bnz	1f		! if ((src & 3) == 0 &&
3818	btst	3, %o1		!     (dst & 3) == 0)
3819	bz,a	Lback_words	!	goto words;
3820	dec	4, %o2		! (done early for word copy)
3821
38221:
3823	/*
3824	 * See if the low bits match.
3825	 */
3826	xor	%o0, %o1, %o3	! t = src ^ dst;
3827	btst	1, %o3
3828	bz,a	3f		! if (t & 1) == 0, can do better
3829	btst	1, %o0
3830
3831	/*
3832	 * Nope; gotta do byte copy.
3833	 */
38342:
3835	dec	%o0		! do {
3836	ldsb	[%o0], %o4	!	*--dst = *--src;
3837	dec	%o1
3838	deccc	%o2		! } while (--len != 0);
3839	bnz	2b
3840	stb	%o4, [%o1]
3841	retl
3842	nop
3843
38443:
3845	/*
3846	 * Can do halfword or word copy, but might have to copy 1 byte first.
3847	 */
3848!	btst	1, %o0		! done earlier
3849	bz,a	4f		! if (src & 1) {	/* copy 1 byte */
3850	btst	2, %o3		! (done early)
3851	dec	%o0		!	*--dst = *--src;
3852	ldsb	[%o0], %o4
3853	dec	%o1
3854	stb	%o4, [%o1]
3855	dec	%o2		!	len--;
3856	btst	2, %o3		! }
3857
38584:
3859	/*
3860	 * See if we can do a word copy ((t&2) == 0).
3861	 */
3862!	btst	2, %o3		! done earlier
3863	bz,a	6f		! if (t & 2) == 0, can do word copy
3864	btst	2, %o0		! (src&2, done early)
3865
3866	/*
3867	 * Gotta do halfword copy.
3868	 */
3869	dec	2, %o2		! len -= 2;
38705:
3871	dec	2, %o0		! do {
3872	ldsh	[%o0], %o4	!	src -= 2;
3873	dec	2, %o1		!	dst -= 2;
3874	deccc	2, %o2		!	*(short *)dst = *(short *)src;
3875	bge	5b		! } while ((len -= 2) >= 0);
3876	sth	%o4, [%o1]
3877	b	Lback_mopb	! goto mop_up_byte;
3878	btst	1, %o2		! (len&1, done early)
3879
38806:
3881	/*
3882	 * We can do word copies, but we might have to copy
3883	 * one halfword first.
3884	 */
3885!	btst	2, %o0		! done already
3886	bz	7f		! if (src & 2) {
3887	dec	4, %o2		! (len -= 4, done early)
3888	dec	2, %o0		!	src -= 2, dst -= 2;
3889	ldsh	[%o0], %o4	!	*(short *)dst = *(short *)src;
3890	dec	2, %o1
3891	sth	%o4, [%o1]
3892	dec	2, %o2		!	len -= 2;
3893				! }
3894
38957:
3896Lback_words:
3897	/*
3898	 * Do word copies (backwards), then mop up trailing halfword
3899	 * and byte if any.
3900	 */
3901!	dec	4, %o2		! len -= 4, done already
39020:				! do {
3903	dec	4, %o0		!	src -= 4;
3904	dec	4, %o1		!	dst -= 4;
3905	ld	[%o0], %o4	!	*(int *)dst = *(int *)src;
3906	deccc	4, %o2		! } while ((len -= 4) >= 0);
3907	bge	0b
3908	st	%o4, [%o1]
3909
3910	/*
3911	 * Check for trailing shortword.
3912	 */
3913	btst	2, %o2		! if (len & 2) {
3914	bz,a	1f
3915	btst	1, %o2		! (len&1, done early)
3916	dec	2, %o0		!	src -= 2, dst -= 2;
3917	ldsh	[%o0], %o4	!	*(short *)dst = *(short *)src;
3918	dec	2, %o1
3919	sth	%o4, [%o1]	! }
3920	btst	1, %o2
3921
3922	/*
3923	 * Check for trailing byte.
3924	 */
39251:
3926Lback_mopb:
3927!	btst	1, %o2		! (done already)
3928	bnz,a	1f		! if (len & 1) {
3929	ldsb	[%o0 - 1], %o4	!	b = src[-1];
3930	retl
3931	nop
39321:
3933	retl			!	dst[-1] = b;
3934	stb	%o4, [%o1 - 1]	! }
3935
3936
3937/*
3938 * savefpstate(f) struct fpstate *f;
3939 *
3940 * Store the current FPU state.  The first `st %fsr' may cause a trap;
3941 * our trap handler knows how to recover (by `returning' to savefpcont).
3942 */
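/*
 * In outline, the code below does (a C-level sketch; f is the argument
 * in %o0, and getpsr/setpsr/getfsr/fsr_qfront are just names for the
 * rd/wr %psr, st %fsr and std %fq accesses):
 *
 *	setpsr(getpsr() | PSR_EF);	enable the FPU
 *	f->fs_fsr = getfsr();		may trap; see savefpcont below
 *	qsize = 0;
 *	while (f->fs_fsr & FSR_QNE) {	drain any pending queue
 *		f->fs_queue[qsize++] = fsr_qfront();
 *		f->fs_fsr = getfsr();
 *	}
 *	f->fs_qsize = qsize;
 *	store %f0..%f31 into the FS_REGS area;
 */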
3943ENTRY(savefpstate)
3944	rd	%psr, %o1		! enable FP before we begin
3945	set	PSR_EF, %o2
3946	or	%o1, %o2, %o1
3947	wr	%o1, 0, %psr
3948	/* do some setup work while we wait for PSR_EF to turn on */
3949	set	FSR_QNE, %o5		! QNE = 0x2000, too big for immediate
3950	clr	%o3			! qsize = 0;
3951	nop				! (still waiting for PSR_EF)
3952special_fp_store:
3953	st	%fsr, [%o0 + FS_FSR]	! f->fs_fsr = getfsr();
3954	/*
3955	 * Even if the preceding instruction did not trap, the queue
3956	 * is not necessarily empty: this state save might be happening
3957	 * because user code tried to store %fsr and took the FPU
3958	 * from `exception pending' mode to `exception' mode.
3959	 * So we still have to check the blasted QNE bit.
3960	 * With any luck it will usually not be set.
3961	 */
3962	ld	[%o0 + FS_FSR], %o4	! if (f->fs_fsr & QNE)
3963	btst	%o5, %o4
3964	bnz	Lfp_storeq		!	goto storeq;
3965	 std	%f0, [%o0 + FS_REGS + (4*0)]	! f->fs_f0 = etc;
3966Lfp_finish:
3967	st	%o3, [%o0 + FS_QSIZE]	! f->fs_qsize = qsize;
3968	std	%f2, [%o0 + FS_REGS + (4*2)]
3969	std	%f4, [%o0 + FS_REGS + (4*4)]
3970	std	%f6, [%o0 + FS_REGS + (4*6)]
3971	std	%f8, [%o0 + FS_REGS + (4*8)]
3972	std	%f10, [%o0 + FS_REGS + (4*10)]
3973	std	%f12, [%o0 + FS_REGS + (4*12)]
3974	std	%f14, [%o0 + FS_REGS + (4*14)]
3975	std	%f16, [%o0 + FS_REGS + (4*16)]
3976	std	%f18, [%o0 + FS_REGS + (4*18)]
3977	std	%f20, [%o0 + FS_REGS + (4*20)]
3978	std	%f22, [%o0 + FS_REGS + (4*22)]
3979	std	%f24, [%o0 + FS_REGS + (4*24)]
3980	std	%f26, [%o0 + FS_REGS + (4*26)]
3981	std	%f28, [%o0 + FS_REGS + (4*28)]
3982	retl
3983	 std	%f30, [%o0 + FS_REGS + (4*30)]
3984
3985/*
3986 * Store the (now known nonempty) FP queue.
3987 * We have to reread the fsr each time in order to get the new QNE bit.
3988 */
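/*
 * Note that %o3 counts bytes, not queue entries, while the loop runs
 * (it doubles as the store offset); it becomes a proper entry count
 * only on the way back to Lfp_finish.  Roughly:
 *
 *	n = 0;
 *	do {
 *		*(quad *)((char *)f->fs_queue + n) = fsr_qfront();
 *		n += 8;
 *		f->fs_fsr = getfsr();
 *	} while (f->fs_fsr & FSR_QNE);
 *	qsize = n >> 3;
 */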
3989Lfp_storeq:
3990	add	%o0, FS_QUEUE, %o1	! q = &f->fs_queue[0];
39911:
3992	std	%fq, [%o1 + %o3]	! q[qsize++] = fsr_qfront();
3993	st	%fsr, [%o0 + FS_FSR]	! reread fsr
3994	ld	[%o0 + FS_FSR], %o4	! if fsr & QNE, loop
3995	btst	%o5, %o4
3996	bnz	1b
3997	 inc	8, %o3
3998	b	Lfp_finish		! set qsize and finish storing fregs
3999	 srl	%o3, 3, %o3		! (but first fix qsize)
4000
4001/*
4002 * The fsr store trapped.  Do it again; this time it will not trap.
4003 * We could just have the trap handler return to the `st %fsr', but
4004 * if for some reason it *does* trap, that would lock us into a tight
4005 * loop.  This way we panic instead.  Whoopee.
4006 */
4007savefpcont:
4008	b	special_fp_store + 4	! continue
4009	 st	%fsr, [%o0 + FS_FSR]	! but first finish the %fsr store
4010
4011/*
4012 * Load FPU state.
4013 */
4014ENTRY(loadfpstate)
4015	rd	%psr, %o1		! enable FP before we begin
4016	set	PSR_EF, %o2
4017	or	%o1, %o2, %o1
4018	wr	%o1, 0, %psr
4019	nop; nop; nop			! paranoia
4020	ldd	[%o0 + FS_REGS + (4*0)], %f0
4021	ldd	[%o0 + FS_REGS + (4*2)], %f2
4022	ldd	[%o0 + FS_REGS + (4*4)], %f4
4023	ldd	[%o0 + FS_REGS + (4*6)], %f6
4024	ldd	[%o0 + FS_REGS + (4*8)], %f8
4025	ldd	[%o0 + FS_REGS + (4*10)], %f10
4026	ldd	[%o0 + FS_REGS + (4*12)], %f12
4027	ldd	[%o0 + FS_REGS + (4*14)], %f14
4028	ldd	[%o0 + FS_REGS + (4*16)], %f16
4029	ldd	[%o0 + FS_REGS + (4*18)], %f18
4030	ldd	[%o0 + FS_REGS + (4*20)], %f20
4031	ldd	[%o0 + FS_REGS + (4*22)], %f22
4032	ldd	[%o0 + FS_REGS + (4*24)], %f24
4033	ldd	[%o0 + FS_REGS + (4*26)], %f26
4034	ldd	[%o0 + FS_REGS + (4*28)], %f28
4035	ldd	[%o0 + FS_REGS + (4*30)], %f30
4036	retl
4037	 ld	[%o0 + FS_FSR], %fsr	! setfsr(f->fs_fsr);
4038
4039/*
4040 * ienab_bis(bis) int bis;
4041 * ienab_bic(bic) int bic;
4042 *
4043 * Set and clear bits in the interrupt register.
4044 * Since there are no read-modify-write instructions for this,
4045 * and one of the interrupts is nonmaskable, we must disable traps.
4046 *
4047 * NB: ___main is defined here for gcc-2 idiocy.  Ignore it.
4048 */
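/*
 * Both routines follow the same pattern (a C sketch; getpsr/setpsr
 * stand for the rd/wr %psr instructions):
 *
 *	psr = getpsr();
 *	setpsr(psr & ~PSR_ET);		traps off (the code gets the same
 *					effect from `wr %o2, PSR_ET, %psr',
 *					since wr xors its two operands)
 *	*IE_reg_addr |= bis;		or &= ~bic, for ienab_bic
 *	setpsr(psr);			traps back on
 *
 * plus the nops needed to wait out the wr %psr delay.
 */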
4049ENTRY(ienab_bis)
4050	! %o0 = bits to set
4051	rd	%psr, %o2
4052	wr	%o2, PSR_ET, %psr	! disable traps
4053	nop; nop			! 3-instr delay until ET turns off
4054	sethi	%hi(IE_reg_addr), %o3
4055	ldub	[%o3 + %lo(IE_reg_addr)], %o4
4056	or	%o4, %o0, %o4		! *IE_reg_addr |= bis;
4057	stb	%o4, [%o3 + %lo(IE_reg_addr)]
4058	wr	%o2, 0, %psr		! reenable traps
4059	nop
4060	.globl	___main
4061___main:
4062	retl
4063	 nop
4064
4065ENTRY(ienab_bic)
4066	! %o0 = bits to clear
4067	rd	%psr, %o2
4068	wr	%o2, PSR_ET, %psr	! disable traps
4069	nop; nop
4070	sethi	%hi(IE_reg_addr), %o3
4071	ldub	[%o3 + %lo(IE_reg_addr)], %o4
4072	andn	%o4, %o0, %o4		! *IE_reg_addr &=~ bic;
4073	stb	%o4, [%o3 + %lo(IE_reg_addr)]
4074	wr	%o2, 0, %psr		! reenable traps
4075	nop
4076	retl
4077	 nop
4078
4079/*
4080 * ffs(), using table lookup.
4081 * The process switch code shares the table, so we just put the
4082 * whole thing here.
4083 */
4084ffstab:
4085	.byte	-24,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 00-0f */
4086	.byte	5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* 10-1f */
4087	.byte	6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* 20-2f */
4088	.byte	5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* 30-3f */
4089	.byte	7,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* 40-4f */
4090	.byte	5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* 50-5f */
4091	.byte	6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* 60-6f */
4092	.byte	5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* 70-7f */
4093	.byte	8,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* 80-8f */
4094	.byte	5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* 90-9f */
4095	.byte	6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* a0-af */
4096	.byte	5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* b0-bf */
4097	.byte	7,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* c0-cf */
4098	.byte	5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* d0-df */
4099	.byte	6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* e0-ef */
4100	.byte	5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1	/* f0-ff */
4101
4102/*
4103 * We use a table lookup on each byte.
4104 *
4105 * In each section below, %o1 is the current byte (0, 1, 2, or 3).
4106 * The last byte is handled specially: for the first three,
4107 * if that byte is nonzero, we return the table value
4108 * (plus 0, 8, or 16 for the byte number), but for the last
4109 * one, we just return the table value plus 24.  This means
4110 * that ffstab[0] must be -24 so that ffs(0) will return 0.
4111 */
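/*
 * Equivalent C (a sketch; ffstab is the signed byte table above):
 *
 *	int
 *	ffs(int i)
 *	{
 *		unsigned v = i;
 *
 *		if (v & 0xff)
 *			return (ffstab[v & 0xff]);
 *		if ((v >>= 8) & 0xff)
 *			return (ffstab[v & 0xff] + 8);
 *		if ((v >>= 8) & 0xff)
 *			return (ffstab[v & 0xff] + 16);
 *		return (ffstab[v >> 8] + 24);
 *	}
 */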
4112ENTRY(ffs)
4113	set	ffstab, %o2
4114	andcc	%o0, 0xff, %o1	! get low byte
4115	bz,a	1f		! try again if 0
4116	srl	%o0, 8, %o0	! delay slot, get ready for next byte
4117
4118	retl			! return ffstab[%o1]
4119	ldsb	[%o2 + %o1], %o0
4120
41211:
4122	andcc	%o0, 0xff, %o1	! byte 1 like byte 0...
4123	bz,a	2f
4124	srl	%o0, 8, %o0	! (use delay to prepare for byte 2)
4125
4126	ldsb	[%o2 + %o1], %o0
4127	retl			! return ffstab[%o1] + 8
4128	add	%o0, 8, %o0
4129
41302:
4131	andcc	%o0, 0xff, %o1
4132	bz,a	3f
4133	srl	%o0, 8, %o0	! (prepare for byte 3)
4134
4135	ldsb	[%o2 + %o1], %o0
4136	retl			! return ffstab[%o1] + 16
4137	add	%o0, 16, %o0
4138
41393:				! just return ffstab[%o0] + 24
4140	ldsb	[%o2 + %o0], %o0
4141	retl
4142	add	%o0, 24, %o0
4143
4144/*
4145 * Here is a very good random number generator.  This implementation is
4146 * based on ``Two Fast Implementations of the "Minimal Standard" Random
4147 * based on ``Two Fast Implementations of the `Minimal Standard' Random
4148 * Number Generator'', David G. Carta, Communications of the ACM, Jan 1990,
4149 */
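/*
 * In C, the code below computes (a sketch; randseed is the seed word
 * declared just below):
 *
 *	unsigned long long p = 16807ULL * randseed;	46-bit product
 *	unsigned lo = p & 0x7fffffff, hi = p >> 31;
 *	unsigned r = lo + hi;
 *	if (r & 0x80000000)
 *		r = (r + 1) & 0x7fffffff;	i.e. subtract 2^31 - 1
 *	return (randseed = r);
 *
 * that is, randseed * 16807 mod (2^31 - 1) without a divide, which is
 * Carta's trick.  The mulscc sequence does the multiply one bit per
 * step, leaving the high part of the product in %o2 and the low part
 * in %y; the shift/mask/or code then reassembles `lo' and `hi'.
 */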
4150	.data
4151randseed:
4152	.word	1
4153	.text
4154ENTRY(random)
4155	sethi	%hi(16807), %o1
4156	wr	%o1, %lo(16807), %y
4157	 sethi	%hi(randseed), %g1
4158	 ld	[%g1 + %lo(randseed)], %o0
4159	 andcc	%g0, 0, %o2
4160	mulscc  %o2, %o0, %o2
4161	mulscc  %o2, %o0, %o2
4162	mulscc  %o2, %o0, %o2
4163	mulscc  %o2, %o0, %o2
4164	mulscc  %o2, %o0, %o2
4165	mulscc  %o2, %o0, %o2
4166	mulscc  %o2, %o0, %o2
4167	mulscc  %o2, %o0, %o2
4168	mulscc  %o2, %o0, %o2
4169	mulscc  %o2, %o0, %o2
4170	mulscc  %o2, %o0, %o2
4171	mulscc  %o2, %o0, %o2
4172	mulscc  %o2, %o0, %o2
4173	mulscc  %o2, %o0, %o2
4174	mulscc  %o2, %o0, %o2
4175	mulscc  %o2, %g0, %o2
4176	rd	%y, %o3
4177	srl	%o2, 16, %o1
4178	set	0xffff, %o4
4179	and	%o4, %o2, %o0
4180	sll	%o0, 15, %o0
4181	srl	%o3, 17, %o3
4182	or	%o3, %o0, %o0
4183	addcc	%o0, %o1, %o0
4184	bl	1f
4185	 sethi	%hi(0x7fffffff), %o1
4186	retl
4187	 st	%o0, [%g1 + %lo(randseed)]
41881:
4189	or	%o1, %lo(0x7fffffff), %o1
4190	add	%o0, 1, %o0
4191	and	%o1, %o0, %o0
4192	retl
4193	 st	%o0, [%g1 + %lo(randseed)]
4194
4195/*
4196 * void microtime(struct timeval *tv)
4197 *
4198 * LBL's sparc bsd 'microtime': We don't need to spl (so this routine
4199 * can be a leaf routine) and we don't keep a 'last' timeval (there
4200 * can't be two calls to this routine in a microsecond).  This seems to
4201 * be about 20 times faster than the Sun code on an SS-2. - vj
4202 *
4203 * Read time values from slowest-changing to fastest-changing,
4204 * then re-read out to slowest.  If the values read before
4205 * the innermost match those read after, the innermost value
4206 * is consistent with the outer values.  If not, it may not
4207 * be and we must retry.  Typically this loop runs only once;
4208 * occasionally it runs twice, and only rarely does it run longer.
4209 */
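/*
 * In C, the loop and fixup below are roughly (tv is the argument;
 * `counter' stands for the timer register mapped at TIMERREG_VA):
 *
 *	do {
 *		sec = time.tv_sec; usec = time.tv_usec;
 *		t = counter;
 *	} while (sec != time.tv_sec || usec != time.tv_usec);
 *	u = (unsigned)t >> TMR_SHIFT;		counter ticks -> usec
 *	if (t < 0) {				limit bit set: timer has
 *		usec += tick;			wrapped but the clock
 *		u &= TMR_MASK;			interrupt is still pending
 *	}
 *	usec += u;
 *	if (usec >= 1000000) { sec++; usec -= 1000000; }
 *	tv->tv_sec = sec; tv->tv_usec = usec;
 */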
4210ENTRY(microtime)
4211	sethi	%hi(_time), %g2
4212	sethi	%hi(TIMERREG_VA), %g3
42131:
4214	ldd	[%g2+%lo(_time)], %o2		! time.tv_sec & time.tv_usec
4215	ld	[%g3+%lo(TIMERREG_VA)], %o4	! usec counter
4216	ldd	[%g2+%lo(_time)], %g4		! see if time values changed
4217	cmp	%g4, %o2
4218	bne	1b				! if time.tv_sec changed
4219	 cmp	%g5, %o3
4220	bne	1b				! if time.tv_usec changed
4221	 tst	%o4
4222
4223	bpos	2f				! reached limit?
4224	 srl	%o4, TMR_SHIFT, %o4		! convert counter to usec
4225	sethi	%hi(_tick), %g4			! bump usec by 1 tick
4226	ld	[%g4+%lo(_tick)], %o1
4227	set	TMR_MASK, %g5
4228	add	%o1, %o3, %o3
4229	and	%o4, %g5, %o4
42302:
4231	add	%o4, %o3, %o3
4232	set	1000000, %g5			! normalize usec value
4233	cmp	%o3, %g5
4234	bl,a	3f
4235	 st	%o2, [%o0]			! (should be able to std here)
4236	add	%o2, 1, %o2			! overflow
4237	sub	%o3, %g5, %o3
4238	st	%o2, [%o0]			! (should be able to std here)
42393:
4240	retl
4241	 st	%o3, [%o0+4]
4242
4243/*
4244 * This procedure exists to make stdarg functions work correctly.
4245 * We write the caller's `in' registers into his caller's `arg dump'
4246 * area.  That arg-dump area immediately precedes the argument extension
4247 * area, resulting in a single contiguous block of memory.
4248 *
4249 * This is really the wrong way to do it: the arguments should be written
4250 * to storage local to the stdarg function, and the stdarg `pick up
4251 * the next argument' code should pick it up from whichever region is
4252 * `active' at that point.
4253 */
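/*
 * For reference, the frame layout assumed here is the standard SPARC
 * one (a sketch; member names other than fr_argd, which the comments
 * below use, are only illustrative):
 *
 *	struct frame {
 *		int	fr_local[8];	%l0-%l7 save area (%fp + 0x00)
 *		int	fr_arg[6];	%i0-%i5 save area
 *		struct	frame *fr_fp;	saved %i6
 *		int	fr_pc;		saved %i7
 *		int	fr_xxx;		aggregate-return pointer (0x40)
 *		int	fr_argd[6];	argument dump area (0x44..0x58)
 *		int	fr_argx[1];	arguments 7 and up (0x5c)
 *	};
 */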
4254	.globl	___builtin_saveregs
4255___builtin_saveregs:
4256	! not profiled -- this should be done inline anyway
4257	! bleah! the arg dump area is unaligned!  cannot std w/o reg/reg moves
4258	st	%i0, [%fp + 0x44]	! fr->fr_argd[0]
4259	st	%i1, [%fp + 0x48]	! fr->fr_argd[1]
4260	st	%i2, [%fp + 0x4c]	! fr->fr_argd[2]
4261	st	%i3, [%fp + 0x50]	! fr->fr_argd[3]
4262	st	%i4, [%fp + 0x54]	! fr->fr_argd[4]
4263	retl
4264	 st	%i5, [%fp + 0x58]	! fr->fr_argd[5]
4265
4266#ifdef KGDB
4267/*
4268 * Write all windows (user or otherwise), except the current one.
4269 *
4270 * THIS COULD BE DONE IN USER CODE
4271 */
4272ENTRY(write_all_windows)
4273	/*
4274	 * g2 = g1 = nwindows - 1;
4275	 * while (--g1 > 0) save();
4276	 * while (--g2 > 0) restore();
4277	 */
4278	sethi	%hi(_nwindows), %g1
4279	ld	[%g1 + %lo(_nwindows)], %g1
4280	dec	%g1
4281	mov	%g1, %g2
4282
42831:	deccc	%g1
4284	bg,a	1b
4285	 save	%sp, -64, %sp
4286
42872:	deccc	%g2
4288	bg,a	2b
4289	 restore
4290
4291	retl
4292	nop
4293#endif /* KGDB */
4294
4295	.data
4296	.globl	_cold
4297_cold:
4298	.word	1		! cold start flag
4299
4300	.globl	_proc0paddr
4301_proc0paddr:
4302	.word	_u0		! KVA of proc0 uarea
4303
4304/* interrupt counters	XXX THESE BELONG ELSEWHERE (if anywhere) */
4305	.globl	_intrcnt, _eintrcnt, _intrnames, _eintrnames
4306_intrnames:
4307	.asciz	"spur"
4308	.asciz	"lev1"
4309	.asciz	"lev2"
4310	.asciz	"lev3"
4311	.asciz	"lev4"
4312	.asciz	"lev5"
4313	.asciz	"lev6"
4314	.asciz	"lev7"
4315	.asciz	"lev8"
4316	.asciz	"lev9"
4317	.asciz	"clock"
4318	.asciz	"lev11"
4319	.asciz	"lev12"
4320	.asciz	"lev13"
4321	.asciz	"prof"
4322_eintrnames:
4323	ALIGN
4324_intrcnt:
4325	.skip	4*15
4326_eintrcnt:
4327
4328	.comm	_nwindows, 4
4329	.comm	_promvec, 4
4330	.comm	_curproc, 4
4331	.comm	_qs, 32 * 8
4332	.comm	_whichqs, 4
4333