xref: /illumos-gate/usr/src/uts/sun4u/ml/mach_locore.S (revision 55fea89d)
1*5d9d9091SRichard Lowe/*
2*5d9d9091SRichard Lowe * CDDL HEADER START
3*5d9d9091SRichard Lowe *
4*5d9d9091SRichard Lowe * The contents of this file are subject to the terms of the
5*5d9d9091SRichard Lowe * Common Development and Distribution License (the "License").
6*5d9d9091SRichard Lowe * You may not use this file except in compliance with the License.
7*5d9d9091SRichard Lowe *
8*5d9d9091SRichard Lowe * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*5d9d9091SRichard Lowe * or http://www.opensolaris.org/os/licensing.
10*5d9d9091SRichard Lowe * See the License for the specific language governing permissions
11*5d9d9091SRichard Lowe * and limitations under the License.
12*5d9d9091SRichard Lowe *
13*5d9d9091SRichard Lowe * When distributing Covered Code, include this CDDL HEADER in each
14*5d9d9091SRichard Lowe * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*5d9d9091SRichard Lowe * If applicable, add the following below this CDDL HEADER, with the
16*5d9d9091SRichard Lowe * fields enclosed by brackets "[]" replaced with your own identifying
17*5d9d9091SRichard Lowe * information: Portions Copyright [yyyy] [name of copyright owner]
18*5d9d9091SRichard Lowe *
19*5d9d9091SRichard Lowe * CDDL HEADER END
20*5d9d9091SRichard Lowe */
21*5d9d9091SRichard Lowe/*
22*5d9d9091SRichard Lowe * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23*5d9d9091SRichard Lowe * Use is subject to license terms.
24*5d9d9091SRichard Lowe */
25*5d9d9091SRichard Lowe
26*5d9d9091SRichard Lowe#include <sys/asm_linkage.h>
27*5d9d9091SRichard Lowe#include <sys/intreg.h>
28*5d9d9091SRichard Lowe#include <sys/ivintr.h>
29*5d9d9091SRichard Lowe#include <sys/mmu.h>
30*5d9d9091SRichard Lowe#include <sys/machpcb.h>
31*5d9d9091SRichard Lowe#include <sys/machtrap.h>
32*5d9d9091SRichard Lowe#include <sys/machlock.h>
33*5d9d9091SRichard Lowe#include <sys/fdreg.h>
34*5d9d9091SRichard Lowe#include <sys/vis.h>
35*5d9d9091SRichard Lowe#include <sys/traptrace.h>
36*5d9d9091SRichard Lowe#include <sys/panic.h>
37*5d9d9091SRichard Lowe#include <sys/machasi.h>
38*5d9d9091SRichard Lowe#include <sys/clock.h>
39*5d9d9091SRichard Lowe#include <vm/hat_sfmmu.h>
40*5d9d9091SRichard Lowe
41*5d9d9091SRichard Lowe#include "assym.h"
42*5d9d9091SRichard Lowe
43*5d9d9091SRichard Lowe
44*5d9d9091SRichard Lowe!
45*5d9d9091SRichard Lowe! REGOFF must add up to allow double word access to r_tstate.
46*5d9d9091SRichard Lowe! PCB_WBUF must also be aligned.
47*5d9d9091SRichard Lowe!
48*5d9d9091SRichard Lowe#if (REGOFF & 7) != 0
49*5d9d9091SRichard Lowe#error "struct regs not aligned"
50*5d9d9091SRichard Lowe#endif
51*5d9d9091SRichard Lowe
52*5d9d9091SRichard Lowe/*
53*5d9d9091SRichard Lowe * Absolute external symbols.
54*5d9d9091SRichard Lowe * On the sun4u we put the panic buffer in the third and fourth pages.
55*5d9d9091SRichard Lowe * We set things up so that the first 2 pages of KERNELBASE are illegal,
56*5d9d9091SRichard Lowe * to act as a redzone during copyin/copyout type operations. One of
57*5d9d9091SRichard Lowe * the reasons the panic buffer is allocated in low memory is to
58*5d9d9091SRichard Lowe * prevent it from being overwritten during booting operations (besides
59*5d9d9091SRichard Lowe * the fact that it is small enough to share pages with others).
60*5d9d9091SRichard Lowe */
61*5d9d9091SRichard Lowe
62*5d9d9091SRichard Lowe	.seg	".data"
63*5d9d9091SRichard Lowe	.global	panicbuf
64*5d9d9091SRichard Lowe
65*5d9d9091SRichard LowePROM	= 0xFFE00000			! address of prom virtual area
66*5d9d9091SRichard Lowepanicbuf = SYSBASE32 + PAGESIZE		! address of panic buffer
67*5d9d9091SRichard Lowe
68*5d9d9091SRichard Lowe	.type	panicbuf, #object
69*5d9d9091SRichard Lowe	.size	panicbuf, PANICBUFSIZE
70*5d9d9091SRichard Lowe
71*5d9d9091SRichard Lowe/*
72*5d9d9091SRichard Lowe * Absolute external symbol - intr_vec_table.
73*5d9d9091SRichard Lowe *
74*5d9d9091SRichard Lowe * With new bus structures supporting a larger number of interrupt
75*5d9d9091SRichard Lowe * numbers, the interrupt vector table, intr_vec_table[] has been
76*5d9d9091SRichard Lowe * moved out of kernel nucleus and allocated after panicbuf.
77*5d9d9091SRichard Lowe */
78*5d9d9091SRichard Lowe	.global intr_vec_table
79*5d9d9091SRichard Lowe
80*5d9d9091SRichard Loweintr_vec_table = SYSBASE32 + PAGESIZE + PANICBUFSIZE ! address of interrupt table
81*5d9d9091SRichard Lowe
82*5d9d9091SRichard Lowe	.type	intr_vec_table, #object
83*5d9d9091SRichard Lowe	.size	intr_vec_table, MAXIVNUM * CPTRSIZE + MAX_RSVD_IV * IV_SIZE + MAX_RSVD_IVX * (IV_SIZE + CPTRSIZE * (NCPU - 1))
84*5d9d9091SRichard Lowe
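/*
 * A sketch of the resulting low-address layout, assembled from the
 * definitions above (sizes come from assym.h):
 *
 *	panicbuf	= SYSBASE32 + PAGESIZE, PANICBUFSIZE bytes
 *	intr_vec_table	= SYSBASE32 + PAGESIZE + PANICBUFSIZE,
 *			  MAXIVNUM pointers plus the reserved iv entries
 */
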
85*5d9d9091SRichard Lowe/*
86*5d9d9091SRichard Lowe * The thread 0 stack. This must be the first thing in the data
87*5d9d9091SRichard Lowe * segment (other than an sccs string) so that we don't stomp
88*5d9d9091SRichard Lowe * on anything important if the stack overflows. We get a
89*5d9d9091SRichard Lowe * red zone below this stack for free when the kernel text is
90*5d9d9091SRichard Lowe * write protected.
91*5d9d9091SRichard Lowe */
92*5d9d9091SRichard Lowe
93*5d9d9091SRichard Lowe	.global	t0stack
94*5d9d9091SRichard Lowe	.align	16
95*5d9d9091SRichard Lowe	.type	t0stack, #object
96*5d9d9091SRichard Lowet0stack:
97*5d9d9091SRichard Lowe	.skip	T0STKSZ			! thread 0 stack
98*5d9d9091SRichard Lowet0stacktop:
99*5d9d9091SRichard Lowe	.size	t0stack, T0STKSZ
100*5d9d9091SRichard Lowe
101*5d9d9091SRichard Lowe/*
102*5d9d9091SRichard Lowe * cpu0 and its ptl1_panic stack.  The cpu structure must be allocated
103*5d9d9091SRichard Lowe * on a single page for ptl1_panic's physical address accesses.
104*5d9d9091SRichard Lowe */
105*5d9d9091SRichard Lowe	.global	cpu0
106*5d9d9091SRichard Lowe	.align	MMU_PAGESIZE
107*5d9d9091SRichard Lowecpu0:
108*5d9d9091SRichard Lowe	.type	cpu0, #object
109*5d9d9091SRichard Lowe	.skip	CPU_ALLOC_SIZE
110*5d9d9091SRichard Lowe	.size	cpu0, CPU_ALLOC_SIZE
111*5d9d9091SRichard Lowe
112*5d9d9091SRichard Lowe	.global t0
113*5d9d9091SRichard Lowe	.align	PTR24_ALIGN		! alignment for mutex.
114*5d9d9091SRichard Lowe	.type	t0, #object
115*5d9d9091SRichard Lowet0:
116*5d9d9091SRichard Lowe	.skip	THREAD_SIZE		! thread 0
117*5d9d9091SRichard Lowe	.size	t0, THREAD_SIZE
118*5d9d9091SRichard Lowe
119*5d9d9091SRichard Lowe#ifdef	TRAPTRACE
120*5d9d9091SRichard Lowe	.global	trap_trace_ctl
121*5d9d9091SRichard Lowe	.global	trap_tr0
122*5d9d9091SRichard Lowe	.global trap_trace_bufsize
123*5d9d9091SRichard Lowe	.global	trap_freeze
124*5d9d9091SRichard Lowe	.global	trap_freeze_pc
125*5d9d9091SRichard Lowe
126*5d9d9091SRichard Lowe	.align	4
127*5d9d9091SRichard Lowetrap_trace_bufsize:
128*5d9d9091SRichard Lowe	.word	TRAP_TSIZE		! default trap buffer size
129*5d9d9091SRichard Lowetrap_freeze:
130*5d9d9091SRichard Lowe	.word	0
131*5d9d9091SRichard Lowe
132*5d9d9091SRichard Lowe	.align	64
133*5d9d9091SRichard Lowetrap_trace_ctl:
134*5d9d9091SRichard Lowe	.skip	NCPU * TRAPTR_SIZE	! NCPU control headers
135*5d9d9091SRichard Lowe
136*5d9d9091SRichard Lowe	.align	16
137*5d9d9091SRichard Lowetrap_tr0:
138*5d9d9091SRichard Lowe	.skip	TRAP_TSIZE		! one buffer for the boot cpu
139*5d9d9091SRichard Lowe
140*5d9d9091SRichard Lowe/*
141*5d9d9091SRichard Lowe * When an assertion in TRACE_PTR fails, %pc is saved in trap_freeze_pc to
142*5d9d9091SRichard Lowe * show in which TRACE_PTR the assertion failure happened.
143*5d9d9091SRichard Lowe */
144*5d9d9091SRichard Lowe	.align	8
145*5d9d9091SRichard Lowetrap_freeze_pc:
146*5d9d9091SRichard Lowe	.nword	0
147*5d9d9091SRichard Lowe#endif	/* TRAPTRACE */
148*5d9d9091SRichard Lowe
149*5d9d9091SRichard Lowe	.align 4
150*5d9d9091SRichard Lowe	.seg	".text"
151*5d9d9091SRichard Lowe
152*5d9d9091SRichard Lowe#ifdef	NOPROM
153*5d9d9091SRichard Lowe	.global availmem
154*5d9d9091SRichard Loweavailmem:
155*5d9d9091SRichard Lowe	.word	0
156*5d9d9091SRichard Lowe#endif	/* NOPROM */
157*5d9d9091SRichard Lowe
158*5d9d9091SRichard Lowe	.align	8
159*5d9d9091SRichard Lowe_local_p1275cis:
160*5d9d9091SRichard Lowe	.nword	0
161*5d9d9091SRichard Lowe
162*5d9d9091SRichard Lowe	.seg	".data"
163*5d9d9091SRichard Lowe
164*5d9d9091SRichard Lowe	.global nwindows, nwin_minus_one, winmask
165*5d9d9091SRichard Lowenwindows:
166*5d9d9091SRichard Lowe	.word   8
167*5d9d9091SRichard Lowenwin_minus_one:
168*5d9d9091SRichard Lowe	.word   7
169*5d9d9091SRichard Lowewinmask:
170*5d9d9091SRichard Lowe	.word	8
171*5d9d9091SRichard Lowe
172*5d9d9091SRichard Lowe	.global	afsrbuf
173*5d9d9091SRichard Loweafsrbuf:
174*5d9d9091SRichard Lowe	.word	0,0,0,0
175*5d9d9091SRichard Lowe
176*5d9d9091SRichard Lowe/*
177*5d9d9091SRichard Lowe * System initialization
178*5d9d9091SRichard Lowe *
179*5d9d9091SRichard Lowe * Our contract with the boot prom specifies that the MMU is on and the
180*5d9d9091SRichard Lowe * first 16 meg of memory is mapped with a level-1 pte.  We are called
181*5d9d9091SRichard Lowe * with p1275cis ptr in %o0 and kdi_dvec in %o1; we start execution
182*5d9d9091SRichard Lowe * directly from physical memory, so we need to get up into our proper
183*5d9d9091SRichard Lowe * addresses quickly: all code before we do this must be position
184*5d9d9091SRichard Lowe * independent.
185*5d9d9091SRichard Lowe *
186*5d9d9091SRichard Lowe * NB: The above is not true for the boot/stick kernel; the only thing mapped is
187*5d9d9091SRichard Lowe * the text+data+bss. The kernel is loaded directly into KERNELBASE.
188*5d9d9091SRichard Lowe *
189*5d9d9091SRichard Lowe * 	On entry, the romvec pointer (romp) is the first argument;
190*5d9d9091SRichard Lowe * 	  i.e., %o0.
191*5d9d9091SRichard Lowe * 	the bootops vector is in the second argument (%o1)
192*5d9d9091SRichard Lowe *
193*5d9d9091SRichard Lowe * Our tasks are:
194*5d9d9091SRichard Lowe * 	save parameters
195*5d9d9091SRichard Lowe * 	construct mappings for KERNELBASE (not needed for boot/stick kernel)
196*5d9d9091SRichard Lowe * 	hop up into high memory           (not needed for boot/stick kernel)
197*5d9d9091SRichard Lowe * 	initialize stack pointer
198*5d9d9091SRichard Lowe * 	initialize trap base register
199*5d9d9091SRichard Lowe * 	initialize window invalid mask
200*5d9d9091SRichard Lowe * 	initialize psr (with traps enabled)
201*5d9d9091SRichard Lowe * 	figure out all the module type stuff
202*5d9d9091SRichard Lowe * 	tear down the 1-1 mappings
203*5d9d9091SRichard Lowe * 	dive into main()
204*5d9d9091SRichard Lowe */
205*5d9d9091SRichard Lowe	ENTRY_NP(_start)
206*5d9d9091SRichard Lowe	!
207*5d9d9091SRichard Lowe	! Stash away our arguments in memory.
208*5d9d9091SRichard Lowe	!
209*5d9d9091SRichard Lowe	sethi	%hi(_local_p1275cis), %g1
210*5d9d9091SRichard Lowe	stn	%o4, [%g1 + %lo(_local_p1275cis)]
211*5d9d9091SRichard Lowe
212*5d9d9091SRichard Lowe	!
213*5d9d9091SRichard Lowe	! Initialize CPU state registers
214*5d9d9091SRichard Lowe	!
215*5d9d9091SRichard Lowe	wrpr	%g0, PSTATE_KERN, %pstate
216*5d9d9091SRichard Lowe	wr	%g0, %g0, %fprs
217*5d9d9091SRichard Lowe
218*5d9d9091SRichard Lowe	!
219*5d9d9091SRichard Lowe	! call krtld to link the world together
220*5d9d9091SRichard Lowe	!
221*5d9d9091SRichard Lowe	call	kobj_start
222*5d9d9091SRichard Lowe	mov	%o4, %o0
223*5d9d9091SRichard Lowe
224*5d9d9091SRichard Lowe	CLEARTICKNPT			! allow user rdtick
225*5d9d9091SRichard Lowe	!
226*5d9d9091SRichard Lowe	! Get maxwin from %ver
227*5d9d9091SRichard Lowe	!
228*5d9d9091SRichard Lowe	rdpr	%ver, %g1
229*5d9d9091SRichard Lowe	and	%g1, VER_MAXWIN, %g1
230*5d9d9091SRichard Lowe
231*5d9d9091SRichard Lowe	!
232*5d9d9091SRichard Lowe	! Stuff some memory cells related to numbers of windows.
233*5d9d9091SRichard Lowe	!
234*5d9d9091SRichard Lowe	sethi	%hi(nwin_minus_one), %g2
235*5d9d9091SRichard Lowe	st	%g1, [%g2 + %lo(nwin_minus_one)]
236*5d9d9091SRichard Lowe	inc	%g1
237*5d9d9091SRichard Lowe	sethi	%hi(nwindows), %g2
238*5d9d9091SRichard Lowe	st	%g1, [%g2 + %lo(nwindows)]
239*5d9d9091SRichard Lowe	dec	%g1
240*5d9d9091SRichard Lowe	mov	-2, %g2
241*5d9d9091SRichard Lowe	sll	%g2, %g1, %g2
242*5d9d9091SRichard Lowe	sethi	%hi(winmask), %g4
243*5d9d9091SRichard Lowe	st	%g2, [%g4 + %lo(winmask)]
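	!
	! (In C terms, a sketch: nwin_minus_one = maxwin, nwindows = maxwin + 1,
	! and winmask = -2 << (nwindows - 1), i.e. a mask whose low nwindows
	! bits are clear -- 0xffffff00 for the usual 8 windows.)
	!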
244*5d9d9091SRichard Lowe
245*5d9d9091SRichard Lowe	!
246*5d9d9091SRichard Lowe	! save a pointer to obp's tba for later use by kmdb
247*5d9d9091SRichard Lowe	!
248*5d9d9091SRichard Lowe	rdpr	%tba, %g1
249*5d9d9091SRichard Lowe	set	boot_tba, %g2
250*5d9d9091SRichard Lowe	stx	%g1, [%g2]
251*5d9d9091SRichard Lowe
252*5d9d9091SRichard Lowe	!
253*5d9d9091SRichard Lowe	! copy obp's breakpoint trap entry to obp_bpt
254*5d9d9091SRichard Lowe	!
255*5d9d9091SRichard Lowe	rdpr	%tba, %g1
256*5d9d9091SRichard Lowe	set	T_SOFTWARE_TRAP | ST_MON_BREAKPOINT, %g2
257*5d9d9091SRichard Lowe	sll	%g2, 5, %g2
258*5d9d9091SRichard Lowe	or	%g1, %g2, %g1
259*5d9d9091SRichard Lowe	set	obp_bpt, %g2
260*5d9d9091SRichard Lowe	ldx	[%g1], %g3
261*5d9d9091SRichard Lowe	stx	%g3, [%g2]
262*5d9d9091SRichard Lowe	flush	%g2
263*5d9d9091SRichard Lowe	ldx	[%g1 + 8], %g3
264*5d9d9091SRichard Lowe	stx	%g3, [%g2 + 8]
265*5d9d9091SRichard Lowe	flush	%g2 + 8
266*5d9d9091SRichard Lowe	ldx	[%g1 + 16], %g3
267*5d9d9091SRichard Lowe	stx	%g3, [%g2 + 16]
268*5d9d9091SRichard Lowe	flush	%g2 + 16
269*5d9d9091SRichard Lowe	ldx	[%g1 + 24], %g3
270*5d9d9091SRichard Lowe	stx	%g3, [%g2 + 24]
271*5d9d9091SRichard Lowe	flush	%g2 + 24
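	!
	! (Each trap table entry is eight instructions -- 32 bytes -- hence
	! the shift by 5 above; the four 8-byte load/store pairs copy the
	! whole breakpoint entry, flushing the I$ after each store.)
	!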
272*5d9d9091SRichard Lowe
273*5d9d9091SRichard Lowe	!
274*5d9d9091SRichard Lowe	! Initialize thread 0's stack.
275*5d9d9091SRichard Lowe	!
276*5d9d9091SRichard Lowe	set	t0stacktop, %g1		! setup kernel stack pointer
277*5d9d9091SRichard Lowe	sub	%g1, SA(KFPUSIZE+GSR_SIZE), %g2
278*5d9d9091SRichard Lowe	and	%g2, 0x3f, %g3
279*5d9d9091SRichard Lowe	sub	%g2, %g3, %o1
280*5d9d9091SRichard Lowe	sub	%o1, SA(MPCBSIZE) + STACK_BIAS, %sp
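	!
	! (Sketch of what was just carved from the top of t0stack: room for
	! the kernel FPU/GSR save area, rounded down to a 64-byte boundary,
	! then a machpcb; %sp ends up just below that, adjusted by STACK_BIAS.)
	!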
281*5d9d9091SRichard Lowe
282*5d9d9091SRichard Lowe	!
283*5d9d9091SRichard Lowe	! Initialize global thread register.
284*5d9d9091SRichard Lowe	!
285*5d9d9091SRichard Lowe	set	t0, THREAD_REG
286*5d9d9091SRichard Lowe
287*5d9d9091SRichard Lowe	!
288*5d9d9091SRichard Lowe	! Fill in enough of the cpu structure so that
289*5d9d9091SRichard Lowe	! the wbuf management code works. Make sure the
290*5d9d9091SRichard Lowe	! boot cpu is inserted in cpu[] based on cpuid.
291*5d9d9091SRichard Lowe	!
292*5d9d9091SRichard Lowe	CPU_INDEX(%g2, %g1)
293*5d9d9091SRichard Lowe	sll	%g2, CPTRSHIFT, %g2		! convert cpuid to cpu[] offset
294*5d9d9091SRichard Lowe	set	cpu0, %o0			! &cpu0
295*5d9d9091SRichard Lowe	set	cpu, %g1			! &cpu[]
296*5d9d9091SRichard Lowe	stn	%o0, [%g1 + %g2]		! cpu[cpuid] = &cpu0
297*5d9d9091SRichard Lowe
298*5d9d9091SRichard Lowe	stn	%o0, [THREAD_REG + T_CPU]	! threadp()->t_cpu = cpu[cpuid]
299*5d9d9091SRichard Lowe	stn	THREAD_REG, [%o0 + CPU_THREAD]	! cpu[cpuid]->cpu_thread = threadp()
300*5d9d9091SRichard Lowe
301*5d9d9091SRichard Lowe
302*5d9d9091SRichard Lowe	!  We do NOT need to bzero our BSS...boot has already done it for us.
303*5d9d9091SRichard Lowe	!  Just need to reference edata so that we don't break /dev/ksyms
304*5d9d9091SRichard Lowe	set	edata, %g0
305*5d9d9091SRichard Lowe
306*5d9d9091SRichard Lowe	!
307*5d9d9091SRichard Lowe	! Call mlsetup with address of prototype user registers.
308*5d9d9091SRichard Lowe	!
309*5d9d9091SRichard Lowe	call	mlsetup
310*5d9d9091SRichard Lowe	add	%sp, REGOFF + STACK_BIAS, %o0
311*5d9d9091SRichard Lowe
312*5d9d9091SRichard Lowe#if (REGOFF != MPCB_REGS)
313*5d9d9091SRichard Lowe#error "hole in struct machpcb between frame and regs?"
314*5d9d9091SRichard Lowe#endif
315*5d9d9091SRichard Lowe
316*5d9d9091SRichard Lowe	!
317*5d9d9091SRichard Lowe	! Now call main.  We will return as process 1 (init).
318*5d9d9091SRichard Lowe	!
319*5d9d9091SRichard Lowe	call	main
320*5d9d9091SRichard Lowe	nop
321*5d9d9091SRichard Lowe
322*5d9d9091SRichard Lowe	!
323*5d9d9091SRichard Lowe	! Main should never return.
324*5d9d9091SRichard Lowe	!
325*5d9d9091SRichard Lowe	set	.mainretmsg, %o0
326*5d9d9091SRichard Lowe	call	panic
327*5d9d9091SRichard Lowe	nop
328*5d9d9091SRichard Lowe	SET_SIZE(_start)
329*5d9d9091SRichard Lowe
330*5d9d9091SRichard Lowe.mainretmsg:
331*5d9d9091SRichard Lowe	.asciz	"main returned"
332*5d9d9091SRichard Lowe	.align	4
333*5d9d9091SRichard Lowe
334*5d9d9091SRichard Lowe
335*5d9d9091SRichard Lowe/*
336*5d9d9091SRichard Lowe * Generic system trap handler.
337*5d9d9091SRichard Lowe *
338*5d9d9091SRichard Lowe * Some kernel trap handlers save themselves from buying a window by
339*5d9d9091SRichard Lowe * borrowing some of sys_trap's unused locals. %l0 thru %l3 may be used
340*5d9d9091SRichard Lowe * for this purpose, as user_rtt and priv_rtt do not depend on them.
341*5d9d9091SRichard Lowe * %l4 thru %l7 should NOT be used this way.
342*5d9d9091SRichard Lowe *
343*5d9d9091SRichard Lowe * Entry Conditions:
344*5d9d9091SRichard Lowe * 	%pstate		am:0 priv:1 ie:0
345*5d9d9091SRichard Lowe * 			globals are either ag or ig (not mg!)
346*5d9d9091SRichard Lowe *
347*5d9d9091SRichard Lowe * Register Inputs:
348*5d9d9091SRichard Lowe * 	%g1		pc of trap handler
349*5d9d9091SRichard Lowe * 	%g2, %g3	args for handler
350*5d9d9091SRichard Lowe * 	%g4		desired %pil (-1 means current %pil)
351*5d9d9091SRichard Lowe * 	%g5, %g6	destroyed
352*5d9d9091SRichard Lowe * 	%g7		saved
353*5d9d9091SRichard Lowe *
354*5d9d9091SRichard Lowe * Register Usage:
355*5d9d9091SRichard Lowe * 	%l0, %l1	temps
356*5d9d9091SRichard Lowe * 	%l3		saved %g1
357*5d9d9091SRichard Lowe * 	%l6		curthread for user traps, %pil for priv traps
358*5d9d9091SRichard Lowe * 	%l7		regs
359*5d9d9091SRichard Lowe *
360*5d9d9091SRichard Lowe * Called function prototype variants:
361*5d9d9091SRichard Lowe *
362*5d9d9091SRichard Lowe *	func(struct regs *rp);
363*5d9d9091SRichard Lowe * 	func(struct regs *rp, uintptr_t arg1 [%g2], uintptr_t arg2 [%g3])
364*5d9d9091SRichard Lowe *	func(struct regs *rp, uintptr_t arg1 [%g2],
365*5d9d9091SRichard Lowe *	    uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h])
366*5d9d9091SRichard Lowe *	func(struct regs *rp, uint32_t arg1 [%g2.l],
367*5d9d9091SRichard Lowe *	    uint32_t arg2 [%g3.l], uint32_t arg3 [%g3.h], uint32_t [%g2.h])
368*5d9d9091SRichard Lowe */
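/*
 * (The 32-bit argument variants come from splitting %g2 and %g3 into
 * halves: %g3.l is arg2, %g3.h is arg3, and %g2.h is the optional fourth
 * argument -- see the srlx instructions at have_win below.)
 */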
369*5d9d9091SRichard Lowe
370*5d9d9091SRichard Lowe	ENTRY_NP(sys_trap)
371*5d9d9091SRichard Lowe	!
372*5d9d9091SRichard Lowe	! force tl=1, update %cwp, branch to correct handler
373*5d9d9091SRichard Lowe	!
374*5d9d9091SRichard Lowe	wrpr	%g0, 1, %tl
375*5d9d9091SRichard Lowe	rdpr	%tstate, %g5
376*5d9d9091SRichard Lowe	btst	TSTATE_PRIV, %g5
377*5d9d9091SRichard Lowe	and	%g5, TSTATE_CWP, %g6
378*5d9d9091SRichard Lowe	bnz,pn	%xcc, priv_trap
379*5d9d9091SRichard Lowe	wrpr	%g0, %g6, %cwp
380*5d9d9091SRichard Lowe
381*5d9d9091SRichard Lowe	ALTENTRY(user_trap)
382*5d9d9091SRichard Lowe	!
383*5d9d9091SRichard Lowe	! user trap
384*5d9d9091SRichard Lowe	!
385*5d9d9091SRichard Lowe	! make all windows clean for kernel
386*5d9d9091SRichard Lowe	! buy a window using the current thread's stack
387*5d9d9091SRichard Lowe	!
388*5d9d9091SRichard Lowe	sethi	%hi(nwin_minus_one), %g5
389*5d9d9091SRichard Lowe	ld	[%g5 + %lo(nwin_minus_one)], %g5
390*5d9d9091SRichard Lowe	wrpr	%g0, %g5, %cleanwin
391*5d9d9091SRichard Lowe	CPU_ADDR(%g5, %g6)
392*5d9d9091SRichard Lowe	ldn	[%g5 + CPU_THREAD], %g5
393*5d9d9091SRichard Lowe	ldn	[%g5 + T_STACK], %g6
394*5d9d9091SRichard Lowe	sub	%g6, STACK_BIAS, %g6
395*5d9d9091SRichard Lowe	save	%g6, 0, %sp
396*5d9d9091SRichard Lowe	!
397*5d9d9091SRichard Lowe	! set window registers so that current windows are "other" windows
398*5d9d9091SRichard Lowe	!
399*5d9d9091SRichard Lowe	rdpr	%canrestore, %l0
400*5d9d9091SRichard Lowe	rdpr	%wstate, %l1
401*5d9d9091SRichard Lowe	wrpr	%g0, 0, %canrestore
402*5d9d9091SRichard Lowe	sllx	%l1, WSTATE_SHIFT, %l1
403*5d9d9091SRichard Lowe	wrpr	%l1, WSTATE_K64, %wstate
404*5d9d9091SRichard Lowe	wrpr	%g0, %l0, %otherwin
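	!
	! (The user's restorable windows are now counted in %otherwin rather
	! than %canrestore, and the previous user %wstate has been shifted
	! into the "other" wstate field, so any spill/fill of those windows
	! is steered to the user-wstate handlers rather than the kernel's.)
	!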
405*5d9d9091SRichard Lowe	!
406*5d9d9091SRichard Lowe	! set pcontext to run kernel
407*5d9d9091SRichard Lowe	!
408*5d9d9091SRichard Lowe	sethi	%hi(kcontextreg), %l0
409*5d9d9091SRichard Lowe	ldx     [%l0 + %lo(kcontextreg)], %l0
410*5d9d9091SRichard Lowe	mov	MMU_PCONTEXT, %l1	! if kcontextreg==PCONTEXT, do nothing
411*5d9d9091SRichard Lowe	ldxa	[%l1]ASI_MMU_CTX, %l2
412*5d9d9091SRichard Lowe	xor	%l0, %l2, %l2
413*5d9d9091SRichard Lowe	srlx	%l2, CTXREG_NEXT_SHIFT, %l2
414*5d9d9091SRichard Lowe	brz	%l2, 2f			! if N_pgsz0/1 changed, need demap
415*5d9d9091SRichard Lowe	sethi	%hi(FLUSH_ADDR), %l3
416*5d9d9091SRichard Lowe	mov	DEMAP_ALL_TYPE, %l2
417*5d9d9091SRichard Lowe	stxa	%g0, [%l2]ASI_DTLB_DEMAP
418*5d9d9091SRichard Lowe	stxa	%g0, [%l2]ASI_ITLB_DEMAP
419*5d9d9091SRichard Lowe2:
420*5d9d9091SRichard Lowe	stxa	%l0, [%l1]ASI_MMU_CTX
421*5d9d9091SRichard Lowe	flush	%l3			! flush required by immu
422*5d9d9091SRichard Lowe1:
423*5d9d9091SRichard Lowe
424*5d9d9091SRichard Lowe	set	utl0, %g6		! bounce to utl0
425*5d9d9091SRichard Lowehave_win:
426*5d9d9091SRichard Lowe	SYSTRAP_TRACE(%o1, %o2, %o3)
427*5d9d9091SRichard Lowe
428*5d9d9091SRichard Lowe
429*5d9d9091SRichard Lowe	!
430*5d9d9091SRichard Lowe	! at this point we have a new window we can play in,
431*5d9d9091SRichard Lowe	! and %g6 is the label we want done to bounce to
432*5d9d9091SRichard Lowe	!
433*5d9d9091SRichard Lowe	! save needed current globals
434*5d9d9091SRichard Lowe	!
435*5d9d9091SRichard Lowe	mov	%g1, %l3	! pc
436*5d9d9091SRichard Lowe	mov	%g2, %o1	! arg #1
437*5d9d9091SRichard Lowe	mov	%g3, %o2	! arg #2
438*5d9d9091SRichard Lowe	srlx	%g3, 32, %o3	! pseudo arg #3
439*5d9d9091SRichard Lowe	srlx	%g2, 32, %o4	! pseudo arg #4
440*5d9d9091SRichard Lowe	mov	%g5, %l6	! curthread if user trap, %pil if priv trap
441*5d9d9091SRichard Lowe	!
442*5d9d9091SRichard Lowe	! save trap state on stack
443*5d9d9091SRichard Lowe	!
444*5d9d9091SRichard Lowe	add	%sp, REGOFF + STACK_BIAS, %l7
445*5d9d9091SRichard Lowe	rdpr	%tpc, %l0
446*5d9d9091SRichard Lowe	rdpr	%tnpc, %l1
447*5d9d9091SRichard Lowe	rdpr	%tstate, %l2
448*5d9d9091SRichard Lowe	stn	%l0, [%l7 + PC_OFF]
449*5d9d9091SRichard Lowe	stn	%l1, [%l7 + nPC_OFF]
450*5d9d9091SRichard Lowe	stx	%l2, [%l7 + TSTATE_OFF]
451*5d9d9091SRichard Lowe	!
452*5d9d9091SRichard Lowe	! setup pil
453*5d9d9091SRichard Lowe	!
454*5d9d9091SRichard Lowe	brlz,pt		%g4, 1f
455*5d9d9091SRichard Lowe	nop
456*5d9d9091SRichard Lowe#ifdef DEBUG
457*5d9d9091SRichard Lowe	!
458*5d9d9091SRichard Lowe	! ASSERT(%g4 >= %pil).
459*5d9d9091SRichard Lowe	!
460*5d9d9091SRichard Lowe	rdpr	%pil, %l0
461*5d9d9091SRichard Lowe	cmp	%g4, %l0
462*5d9d9091SRichard Lowe	bge,pt	%xcc, 0f
463*5d9d9091SRichard Lowe	nop				! yes, nop; to avoid an annul
464*5d9d9091SRichard Lowe	set	bad_g4_called, %l3
465*5d9d9091SRichard Lowe	mov	1, %o1
466*5d9d9091SRichard Lowe	st	%o1, [%l3]
467*5d9d9091SRichard Lowe	set	bad_g4, %l3		! pc
468*5d9d9091SRichard Lowe	set	sys_trap_wrong_pil, %o1	! arg #1
469*5d9d9091SRichard Lowe	mov	%g4, %o2		! arg #2
470*5d9d9091SRichard Lowe	ba	1f			! stay at the current %pil
471*5d9d9091SRichard Lowe	mov	%l0, %o3		! arg #3
472*5d9d9091SRichard Lowe0:
473*5d9d9091SRichard Lowe#endif /* DEBUG */
474*5d9d9091SRichard Lowe	wrpr		%g0, %g4, %pil
475*5d9d9091SRichard Lowe1:
476*5d9d9091SRichard Lowe	!
477*5d9d9091SRichard Lowe	! set trap regs to execute in kernel at %g6
478*5d9d9091SRichard Lowe	! done resumes execution there
479*5d9d9091SRichard Lowe	!
480*5d9d9091SRichard Lowe	wrpr	%g0, %g6, %tnpc
481*5d9d9091SRichard Lowe	rdpr	%cwp, %l0
482*5d9d9091SRichard Lowe	set	TSTATE_KERN, %l1
483*5d9d9091SRichard Lowe	wrpr	%l1, %l0, %tstate
484*5d9d9091SRichard Lowe	done
485*5d9d9091SRichard Lowe	/* NOTREACHED */
486*5d9d9091SRichard Lowe	SET_SIZE(user_trap)
487*5d9d9091SRichard Lowe	SET_SIZE(sys_trap)
488*5d9d9091SRichard Lowe
489*5d9d9091SRichard Lowe
490*5d9d9091SRichard Lowe	ENTRY_NP(prom_trap)
491*5d9d9091SRichard Lowe	!
492*5d9d9091SRichard Lowe	! prom trap switches the stack to 32-bit
493*5d9d9091SRichard Lowe	! if we took a trap from a 64-bit window,
494*5d9d9091SRichard Lowe	! then buys a window on the current stack.
495*5d9d9091SRichard Lowe	!
496*5d9d9091SRichard Lowe	save	%sp, -SA64(REGOFF + REGSIZE), %sp
497*5d9d9091SRichard Lowe					/* 32 bit frame, 64 bit sized */
498*5d9d9091SRichard Lowe	set	ptl0, %g6
499*5d9d9091SRichard Lowe	ba,a,pt	%xcc, have_win
500*5d9d9091SRichard Lowe	SET_SIZE(prom_trap)
501*5d9d9091SRichard Lowe
502*5d9d9091SRichard Lowe	ENTRY_NP(priv_trap)
503*5d9d9091SRichard Lowe	!
504*5d9d9091SRichard Lowe	! kernel trap
505*5d9d9091SRichard Lowe	! buy a window on the current stack
506*5d9d9091SRichard Lowe	!
507*5d9d9091SRichard Lowe	! is the trap PC in the range allocated to Open Firmware?
508*5d9d9091SRichard Lowe	rdpr	%tpc, %g5
509*5d9d9091SRichard Lowe	set	OFW_END_ADDR, %g6
510*5d9d9091SRichard Lowe	cmp	%g5, %g6
511*5d9d9091SRichard Lowe	bgu,a,pn %xcc, 1f
512*5d9d9091SRichard Lowe	  rdpr	%pil, %g5
513*5d9d9091SRichard Lowe	set	OFW_START_ADDR, %g6
514*5d9d9091SRichard Lowe	cmp	%g5, %g6
515*5d9d9091SRichard Lowe	bgeu,pn	%xcc, prom_trap
516*5d9d9091SRichard Lowe	  rdpr	%pil, %g5
517*5d9d9091SRichard Lowe1:
518*5d9d9091SRichard Lowe	!
519*5d9d9091SRichard Lowe	! check if the primary context is the kernel context.
520*5d9d9091SRichard Lowe	!
521*5d9d9091SRichard Lowe	mov     MMU_PCONTEXT, %g6
522*5d9d9091SRichard Lowe	ldxa    [%g6]ASI_MMU_CTX, %g5
523*5d9d9091SRichard Lowe	sllx    %g5, CTXREG_CTX_SHIFT, %g5      ! keep just the ctx bits
524*5d9d9091SRichard Lowe	brnz,pn %g5, 2f				! assumes KCONTEXT == 0
525*5d9d9091SRichard Lowe	  rdpr  %pil, %g5
526*5d9d9091SRichard Lowe	!
527*5d9d9091SRichard Lowe	! primary context is the kernel context.
528*5d9d9091SRichard Lowe	!
529*5d9d9091SRichard Lowe        set     ktl0, %g6
530*5d9d9091SRichard Lowe        save    %sp, -SA(REGOFF + REGSIZE), %sp
531*5d9d9091SRichard Lowe        ba,a,pt %xcc, have_win
532*5d9d9091SRichard Lowe2:
533*5d9d9091SRichard Lowe	!
534*5d9d9091SRichard Lowe	! primary context is a user context. The caller of sys_trap()
535*5d9d9091SRichard Lowe	! or priv_trap() did not set the kernel context. Raise the
536*5d9d9091SRichard Lowe	! trap level to MAXTL-1 so that ptl1_panic() prints
537*5d9d9091SRichard Lowe	! out all levels of trap data.
538*5d9d9091SRichard Lowe	!
539*5d9d9091SRichard Lowe	rdpr	%ver, %g5
540*5d9d9091SRichard Lowe	srlx	%g5, VER_MAXTL_SHIFT, %g5
541*5d9d9091SRichard Lowe	and	%g5, VER_MAXTL_MASK, %g5	! %g5 = MAXTL
542*5d9d9091SRichard Lowe	sub	%g5, 1, %g5
543*5d9d9091SRichard Lowe	wrpr	%g0, %g5, %tl
544*5d9d9091SRichard Lowe	mov	PTL1_BAD_CTX, %g1
545*5d9d9091SRichard Lowe	ba,a,pt	%xcc, ptl1_panic
546*5d9d9091SRichard Lowe	SET_SIZE(priv_trap)
547*5d9d9091SRichard Lowe
548*5d9d9091SRichard Lowe	ENTRY_NP(utl0)
549*5d9d9091SRichard Lowe	SAVE_GLOBALS(%l7)
550*5d9d9091SRichard Lowe	SAVE_OUTS(%l7)
551*5d9d9091SRichard Lowe	mov	%l6, THREAD_REG
552*5d9d9091SRichard Lowe	wrpr	%g0, PSTATE_KERN, %pstate	! enable ints
553*5d9d9091SRichard Lowe	jmpl	%l3, %o7			! call trap handler
554*5d9d9091SRichard Lowe	mov	%l7, %o0
555*5d9d9091SRichard Lowe	!
556*5d9d9091SRichard Lowe	ALTENTRY(user_rtt)
557*5d9d9091SRichard Lowe	!
558*5d9d9091SRichard Lowe	! Register inputs
559*5d9d9091SRichard Lowe	!	%l7 - regs
560*5d9d9091SRichard Lowe	!
561*5d9d9091SRichard Lowe	! disable interrupts and check for ASTs and wbuf restores
562*5d9d9091SRichard Lowe	! keep cpu_base_spl in %l4 and THREAD_REG in %l6 (needed
563*5d9d9091SRichard Lowe	! in wbuf.s when globals have already been restored).
564*5d9d9091SRichard Lowe	!
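	! Roughly, in pseudo-C (a sketch of the checks below; not the exact
	! trap() signature, and mpcb is the machpcb at %sp):
	!
	!	base = CPU->cpu_base_spl;
	!	if (curthread->t_astflag) {
	!		splx(base);
	!		trap(rp, ..., T_AST);		/* then retry user_rtt */
	!	} else if (mpcb->mpcb_wbcnt != 0) {
	!		splx(base);
	!		trap(rp, ..., T_FLUSH_PCB);	/* then retry user_rtt */
	!	}
	!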
565*5d9d9091SRichard Lowe	wrpr	%g0, PIL_MAX, %pil
566*5d9d9091SRichard Lowe	ldn	[THREAD_REG + T_CPU], %l0
567*5d9d9091SRichard Lowe	ld	[%l0 + CPU_BASE_SPL], %l4
568*5d9d9091SRichard Lowe
569*5d9d9091SRichard Lowe	ldub	[THREAD_REG + T_ASTFLAG], %l2
570*5d9d9091SRichard Lowe	brz,pt	%l2, 1f
571*5d9d9091SRichard Lowe	ld	[%sp + STACK_BIAS + MPCB_WBCNT], %l3
572*5d9d9091SRichard Lowe	!
573*5d9d9091SRichard Lowe	! call trap to do ast processing
574*5d9d9091SRichard Lowe	!
575*5d9d9091SRichard Lowe	wrpr	%g0, %l4, %pil			! pil = cpu_base_spl
576*5d9d9091SRichard Lowe	mov	%l7, %o0
577*5d9d9091SRichard Lowe	call	trap
578*5d9d9091SRichard Lowe	  mov	T_AST, %o2
579*5d9d9091SRichard Lowe	ba,a,pt	%xcc, user_rtt
580*5d9d9091SRichard Lowe1:
581*5d9d9091SRichard Lowe	brz,pt	%l3, 2f
582*5d9d9091SRichard Lowe	mov	THREAD_REG, %l6
583*5d9d9091SRichard Lowe	!
584*5d9d9091SRichard Lowe	! call restore_wbuf to push wbuf windows to stack
585*5d9d9091SRichard Lowe	!
586*5d9d9091SRichard Lowe	wrpr	%g0, %l4, %pil			! pil = cpu_base_spl
587*5d9d9091SRichard Lowe	mov	%l7, %o0
588*5d9d9091SRichard Lowe	call	trap
589*5d9d9091SRichard Lowe	  mov	T_FLUSH_PCB, %o2
590*5d9d9091SRichard Lowe	ba,a,pt	%xcc, user_rtt
591*5d9d9091SRichard Lowe2:
592*5d9d9091SRichard Lowe#ifdef TRAPTRACE
593*5d9d9091SRichard Lowe	TRACE_RTT(TT_SYS_RTT_USER, %l0, %l1, %l2, %l3)
594*5d9d9091SRichard Lowe#endif /* TRAPTRACE */
595*5d9d9091SRichard Lowe	ld	[%sp + STACK_BIAS + MPCB_WSTATE], %l3	! get wstate
596*5d9d9091SRichard Lowe
597*5d9d9091SRichard Lowe	!
598*5d9d9091SRichard Lowe	! restore user globals and outs
599*5d9d9091SRichard Lowe	!
600*5d9d9091SRichard Lowe	rdpr	%pstate, %l1
601*5d9d9091SRichard Lowe	wrpr	%l1, PSTATE_IE, %pstate
602*5d9d9091SRichard Lowe	RESTORE_GLOBALS(%l7)
603*5d9d9091SRichard Lowe	! switch to alternate globals, saving THREAD_REG in %l6
604*5d9d9091SRichard Lowe	wrpr	%l1, PSTATE_IE | PSTATE_AG, %pstate
605*5d9d9091SRichard Lowe	mov	%sp, %g6	! remember the mpcb pointer in %g6
606*5d9d9091SRichard Lowe	RESTORE_OUTS(%l7)
607*5d9d9091SRichard Lowe	!
608*5d9d9091SRichard Lowe	! set %pil from cpu_base_spl
609*5d9d9091SRichard Lowe	!
610*5d9d9091SRichard Lowe	wrpr	%g0, %l4, %pil
611*5d9d9091SRichard Lowe	!
612*5d9d9091SRichard Lowe	! raise tl (now using nucleus context)
613*5d9d9091SRichard Lowe	!
614*5d9d9091SRichard Lowe	wrpr	%g0, 1, %tl
615*5d9d9091SRichard Lowe
616*5d9d9091SRichard Lowe	! switch "other" windows back to "normal" windows.
617*5d9d9091SRichard Lowe	rdpr	%otherwin, %g1
618*5d9d9091SRichard Lowe	wrpr	%g0, 0, %otherwin
619*5d9d9091SRichard Lowe	add	%l3, WSTATE_CLEAN_OFFSET, %l3	! convert to "clean" wstate
620*5d9d9091SRichard Lowe	wrpr	%g0, %l3, %wstate
621*5d9d9091SRichard Lowe	wrpr	%g0, %g1, %canrestore
622*5d9d9091SRichard Lowe
623*5d9d9091SRichard Lowe	! set pcontext to scontext for user execution
624*5d9d9091SRichard Lowe	mov	MMU_SCONTEXT, %g3
625*5d9d9091SRichard Lowe	ldxa	[%g3]ASI_MMU_CTX, %g2
626*5d9d9091SRichard Lowe
627*5d9d9091SRichard Lowe	mov	MMU_PCONTEXT, %g3
628*5d9d9091SRichard Lowe	ldxa    [%g3]ASI_MMU_CTX, %g4		! need N_pgsz0/1 bits
629*5d9d9091SRichard Lowe        srlx    %g4, CTXREG_NEXT_SHIFT, %g4
630*5d9d9091SRichard Lowe        sllx    %g4, CTXREG_NEXT_SHIFT, %g4
631*5d9d9091SRichard Lowe        or      %g4, %g2, %g2                   ! Or in Nuc pgsz bits
632*5d9d9091SRichard Lowe
633*5d9d9091SRichard Lowe	sethi	%hi(FLUSH_ADDR), %g4
634*5d9d9091SRichard Lowe	stxa	%g2, [%g3]ASI_MMU_CTX
635*5d9d9091SRichard Lowe	flush	%g4				! flush required by immu
636*5d9d9091SRichard Lowe	!
637*5d9d9091SRichard Lowe	! Within the code segment [rtt_ctx_start - rtt_ctx_end],
638*5d9d9091SRichard Lowe	! PCONTEXT is set to run user code. If a trap happens in this
639*5d9d9091SRichard Lowe	! window, and the trap needs to be handled at TL=0, the handler
640*5d9d9091SRichard Lowe	! must make sure to set PCONTEXT to run kernel. A convenience
641*5d9d9091SRichard Lowe	! macro, RESET_USER_RTT_REGS(scr1, scr2, label) is available to
642*5d9d9091SRichard Lowe	! TL>1 handlers for this purpose.
643*5d9d9091SRichard Lowe	!
644*5d9d9091SRichard Lowe	! %g1 = %canrestore
645*5d9d9091SRichard Lowe	! %l7 = regs
646*5d9d9091SRichard Lowe	! %g6 = mpcb
647*5d9d9091SRichard Lowe	!
648*5d9d9091SRichard Lowe	.global	rtt_ctx_start
649*5d9d9091SRichard Lowertt_ctx_start:
650*5d9d9091SRichard Lowe	!
651*5d9d9091SRichard Lowe	! setup trap regs
652*5d9d9091SRichard Lowe	!
653*5d9d9091SRichard Lowe	ldn	[%l7 + PC_OFF], %g3
654*5d9d9091SRichard Lowe	ldn	[%l7 + nPC_OFF], %g2
655*5d9d9091SRichard Lowe	ldx	[%l7 + TSTATE_OFF], %l0
656*5d9d9091SRichard Lowe	andn	%l0, TSTATE_CWP, %g7
657*5d9d9091SRichard Lowe	wrpr	%g3, %tpc
658*5d9d9091SRichard Lowe	wrpr	%g2, %tnpc
659*5d9d9091SRichard Lowe
660*5d9d9091SRichard Lowe	!
661*5d9d9091SRichard Lowe	! Restore to window we originally trapped in.
662*5d9d9091SRichard Lowe	! First attempt to restore from the watchpoint saved register window
663*5d9d9091SRichard Lowe	!
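	! (A sketch of the tests below: only when %canrestore is 0, %fp is
	! non-zero, and %fp matches mpcb_rsp[0] -- or, for the second saved
	! window, mpcb_rsp[1] -- is the window image reloaded from
	! mpcb_rwin[0]/[1]; otherwise a plain "restore" returns to the window
	! we trapped from.)
	!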
664*5d9d9091SRichard Lowe	tst	%g1
665*5d9d9091SRichard Lowe	bne,a	1f
666*5d9d9091SRichard Lowe	  clrn	[%g6 + STACK_BIAS + MPCB_RSP0]
667*5d9d9091SRichard Lowe	tst	%fp
668*5d9d9091SRichard Lowe	be,a	1f
669*5d9d9091SRichard Lowe	  clrn	[%g6 + STACK_BIAS + MPCB_RSP0]
670*5d9d9091SRichard Lowe	! test for user return window in pcb
671*5d9d9091SRichard Lowe	ldn	[%g6 + STACK_BIAS + MPCB_RSP0], %g1
672*5d9d9091SRichard Lowe	cmp	%fp, %g1
673*5d9d9091SRichard Lowe	bne	1f
674*5d9d9091SRichard Lowe	  clrn	[%g6 + STACK_BIAS + MPCB_RSP0]
675*5d9d9091SRichard Lowe	restored
676*5d9d9091SRichard Lowe	restore
677*5d9d9091SRichard Lowe	! restore from user return window
678*5d9d9091SRichard Lowe	RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN0)
679*5d9d9091SRichard Lowe	!
680*5d9d9091SRichard Lowe	! Attempt to restore from the second watchpoint saved register window
681*5d9d9091SRichard Lowe	tst	%fp
682*5d9d9091SRichard Lowe	be,a	2f
683*5d9d9091SRichard Lowe	  clrn	[%g6 + STACK_BIAS + MPCB_RSP1]
684*5d9d9091SRichard Lowe	ldn	[%g6 + STACK_BIAS + MPCB_RSP1], %g1
685*5d9d9091SRichard Lowe	cmp	%fp, %g1
686*5d9d9091SRichard Lowe	bne	2f
687*5d9d9091SRichard Lowe	  clrn	[%g6 + STACK_BIAS + MPCB_RSP1]
688*5d9d9091SRichard Lowe	restored
689*5d9d9091SRichard Lowe	restore
690*5d9d9091SRichard Lowe	RESTORE_V9WINDOW(%g6 + STACK_BIAS + MPCB_RWIN1)
691*5d9d9091SRichard Lowe	save
692*5d9d9091SRichard Lowe	b,a	2f
693*5d9d9091SRichard Lowe1:
694*5d9d9091SRichard Lowe	restore				! should not trap
695*5d9d9091SRichard Lowe2:
696*5d9d9091SRichard Lowe	!
697*5d9d9091SRichard Lowe	! set %cleanwin to %canrestore
698*5d9d9091SRichard Lowe	! set %tstate to the correct %cwp
699*5d9d9091SRichard Lowe	! retry resumes user execution
700*5d9d9091SRichard Lowe	!
701*5d9d9091SRichard Lowe	rdpr	%canrestore, %g1
702*5d9d9091SRichard Lowe	wrpr	%g0, %g1, %cleanwin
703*5d9d9091SRichard Lowe	rdpr	%cwp, %g1
704*5d9d9091SRichard Lowe	wrpr	%g1, %g7, %tstate
705*5d9d9091SRichard Lowe	retry
706*5d9d9091SRichard Lowe	.global	rtt_ctx_end
707*5d9d9091SRichard Lowertt_ctx_end:
708*5d9d9091SRichard Lowe	/* NOTREACHED */
709*5d9d9091SRichard Lowe	SET_SIZE(user_rtt)
710*5d9d9091SRichard Lowe	SET_SIZE(utl0)
711*5d9d9091SRichard Lowe
712*5d9d9091SRichard Lowe	ENTRY_NP(ptl0)
713*5d9d9091SRichard Lowe	SAVE_GLOBALS(%l7)
714*5d9d9091SRichard Lowe	SAVE_OUTS(%l7)
715*5d9d9091SRichard Lowe	CPU_ADDR(%g5, %g6)
716*5d9d9091SRichard Lowe	ldn	[%g5 + CPU_THREAD], THREAD_REG
717*5d9d9091SRichard Lowe	wrpr	%g0, PSTATE_KERN, %pstate	! enable ints
718*5d9d9091SRichard Lowe	jmpl	%l3, %o7			! call trap handler
719*5d9d9091SRichard Lowe	mov	%l7, %o0
720*5d9d9091SRichard Lowe	!
721*5d9d9091SRichard Lowe	ALTENTRY(prom_rtt)
722*5d9d9091SRichard Lowe#ifdef TRAPTRACE
723*5d9d9091SRichard Lowe	TRACE_RTT(TT_SYS_RTT_PROM, %l0, %l1, %l2, %l3)
724*5d9d9091SRichard Lowe#endif /* TRAPTRACE */
725*5d9d9091SRichard Lowe	ba,pt	%xcc, common_rtt
726*5d9d9091SRichard Lowe	mov	THREAD_REG, %l0
727*5d9d9091SRichard Lowe	SET_SIZE(prom_rtt)
728*5d9d9091SRichard Lowe	SET_SIZE(ptl0)
729*5d9d9091SRichard Lowe
730*5d9d9091SRichard Lowe	ENTRY_NP(ktl0)
731*5d9d9091SRichard Lowe	SAVE_GLOBALS(%l7)
732*5d9d9091SRichard Lowe	SAVE_OUTS(%l7)				! for the call bug workaround
733*5d9d9091SRichard Lowe	wrpr	%g0, PSTATE_KERN, %pstate	! enable ints
734*5d9d9091SRichard Lowe	jmpl	%l3, %o7			! call trap handler
735*5d9d9091SRichard Lowe	mov	%l7, %o0
736*5d9d9091SRichard Lowe	!
737*5d9d9091SRichard Lowe	ALTENTRY(priv_rtt)
738*5d9d9091SRichard Lowe#ifdef TRAPTRACE
739*5d9d9091SRichard Lowe	TRACE_RTT(TT_SYS_RTT_PRIV, %l0, %l1, %l2, %l3)
740*5d9d9091SRichard Lowe#endif /* TRAPTRACE */
741*5d9d9091SRichard Lowe	!
742*5d9d9091SRichard Lowe	! Register inputs
743*5d9d9091SRichard Lowe	!	%l7 - regs
744*5d9d9091SRichard Lowe	!	%l6 - trap %pil
745*5d9d9091SRichard Lowe	!
746*5d9d9091SRichard Lowe	! Check for a kernel preemption request
747*5d9d9091SRichard Lowe	!
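	! Roughly, in pseudo-C (a sketch, not exact signatures):
	!
	!	if (CPU->cpu_kprunrun &&
	!	    ldstub(&curthread->t_preempt_lk) == 0) {
	!		kpreempt(trap_pil);		/* original %pil, in %l6 */
	!		curthread->t_preempt_lk = 0;
	!		trap_pil = MIN(trap_pil, current_pil);
	!	}
	!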
748*5d9d9091SRichard Lowe	ldn	[THREAD_REG + T_CPU], %l0
749*5d9d9091SRichard Lowe	ldub	[%l0 + CPU_KPRUNRUN], %l0
750*5d9d9091SRichard Lowe	brz,pt	%l0, 1f
751*5d9d9091SRichard Lowe	nop
752*5d9d9091SRichard Lowe
753*5d9d9091SRichard Lowe	!
754*5d9d9091SRichard Lowe	! Attempt to preempt
755*5d9d9091SRichard Lowe	!
756*5d9d9091SRichard Lowe	ldstub	[THREAD_REG + T_PREEMPT_LK], %l0	! load preempt lock
757*5d9d9091SRichard Lowe	brnz,pn	%l0, 1f			! can't call kpreempt if this thread is
758*5d9d9091SRichard Lowe	nop				!   already in it...
759*5d9d9091SRichard Lowe
760*5d9d9091SRichard Lowe	call	kpreempt
761*5d9d9091SRichard Lowe	mov	%l6, %o0		! pass original interrupt level
762*5d9d9091SRichard Lowe
763*5d9d9091SRichard Lowe	stub	%g0, [THREAD_REG + T_PREEMPT_LK]	! nuke the lock
764*5d9d9091SRichard Lowe
765*5d9d9091SRichard Lowe	rdpr	%pil, %o0		! compare old pil level
766*5d9d9091SRichard Lowe	cmp	%l6, %o0		!   with current pil level
767*5d9d9091SRichard Lowe	movg	%xcc, %o0, %l6		! if current is lower, drop old pil
768*5d9d9091SRichard Lowe1:
769*5d9d9091SRichard Lowe	!
770*5d9d9091SRichard Lowe	! If we interrupted the mutex_owner_running() critical region we
771*5d9d9091SRichard Lowe	! must reset the PC and nPC back to the beginning to prevent missed
772*5d9d9091SRichard Lowe	! wakeups. See the comments in mutex_owner_running() for details.
773*5d9d9091SRichard Lowe	!
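	! In effect (a sketch):
	!
	!	off = rp->r_pc - mutex_owner_running_critical_start;
	!	if (off < mutex_owner_running_critical_size) {
	!		rp->r_pc = mutex_owner_running_critical_start;
	!		rp->r_npc = rp->r_pc + 4;
	!	}
	!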
774*5d9d9091SRichard Lowe	ldn	[%l7 + PC_OFF], %l0
775*5d9d9091SRichard Lowe	set	mutex_owner_running_critical_start, %l1
776*5d9d9091SRichard Lowe	sub	%l0, %l1, %l0
777*5d9d9091SRichard Lowe	cmp	%l0, mutex_owner_running_critical_size
778*5d9d9091SRichard Lowe	bgeu,pt	%xcc, 2f
779*5d9d9091SRichard Lowe	mov	THREAD_REG, %l0
780*5d9d9091SRichard Lowe	stn	%l1, [%l7 + PC_OFF]	! restart mutex_owner_running()
781*5d9d9091SRichard Lowe	add	%l1, 4, %l1
782*5d9d9091SRichard Lowe	ba,pt	%xcc, common_rtt
783*5d9d9091SRichard Lowe	stn	%l1, [%l7 + nPC_OFF]
784*5d9d9091SRichard Lowe
785*5d9d9091SRichard Lowe2:
786*5d9d9091SRichard Lowe	!
787*5d9d9091SRichard Lowe	! If we interrupted the mutex_exit() critical region we must reset
788*5d9d9091SRichard Lowe	! the PC and nPC back to the beginning to prevent missed wakeups.
789*5d9d9091SRichard Lowe	! See the comments in mutex_exit() for details.
790*5d9d9091SRichard Lowe	!
791*5d9d9091SRichard Lowe	ldn	[%l7 + PC_OFF], %l0
792*5d9d9091SRichard Lowe	set	mutex_exit_critical_start, %l1
793*5d9d9091SRichard Lowe	sub	%l0, %l1, %l0
794*5d9d9091SRichard Lowe	cmp	%l0, mutex_exit_critical_size
795*5d9d9091SRichard Lowe	bgeu,pt	%xcc, common_rtt
796*5d9d9091SRichard Lowe	mov	THREAD_REG, %l0
797*5d9d9091SRichard Lowe	stn	%l1, [%l7 + PC_OFF]	! restart mutex_exit()
798*5d9d9091SRichard Lowe	add	%l1, 4, %l1
799*5d9d9091SRichard Lowe	stn	%l1, [%l7 + nPC_OFF]
800*5d9d9091SRichard Lowe
801*5d9d9091SRichard Lowecommon_rtt:
802*5d9d9091SRichard Lowe	!
803*5d9d9091SRichard Lowe	! restore globals and outs
804*5d9d9091SRichard Lowe	!
805*5d9d9091SRichard Lowe	rdpr	%pstate, %l1
806*5d9d9091SRichard Lowe	wrpr	%l1, PSTATE_IE, %pstate
807*5d9d9091SRichard Lowe	RESTORE_GLOBALS(%l7)
808*5d9d9091SRichard Lowe	! switch to alternate globals
809*5d9d9091SRichard Lowe	wrpr	%l1, PSTATE_IE | PSTATE_AG, %pstate
810*5d9d9091SRichard Lowe	RESTORE_OUTS(%l7)
811*5d9d9091SRichard Lowe	!
812*5d9d9091SRichard Lowe	! set %pil from max(old pil, cpu_base_spl)
813*5d9d9091SRichard Lowe	!
814*5d9d9091SRichard Lowe	ldn	[%l0 + T_CPU], %l0
815*5d9d9091SRichard Lowe	ld	[%l0 + CPU_BASE_SPL], %l0
816*5d9d9091SRichard Lowe	cmp	%l6, %l0
817*5d9d9091SRichard Lowe	movg	%xcc, %l6, %l0
818*5d9d9091SRichard Lowe	wrpr	%g0, %l0, %pil
819*5d9d9091SRichard Lowe	!
820*5d9d9091SRichard Lowe	! raise tl
821*5d9d9091SRichard Lowe	! setup trap regs
822*5d9d9091SRichard Lowe	! restore to window we originally trapped in
823*5d9d9091SRichard Lowe	!
824*5d9d9091SRichard Lowe	wrpr	%g0, 1, %tl
825*5d9d9091SRichard Lowe	ldn	[%l7 + PC_OFF], %g1
826*5d9d9091SRichard Lowe	ldn	[%l7 + nPC_OFF], %g2
827*5d9d9091SRichard Lowe	ldx	[%l7 + TSTATE_OFF], %l0
828*5d9d9091SRichard Lowe	andn	%l0, TSTATE_CWP, %g7
829*5d9d9091SRichard Lowe	wrpr	%g1, %tpc
830*5d9d9091SRichard Lowe	wrpr	%g2, %tnpc
831*5d9d9091SRichard Lowe	restore
832*5d9d9091SRichard Lowe	!
833*5d9d9091SRichard Lowe	! set %tstate to the correct %cwp
834*5d9d9091SRichard Lowe	! retry resumes prom execution
835*5d9d9091SRichard Lowe	!
836*5d9d9091SRichard Lowe	rdpr	%cwp, %g1
837*5d9d9091SRichard Lowe	wrpr	%g1, %g7, %tstate
838*5d9d9091SRichard Lowe	retry
839*5d9d9091SRichard Lowe	/* NOTREACHED */
840*5d9d9091SRichard Lowe	SET_SIZE(priv_rtt)
841*5d9d9091SRichard Lowe	SET_SIZE(ktl0)
842*5d9d9091SRichard Lowe
843*5d9d9091SRichard Lowe#ifdef DEBUG
844*5d9d9091SRichard Lowe	.seg	".data"
845*5d9d9091SRichard Lowe	.align	4
846*5d9d9091SRichard Lowe
847*5d9d9091SRichard Lowe	.global bad_g4_called
848*5d9d9091SRichard Lowebad_g4_called:
849*5d9d9091SRichard Lowe	.word	0
850*5d9d9091SRichard Lowe
851*5d9d9091SRichard Lowesys_trap_wrong_pil:
852*5d9d9091SRichard Lowe	.asciz	"sys_trap: %g4(%d) is lower than %pil(%d)"
853*5d9d9091SRichard Lowe	.align	4
854*5d9d9091SRichard Lowe	.seg	".text"
855*5d9d9091SRichard Lowe
856*5d9d9091SRichard Lowe	ENTRY_NP(bad_g4)
857*5d9d9091SRichard Lowe	mov	%o1, %o0
858*5d9d9091SRichard Lowe	mov	%o2, %o1
859*5d9d9091SRichard Lowe	call	panic
860*5d9d9091SRichard Lowe	mov	%o3, %o2
861*5d9d9091SRichard Lowe	SET_SIZE(bad_g4)
862*5d9d9091SRichard Lowe#endif /* DEBUG */
863*5d9d9091SRichard Lowe
864*5d9d9091SRichard Lowe/*
865*5d9d9091SRichard Lowe * sys_tl1_panic can be called by traps at tl1 which
866*5d9d9091SRichard Lowe * really want to panic, but need the rearrangement of
867*5d9d9091SRichard Lowe * the args as provided by this wrapper routine.
868*5d9d9091SRichard Lowe */
869*5d9d9091SRichard Lowe	ENTRY_NP(sys_tl1_panic)
870*5d9d9091SRichard Lowe	mov	%o1, %o0
871*5d9d9091SRichard Lowe	mov	%o2, %o1
872*5d9d9091SRichard Lowe	call	panic
873*5d9d9091SRichard Lowe	mov	%o3, %o2
874*5d9d9091SRichard Lowe	SET_SIZE(sys_tl1_panic)
875*5d9d9091SRichard Lowe
876*5d9d9091SRichard Lowe/*
877*5d9d9091SRichard Lowe * Turn on or off bits in the auxiliary i/o register.
878*5d9d9091SRichard Lowe *
879*5d9d9091SRichard Lowe * set_auxioreg(bit, flag)
880*5d9d9091SRichard Lowe *	int bit;		bit mask in aux i/o reg
881*5d9d9091SRichard Lowe *	int flag;		0 = off, otherwise on
882*5d9d9091SRichard Lowe *
883*5d9d9091SRichard Lowe * This is intrinsically ugly but is used by the floppy driver.  It is also
884*5d9d9091SRichard Lowe * used to turn the LED on and off.
885*5d9d9091SRichard Lowe */
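/*
 * For example, with whatever LED bit mask the platform headers define,
 * set_auxioreg(led_bit, 1) lights the LED and set_auxioreg(led_bit, 0)
 * turns it off.
 */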
886*5d9d9091SRichard Lowe
887*5d9d9091SRichard Lowe	.seg	".data"
888*5d9d9091SRichard Lowe	.align	4
889*5d9d9091SRichard Loweauxio_panic:
890*5d9d9091SRichard Lowe	.asciz	"set_auxioreg: interrupts already disabled on entry"
891*5d9d9091SRichard Lowe	.align	4
892*5d9d9091SRichard Lowe	.seg	".text"
893*5d9d9091SRichard Lowe
894*5d9d9091SRichard Lowe	ENTRY_NP(set_auxioreg)
895*5d9d9091SRichard Lowe	/*
896*5d9d9091SRichard Lowe	 * o0 = bit mask
897*5d9d9091SRichard Lowe	 * o1 = flag: 0 = off, otherwise on
898*5d9d9091SRichard Lowe	 *
899*5d9d9091SRichard Lowe	 * disable interrupts while updating auxioreg
900*5d9d9091SRichard Lowe	 */
901*5d9d9091SRichard Lowe	rdpr	%pstate, %o2
902*5d9d9091SRichard Lowe#ifdef	DEBUG
903*5d9d9091SRichard Lowe	andcc	%o2, PSTATE_IE, %g0	/* if interrupts already */
904*5d9d9091SRichard Lowe	bnz,a,pt %icc, 1f		/* disabled, panic */
905*5d9d9091SRichard Lowe	  nop
906*5d9d9091SRichard Lowe	sethi	%hi(auxio_panic), %o0
907*5d9d9091SRichard Lowe	call	panic
908*5d9d9091SRichard Lowe	  or	%o0, %lo(auxio_panic), %o0
909*5d9d9091SRichard Lowe1:
910*5d9d9091SRichard Lowe#endif /* DEBUG */
911*5d9d9091SRichard Lowe	wrpr	%o2, PSTATE_IE, %pstate		/* disable interrupts */
912*5d9d9091SRichard Lowe	sethi	%hi(v_auxio_addr), %o3
913*5d9d9091SRichard Lowe	ldn	[%o3 + %lo(v_auxio_addr)], %o4
914*5d9d9091SRichard Lowe	ldub	[%o4], %g1			/* read aux i/o register */
915*5d9d9091SRichard Lowe	tst	%o1
916*5d9d9091SRichard Lowe	bnz,a	2f
917*5d9d9091SRichard Lowe	 bset	%o0, %g1		/* on */
918*5d9d9091SRichard Lowe	bclr	%o0, %g1		/* off */
919*5d9d9091SRichard Lowe2:
920*5d9d9091SRichard Lowe	or	%g1, AUX_MBO, %g1	/* Must Be Ones */
921*5d9d9091SRichard Lowe	stb	%g1, [%o4]		/* write aux i/o register */
922*5d9d9091SRichard Lowe	retl
923*5d9d9091SRichard Lowe	 wrpr	%g0, %o2, %pstate	/* enable interrupt */
924*5d9d9091SRichard Lowe	SET_SIZE(set_auxioreg)
925*5d9d9091SRichard Lowe
926*5d9d9091SRichard Lowe/*
927*5d9d9091SRichard Lowe * Flush all windows to memory, except for the one we entered in.
928*5d9d9091SRichard Lowe * We do this by doing NWINDOW-2 saves then the same number of restores.
929*5d9d9091SRichard Lowe * This leaves the WIM immediately before the window we entered in.
930*5d9d9091SRichard Lowe * This is used for context switching.
931*5d9d9091SRichard Lowe */
932*5d9d9091SRichard Lowe
933*5d9d9091SRichard Lowe	ENTRY_NP(flush_windows)
934*5d9d9091SRichard Lowe	retl
935*5d9d9091SRichard Lowe	flushw
936*5d9d9091SRichard Lowe	SET_SIZE(flush_windows)
937*5d9d9091SRichard Lowe
938*5d9d9091SRichard Lowe	ENTRY_NP(debug_flush_windows)
939*5d9d9091SRichard Lowe	set	nwindows, %g1
940*5d9d9091SRichard Lowe	ld	[%g1], %g1
941*5d9d9091SRichard Lowe	mov	%g1, %g2
942*5d9d9091SRichard Lowe
943*5d9d9091SRichard Lowe1:
944*5d9d9091SRichard Lowe	save	%sp, -WINDOWSIZE, %sp
945*5d9d9091SRichard Lowe	brnz	%g2, 1b
946*5d9d9091SRichard Lowe	dec	%g2
947*5d9d9091SRichard Lowe
948*5d9d9091SRichard Lowe	mov	%g1, %g2
949*5d9d9091SRichard Lowe2:
950*5d9d9091SRichard Lowe	restore
951*5d9d9091SRichard Lowe	brnz	%g2, 2b
952*5d9d9091SRichard Lowe	dec	%g2
953*5d9d9091SRichard Lowe
954*5d9d9091SRichard Lowe	retl
955*5d9d9091SRichard Lowe	nop
956*5d9d9091SRichard Lowe
957*5d9d9091SRichard Lowe	SET_SIZE(debug_flush_windows)
958*5d9d9091SRichard Lowe
959*5d9d9091SRichard Lowe/*
960*5d9d9091SRichard Lowe * flush user windows to memory.
961*5d9d9091SRichard Lowe */
962*5d9d9091SRichard Lowe
963*5d9d9091SRichard Lowe	ENTRY_NP(flush_user_windows)
964*5d9d9091SRichard Lowe	rdpr	%otherwin, %g1
965*5d9d9091SRichard Lowe	brz	%g1, 3f
966*5d9d9091SRichard Lowe	clr	%g2
967*5d9d9091SRichard Lowe1:
968*5d9d9091SRichard Lowe	save	%sp, -WINDOWSIZE, %sp
969*5d9d9091SRichard Lowe	rdpr	%otherwin, %g1
970*5d9d9091SRichard Lowe	brnz	%g1, 1b
971*5d9d9091SRichard Lowe	add	%g2, 1, %g2
972*5d9d9091SRichard Lowe2:
973*5d9d9091SRichard Lowe	sub	%g2, 1, %g2		! restore back to orig window
974*5d9d9091SRichard Lowe	brnz	%g2, 2b
975*5d9d9091SRichard Lowe	restore
976*5d9d9091SRichard Lowe3:
977*5d9d9091SRichard Lowe	retl
978*5d9d9091SRichard Lowe	nop
979*5d9d9091SRichard Lowe	SET_SIZE(flush_user_windows)
980*5d9d9091SRichard Lowe
981*5d9d9091SRichard Lowe/*
982*5d9d9091SRichard Lowe * Throw out any user windows in the register file.
983*5d9d9091SRichard Lowe * Used by setregs (exec) to clean out the old user's windows.
984*5d9d9091SRichard Lowe * Used by sigcleanup to remove extraneous windows when returning from a
985*5d9d9091SRichard Lowe * signal.
986*5d9d9091SRichard Lowe */
987*5d9d9091SRichard Lowe
988*5d9d9091SRichard Lowe	ENTRY_NP(trash_user_windows)
989*5d9d9091SRichard Lowe	rdpr	%otherwin, %g1
990*5d9d9091SRichard Lowe	brz	%g1, 3f			! no user windows?
991*5d9d9091SRichard Lowe	ldn	[THREAD_REG + T_STACK], %g5
992*5d9d9091SRichard Lowe
993*5d9d9091SRichard Lowe	!
994*5d9d9091SRichard Lowe	! There are old user windows in the register file. We disable ints
995*5d9d9091SRichard Lowe	! and increment cansave so that we don't overflow on these windows.
996*5d9d9091SRichard Lowe	! Also, this sets up a nice underflow when first returning to the
997*5d9d9091SRichard Lowe	! new user.
998*5d9d9091SRichard Lowe	!
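	! In effect (a sketch): %cansave += %otherwin; %otherwin = 0; -- the
	! stale user windows are reclassified as free windows and will simply
	! be overwritten rather than spilled.
	!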
999*5d9d9091SRichard Lowe	rdpr	%pstate, %g2
1000*5d9d9091SRichard Lowe	wrpr	%g2, PSTATE_IE, %pstate
1001*5d9d9091SRichard Lowe	rdpr	%cansave, %g3
1002*5d9d9091SRichard Lowe	rdpr	%otherwin, %g1		! re-read in case of interrupt
1003*5d9d9091SRichard Lowe	add	%g3, %g1, %g3
1004*5d9d9091SRichard Lowe	wrpr	%g0, 0, %otherwin
1005*5d9d9091SRichard Lowe	wrpr	%g0, %g3, %cansave
1006*5d9d9091SRichard Lowe	wrpr	%g0, %g2, %pstate
1007*5d9d9091SRichard Lowe3:
1008*5d9d9091SRichard Lowe	retl
1009*5d9d9091SRichard Lowe 	clr     [%g5 + MPCB_WBCNT]       ! zero window buffer cnt
1010*5d9d9091SRichard Lowe	SET_SIZE(trash_user_windows)
1011*5d9d9091SRichard Lowe
1012*5d9d9091SRichard Lowe
1013*5d9d9091SRichard Lowe/*
1014*5d9d9091SRichard Lowe * Setup g7 via the CPU data structure.
1015*5d9d9091SRichard Lowe */
1016*5d9d9091SRichard Lowe
1017*5d9d9091SRichard Lowe	ENTRY_NP(set_tbr)
1018*5d9d9091SRichard Lowe	retl
1019*5d9d9091SRichard Lowe	ta	72		! no tbr, stop simulation
1020*5d9d9091SRichard Lowe	SET_SIZE(set_tbr)
1021*5d9d9091SRichard Lowe
1022*5d9d9091SRichard Lowe
1023*5d9d9091SRichard Lowe#define	PTL1_SAVE_WINDOW(RP)						\
1024*5d9d9091SRichard Lowe	stxa	%l0, [RP + RW64_LOCAL + (0 * RW64_LOCAL_INCR)] %asi;	\
1025*5d9d9091SRichard Lowe	stxa	%l1, [RP + RW64_LOCAL + (1 * RW64_LOCAL_INCR)] %asi;	\
1026*5d9d9091SRichard Lowe	stxa	%l2, [RP + RW64_LOCAL + (2 * RW64_LOCAL_INCR)] %asi;	\
1027*5d9d9091SRichard Lowe	stxa	%l3, [RP + RW64_LOCAL + (3 * RW64_LOCAL_INCR)] %asi;	\
1028*5d9d9091SRichard Lowe	stxa	%l4, [RP + RW64_LOCAL + (4 * RW64_LOCAL_INCR)] %asi;	\
1029*5d9d9091SRichard Lowe	stxa	%l5, [RP + RW64_LOCAL + (5 * RW64_LOCAL_INCR)] %asi;	\
1030*5d9d9091SRichard Lowe	stxa	%l6, [RP + RW64_LOCAL + (6 * RW64_LOCAL_INCR)] %asi;	\
1031*5d9d9091SRichard Lowe	stxa	%l7, [RP + RW64_LOCAL + (7 * RW64_LOCAL_INCR)] %asi;	\
1032*5d9d9091SRichard Lowe	stxa	%i0, [RP + RW64_IN + (0 * RW64_IN_INCR)] %asi;		\
1033*5d9d9091SRichard Lowe	stxa	%i1, [RP + RW64_IN + (1 * RW64_IN_INCR)] %asi;		\
1034*5d9d9091SRichard Lowe	stxa	%i2, [RP + RW64_IN + (2 * RW64_IN_INCR)] %asi;		\
1035*5d9d9091SRichard Lowe	stxa	%i3, [RP + RW64_IN + (3 * RW64_IN_INCR)] %asi;		\
1036*5d9d9091SRichard Lowe	stxa	%i4, [RP + RW64_IN + (4 * RW64_IN_INCR)] %asi;		\
1037*5d9d9091SRichard Lowe	stxa	%i5, [RP + RW64_IN + (5 * RW64_IN_INCR)] %asi;		\
1038*5d9d9091SRichard Lowe	stxa	%i6, [RP + RW64_IN + (6 * RW64_IN_INCR)] %asi;		\
1039*5d9d9091SRichard Lowe	stxa	%i7, [RP + RW64_IN + (7 * RW64_IN_INCR)] %asi
1040*5d9d9091SRichard Lowe#define	PTL1_NEXT_WINDOW(scr)	\
1041*5d9d9091SRichard Lowe	add	scr, RWIN64SIZE, scr
1042*5d9d9091SRichard Lowe
1043*5d9d9091SRichard Lowe#define	PTL1_RESET_RWINDOWS(scr)			\
1044*5d9d9091SRichard Lowe	sethi	%hi(nwin_minus_one), scr;		\
1045*5d9d9091SRichard Lowe	ld	[scr + %lo(nwin_minus_one)], scr;	\
1046*5d9d9091SRichard Lowe	wrpr	scr, %cleanwin;				\
1047*5d9d9091SRichard Lowe	dec	scr;					\
1048*5d9d9091SRichard Lowe	wrpr	scr, %cansave;				\
1049*5d9d9091SRichard Lowe	wrpr	%g0, %canrestore;			\
1050*5d9d9091SRichard Lowe	wrpr	%g0, %otherwin
1051*5d9d9091SRichard Lowe
1052*5d9d9091SRichard Lowe#define	PTL1_DCACHE_LINE_SIZE	4	/* small enough for all CPUs */
1053*5d9d9091SRichard Lowe
1054*5d9d9091SRichard Lowe/*
1055*5d9d9091SRichard Lowe * ptl1_panic is called when the kernel detects that it is in an invalid state
1056*5d9d9091SRichard Lowe * and the trap level is greater than 0.  ptl1_panic is responsible for saving
1057*5d9d9091SRichard Lowe * the current CPU state, restoring the CPU state to normal, and calling panic.
1058*5d9d9091SRichard Lowe * The CPU state must be saved reliably without causing traps.  ptl1_panic saves
1059*5d9d9091SRichard Lowe * it in the ptl1_state structure, which is a member of the machcpu structure.
1060*5d9d9091SRichard Lowe * In order to access the ptl1_state structure without causing traps, physical
1061*5d9d9091SRichard Lowe * addresses are used so that we can avoid MMU miss traps.  The restriction of
1062*5d9d9091SRichard Lowe * physical memory accesses is that the ptl1_state structure must be on a single
1063*5d9d9091SRichard Lowe * physical page.  This is because (1) a single physical address for each
1064*5d9d9091SRichard Lowe * ptl1_state structure is needed and (2) it simplifies physical address
1065*5d9d9091SRichard Lowe * calculation for each member of the structure.
1066*5d9d9091SRichard Lowe * ptl1_panic is a likely spot for stack overflows to wind up; thus, the current
1067*5d9d9091SRichard Lowe * stack may not be usable.  In order to call panic reliably in such a state,
1068*5d9d9091SRichard Lowe * each CPU needs a dedicated ptl1 panic stack.
1069*5d9d9091SRichard Lowe * CPU_ALLOC_SIZE, which is defined to be MMU_PAGESIZE, is used to allocate the
1070*5d9d9091SRichard Lowe * cpu structure and a ptl1 panic stack.  They are put together on the same page
1071*5d9d9091SRichard Lowe * for memory space efficiency.  The low address part is used for the cpu
1072*5d9d9091SRichard Lowe * structure, and the high address part is for a ptl1 panic stack.
1073*5d9d9091SRichard Lowe * The cpu_pa array holds the physical addresses of the allocated cpu structures,
1074*5d9d9091SRichard Lowe * just as the cpu array holds their virtual addresses.
1075*5d9d9091SRichard Lowe *
1076*5d9d9091SRichard Lowe * %g1 reason to be called
1077*5d9d9091SRichard Lowe * %g2 broken
1078*5d9d9091SRichard Lowe * %g3 broken
1079*5d9d9091SRichard Lowe */
1080*5d9d9091SRichard Lowe	ENTRY_NP(ptl1_panic)
1081*5d9d9091SRichard Lowe	!
1082*5d9d9091SRichard Lowe	! flush D$ first, so that stale data will not be accessed later.
1083*5d9d9091SRichard Lowe	! Data written via ASI_MEM bypasses D$.  If D$ contains data at the same
1084*5d9d9091SRichard Lowe	! address where data was written via ASI_MEM, a load from that address
1085*5d9d9091SRichard Lowe	! using a virtual address and the default ASI would still return the old data.
1086*5d9d9091SRichard Lowe	! Flushing D$ erases old data in D$, so that it will not be loaded.
1087*5d9d9091SRichard Lowe	! Since we can afford only 2 registers (%g2 and %g3) for this job, we
1088*5d9d9091SRichard Lowe	! flush entire D$.
1089*5d9d9091SRichard Lowe	! For FJ OPL processors (IMPL values < SPITFIRE_IMPL), DC flushing
1090*5d9d9091SRichard Lowe	! is not needed.
1091*5d9d9091SRichard Lowe	!
1092*5d9d9091SRichard Lowe	GET_CPU_IMPL(%g2)
1093*5d9d9091SRichard Lowe	cmp	%g2, SPITFIRE_IMPL
1094*5d9d9091SRichard Lowe	blt,pn	%icc, 1f		! Skip flushing for OPL processors
1095*5d9d9091SRichard Lowe	 nop
1096*5d9d9091SRichard Lowe	sethi	%hi(dcache_size), %g2
1097*5d9d9091SRichard Lowe	ld	[%g2 + %lo(dcache_size)], %g2
1098*5d9d9091SRichard Lowe	sethi	%hi(dcache_linesize), %g3
1099*5d9d9091SRichard Lowe	ld	[%g3 + %lo(dcache_linesize)], %g3
1100*5d9d9091SRichard Lowe	sub	%g2, %g3, %g2
1101*5d9d9091SRichard Lowe0:	stxa	%g0, [%g2] ASI_DC_TAG
1102*5d9d9091SRichard Lowe	membar	#Sync
1103*5d9d9091SRichard Lowe	brnz,pt	%g2, 0b
1104*5d9d9091SRichard Lowe	  sub	%g2, %g3, %g2
1105*5d9d9091SRichard Lowe1:
1106*5d9d9091SRichard Lowe	!
1107*5d9d9091SRichard Lowe	! increment the entry counter.
1108*5d9d9091SRichard Lowe	! save CPU state if this is the first entry.
1109*5d9d9091SRichard Lowe	!
1110*5d9d9091SRichard Lowe	CPU_PADDR(%g2, %g3);
1111*5d9d9091SRichard Lowe	add	%g2, CPU_PTL1, %g2		! pstate = &CPU->mcpu.ptl1_state
1112*5d9d9091SRichard Lowe	wr	%g0, ASI_MEM, %asi		! physical address access
1113*5d9d9091SRichard Lowe	!
1114*5d9d9091SRichard Lowe	! pstate->ptl1_entry_count++
1115*5d9d9091SRichard Lowe	!
1116*5d9d9091SRichard Lowe	lduwa	[%g2 + PTL1_ENTRY_COUNT] %asi, %g3
1117*5d9d9091SRichard Lowe	add	%g3, 1, %g3
1118*5d9d9091SRichard Lowe	stuwa	%g3, [%g2 + PTL1_ENTRY_COUNT] %asi
1119*5d9d9091SRichard Lowe	!
1120*5d9d9091SRichard Lowe	! CPU state saving is skipped from the 2nd entry to ptl1_panic since we
1121*5d9d9091SRichard Lowe	! do not want to clobber the state from the original failure.  panic()
1122*5d9d9091SRichard Lowe	! is responsible for handling multiple or recursive panics.
1123*5d9d9091SRichard Lowe	!
1124*5d9d9091SRichard Lowe	cmp	%g3, 2				! if (ptl1_entry_count >= 2)
1125*5d9d9091SRichard Lowe	bge,pn	%icc, state_saved		!	goto state_saved
1126*5d9d9091SRichard Lowe	  add	%g2, PTL1_REGS, %g3		! %g3 = &pstate->ptl1_regs[0]
1127*5d9d9091SRichard Lowe	!
1128*5d9d9091SRichard Lowe	! save CPU state
1129*5d9d9091SRichard Lowe	!
1130*5d9d9091SRichard Lowesave_cpu_state:
1131*5d9d9091SRichard Lowe	! save current global registers
1132*5d9d9091SRichard Lowe	! so that all of them become available for use
1133*5d9d9091SRichard Lowe	!
1134*5d9d9091SRichard Lowe	stxa	%g1, [%g3 + PTL1_G1] %asi
1135*5d9d9091SRichard Lowe	stxa	%g2, [%g3 + PTL1_G2] %asi
1136*5d9d9091SRichard Lowe	stxa	%g3, [%g3 + PTL1_G3] %asi
1137*5d9d9091SRichard Lowe	stxa	%g4, [%g3 + PTL1_G4] %asi
1138*5d9d9091SRichard Lowe	stxa	%g5, [%g3 + PTL1_G5] %asi
1139*5d9d9091SRichard Lowe	stxa	%g6, [%g3 + PTL1_G6] %asi
1140*5d9d9091SRichard Lowe	stxa	%g7, [%g3 + PTL1_G7] %asi
1141*5d9d9091SRichard Lowe	!
1142*5d9d9091SRichard Lowe	! %tl, %tt, %tstate, %tpc, %tnpc for each TL
1143*5d9d9091SRichard Lowe	!
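	!
	! In outline (a sketch of the loop below): for each trap level from
	! the current %tl down to 1,
	!
	!	select that level (wrpr to %tl);
	!	save %tl, %tt, %tstate, %tpc and %tnpc into the next
	!	    ptl1_trap_regs[] slot;
	!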
1144*5d9d9091SRichard Lowe	rdpr	%tl, %g1
1145*5d9d9091SRichard Lowe	brz	%g1, 1f				! if(trap_level == 0) -------+
1146*5d9d9091SRichard Lowe	add	%g3, PTL1_TRAP_REGS, %g4	! %g4 = &ptl1_trap_regs[0];  !
1147*5d9d9091SRichard Lowe0:						! -----------<----------+    !
1148*5d9d9091SRichard Lowe	stwa	%g1, [%g4 + PTL1_TL] %asi				!    !
1149*5d9d9091SRichard Lowe	rdpr	%tt, %g5						!    !
1150*5d9d9091SRichard Lowe	stwa	%g5, [%g4 + PTL1_TT] %asi				!    !
1151*5d9d9091SRichard Lowe	rdpr	%tstate, %g5						!    !
1152*5d9d9091SRichard Lowe	stxa	%g5, [%g4 + PTL1_TSTATE] %asi				!    !
1153*5d9d9091SRichard Lowe	rdpr	%tpc, %g5						!    !
1154*5d9d9091SRichard Lowe	stxa	%g5, [%g4 + PTL1_TPC] %asi				!    !
1155*5d9d9091SRichard Lowe	rdpr	%tnpc, %g5						!    !
1156*5d9d9091SRichard Lowe	stxa	%g5, [%g4 + PTL1_TNPC] %asi				!    !
1157*5d9d9091SRichard Lowe	add	%g4, PTL1_TRAP_REGS_INCR, %g4				!    !
1158*5d9d9091SRichard Lowe	deccc	%g1							!    !
1159*5d9d9091SRichard Lowe	bnz,a,pt %icc, 0b			! if(trap_level != 0) --+    !
1160*5d9d9091SRichard Lowe	  wrpr	%g1, %tl						     !
1161*5d9d9091SRichard Lowe1:						! ----------<----------------+
1162*5d9d9091SRichard Lowe	!
1163*5d9d9091SRichard Lowe	! %pstate, %pil, SOFTINT, (S)TICK
1164*5d9d9091SRichard Lowe	! Pending interrupts are also cleared in order to avoid a recursive call
1165*5d9d9091SRichard Lowe	! to ptl1_panic in case the interrupt handler causes a panic.
1166*5d9d9091SRichard Lowe	!
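	!
	! Roughly (a sketch):
	!
	!	save %pil, %pstate and SOFTINT into the ptl1_state area;
	!	clear SOFTINT by writing the saved value to CLEAR_SOFTINT;
	!	save (traptrace_use_stick ? STICK : %tick) at PTL1_TICK.
	!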
1167*5d9d9091SRichard Lowe	rdpr	%pil, %g1
1168*5d9d9091SRichard Lowe	stba	%g1, [%g3 + PTL1_PIL] %asi
1169*5d9d9091SRichard Lowe	rdpr	%pstate, %g1
1170*5d9d9091SRichard Lowe	stha	%g1, [%g3 + PTL1_PSTATE] %asi
1171*5d9d9091SRichard Lowe	rd	SOFTINT, %g1
1172*5d9d9091SRichard Lowe	sta	%g1, [%g3 + PTL1_SOFTINT] %asi
1173*5d9d9091SRichard Lowe	wr	%g1, CLEAR_SOFTINT
1174*5d9d9091SRichard Lowe	sethi   %hi(traptrace_use_stick), %g1
1175*5d9d9091SRichard Lowe	ld      [%g1 + %lo(traptrace_use_stick)], %g1
1176*5d9d9091SRichard Lowe	brz,a,pn %g1, 2f
1177*5d9d9091SRichard Lowe	  rdpr	%tick, %g1
1178*5d9d9091SRichard Lowe	rd	STICK, %g1
1179*5d9d9091SRichard Lowe2:	stxa	%g1, [%g3 + PTL1_TICK] %asi
1180*5d9d9091SRichard Lowe
1181*5d9d9091SRichard Lowe	!
1182*5d9d9091SRichard Lowe	! MMU registers because ptl1_panic may be called from
1183*5d9d9091SRichard Lowe	! the MMU trap handlers.
1184*5d9d9091SRichard Lowe	!
1185*5d9d9091SRichard Lowe	mov     MMU_SFAR, %g1
1186*5d9d9091SRichard Lowe	ldxa    [%g1]ASI_DMMU, %g4
1187*5d9d9091SRichard Lowe	stxa	%g4, [%g3 + PTL1_DMMU_SFAR]%asi
1188*5d9d9091SRichard Lowe	mov     MMU_SFSR, %g1
1189*5d9d9091SRichard Lowe	ldxa    [%g1]ASI_DMMU, %g4
1190*5d9d9091SRichard Lowe	stxa	%g4, [%g3 + PTL1_DMMU_SFSR]%asi
1191*5d9d9091SRichard Lowe	ldxa    [%g1]ASI_IMMU, %g4
1192*5d9d9091SRichard Lowe	stxa	%g4, [%g3 + PTL1_IMMU_SFSR]%asi
1193*5d9d9091SRichard Lowe	mov     MMU_TAG_ACCESS, %g1
1194*5d9d9091SRichard Lowe	ldxa    [%g1]ASI_DMMU, %g4
1195*5d9d9091SRichard Lowe	stxa	%g4, [%g3 + PTL1_DMMU_TAG_ACCESS]%asi
1196*5d9d9091SRichard Lowe	ldxa    [%g1]ASI_IMMU, %g4
1197*5d9d9091SRichard Lowe	stxa	%g4, [%g3 + PTL1_IMMU_TAG_ACCESS]%asi
1198*5d9d9091SRichard Lowe
1199*5d9d9091SRichard Lowe	!
1200*5d9d9091SRichard Lowe	! Save register window state and register windows.
1201*5d9d9091SRichard Lowe	!
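	!
	! In outline (a sketch of the code below):
	!
	!	save %cwp, %wstate, %otherwin, %cleanwin, %cansave, %canrestore;
	!	starting at %cwp = 0, copy each window's %l0-%l7/%i0-%i7 into
	!	ptl1_rwindow[], advancing %cwp until it reaches MAXWIN or the
	!	written %cwp no longer reads back unchanged (i.e. all
	!	implemented windows have been saved).
	!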
1202*5d9d9091SRichard Lowe	rdpr	%cwp, %g1
1203*5d9d9091SRichard Lowe	stba	%g1, [%g3 + PTL1_CWP] %asi
1204*5d9d9091SRichard Lowe	rdpr	%wstate, %g1
1205*5d9d9091SRichard Lowe	stba	%g1, [%g3 + PTL1_WSTATE] %asi
1206*5d9d9091SRichard Lowe	rdpr	%otherwin, %g1
1207*5d9d9091SRichard Lowe	stba	%g1, [%g3 + PTL1_OTHERWIN] %asi
1208*5d9d9091SRichard Lowe	rdpr	%cleanwin, %g1
1209*5d9d9091SRichard Lowe	stba	%g1, [%g3 + PTL1_CLEANWIN] %asi
1210*5d9d9091SRichard Lowe	rdpr	%cansave, %g1
1211*5d9d9091SRichard Lowe	stba	%g1, [%g3 + PTL1_CANSAVE] %asi
1212*5d9d9091SRichard Lowe	rdpr	%canrestore, %g1
1213*5d9d9091SRichard Lowe	stba	%g1, [%g3 + PTL1_CANRESTORE] %asi
1214*5d9d9091SRichard Lowe
1215*5d9d9091SRichard Lowe	PTL1_RESET_RWINDOWS(%g1)
1216*5d9d9091SRichard Lowe	clr	%g1
1217*5d9d9091SRichard Lowe	wrpr	%g1, %cwp
1218*5d9d9091SRichard Lowe	add	%g3, PTL1_RWINDOW, %g4		! %g4 = &ptl1_rwindow[0];
1219*5d9d9091SRichard Lowe
1220*5d9d9091SRichard Lowe3:	PTL1_SAVE_WINDOW(%g4)	! <-------------+
1221*5d9d9091SRichard Lowe	inc	%g1				!
1222*5d9d9091SRichard Lowe	cmp	%g1, MAXWIN			!
1223*5d9d9091SRichard Lowe	bgeu,pn	%icc, 5f			!
1224*5d9d9091SRichard Lowe	wrpr	%g1, %cwp			!
1225*5d9d9091SRichard Lowe	rdpr	%cwp, %g2			!
1226*5d9d9091SRichard Lowe	cmp	%g1, %g2			! saturation check
1227*5d9d9091SRichard Lowe	be,pt	%icc, 3b			!
1228*5d9d9091SRichard Lowe	  PTL1_NEXT_WINDOW(%g4)		! ------+
1229*5d9d9091SRichard Lowe5:
1230*5d9d9091SRichard Lowe	!
1231*5d9d9091SRichard Lowe	! The most crucial CPU state has been saved.
1232*5d9d9091SRichard Lowe	! Proceed to return to TL = 0.
1233*5d9d9091SRichard Lowe	!
1234*5d9d9091SRichard Lowestate_saved:
1235*5d9d9091SRichard Lowe	wrpr	%g0, 1, %tl
1236*5d9d9091SRichard Lowe	wrpr	%g0, PIL_MAX, %pil
1237*5d9d9091SRichard Lowe	!
1238*5d9d9091SRichard Lowe	PTL1_RESET_RWINDOWS(%g1)
1239*5d9d9091SRichard Lowe	wrpr	%g0, %cwp
1240*5d9d9091SRichard Lowe	wrpr	%g0, %cleanwin
1241*5d9d9091SRichard Lowe	wrpr	%g0, WSTATE_KERN, %wstate
1242*5d9d9091SRichard Lowe	!
1243*5d9d9091SRichard Lowe	! Set pcontext to run kernel.
1244*5d9d9091SRichard Lowe	!
1245*5d9d9091SRichard Lowe	! For OPL, load kcontextreg instead of clearing the primary
1246*5d9d9091SRichard Lowe	! context register.  This is to avoid changing nucleus page
1247*5d9d9091SRichard Lowe	! size bits after boot initialization.
1248*5d9d9091SRichard Lowe	!
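	!
	! Sketch of the sequence below:
	!
	!	demap the entire D-TLB and I-TLB (DEMAP_ALL_TYPE);
	!	PCONTEXT = OPL ? kcontextreg : 0;
	!	flush (FLUSH_ADDR), as required after the MMU register write.
	!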
1249*5d9d9091SRichard Lowe#ifdef _OPL
1250*5d9d9091SRichard Lowe	sethi	%hi(kcontextreg), %g4
1251*5d9d9091SRichard Lowe	ldx	[%g4 + %lo(kcontextreg)], %g4
1252*5d9d9091SRichard Lowe#endif /* _OPL */
1253*5d9d9091SRichard Lowe
1254*5d9d9091SRichard Lowe	set	DEMAP_ALL_TYPE, %g1
1255*5d9d9091SRichard Lowe	sethi	%hi(FLUSH_ADDR), %g3
1256*5d9d9091SRichard Lowe	set	MMU_PCONTEXT, %g2
1257*5d9d9091SRichard Lowe
1258*5d9d9091SRichard Lowe	stxa	%g0, [%g1]ASI_DTLB_DEMAP
1259*5d9d9091SRichard Lowe	stxa	%g0, [%g1]ASI_ITLB_DEMAP
1260*5d9d9091SRichard Lowe
1261*5d9d9091SRichard Lowe#ifdef _OPL
1262*5d9d9091SRichard Lowe	stxa	%g4, [%g2]ASI_MMU_CTX
1263*5d9d9091SRichard Lowe#else /* _OPL */
1264*5d9d9091SRichard Lowe	stxa	%g0, [%g2]ASI_MMU_CTX
1265*5d9d9091SRichard Lowe#endif /* _OPL */
1266*5d9d9091SRichard Lowe
1267*5d9d9091SRichard Lowe	flush	%g3
1268*5d9d9091SRichard Lowe
1269*5d9d9091SRichard Lowe	rdpr	%cwp, %g1
1270*5d9d9091SRichard Lowe	set	TSTATE_KERN, %g3
1271*5d9d9091SRichard Lowe	wrpr	%g3, %g1, %tstate
1272*5d9d9091SRichard Lowe	set	ptl1_panic_tl0, %g3
1273*5d9d9091SRichard Lowe	wrpr	%g0, %g3, %tnpc
1274*5d9d9091SRichard Lowe	done					! go to -->-+	TL:1
1275*5d9d9091SRichard Lowe							    !
1276*5d9d9091SRichard Loweptl1_panic_tl0:					! ----<-----+	TL:0
1277*5d9d9091SRichard Lowe	CPU_ADDR(%l0, %l1)			! %l0 = cpu[cpuid]
1278*5d9d9091SRichard Lowe	add	%l0, CPU_PTL1, %l1		! %l1 = &CPU->mcpu.ptl1_state
1279*5d9d9091SRichard Lowe	!
1280*5d9d9091SRichard Lowe	! prepare to call panic()
1281*5d9d9091SRichard Lowe	!
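	!
	! That is (a sketch): restore THREAD_REG (%g7) from the cpu structure,
	! switch to the per-CPU ptl1 panic stack (ptl1_stktop), set %pil to
	! CLOCK_LEVEL and %pstate to PSTATE_KERN, then call
	! ptl1_panic_handler(&CPU->mcpu.ptl1_state).
	!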
1282*5d9d9091SRichard Lowe	ldn	[%l0 + CPU_THREAD], THREAD_REG	! restore %g7
1283*5d9d9091SRichard Lowe	ldn	[%l1 + PTL1_STKTOP], %l2	! %sp = ptl1_stktop
1284*5d9d9091SRichard Lowe	sub	%l2, SA(MINFRAME) + STACK_BIAS, %sp
1285*5d9d9091SRichard Lowe	clr	%fp				! no frame below this window
1286*5d9d9091SRichard Lowe	clr	%i7
1287*5d9d9091SRichard Lowe	!
1288*5d9d9091SRichard Lowe	! enable limited interrupts
1289*5d9d9091SRichard Lowe	!
1290*5d9d9091SRichard Lowe	wrpr	%g0, CLOCK_LEVEL, %pil
1291*5d9d9091SRichard Lowe	wrpr	%g0, PSTATE_KERN, %pstate
1292*5d9d9091SRichard Lowe	!
1293*5d9d9091SRichard Lowe	ba,pt	%xcc, ptl1_panic_handler
1294*5d9d9091SRichard Lowe	  mov	%l1, %o0
1295*5d9d9091SRichard Lowe	/*NOTREACHED*/
1296*5d9d9091SRichard Lowe	SET_SIZE(ptl1_panic)
1297*5d9d9091SRichard Lowe
1298*5d9d9091SRichard Lowe#ifdef	PTL1_PANIC_DEBUG
1299*5d9d9091SRichard Lowe
1300*5d9d9091SRichard Lowe/*
1301*5d9d9091SRichard Lowe * ptl1_recurse() calls itself a number of times either to set up a known
1302*5d9d9091SRichard Lowe * stack or to cause a kernel stack overflow.  It decrements its arguments
1303*5d9d9091SRichard Lowe * on each recursion.
1304*5d9d9091SRichard Lowe * It's called by #ifdef PTL1_PANIC_DEBUG code in startup.c to set the
1305*5d9d9091SRichard Lowe * registers to a known state to facilitate debugging.
1306*5d9d9091SRichard Lowe */
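/*
 * Roughly, in C (illustrative only; the register seeding done on the
 * first, non-recursive call is omitted, and the parameter names simply
 * follow the comments below):
 *
 *	void
 *	ptl1_recurse(int depth_count, int trap_count)
 *	{
 *		if (trap_count == 0)
 *			trap to ptl1_panic (ta PTL1_DEBUG_TRAP);
 *		if (depth_count != 0)
 *			ptl1_recurse(depth_count - 1, trap_count - 1);
 *	}
 */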
1307*5d9d9091SRichard Lowe	ENTRY_NP(ptl1_recurse)
1308*5d9d9091SRichard Lowe	save    %sp, -SA(MINFRAME), %sp
1309*5d9d9091SRichard Lowe
1310*5d9d9091SRichard Lowe	set 	ptl1_recurse_call, %o7
1311*5d9d9091SRichard Lowe	cmp	%o7, %i7			! if ptl1_recurse is called
1312*5d9d9091SRichard Lowe	be,pt  %icc, 0f				! by itself, then skip
1313*5d9d9091SRichard Lowe	  nop					! register initialization
1314*5d9d9091SRichard Lowe
1315*5d9d9091SRichard Lowe	/*
1316*5d9d9091SRichard Lowe	 * Initialize Out Registers to Known Values
1317*5d9d9091SRichard Lowe	 */
1318*5d9d9091SRichard Lowe	set	0x01000, %l0			! %i0 is the ...
1319*5d9d9091SRichard Lowe						! recursion_depth_count
1320*5d9d9091SRichard Lowe	sub	%i0, 1, %o0;
1321*5d9d9091SRichard Lowe	sub 	%i1, 1, %o1;
1322*5d9d9091SRichard Lowe	add	%l0, %o0, %o2;
1323*5d9d9091SRichard Lowe	add	%l0, %o2, %o3;
1324*5d9d9091SRichard Lowe	add	%l0, %o3, %o4;
1325*5d9d9091SRichard Lowe	add	%l0, %o4, %o5;
1326*5d9d9091SRichard Lowe	ba,a	1f
1327*5d9d9091SRichard Lowe	  nop
1328*5d9d9091SRichard Lowe
1329*5d9d9091SRichard Lowe0:	/* Outs = Ins - 1 */
1330*5d9d9091SRichard Lowe	sub	%i0, 1, %o0;
1331*5d9d9091SRichard Lowe	sub	%i1, 1, %o1;
1332*5d9d9091SRichard Lowe	sub	%i2, 1, %o2;
1333*5d9d9091SRichard Lowe	sub	%i3, 1, %o3;
1334*5d9d9091SRichard Lowe	sub	%i4, 1, %o4;
1335*5d9d9091SRichard Lowe	sub	%i5, 1, %o5;
1336*5d9d9091SRichard Lowe
1337*5d9d9091SRichard Lowe	/* Locals = Ins + 1 */
1338*5d9d9091SRichard Lowe1:	add	%i0, 1, %l0;
1339*5d9d9091SRichard Lowe	add	%i1, 1, %l1;
1340*5d9d9091SRichard Lowe	add	%i2, 1, %l2;
1341*5d9d9091SRichard Lowe	add	%i3, 1, %l3;
1342*5d9d9091SRichard Lowe	add	%i4, 1, %l4;
1343*5d9d9091SRichard Lowe	add	%i5, 1, %l5;
1344*5d9d9091SRichard Lowe
1345*5d9d9091SRichard Lowe	set     0x0100000, %g5
1346*5d9d9091SRichard Lowe	add	%g5, %g0, %g1
1347*5d9d9091SRichard Lowe	add	%g5, %g1, %g2
1348*5d9d9091SRichard Lowe	add	%g5, %g2, %g3
1349*5d9d9091SRichard Lowe	add	%g5, %g3, %g4
1350*5d9d9091SRichard Lowe	add	%g5, %g4, %g5
1351*5d9d9091SRichard Lowe
1352*5d9d9091SRichard Lowe	brz,pn %i1, ptl1_recurse_trap		! if (trap_count == 0) {
1353*5d9d9091SRichard Lowe	  nop					!    trap to ptl1_panic
1354*5d9d9091SRichard Lowe						! }
1355*5d9d9091SRichard Lowe	brz,pn %i0, ptl1_recure_exit		! if (depth_count == 0) {
1356*5d9d9091SRichard Lowe	  nop					!    skip recursive call
1357*5d9d9091SRichard Lowe						! }
1358*5d9d9091SRichard Loweptl1_recurse_call:
1359*5d9d9091SRichard Lowe	call	ptl1_recurse
1360*5d9d9091SRichard Lowe	  nop
1361*5d9d9091SRichard Lowe
1362*5d9d9091SRichard Loweptl1_recure_exit:
1363*5d9d9091SRichard Lowe	ret
1364*5d9d9091SRichard Lowe	restore
1365*5d9d9091SRichard Lowe
1366*5d9d9091SRichard Loweptl1_recurse_trap:
1367*5d9d9091SRichard Lowe	ta	PTL1_DEBUG_TRAP; 		! Trap Always to ptl1_panic()
1368*5d9d9091SRichard Lowe	  nop 					! NOTREACHED
1369*5d9d9091SRichard Lowe        SET_SIZE(ptl1_recurse)
1370*5d9d9091SRichard Lowe
1371*5d9d9091SRichard Lowe	/*
1372*5d9d9091SRichard Lowe	 * Asm function to handle a cross trap to call ptl1_panic()
1373*5d9d9091SRichard Lowe	 */
1374*5d9d9091SRichard Lowe	ENTRY_NP(ptl1_panic_xt)
1375*5d9d9091SRichard Lowe	ba	ptl1_panic
1376*5d9d9091SRichard Lowe	  mov	PTL1_BAD_DEBUG, %g1
1377*5d9d9091SRichard Lowe        SET_SIZE(ptl1_panic_xt)
1378*5d9d9091SRichard Lowe
1379*5d9d9091SRichard Lowe#endif	/* PTL1_PANIC_DEBUG */
1380*5d9d9091SRichard Lowe
1381*5d9d9091SRichard Lowe#ifdef	TRAPTRACE
1382*5d9d9091SRichard Lowe
1383*5d9d9091SRichard Lowe	ENTRY_NP(trace_ptr_panic)
1384*5d9d9091SRichard Lowe	!
1385*5d9d9091SRichard Lowe	! Freeze the trap trace to disable its assertions; otherwise,
1386*5d9d9091SRichard Lowe	! ptl1_panic is likely to be called repeatedly from there.
1387*5d9d9091SRichard Lowe	! %g2 and %g3 are used as scratch registers in ptl1_panic.
1388*5d9d9091SRichard Lowe	!
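	!
	! In outline (a sketch):
	!
	!	trap_freeze = 1;
	!	if (trap_freeze_pc == NULL)	/* the atomic casn below */
	!		trap_freeze_pc = %g1;
	!	ptl1_panic(PTL1_BAD_TRACE_PTR);
	!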
1389*5d9d9091SRichard Lowe	mov	1, %g3
1390*5d9d9091SRichard Lowe	sethi	%hi(trap_freeze), %g2
1391*5d9d9091SRichard Lowe	st	%g3, [%g2 + %lo(trap_freeze)]
1392*5d9d9091SRichard Lowe	!
1393*5d9d9091SRichard Lowe	! %g1 contains the %pc address where an assertion failed.
1394*5d9d9091SRichard Lowe	! Save it in trap_freeze_pc as a debugging hint if no value
1395*5d9d9091SRichard Lowe	! has been saved there yet.
1396*5d9d9091SRichard Lowe	!
1397*5d9d9091SRichard Lowe	set	trap_freeze_pc, %g2
1398*5d9d9091SRichard Lowe	casn	[%g2], %g0, %g1
1399*5d9d9091SRichard Lowe
1400*5d9d9091SRichard Lowe	ba	ptl1_panic
1401*5d9d9091SRichard Lowe	mov	PTL1_BAD_TRACE_PTR, %g1
1402*5d9d9091SRichard Lowe	SET_SIZE(trace_ptr_panic)
1403*5d9d9091SRichard Lowe
1404*5d9d9091SRichard Lowe#endif	/* TRAPTRACE */
1405*5d9d9091SRichard Lowe/*
1406*5d9d9091SRichard Lowe * set_kcontextreg() sets PCONTEXT to kctx.
1407*5d9d9091SRichard Lowe * If PCONTEXT is already kctx, it does nothing.
1408*5d9d9091SRichard Lowe * If the N_pgsz0|N_pgsz1 fields differ, it does a demap-all first.
1409*5d9d9091SRichard Lowe */
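/*
 * Roughly (a sketch of what the SET_KCONTEXTREG macro expands to):
 *
 *	if (PCONTEXT != kctx) {
 *		if (the N_pgsz0|N_pgsz1 fields of PCONTEXT and kctx differ)
 *			demap-all;
 *		PCONTEXT = kctx;
 *	}
 */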
1410*5d9d9091SRichard Lowe        ENTRY_NP(set_kcontextreg)
1411*5d9d9091SRichard Lowe	! SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)
1412*5d9d9091SRichard Lowe	SET_KCONTEXTREG(%o0, %o1, %o2, %o3, %o4, l1, l2, l3)
1413*5d9d9091SRichard Lowe	retl
1414*5d9d9091SRichard Lowe        nop
1415*5d9d9091SRichard Lowe	SET_SIZE(set_kcontextreg)
1416*5d9d9091SRichard Lowe
1417*5d9d9091SRichard Lowe/*
1418*5d9d9091SRichard Lowe * The interface for a 32-bit client program that takes over the TBA,
1419*5d9d9091SRichard Lowe * calling the 64-bit romvec OBP.
1420*5d9d9091SRichard Lowe */
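/*
 * In outline (a rough sketch; the OPL variant skips the PCONTEXT
 * switching):
 *
 *	if (tba_taken_over) {
 *		switch %wstate to WSTATE_KMIX;
 *		if (nucleus page size bits are set in PCONTEXT)
 *			demap-all and set PCONTEXT = 0;
 *	}
 *	clear PSTATE.AM, call the 64-bit cif handler (%i0) with %i1,
 *	then restore %pstate;
 *	if (tba_taken_over) {
 *		restore %wstate;
 *		if (kcontextreg != 0)
 *			demap-all and set PCONTEXT = kcontextreg;
 *	}
 *	return the handler's result.
 */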
1421*5d9d9091SRichard Lowe
1422*5d9d9091SRichard Lowe	ENTRY(client_handler)
1423*5d9d9091SRichard Lowe	save	%sp, -SA64(MINFRAME64), %sp	! 32 bit frame, 64 bit sized
1424*5d9d9091SRichard Lowe	sethi	%hi(tba_taken_over), %l2
1425*5d9d9091SRichard Lowe	ld	[%l2+%lo(tba_taken_over)], %l3
1426*5d9d9091SRichard Lowe	brz	%l3, 1f				! is tba_taken_over == 1?
1427*5d9d9091SRichard Lowe	rdpr	%wstate, %l5			! save %wstate
1428*5d9d9091SRichard Lowe	andn	%l5, WSTATE_MASK, %l6
1429*5d9d9091SRichard Lowe	wrpr	%l6, WSTATE_KMIX, %wstate
1430*5d9d9091SRichard Lowe
1431*5d9d9091SRichard Lowe	!
1432*5d9d9091SRichard Lowe	! switch to PCONTEXT=0
1433*5d9d9091SRichard Lowe	!
1434*5d9d9091SRichard Lowe#ifndef _OPL
1435*5d9d9091SRichard Lowe	mov	MMU_PCONTEXT, %o2
1436*5d9d9091SRichard Lowe	ldxa	[%o2]ASI_DMMU, %o2
1437*5d9d9091SRichard Lowe	srlx	%o2, CTXREG_NEXT_SHIFT, %o2
1438*5d9d9091SRichard Lowe	brz,pt	%o2, 1f				! nucleus pgsz is 0, no problem
1439*5d9d9091SRichard Lowe	  nop
1440*5d9d9091SRichard Lowe	rdpr	%pstate, %l4			! disable interrupts
1441*5d9d9091SRichard Lowe	andn	%l4, PSTATE_IE, %o2
1442*5d9d9091SRichard Lowe	wrpr	%g0, %o2, %pstate
1443*5d9d9091SRichard Lowe	mov	DEMAP_ALL_TYPE, %o2		! set PCONTEXT=0
1444*5d9d9091SRichard Lowe	stxa	%g0, [%o2]ASI_DTLB_DEMAP
1445*5d9d9091SRichard Lowe	stxa	%g0, [%o2]ASI_ITLB_DEMAP
1446*5d9d9091SRichard Lowe	mov	MMU_PCONTEXT, %o2
1447*5d9d9091SRichard Lowe	stxa	%g0, [%o2]ASI_DMMU
1448*5d9d9091SRichard Lowe        membar  #Sync
1449*5d9d9091SRichard Lowe	sethi	%hi(FLUSH_ADDR), %o2
1450*5d9d9091SRichard Lowe	flush	%o2				! flush required by immu
1451*5d9d9091SRichard Lowe	wrpr	%g0, %l4, %pstate		! restore interrupt state
1452*5d9d9091SRichard Lowe#endif /* _OPL */
1453*5d9d9091SRichard Lowe
1454*5d9d9091SRichard Lowe1:	mov	%i1, %o0
1455*5d9d9091SRichard Lowe	rdpr	%pstate, %l4			! Get the present pstate value
1456*5d9d9091SRichard Lowe	andn	%l4, PSTATE_AM, %l6
1457*5d9d9091SRichard Lowe	wrpr	%l6, 0, %pstate			! Set PSTATE_AM = 0
1458*5d9d9091SRichard Lowe	jmpl	%i0, %o7			! Call cif handler
1459*5d9d9091SRichard Lowe	nop
1460*5d9d9091SRichard Lowe	wrpr	%l4, 0, %pstate			! restore pstate
1461*5d9d9091SRichard Lowe	brz	%l3, 1f				! is tba_taken_over == 1?
1462*5d9d9091SRichard Lowe	  nop
1463*5d9d9091SRichard Lowe	wrpr	%g0, %l5, %wstate		! restore wstate
1464*5d9d9091SRichard Lowe
1465*5d9d9091SRichard Lowe	!
1466*5d9d9091SRichard Lowe	! switch to PCONTEXT=kcontextreg
1467*5d9d9091SRichard Lowe	!
1468*5d9d9091SRichard Lowe#ifndef _OPL
1469*5d9d9091SRichard Lowe	sethi	%hi(kcontextreg), %o3
1470*5d9d9091SRichard Lowe	ldx     [%o3 + %lo(kcontextreg)], %o3
1471*5d9d9091SRichard Lowe	brz	%o3, 1f
1472*5d9d9091SRichard Lowe	  nop
1473*5d9d9091SRichard Lowe	rdpr	%pstate, %l4			! disable interrupts
1474*5d9d9091SRichard Lowe	andn	%l4, PSTATE_IE, %o2
1475*5d9d9091SRichard Lowe	wrpr	%g0, %o2, %pstate
1476*5d9d9091SRichard Lowe	mov	DEMAP_ALL_TYPE, %o2
1477*5d9d9091SRichard Lowe	stxa	%g0, [%o2]ASI_DTLB_DEMAP
1478*5d9d9091SRichard Lowe	stxa	%g0, [%o2]ASI_ITLB_DEMAP
1479*5d9d9091SRichard Lowe	mov	MMU_PCONTEXT, %o2
1480*5d9d9091SRichard Lowe	stxa	%o3, [%o2]ASI_DMMU
1481*5d9d9091SRichard Lowe        membar  #Sync
1482*5d9d9091SRichard Lowe	sethi	%hi(FLUSH_ADDR), %o2
1483*5d9d9091SRichard Lowe	flush	%o2				! flush required by immu
1484*5d9d9091SRichard Lowe	wrpr	%g0, %l4, %pstate		! restore interrupt state
1485*5d9d9091SRichard Lowe#endif /* _OPL */
1486*5d9d9091SRichard Lowe
1487*5d9d9091SRichard Lowe1:	ret					! Return result ...
1488*5d9d9091SRichard Lowe	restore	%o0, %g0, %o0			! delay; result in %o0
1489*5d9d9091SRichard Lowe	SET_SIZE(client_handler)
1490*5d9d9091SRichard Lowe
1491