/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#include "assym.h"

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

/*
 * sfmmu related subroutines
 */
/*
 * Invalidate either the context of a specific victim or any process
 * currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being stolen (victim)
 *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT.
 * Note %g1 is the only input argument used by this xcall handler.
 */

	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT) {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	!
	! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
	!	return
	! } else {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	! }
	!

	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn	%xcc, ptl1_panic	/* can't invalidate kernel ctx */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2

	cmp	%g1, INVALID_CONTEXT
	bne,pt	%xcc, 1f			/* called from wrap_around? */
	  mov	MMU_SCONTEXT, %g3

	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble,pn	%xcc, 0f			/* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g5		/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble,pn	%xcc, 6f			/* yes, no need to change */
	  nop

	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	membar	#Sync

6:	/* flushall tlb */
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o2, %g6
	mov	%o5, %g7

	mov	%g0, %o0	! XXX no cpu list yet
	mov	%g0, %o1	! XXX no cpu list yet
	mov	MAP_ITLB | MAP_DTLB, %o2
	mov	MMU_DEMAP_ALL, %o5
	ta	FAST_TRAP
	brz,pt	%o0, 5f
	  nop
	ba	ptl1_panic		/* bad HV call */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1
5:
	mov	%g3, %o0
	mov	%g4, %o1
	mov	%g6, %o2
	mov	%g7, %o5

	ba	3f
	  nop
1:
	/*
	 * %g1 = sfmmup
	 * %g2 = INVALID_CONTEXT
	 * %g3 = MMU_SCONTEXT
	 */
	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5	/* load usfmmup */

	cmp	%g5, %g1			/* is it the victim? */
	bne,pt	%xcc, 2f			/* no, nothing to do */
	  nop

	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble,pn	%xcc, 0f			/* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g4		/* %g4 = pri-ctx */
	cmp	%g4, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble	%icc, 3f			/* no need to change pri-ctx */
	  nop
	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	membar	#Sync

3:
	/* TSB program must be cleared - walkers do not check a context. */
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o5, %g7
	clr	%o0
	clr	%o1
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP
	brnz,a,pn %o0, ptl1_panic
	  mov	PTL1_BAD_HCALL, %g1
	mov	%g3, %o0
	mov	%g4, %o1
	mov	%g7, %o5
2:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)
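
/*
 * For reference, a hedged C sketch of the handler above; the helper names
 * (mmu_ctx_rd, mmu_ctx_wr, hv_demap_all_tlb, hv_clear_ctxnon0_tsb,
 * tsbmiss_area) are hypothetical stand-ins for the ldxa/stxa ASI_MMU_CTX
 * accesses and the FAST_TRAP hypervisor calls, not real kernel interfaces:
 *
 *	void
 *	sfmmu_raise_tsb_exception(uintptr_t victim)
 *	{
 *		if (victim == INVALID_CONTEXT) {	// from wrap_around
 *			if (mmu_ctx_rd(MMU_SCONTEXT) > INVALID_CONTEXT)
 *				mmu_ctx_wr(MMU_SCONTEXT, INVALID_CONTEXT);
 *			if (mmu_ctx_rd(MMU_PCONTEXT) > INVALID_CONTEXT)
 *				mmu_ctx_wr(MMU_PCONTEXT, INVALID_CONTEXT);
 *			hv_demap_all_tlb();	// MMU_DEMAP_ALL hcall
 *		} else if (tsbmiss_area()->uhatid != victim) {
 *			return;			// not running the victim
 *		} else {
 *			if (mmu_ctx_rd(MMU_SCONTEXT) > INVALID_CONTEXT)
 *				mmu_ctx_wr(MMU_SCONTEXT, INVALID_CONTEXT);
 *			if (mmu_ctx_rd(MMU_PCONTEXT) > INVALID_CONTEXT)
 *				mmu_ctx_wr(MMU_PCONTEXT, INVALID_CONTEXT);
 *		}
 *		hv_clear_ctxnon0_tsb();	// MMU_TSB_CTXNON0 with ntsb == 0
 *	}
 */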

	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	retl
	ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_sec)
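
	/*
	 * A minimal usage sketch in C, assuming the usual prototypes
	 * (uint_t sfmmu_getctx_pri(void), uint_t sfmmu_getctx_sec(void)):
	 *
	 *	uint_t ctxnum = sfmmu_getctx_sec();
	 *	if (ctxnum == INVALID_CONTEXT)
	 *		...	// secondary context has been invalidated
	 */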

	/*
	 * Set the secondary context register for this process.
	 * %o0 = context number
	 */
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first.
	 */

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */
1:
	mov	MMU_SCONTEXT, %o1
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	membar	#Sync
	/*
	 * If the routine was entered with interrupts enabled, re-enable
	 * them now; otherwise keep them disabled and return.
	 * %g1 - old interrupt state
	 */
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 2f
	wrpr	%g0, %g1, %pstate		/* enable interrupts */
2:	retl
	nop
	SET_SIZE(sfmmu_setctx_sec)
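
	/*
	 * A hedged C sketch of the interrupt save/restore pattern used
	 * above (rdpr_pstate, wrpr_pstate, mmu_ctx_wr, and membar_sync
	 * are hypothetical stand-ins for the privileged-register and
	 * ASI accesses):
	 *
	 *	void
	 *	sfmmu_setctx_sec(uint_t ctxnum)
	 *	{
	 *		uint64_t pstate = rdpr_pstate();
	 *
	 *		if (pstate & PSTATE_IE)		// entered enabled?
	 *			wrpr_pstate(pstate & ~PSTATE_IE);
	 *		mmu_ctx_wr(MMU_SCONTEXT, ctxnum);
	 *		membar_sync();
	 *		if (pstate & PSTATE_IE)		// restore old state
	 *			wrpr_pstate(pstate);
	 *	}
	 */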

	/*
	 * Set ktsb_phys to 1 to indicate that the kernel TSB may be
	 * accessed with ASI_QUAD_LDD_PHYS; this version does so
	 * unconditionally and returns the value (1) in %o0.
	 */
	ENTRY_NP(sfmmu_setup_4lp)
	set	ktsb_phys, %o2
	mov	1, %o1
	st	%o1, [%o2]
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)
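
	/*
	 * In C terms the routine above is simply (a sketch, assuming
	 * ktsb_phys is the global int used by the sfmmu code):
	 *
	 *	int
	 *	sfmmu_setup_4lp(void)
	 *	{
	 *		ktsb_phys = 1;
	 *		return (1);
	 *	}
	 */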

	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 */
	ENTRY_NP(sfmmu_load_mmustate)

#ifdef DEBUG
	PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l1, %g1)
#endif /* DEBUG */

	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o0
	be,pn	%xcc, 7f			! if kernel as, do nothing
	  nop

	set	MMU_SCONTEXT, %o3
	ldxa	[%o3]ASI_MMU_CTX, %o5

	cmp	%o5, INVALID_CONTEXT		! ctx is invalid?
	bne,pt	%icc, 1f
	  nop

	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stx	%g0, [%o2 + TSBMISS_SHARED_UHATID]
#ifdef DEBUG
	/* check if hypervisor/hardware should handle user TSB */
	sethi	%hi(hv_use_non0_tsb), %o2
	ld	[%o2 + %lo(hv_use_non0_tsb)], %o2
	brz,pn	%o2, 0f
	  nop
#endif /* DEBUG */
	clr	%o0				! ntsb = 0 for invalid ctx
	clr	%o1				! HV_TSB_INFO_PA = 0 if inv ctx
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP			! set TSB info for user process
	brnz,a,pn %o0, panic_bad_hcall
	  mov	MMU_TSB_CTXNON0, %o1
0:
	retl
	  nop
1:
	/*
	 * We need to set up the TSB base register, tsbmiss
	 * area, and pass the TSB information into the hypervisor.
	 */
	ldx	[%o0 + SFMMU_TSB], %o1		! %o1 = first tsbinfo
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second tsbinfo

	/* create/set first UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = user tsbreg
	SET_UTSBREG(SCRATCHPAD_UTSBREG1, %o2, %o3)

	brz,pt	%g2, 2f
	  mov	-1, %o2				! use -1 if no second TSB

	/* make 2nd UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = user tsbreg
2:
	SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)

	/* make 3rd and 4th TSB */
	CPU_TSBMISS_AREA(%o4, %o3)		! %o4 = tsbmiss area

	ldx	[%o0 + SFMMU_SCDP], %g2		! %g2 = sfmmu_scd
	brz,pt	%g2, 3f
	  mov	-1, %o2				! use -1 if no third TSB

	ldx	[%g2 + SCD_SFMMUP], %g3		! %g3 = scdp->scd_sfmmup
	ldx	[%g3 + SFMMU_TSB], %o1		! %o1 = first scd tsbinfo
	brz,pn	%o1, 9f
	  nop					! panic if no third TSB

	/* make 3rd UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = user tsbreg
3:
	SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR, %o2)

	brz,pt	%g2, 4f
	  mov	-1, %o2				! use -1 if no 3rd or 4th TSB

	brz,pt	%o1, 4f
	  mov	-1, %o2				! use -1 if no 3rd or 4th TSB
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second scd tsbinfo
	brz,pt	%g2, 4f
	  mov	-1, %o2				! use -1 if no 4th TSB

	/* make 4th UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = user tsbreg
4:
	SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR4M, %o2)

#ifdef DEBUG
	/* check if hypervisor/hardware should handle user TSB */
	sethi	%hi(hv_use_non0_tsb), %o2
	ld	[%o2 + %lo(hv_use_non0_tsb)], %o2
	brz,pn	%o2, 6f
	  nop
#endif /* DEBUG */
	CPU_ADDR(%o2, %o4)	! load CPU struct addr to %o2 using %o4
	ldub	[%o2 + CPU_TSTAT_FLAGS], %o1	! load cpu_tstat_flag to %o1

	mov	%o0, %o3			! preserve %o0
	btst	TSTAT_TLB_STATS, %o1
	bnz,a,pn %icc, 5f			! ntsb = 0 if TLB stats enabled
	  clr	%o0

	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_CNT], %o0
5:
	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_PA], %o1
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP			! set TSB info for user process
	brnz,a,pn %o0, panic_bad_hcall
	  mov	MMU_TSB_CTXNON0, %o1
	mov	%o3, %o0			! restore %o0
6:
	ldx	[%o0 + SFMMU_ISMBLKPA], %o1	! copy members of sfmmu
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	stx	%o1, [%o2 + TSBMISS_ISMBLKPA]	! sfmmu_tsb_miss into the
	ldub	[%o0 + SFMMU_TTEFLAGS], %o3	! per-CPU tsbmiss area.
	ldub	[%o0 + SFMMU_RTTEFLAGS], %o4
	ldx	[%o0 + SFMMU_SRDP], %o1
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stub	%o3, [%o2 + TSBMISS_UTTEFLAGS]
	stub	%o4, [%o2 + TSBMISS_URTTEFLAGS]
	stx	%o1, [%o2 + TSBMISS_SHARED_UHATID]
	brz,pn	%o1, 7f				! check for sfmmu_srdp
	  add	%o0, SFMMU_HMERMAP, %o1
	add	%o2, TSBMISS_SHMERMAP, %o2
	mov	SFMMU_HMERGNMAP_WORDS, %o3
						! set tsbmiss shmermap
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)

	ldx	[%o0 + SFMMU_SCDP], %o4		! %o4 = sfmmu_scd
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	mov	SFMMU_HMERGNMAP_WORDS, %o3
	brnz,pt	%o4, 8f				! check for sfmmu_scdp else
	  add	%o2, TSBMISS_SCDSHMERMAP, %o2	! zero tsbmiss scd_shmermap
	ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
7:
	retl
	nop
8:						! set tsbmiss scd_shmermap
	add	%o4, SCD_HMERMAP, %o1
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
	retl
	  nop
9:
	sethi	%hi(panicstr), %g1		! panic if no 3rd TSB
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1

	bnz,pn	%xcc, 7b
	  nop

	sethi	%hi(sfmmu_panic10), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic10), %o0

	SET_SIZE(sfmmu_load_mmustate)
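
	/*
	 * Hedged C outline of sfmmu_load_mmustate() above; the helpers
	 * named here (mmu_ctx_rd, hv_set_ctxnon0_tsb, tsbmiss_area) are
	 * illustrative stand-ins, not real kernel interfaces:
	 *
	 *	void
	 *	sfmmu_load_mmustate(sfmmu_t *sfmmup)
	 *	{
	 *		struct tsbmiss *tm = tsbmiss_area();
	 *
	 *		if (sfmmup == ksfmmup)
	 *			return;		// kernel AS: nothing to do
	 *		if (mmu_ctx_rd(MMU_SCONTEXT) == INVALID_CONTEXT) {
	 *			tm->uhatid = sfmmup;
	 *			tm->shared_uhatid = NULL;
	 *			hv_set_ctxnon0_tsb(0, 0); // no user TSBs yet
	 *			return;
	 *		}
	 *		// Otherwise: program up to four UTSBREG scratchpad
	 *		// registers (private and SCD TSBs), hand the hat's
	 *		// TSB-info list to the hypervisor via MMU_TSB_CTXNON0,
	 *		// and copy hat fields and region maps into the
	 *		// per-CPU tsbmiss area.
	 *	}
	 */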

	ENTRY(prefetch_tsbe_read)
	retl
	nop
	SET_SIZE(prefetch_tsbe_read)

	ENTRY(prefetch_tsbe_write)
	retl
	nop
	SET_SIZE(prefetch_tsbe_write)
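
	/*
	 * The two entry points above are deliberate no-ops here; in C they
	 * would be empty functions (assuming the usual tsbe pointer
	 * argument):
	 *
	 *	void prefetch_tsbe_read(struct tsbe *tsbep) { }
	 *	void prefetch_tsbe_write(struct tsbe *tsbep) { }
	 */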