/* $FreeBSD$ */
/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTICE: This is not a standalone file.  To use it, #include it in
 * your port's locore.S, like so:
 *
 *	#include <powerpc/aim/trap_subr.S>
 */

/* Locate the per-CPU data structure */
#define GET_CPUINFO(r)  \
        mfsprg0  r
#define GET_TOCBASE(r)  \
	lis	r,DMAP_BASE_ADDRESS@highesta;	/* To real-mode alias/dmap */ \
	sldi	r,r,32;							\
	ori	r,r,TRAP_TOCBASE;	/* Magic address for TOC */	\
	ld	r,0(r)

/*
 * Restore SRs for a pmap
 *
 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
 */

/*
 * User SRs are loaded through a pointer to the current pmap.
 */
restore_usersrs:
	GET_CPUINFO(%r28)
	ld	%r28,PC_USERSLB(%r28)
	cmpdi	%r28, 0			/* If user SLB pointer NULL, exit */
	beqlr

	li	%r29, 0			/* Set the counter to zero */

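	/*
	 * Note: slbia is not relied on to clear SLB entry 0 here; that
	 * entry is read back and invalidated explicitly with slbie
	 * (restore_kernsrs below uses the same sequence).
	 */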
	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
	cmpdi	%r31, 0			/* If NULL, stop */
	beqlr

	ld	%r30, 0(%r31)		/* Load SLBV */
	ld	%r31, 8(%r31)		/* Load SLBE */
	or	%r31, %r31, %r29	/*  Set SLBE slot */
	slbmte	%r30, %r31		/* Install SLB entry */

	addi	%r28, %r28, 8		/* Advance pointer */
	addi	%r29, %r29, 1
	b	1b			/* Repeat */

/*
 * Kernel SRs are loaded directly from the PCPU fields
 */
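/*
 * Each cached entry in PC_KERNSLB is 16 bytes: the SLBV at offset 0 and the
 * SLBE at offset 8.  The loop below walks all 64 slots, skipping
 * USER_SLB_SLOT, and stops at the first invalid (zero) SLBE.
 */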
restore_kernsrs:
	GET_CPUINFO(%r28)
	addi	%r28,%r28,PC_KERNSLB
	ld	%r29,16(%r28)		/* One past USER_SLB_SLOT */
	cmpdi	%r29,0
	beqlr				/* If first kernel entry is invalid,
					 * SLBs not in use, so exit early */

	/* Otherwise, set up SLBs */
	li	%r29, 0			/* Set the counter to zero */

	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	cmpdi	%r29, USER_SLB_SLOT	/* Skip the user slot */
	beq-	2f

	ld	%r31, 8(%r28)		/* Load SLBE */
	cmpdi	%r31, 0			/* If SLBE is not valid, stop */
	beqlr
	ld	%r30, 0(%r28)		/* Load SLBV  */
	slbmte	%r30, %r31		/* Install SLB entry */

2:	addi	%r28, %r28, 16		/* Advance pointer */
	addi	%r29, %r29, 1
	cmpdi	%r29, 64		/* Repeat if we are not at the end */
	blt	1b
	blr

/*
 * FRAME_SETUP assumes:
 *	SPRG1		SP (1)
 * 	SPRG3		trap type
 *	savearea	r27-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
 *	r28		LR
 *	r29		CR
 *	r30		scratch
 *	r31		scratch
 *	r1		kernel stack
 *	SRR0/1		as at start of trap
 *
 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse
 * in any real-mode fault handler, including those handling double faults.
 */
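/*
 * Layout note: the trapframe is stored at offset 48 above the new stack
 * pointer, leaving room below it for a minimal ABI call-frame header (hence
 * the "addi %r3,%r1,48" used when later calling into C).  The extra 288
 * bytes subtracted in the stdu below skip the red zone that the 64-bit ELF
 * ABI allows the interrupted code to use below its stack pointer.
 */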
#define	FRAME_SETUP(savearea)						\
/* Have to enable translation to allow access of kernel stack: */	\
	GET_CPUINFO(%r31);						\
	mfsrr0	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR0)(%r31);	/* save SRR0 */	\
	mfsrr1	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR1)(%r31);	/* save SRR1 */	\
	mfsprg1	%r31;			/* get saved SP (clears SPRG1) */ \
	mfmsr	%r30;							\
	ori	%r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */	\
	mtmsr	%r30;			/* stack can now be accessed */	\
	isync;								\
	stdu	%r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
	std	%r0, FRAME_0+48(%r1);	/* save r0 in the trapframe */	\
	std	%r31,FRAME_1+48(%r1);	/* save SP   "      "       */	\
	std	%r2, FRAME_2+48(%r1);	/* save r2   "      "       */	\
	std	%r28,FRAME_LR+48(%r1);	/* save LR   "      "       */	\
	std	%r29,FRAME_CR+48(%r1);	/* save CR   "      "       */	\
	GET_CPUINFO(%r2);						\
	ld	%r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */	\
	ld	%r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */	\
	ld	%r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */	\
	ld	%r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */	\
	ld	%r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */	\
	std	%r3,  FRAME_3+48(%r1);	/* save r3-r31 */		\
	std	%r4,  FRAME_4+48(%r1);					\
	std	%r5,  FRAME_5+48(%r1);					\
	std	%r6,  FRAME_6+48(%r1);					\
	std	%r7,  FRAME_7+48(%r1);					\
	std	%r8,  FRAME_8+48(%r1);					\
	std	%r9,  FRAME_9+48(%r1);					\
	std	%r10, FRAME_10+48(%r1);					\
	std	%r11, FRAME_11+48(%r1);					\
	std	%r12, FRAME_12+48(%r1);					\
	std	%r13, FRAME_13+48(%r1);					\
	std	%r14, FRAME_14+48(%r1);					\
	std	%r15, FRAME_15+48(%r1);					\
	std	%r16, FRAME_16+48(%r1);					\
	std	%r17, FRAME_17+48(%r1);					\
	std	%r18, FRAME_18+48(%r1);					\
	std	%r19, FRAME_19+48(%r1);					\
	std	%r20, FRAME_20+48(%r1);					\
	std	%r21, FRAME_21+48(%r1);					\
	std	%r22, FRAME_22+48(%r1);					\
	std	%r23, FRAME_23+48(%r1);					\
	std	%r24, FRAME_24+48(%r1);					\
	std	%r25, FRAME_25+48(%r1);					\
	std	%r26, FRAME_26+48(%r1);					\
	std	%r27, FRAME_27+48(%r1);					\
	std	%r28, FRAME_28+48(%r1);					\
	std	%r29, FRAME_29+48(%r1);					\
	std	%r30, FRAME_30+48(%r1);					\
	std	%r31, FRAME_31+48(%r1);					\
	ld	%r28,(savearea+CPUSAVE_AIM_DAR)(%r2);  /* saved DAR */	\
	ld	%r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
	ld	%r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */	\
	ld	%r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */	\
	mfxer	%r3;							\
	mfctr	%r4;							\
	mfsprg3	%r5;							\
	std	%r3, FRAME_XER+48(1);	/* save xer/ctr/exc */		\
	std	%r4, FRAME_CTR+48(1);					\
	std	%r5, FRAME_EXC+48(1);					\
	std	%r28,FRAME_AIM_DAR+48(1);				\
	std	%r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */	\
	std	%r30,FRAME_SRR0+48(1);					\
	std	%r31,FRAME_SRR1+48(1);					\
	ld	%r13,PC_CURTHREAD(%r2)	/* set kernel curthread */

#define	FRAME_LEAVE(savearea)						\
/* Disable exceptions: */						\
	mfmsr	%r2;							\
	andi.	%r2,%r2,~PSL_EE@l;					\
	mtmsr	%r2;							\
	isync;								\
/* Now restore regs: */							\
	ld	%r2,FRAME_SRR0+48(%r1);					\
	ld	%r3,FRAME_SRR1+48(%r1);					\
	ld	%r4,FRAME_CTR+48(%r1);					\
	ld	%r5,FRAME_XER+48(%r1);					\
	ld	%r6,FRAME_LR+48(%r1);					\
	GET_CPUINFO(%r7);						\
	std	%r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */	\
	std	%r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */	\
	ld	%r7,FRAME_CR+48(%r1);					\
	mtctr	%r4;							\
	mtxer	%r5;							\
	mtlr	%r6;							\
	mtsprg2	%r7;			/* save cr */			\
	ld	%r31,FRAME_31+48(%r1);   /* restore r0-31 */		\
	ld	%r30,FRAME_30+48(%r1);					\
	ld	%r29,FRAME_29+48(%r1);					\
	ld	%r28,FRAME_28+48(%r1);					\
	ld	%r27,FRAME_27+48(%r1);					\
	ld	%r26,FRAME_26+48(%r1);					\
	ld	%r25,FRAME_25+48(%r1);					\
	ld	%r24,FRAME_24+48(%r1);					\
	ld	%r23,FRAME_23+48(%r1);					\
	ld	%r22,FRAME_22+48(%r1);					\
	ld	%r21,FRAME_21+48(%r1);					\
	ld	%r20,FRAME_20+48(%r1);					\
	ld	%r19,FRAME_19+48(%r1);					\
	ld	%r18,FRAME_18+48(%r1);					\
	ld	%r17,FRAME_17+48(%r1);					\
	ld	%r16,FRAME_16+48(%r1);					\
	ld	%r15,FRAME_15+48(%r1);					\
	ld	%r14,FRAME_14+48(%r1);					\
	ld	%r13,FRAME_13+48(%r1);					\
	ld	%r12,FRAME_12+48(%r1);					\
	ld	%r11,FRAME_11+48(%r1);					\
	ld	%r10,FRAME_10+48(%r1);					\
	ld	%r9, FRAME_9+48(%r1);					\
	ld	%r8, FRAME_8+48(%r1);					\
	ld	%r7, FRAME_7+48(%r1);					\
	ld	%r6, FRAME_6+48(%r1);					\
	ld	%r5, FRAME_5+48(%r1);					\
	ld	%r4, FRAME_4+48(%r1);					\
	ld	%r3, FRAME_3+48(%r1);					\
	ld	%r2, FRAME_2+48(%r1);					\
	ld	%r0, FRAME_0+48(%r1);					\
	ld	%r1, FRAME_1+48(%r1);					\
/* Can't touch %r1 from here on */					\
	mtsprg3	%r3;			/* save r3 */			\
/* Disable translation, machine check and recoverability: */		\
	mfmsr	%r3;							\
	andi.	%r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l;		\
	mtmsr	%r3;							\
	isync;								\
/* Decide whether we return to user mode: */				\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);			\
	mtcr	%r3;							\
	bf	17,1f;			/* branch if PSL_PR is false */	\
/* Restore user SRs */							\
	GET_CPUINFO(%r3);						\
	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	mflr	%r27;			/* preserve LR */		\
	bl	restore_usersrs;	/* uses r28-r31 */		\
	mtlr	%r27;							\
	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
1:	mfsprg2	%r3;			/* restore cr */		\
	mtcr	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */	\
	mtsrr0	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */	\
	mtsrr1	%r3;							\
	mfsprg3	%r3			/* restore r3 */

#ifdef KDTRACE_HOOKS
	.data
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
        .size	dtrace_invop_calltrap_addr, 8
dtrace_invop_calltrap_addr:
	.word	0
	.word	0

	.text
#endif

/*
 * Processor reset exception handler. These are typically
 * the first instructions the processor executes after a
 * software reset. We do this in two parts so that we are
 * not still hanging around in the trap handling region
 * once the MMU is turned on.
 */
	.globl	CNAME(rstcode), CNAME(rstcodeend), CNAME(cpu_reset_handler)
	.globl	CNAME(cpu_wakeup_handler)
	.p2align 3
CNAME(rstcode):
	/*
	 * Check whether this is a software reset or the processor is
	 * waking up from power-saving mode.
	 * It is a software reset when SRR1 bits 46:47 = 0b00.
	 */
	/* 0x00 */
	ld	%r2,TRAP_ENTRY(0)	/* Real-mode &generictrap */
	mfsrr1	%r9			/* Load SRR1 into r9 */
	andis.	%r9,%r9,0x3		/* Isolate SRR1 bits 46:47 */

	beq	2f			/* Branch if software reset */
	/* 0x10 */
	/* Reset was wakeup */
	addi	%r9,%r2,(cpu_wakeup_handler-generictrap)
	b	1f			/* Was power save, do the wakeup */

	/* Reset was software reset */
	/* Explicitly set MSR[SF] */
2:	mfmsr	%r9
	li	%r8,1
	/* 0x20 */
	insrdi	%r9,%r8,1,0
	mtmsrd	%r9
	isync

	addi	%r9,%r2,(cpu_reset_handler-generictrap)

	/* 0x30 */
1:	mtlr	%r9
	blr				/* Branch to either cpu_reset_handler
					 * or cpu_wakeup_handler.
					 */
CNAME(rstcodeend):

cpu_reset_handler:
	GET_TOCBASE(%r2)

	addis	%r1,%r2,TOC_REF(tmpstk)@ha
	ld	%r1,TOC_REF(tmpstk)@l(%r1)	/* get new SP */
	addi	%r1,%r1,(TMPSTKSZ-48)

	bl	CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
	nop
	lis	%r3,1@l
	bl	CNAME(pmap_cpu_bootstrap)	/* Turn on virtual memory */
	nop
	bl	CNAME(cpudep_ap_bootstrap)	/* Set up PCPU and stack */
	nop
	mr	%r1,%r3				/* Use new stack */
	bl	CNAME(cpudep_ap_setup)
	nop
	GET_CPUINFO(%r5)
	ld	%r3,(PC_RESTORE)(%r5)
	cmpldi	%cr0,%r3,0
	beq	%cr0,2f
	nop
	li	%r4,1
	bl	CNAME(longjmp)
	nop
2:
#ifdef SMP
	bl	CNAME(machdep_ap_bootstrap)	/* And away! */
	nop
#endif

	/* Should not be reached */
9:
	b	9b

cpu_wakeup_handler:
	GET_TOCBASE(%r2)

	/* Check for a false wakeup due to a badly set SRR1 (e.g. by OPAL) */
	addis	%r3,%r2,TOC_REF(can_wakeup)@ha
	ld	%r3,TOC_REF(can_wakeup)@l(%r3)
	ld	%r3,0(%r3)
	cmpdi	%r3,0
	beq	cpu_reset_handler

	/* Turn on MMU after return from interrupt */
	mfsrr1	%r3
	ori	%r3,%r3,(PSL_IR | PSL_DR)
	mtsrr1	%r3

	/* Turn on MMU (needed to access PCB) */
	mfmsr	%r3
	ori	%r3,%r3,(PSL_IR | PSL_DR)
	mtmsr	%r3
	isync

	mfsprg0	%r3

	ld	%r3,PC_CURTHREAD(%r3)	/* Get current thread */
	ld	%r3,TD_PCB(%r3)		/* Get PCB of current thread */
	ld	%r12,PCB_CONTEXT(%r3)	/* Load the non-volatile GP regs. */
	ld	%r13,PCB_CONTEXT+1*8(%r3)
	ld	%r14,PCB_CONTEXT+2*8(%r3)
	ld	%r15,PCB_CONTEXT+3*8(%r3)
	ld	%r16,PCB_CONTEXT+4*8(%r3)
	ld	%r17,PCB_CONTEXT+5*8(%r3)
	ld	%r18,PCB_CONTEXT+6*8(%r3)
	ld	%r19,PCB_CONTEXT+7*8(%r3)
	ld	%r20,PCB_CONTEXT+8*8(%r3)
	ld	%r21,PCB_CONTEXT+9*8(%r3)
	ld	%r22,PCB_CONTEXT+10*8(%r3)
	ld	%r23,PCB_CONTEXT+11*8(%r3)
	ld	%r24,PCB_CONTEXT+12*8(%r3)
	ld	%r25,PCB_CONTEXT+13*8(%r3)
	ld	%r26,PCB_CONTEXT+14*8(%r3)
	ld	%r27,PCB_CONTEXT+15*8(%r3)
	ld	%r28,PCB_CONTEXT+16*8(%r3)
	ld	%r29,PCB_CONTEXT+17*8(%r3)
	ld	%r30,PCB_CONTEXT+18*8(%r3)
	ld	%r31,PCB_CONTEXT+19*8(%r3)
	ld	%r5,PCB_CR(%r3)		/* Load the condition register */
	mtcr	%r5
	ld	%r5,PCB_LR(%r3)		/* Load the link register */
	mtsrr0	%r5
	ld	%r1,PCB_SP(%r3)		/* Load the stack pointer */
	ld	%r2,PCB_TOC(%r3)	/* Load the TOC pointer */

	rfid

/*
 * This code gets copied to all the trap vectors
 * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions!
 */
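/*
 * The 8-instruction (32-byte) limit comes from the most closely spaced
 * vectors this stub is copied to, which are only 0x20 bytes apart.  The
 * "li %r1, 0xe0" below feeds generictrap's vector computation,
 * (LR - 4) & (0xff00 | r1); keeping the low 0xe0 bits is what lets such
 * closely spaced vectors be distinguished.
 */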

	.globl	CNAME(trapcode),CNAME(trapcodeend)
	.p2align 3
CNAME(trapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	ld	%r1,TRAP_ENTRY(0)
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
CNAME(trapcodeend):

/* Same thing for traps setting HSRR0/HSRR1 */
	.globl	CNAME(hypertrapcode),CNAME(hypertrapcodeend)
	.p2align 3
CNAME(hypertrapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(generichypertrap-generictrap)
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generichypertrap */
CNAME(hypertrapcodeend):

/*
 * For SLB misses: do special things for the kernel
 *
 * Note: SPRG1 is always safe to overwrite any time the MMU was on, which is
 * the only time this can be called.
 */
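/*
 * This stub is intended for the segment-fault vectors (0x380 for data,
 * 0x480 for instructions), each of which has only 0x80 bytes before the
 * next vector; hence the "must fit in 128 bytes" note at the end.
 * User-mode misses are bounced to generictrap as ordinary traps, while
 * kernel-mode misses are sent to kern_slbtrap below.
 */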
	.globl	CNAME(slbtrap),CNAME(slbtrapend)
	.p2align 3
CNAME(slbtrap):
	/* 0x00 */
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r2,(PC_SLBSAVE+16)(%r1)	/* save r2 */
	mfcr	%r2
	/* 0x10 */
	std	%r2,(PC_SLBSAVE+104)(%r1)	/* save CR */
	mfsrr1	%r2			/* test kernel mode */
	mtcr	%r2
	bf	17,2f			/* branch if PSL_PR is false */
	/* 0x20 */
	/* User mode */
	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2				/* restore CR */
	ld	%r2,(PC_SLBSAVE+16)(%r1) 	/* restore r2 */
	mflr	%r1
	/* 0x30 */
	mtsprg2 %r1				/* save LR in SPRG2 */
	ld	%r1,TRAP_ENTRY(0)		/* real-mode &generictrap */
	mtlr	%r1
	li	%r1, 0x80		/* How to get the vector from LR */
	/* 0x40 */
	blrl				/* Branch to generictrap */
2:	mflr	%r2			/* Save the old LR in r2 */
	/* Kernel mode */
	ld	%r1,TRAP_GENTRAP(0)		/* Real-mode &generictrap */
	addi    %r1,%r1,(kern_slbtrap-generictrap)
	/* 0x50 */
	mtlr	%r1
	GET_CPUINFO(%r1)
	blrl					/* Branch to kern_slbtrap */
/* must fit in 128 bytes! */
CNAME(slbtrapend):

/*
 * On entry:
 * SPRG1: SP
 * r1: pcpu
 * r2: LR
 * LR: branch address in trap region
 */
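/*
 * PC_SLBSAVE layout as used here: GPR n is saved at offset n*8 (r0 at 0,
 * the old SP at 8, ..., r12 at 96), followed by CR at 104, XER at 112,
 * LR at 120, CTR at 128 and the old (pre-trap) LR at 136.
 */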
kern_slbtrap:
	std	%r2,(PC_SLBSAVE+136)(%r1) /* old LR */
	std	%r3,(PC_SLBSAVE+24)(%r1) /* save R3 */

	/* Check if this needs to be handled as a regular trap (userseg miss) */
	mflr	%r2
	andi.	%r2,%r2,0xff80
	cmpwi	%r2,EXC_DSE
	bne	1f
	mfdar	%r2
	b	2f
1:	mfsrr0	%r2
2:	/* r2 now contains the fault address */
	lis	%r3,SEGMENT_MASK@highesta
	ori	%r3,%r3,SEGMENT_MASK@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,SEGMENT_MASK@ha
	ori	%r3,%r3,SEGMENT_MASK@l
	and	%r2,%r2,%r3	/* R2 = segment base address */
	lis	%r3,USER_ADDR@highesta
	ori	%r3,%r3,USER_ADDR@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,USER_ADDR@ha
	ori	%r3,%r3,USER_ADDR@l
	cmpd	%r2,%r3		/* Compare fault base to USER_ADDR */
	bne	3f

	/* User seg miss, handle as a regular trap */
	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	ld	%r1,(PC_SLBSAVE+136)(%r1) /* Save the old LR in r1 */
	mtsprg2 %r1			/* And then in SPRG2 */
	li	%r1, 0x80		/* How to get the vector from LR */
	b	generictrap		/* Retain old LR using b */

3:	/* Real kernel SLB miss */
	std	%r0,(PC_SLBSAVE+0)(%r1)	/* free all volatile regs */
	mfsprg1	%r2			/* Old R1 */
	std	%r2,(PC_SLBSAVE+8)(%r1)
	/* R2,R3 already saved */
	std	%r4,(PC_SLBSAVE+32)(%r1)
	std	%r5,(PC_SLBSAVE+40)(%r1)
	std	%r6,(PC_SLBSAVE+48)(%r1)
	std	%r7,(PC_SLBSAVE+56)(%r1)
	std	%r8,(PC_SLBSAVE+64)(%r1)
	std	%r9,(PC_SLBSAVE+72)(%r1)
	std	%r10,(PC_SLBSAVE+80)(%r1)
	std	%r11,(PC_SLBSAVE+88)(%r1)
	std	%r12,(PC_SLBSAVE+96)(%r1)
	/* CR already saved */
	mfxer	%r2			/* save XER */
	std	%r2,(PC_SLBSAVE+112)(%r1)
	mflr	%r2			/* save LR (SP already saved) */
	std	%r2,(PC_SLBSAVE+120)(%r1)
	mfctr	%r2			/* save CTR */
	std	%r2,(PC_SLBSAVE+128)(%r1)

	/* Call handler */
	addi	%r1,%r1,PC_SLBSTACK-48+1024
	li	%r2,~15
	and	%r1,%r1,%r2
	GET_TOCBASE(%r2)
	mflr	%r3
	andi.	%r3,%r3,0xff80
	mfdar	%r4
	mfsrr0	%r5
	bl	handle_kernel_slb_spill
	nop

	/* Save r28-31, restore r4-r12 */
	GET_CPUINFO(%r1)
	ld	%r4,(PC_SLBSAVE+32)(%r1)
	ld	%r5,(PC_SLBSAVE+40)(%r1)
	ld	%r6,(PC_SLBSAVE+48)(%r1)
	ld	%r7,(PC_SLBSAVE+56)(%r1)
	ld	%r8,(PC_SLBSAVE+64)(%r1)
	ld	%r9,(PC_SLBSAVE+72)(%r1)
	ld	%r10,(PC_SLBSAVE+80)(%r1)
	ld	%r11,(PC_SLBSAVE+88)(%r1)
	ld	%r12,(PC_SLBSAVE+96)(%r1)
	std	%r28,(PC_SLBSAVE+64)(%r1)
	std	%r29,(PC_SLBSAVE+72)(%r1)
	std	%r30,(PC_SLBSAVE+80)(%r1)
	std	%r31,(PC_SLBSAVE+88)(%r1)

	/* Restore kernel mapping */
	bl	restore_kernsrs

	/* Restore remaining registers */
	ld	%r28,(PC_SLBSAVE+64)(%r1)
	ld	%r29,(PC_SLBSAVE+72)(%r1)
	ld	%r30,(PC_SLBSAVE+80)(%r1)
	ld	%r31,(PC_SLBSAVE+88)(%r1)

	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+112)(%r1)
	mtxer	%r2
	ld	%r2,(PC_SLBSAVE+120)(%r1)
	mtlr	%r2
	ld	%r2,(PC_SLBSAVE+128)(%r1)
	mtctr	%r2
	ld	%r2,(PC_SLBSAVE+136)(%r1)
	mtlr	%r2

	/* Restore r0-r3 */
	ld	%r0,(PC_SLBSAVE+0)(%r1)
	ld	%r2,(PC_SLBSAVE+16)(%r1)
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	mfsprg1	%r1

	/* Back to whatever we were doing */
	rfid

/*
 * For ALI: has to save DSISR and DAR
 */
	.globl	CNAME(alitrap),CNAME(aliend)
CNAME(alitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mflr	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	ld	%r31,TRAP_GENTRAP(0)
	addi	%r31,%r31,(s_trap - generictrap)
	mtlr	%r31

	/* Put our exception vector in SPRG3 */
	li	%r31, EXC_ALI
	mtsprg3	%r31

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31
	blrl				/* Branch to s_trap */
CNAME(aliend):

/*
 * Similar to the above for DSI
 * Has to handle standard pagetable spills
 */
	.globl	CNAME(dsitrap),CNAME(dsiend)
	.p2align 3
CNAME(dsitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	mfcr	%r29			/* save CR */
	mfxer	%r30			/* save XER */
	mtsprg2	%r30			/* in SPRG2 */
	mfsrr1	%r31			/* test kernel mode */
	mtcr	%r31
	mflr	%r28			/* save LR (SP already saved) */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(disitrap-generictrap)
	mtlr	%r1
	blrl				/* Branch to disitrap */
CNAME(dsiend):

/*
 * Preamble code for DSI/ISI traps
 */
disitrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	GET_CPUINFO(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)

#ifdef KDB
	/* Try to detect a kernel stack overflow */
	mfsrr1	%r31
	mtcr	%r31
	bt	17,realtrap		/* branch if user mode */
	mfsprg1	%r31			/* get old SP */
	clrrdi	%r31,%r31,12		/* Round SP down to nearest page */
	sub.	%r30,%r31,%r30		/* SP - DAR */
	bge	1f
	neg	%r30,%r30		/* modulo value */
1:	cmpldi	%cr0,%r30,4096		/* is DAR within a page of SP? */
	bge	%cr0,realtrap		/* no, too far away. */

	/* Now convert this DSI into a DDB trap.  */
	GET_CPUINFO(%r1)
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DAR)(%r1) /* save DAR */
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get  r27 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R27)(%r1) /* save r27 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get  r28 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R28)(%r1) /* save r28 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get  r29 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R29)(%r1) /* save r29 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get  r30 */
	std	%r30,(PC_DBSAVE  +CPUSAVE_R30)(%r1) /* save r30 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get  r31 */
	std	%r31,(PC_DBSAVE  +CPUSAVE_R31)(%r1) /* save r31 */
	b	dbtrap
#endif

	/* XXX need stack probe here */
realtrap:
/* Test whether we already had PR set */
	mfsrr1	%r1
	mtcr	%r1
	mfsprg1	%r1			/* restore SP (might have been
					   overwritten) */
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
	ld	%r1,PC_CURPCB(%r1)
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	b	s_trap

/*
 * generictrap does some standard setup for trap handling to minimize
 * the code that needs to be installed in the actual vectors. It expects
 * the following conditions.
 *
 * R1 - Trap vector = (LR - 4) & (0xff00 | R1)
 * SPRG1 - Original R1 contents
 * SPRG2 - Original LR
 */
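/*
 * Worked example: for a copy of trapcode installed at, say, vector 0x700,
 * the blrl is the seventh instruction, so LR is 0x71c on arrival here;
 * (0x71c - 4) & (0xff00 | 0xe0) = 0x700, which is stored in SPRG3 and later
 * saved as the trapframe EXC value by FRAME_SETUP.
 */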

generichypertrap:
	mtsprg3 %r1
	mfspr	%r1, SPR_HSRR0
	mtsrr0	%r1
	mfspr	%r1, SPR_HSRR1
	mtsrr1	%r1
	mfsprg3	%r1
	.globl	CNAME(generictrap)
generictrap:
	/* Save R1 for computing the exception vector */
	mtsprg3 %r1

	/* Save interesting registers */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mfsprg2	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Compute the exception vector from the link register */
	mfsprg3 %r31
	ori	%r31,%r31,0xff00
	mflr	%r30
	addi	%r30,%r30,-4 /* The branch instruction, not the next */
	and	%r30,%r30,%r31
	mtsprg3	%r30

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31

s_trap:
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
u_trap:
	ld	%r1,PC_CURPCB(%r1)
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27

/*
 * Now the common trap catching code.
 */
k_trap:
	FRAME_SETUP(PC_TEMPSAVE)
/* Call C interrupt dispatcher: */
trapagain:
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(powerpc_interrupt)
	nop

	.globl	CNAME(trapexit)	/* backtrace code sentinel */
CNAME(trapexit):
/* Disable interrupts: */
	mfmsr	%r3
	andi.	%r3,%r3,~PSL_EE@l
	mtmsr	%r3
	isync
/* Test AST pending: */
	ld	%r5,FRAME_SRR1+48(%r1)
	mtcr	%r5
	bf	17,1f			/* branch if PSL_PR is false */

	GET_CPUINFO(%r3)		/* get per-CPU pointer */
	lwz	%r4, TD_FLAGS(%r13)	/* get thread flags value */
	lis	%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
	ori	%r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
	and.	%r4,%r4,%r5
	beq	1f
	mfmsr	%r3			/* re-enable interrupts */
	ori	%r3,%r3,PSL_EE@l
	mtmsr	%r3
	isync
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(ast)
	nop
	.globl	CNAME(asttrapexit)	/* backtrace code sentinel #2 */
CNAME(asttrapexit):
	b	trapexit		/* test ast ret value ? */
1:
	FRAME_LEAVE(PC_TEMPSAVE)
	rfid

#if defined(KDB)
/*
 * Deliberate entry to dbtrap
 */
ASENTRY_NOPROF(breakpoint)
	mtsprg1	%r1
	mfmsr	%r3
	mtsrr1	%r3
	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
	mtmsr	%r3			/* disable interrupts */
	isync
	GET_CPUINFO(%r3)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
	mflr	%r28
	li	%r29,EXC_BPT
	mtlr	%r29
	mfcr	%r29
	mtsrr0	%r28

/*
 * Now the kdb trap catching code.
 */
dbtrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	GET_TOCBASE(%r1)			/* get new SP */
	addis	%r1,%r1,TOC_REF(trapstk)@ha
	ld	%r1,TOC_REF(trapstk)@l(%r1)
	addi	%r1,%r1,(TRAPSTKSZ-48)

	FRAME_SETUP(PC_DBSAVE)
/* Call C trap code: */
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(db_trap_glue)
	nop
	or.	%r3,%r3,%r3
	bne	dbleave
/* This wasn't for KDB, so switch to real trap: */
	ld	%r3,FRAME_EXC+48(%r1)	/* save exception */
	GET_CPUINFO(%r4)
	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
	FRAME_LEAVE(PC_DBSAVE)
	mtsprg1	%r1			/* prepare for entrance to realtrap */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mflr	%r28
	mfcr	%r29
	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
	mtsprg3	%r31			/* SPRG3 was clobbered by FRAME_LEAVE */
	mfsprg1	%r1
	b	realtrap
dbleave:
	FRAME_LEAVE(PC_DBSAVE)
	rfid

/*
 * In case of KDB we want a separate trap catcher for it
 */
	.globl	CNAME(dblow),CNAME(dbend)
	.p2align 3
CNAME(dblow):
	mtsprg1	%r1			/* save SP */
	mtsprg2	%r29			/* save r29 */
	mfcr	%r29			/* save CR in r29 */
	mfsrr1	%r1
	mtcr	%r1
	bf	17,1f			/* branch if privileged */

	/* Unprivileged case */
	mtcr	%r29			/* put the condition register back */
        mfsprg2	%r29			/* ... and r29 */
        mflr	%r1			/* save LR */
	mtsprg2 %r1			/* And then in SPRG2 */

	ld	%r1, TRAP_ENTRY(0)	/* Get branch address */
	mtlr	%r1
	li	%r1, 0	 		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
	/* No fallthrough */
1:
	GET_CPUINFO(%r1)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
        mfsprg2	%r28				/* r29 holds cr...  */
        std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
        std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
        std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
        mflr	%r28					/* save LR */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(dbtrap-generictrap)
	mtlr	%r1
	blrl				/* Branch to dbtrap */
CNAME(dbend):
#endif /* KDB */