xref: /freebsd/sys/powerpc/aim/trap_subr64.S (revision 190cef3d)
1/* $FreeBSD$ */
2/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $	*/
3
4/*-
5 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6 * Copyright (C) 1995, 1996 TooLs GmbH.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by TooLs GmbH.
20 * 4. The name of TooLs GmbH may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/*
36 * NOTICE: This is not a standalone file.  To use it, #include it in
37 * your port's locore.S, like so:
38 *
39 *	#include <powerpc/aim/trap_subr.S>
40 */
41
42/* Locate the per-CPU data structure */
43#define GET_CPUINFO(r)  \
44        mfsprg0  r
45#define GET_TOCBASE(r)  \
46	lis	r,DMAP_BASE_ADDRESS@highesta;	/* To real-mode alias/dmap */ \
47	sldi	r,r,32;							\
48	ori	r,r,TRAP_TOCBASE;	/* Magic address for TOC */	\
49	ld	r,0(r)
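
/*
 * GET_TOCBASE builds a direct-map (real-mode alias) address: the top 16 bits
 * of DMAP_BASE_ADDRESS are shifted into place, TRAP_TOCBASE is ORed in as the
 * low offset, and the kernel TOC pointer stashed at that location is loaded.
 * The @highesta/sldi sequence relies on DMAP_BASE_ADDRESS having no bits set
 * below bit 48.
 */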
50
51/*
52 * Restore SRs for a pmap
53 *
54 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
55 */
56
57/*
58 * User SRs are loaded through a pointer to the current pmap.
59 */
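/*
 * PC_USERSLB points to a NULL-terminated array of 64-bit pointers, each
 * referring to a 16-byte {SLBV, SLBE} pair, as the loop below assumes; the
 * loop counter is ORed into each SLBE to select the slot it is installed in.
 */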
60restore_usersrs:
61	GET_CPUINFO(%r28)
62	ld	%r28,PC_USERSLB(%r28)
63	cmpdi	%r28, 0			/* If user SLB pointer NULL, exit */
64	beqlr
65
66	li	%r29, 0			/* Set the counter to zero */
67
68	slbia
69	slbmfee	%r31,%r29
70	clrrdi	%r31,%r31,28
71	slbie	%r31
721:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
73	cmpdi	%r31, 0			/* If NULL, stop */
74	beqlr
75
76	ld	%r30, 0(%r31)		/* Load SLBV */
77	ld	%r31, 8(%r31)		/* Load SLBE */
78	or	%r31, %r31, %r29	/*  Set SLBE slot */
79	slbmte	%r30, %r31		/* Install SLB entry */
80
81	addi	%r28, %r28, 8		/* Advance pointer */
82	addi	%r29, %r29, 1
83	b	1b			/* Repeat */
84
85/*
86 * Kernel SRs are loaded directly from the PCPU fields
87 */
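/*
 * As the loop below assumes, PC_KERNSLB is an array of 64 {SLBV, SLBE} pairs
 * of 16 bytes each, indexed by slot number; USER_SLB_SLOT is skipped so the
 * user segment mapping is left untouched.
 */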
88restore_kernsrs:
89	GET_CPUINFO(%r28)
90	addi	%r28,%r28,PC_KERNSLB
91	ld	%r29,16(%r28)		/* One past USER_SLB_SLOT */
92	cmpdi	%r29,0
93	beqlr				/* If first kernel entry is invalid,
94					 * SLBs not in use, so exit early */
95
96	/* Otherwise, set up SLBs */
97	li	%r29, 0			/* Set the counter to zero */
98
99	slbia
100	slbmfee	%r31,%r29
101	clrrdi	%r31,%r31,28
102	slbie	%r31
1031:	cmpdi	%r29, USER_SLB_SLOT	/* Skip the user slot */
104	beq-	2f
105
106	ld	%r31, 8(%r28)		/* Load SLBE */
107	cmpdi	%r31, 0			/* If SLBE is not valid, stop */
108	beqlr
109	ld	%r30, 0(%r28)		/* Load SLBV  */
110	slbmte	%r30, %r31		/* Install SLB entry */
111
1122:	addi	%r28, %r28, 16		/* Advance pointer */
113	addi	%r29, %r29, 1
114	cmpdi	%r29, 64		/* Repeat if we are not at the end */
115	blt	1b
116	blr
117
118/*
119 * FRAME_SETUP assumes:
120 *	SPRG1		SP (1)
121 * 	SPRG3		trap type
122 *	savearea	r27-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
123 *	r28		LR
124 *	r29		CR
125 *	r30		scratch
126 *	r31		scratch
127 *	r1		kernel stack
128 *	SRR0/1		as at start of trap
129 *
130 * NOTE: SPRG1 is only live while translation is off (from the vector entry
131 * until the mfsprg1 below), so faults taken with the MMU on may reuse it.
132 */
133#define	FRAME_SETUP(savearea)						\
134/* Have to enable translation to allow access of kernel stack: */	\
135	GET_CPUINFO(%r31);						\
136	mfsrr0	%r30;							\
137	std	%r30,(savearea+CPUSAVE_SRR0)(%r31);	/* save SRR0 */	\
138	mfsrr1	%r30;							\
139	std	%r30,(savearea+CPUSAVE_SRR1)(%r31);	/* save SRR1 */	\
140	mfsprg1	%r31;			/* get saved SP; SPRG1 is free again */ \
141	mfmsr	%r30;							\
142	ori	%r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */	\
143	mtmsr	%r30;			/* stack can now be accessed */	\
144	isync;								\
145	stdu	%r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
146	std	%r0, FRAME_0+48(%r1);	/* save r0 in the trapframe */	\
147	std	%r31,FRAME_1+48(%r1);	/* save SP   "      "       */	\
148	std	%r2, FRAME_2+48(%r1);	/* save r2   "      "       */	\
149	std	%r28,FRAME_LR+48(%r1);	/* save LR   "      "       */	\
150	std	%r29,FRAME_CR+48(%r1);	/* save CR   "      "       */	\
151	GET_CPUINFO(%r2);						\
152	ld	%r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */	\
153	ld	%r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */	\
154	ld	%r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */	\
155	ld	%r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */	\
156	ld	%r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */	\
157	std	%r3,  FRAME_3+48(%r1);	/* save r3-r31 */		\
158	std	%r4,  FRAME_4+48(%r1);					\
159	std	%r5,  FRAME_5+48(%r1);					\
160	std	%r6,  FRAME_6+48(%r1);					\
161	std	%r7,  FRAME_7+48(%r1);					\
162	std	%r8,  FRAME_8+48(%r1);					\
163	std	%r9,  FRAME_9+48(%r1);					\
164	std	%r10, FRAME_10+48(%r1);					\
165	std	%r11, FRAME_11+48(%r1);					\
166	std	%r12, FRAME_12+48(%r1);					\
167	std	%r13, FRAME_13+48(%r1);					\
168	std	%r14, FRAME_14+48(%r1);					\
169	std	%r15, FRAME_15+48(%r1);					\
170	std	%r16, FRAME_16+48(%r1);					\
171	std	%r17, FRAME_17+48(%r1);					\
172	std	%r18, FRAME_18+48(%r1);					\
173	std	%r19, FRAME_19+48(%r1);					\
174	std	%r20, FRAME_20+48(%r1);					\
175	std	%r21, FRAME_21+48(%r1);					\
176	std	%r22, FRAME_22+48(%r1);					\
177	std	%r23, FRAME_23+48(%r1);					\
178	std	%r24, FRAME_24+48(%r1);					\
179	std	%r25, FRAME_25+48(%r1);					\
180	std	%r26, FRAME_26+48(%r1);					\
181	std	%r27, FRAME_27+48(%r1);					\
182	std	%r28, FRAME_28+48(%r1);					\
183	std	%r29, FRAME_29+48(%r1);					\
184	std	%r30, FRAME_30+48(%r1);					\
185	std	%r31, FRAME_31+48(%r1);					\
186	ld	%r28,(savearea+CPUSAVE_AIM_DAR)(%r2);  /* saved DAR */	\
187	ld	%r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
188	ld	%r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */	\
189	ld	%r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */	\
190	mfxer	%r3;							\
191	mfctr	%r4;							\
192	mfsprg3	%r5;							\
193	std	%r3, FRAME_XER+48(1);	/* save xer/ctr/exc */		\
194	std	%r4, FRAME_CTR+48(1);					\
195	std	%r5, FRAME_EXC+48(1);					\
196	std	%r28,FRAME_AIM_DAR+48(1);				\
197	std	%r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */	\
198	std	%r30,FRAME_SRR0+48(1);					\
199	std	%r31,FRAME_SRR1+48(1);					\
200	ld	%r13,PC_CURTHREAD(%r2)	/* set kernel curthread */
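
/*
 * The "+48" in the offsets above steps over the 48-byte ELFv1-style stack
 * frame header (back chain, CR and LR save, two reserved doublewords and the
 * TOC save slot); the trapframe proper starts right after it.  The extra 288
 * bytes reserved by the stdu appear to be there to keep the interrupted
 * context's red zone (the 288 bytes the ABI allows below the stack pointer)
 * intact.
 */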
201
202#define	FRAME_LEAVE(savearea)						\
203/* Disable exceptions: */						\
204	mfmsr	%r2;							\
205	andi.	%r2,%r2,~PSL_EE@l;					\
206	mtmsr	%r2;							\
207	isync;								\
208/* Now restore regs: */							\
209	ld	%r2,FRAME_SRR0+48(%r1);					\
210	ld	%r3,FRAME_SRR1+48(%r1);					\
211	ld	%r4,FRAME_CTR+48(%r1);					\
212	ld	%r5,FRAME_XER+48(%r1);					\
213	ld	%r6,FRAME_LR+48(%r1);					\
214	GET_CPUINFO(%r7);						\
215	std	%r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */	\
216	std	%r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */	\
217	ld	%r7,FRAME_CR+48(%r1);					\
218	mtctr	%r4;							\
219	mtxer	%r5;							\
220	mtlr	%r6;							\
221	mtsprg2	%r7;			/* save cr */			\
222	ld	%r31,FRAME_31+48(%r1);   /* restore r0-31 */		\
223	ld	%r30,FRAME_30+48(%r1);					\
224	ld	%r29,FRAME_29+48(%r1);					\
225	ld	%r28,FRAME_28+48(%r1);					\
226	ld	%r27,FRAME_27+48(%r1);					\
227	ld	%r26,FRAME_26+48(%r1);					\
228	ld	%r25,FRAME_25+48(%r1);					\
229	ld	%r24,FRAME_24+48(%r1);					\
230	ld	%r23,FRAME_23+48(%r1);					\
231	ld	%r22,FRAME_22+48(%r1);					\
232	ld	%r21,FRAME_21+48(%r1);					\
233	ld	%r20,FRAME_20+48(%r1);					\
234	ld	%r19,FRAME_19+48(%r1);					\
235	ld	%r18,FRAME_18+48(%r1);					\
236	ld	%r17,FRAME_17+48(%r1);					\
237	ld	%r16,FRAME_16+48(%r1);					\
238	ld	%r15,FRAME_15+48(%r1);					\
239	ld	%r14,FRAME_14+48(%r1);					\
240	ld	%r13,FRAME_13+48(%r1);					\
241	ld	%r12,FRAME_12+48(%r1);					\
242	ld	%r11,FRAME_11+48(%r1);					\
243	ld	%r10,FRAME_10+48(%r1);					\
244	ld	%r9, FRAME_9+48(%r1);					\
245	ld	%r8, FRAME_8+48(%r1);					\
246	ld	%r7, FRAME_7+48(%r1);					\
247	ld	%r6, FRAME_6+48(%r1);					\
248	ld	%r5, FRAME_5+48(%r1);					\
249	ld	%r4, FRAME_4+48(%r1);					\
250	ld	%r3, FRAME_3+48(%r1);					\
251	ld	%r2, FRAME_2+48(%r1);					\
252	ld	%r0, FRAME_0+48(%r1);					\
253	ld	%r1, FRAME_1+48(%r1);					\
254/* Can't touch %r1 from here on */					\
255	mtsprg3	%r3;			/* save r3 */			\
256/* Disable translation, machine check and recoverability: */		\
257	mfmsr	%r3;							\
258	andi.	%r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l;		\
259	mtmsr	%r3;							\
260	isync;								\
261/* Decide whether we return to user mode: */				\
262	GET_CPUINFO(%r3);						\
263	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);			\
264	mtcr	%r3;							\
265	bf	17,1f;			/* branch if PSL_PR is false */	\
266/* Restore user SRs */							\
267	GET_CPUINFO(%r3);						\
268	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
269	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
270	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
271	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
272	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
273	mflr	%r27;			/* preserve LR */		\
274	bl	restore_usersrs;	/* uses r28-r31 */		\
275	mtlr	%r27;							\
276	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
277	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
278	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
279	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
280	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
2811:	mfsprg2	%r3;			/* restore cr */		\
282	mtcr	%r3;							\
283	GET_CPUINFO(%r3);						\
284	ld	%r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */	\
285	mtsrr0	%r3;							\
286	GET_CPUINFO(%r3);						\
287	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */	\
288	mtsrr1	%r3;							\
289	mfsprg3	%r3			/* restore r3 */
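
/*
 * On the way out, FRAME_LEAVE parks CR in SPRG2 and r3 in SPRG3 while
 * translation is switched off, then recovers both above.  This reuse means
 * SPRG3 no longer holds the trap type once FRAME_LEAVE has run; the KDB glue
 * further down accounts for that.
 */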
290
291#ifdef KDTRACE_HOOKS
292	.data
293	.globl	dtrace_invop_calltrap_addr
294	.align	8
295	.type	dtrace_invop_calltrap_addr, @object
296        .size	dtrace_invop_calltrap_addr, 8
297dtrace_invop_calltrap_addr:
298	.word	0
299	.word	0
300
301	.text
302#endif
303
304/*
305 * Processor reset exception handler. These are typically
306 * the first instructions the processor executes after a
307 * software reset. We do this in two stages so that we are
308 * not still hanging around in the trap handling region
309 * once the MMU is turned on.
310 */
311	.globl	CNAME(rstcode), CNAME(rstcodeend), CNAME(cpu_reset_handler)
312	.globl	CNAME(cpu_wakeup_handler)
313	.p2align 3
314CNAME(rstcode):
315	/*
316	 * Check if this is software reset or
317	 * processor is waking up from power saving mode
318	 * It is software reset when 46:47 = 0b00
319	 */
320	mfsrr1	%r9			/* Load SRR1 into r9 */
321	andis.	%r9,%r9,0x3		/* Isolate SRR1 bits 46:47 */
322	beq	2f			/* Branch if software reset */
323	bl	1f
324	.llong	cpu_wakeup_handler
325
326	/* It is software reset */
327
328	/* Explicitly set MSR[SF] */
3292:	mfmsr	%r9
330	li	%r8,1
331	insrdi	%r9,%r8,1,0
332	mtmsrd	%r9
333	isync
334
335	bl	1f
336	.llong	cpu_reset_handler /* Make sure to maintain 8-byte alignment */
337
3381:	mflr	%r9
339	ld	%r9,0(%r9)
340	mtlr	%r9
341
342	blr
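
	/*
	 * The bl/.llong pairs above are how this position-independent,
	 * real-mode stub picks up a 64-bit absolute address: the bl leaves
	 * the address of the .llong in LR, and the code at 1: loads the
	 * target through it before branching there.
	 */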
343CNAME(rstcodeend):
344
345cpu_reset_handler:
346	GET_TOCBASE(%r2)
347
348	ld	%r1,TOC_REF(tmpstk)(%r2)	/* get new SP */
349	addi	%r1,%r1,(TMPSTKSZ-48)
350
351	bl	CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
352	nop
353	lis	%r3,1@l
354	bl	CNAME(pmap_cpu_bootstrap)	/* Turn on virtual memory */
355	nop
356	bl	CNAME(cpudep_ap_bootstrap)	/* Set up PCPU and stack */
357	nop
358	mr	%r1,%r3				/* Use new stack */
359	bl	CNAME(cpudep_ap_setup)
360	nop
361	GET_CPUINFO(%r5)
362	ld	%r3,(PC_RESTORE)(%r5)
363	cmpldi	%cr0,%r3,0
364	beq	%cr0,2f
365	nop
366	li	%r4,1
367	bl	CNAME(longjmp)
368	nop
3692:
370#ifdef SMP
371	bl	CNAME(machdep_ap_bootstrap)	/* And away! */
372	nop
373#endif
374
375	/* Should not be reached */
3769:
377	b	9b
378
379cpu_wakeup_handler:
380	GET_TOCBASE(%r2)
381
382	/* Check for a false wakeup due to a badly set SRR1 (e.g. by OPAL) */
383	ld	%r3,TOC_REF(can_wakeup)(%r2)
384	ld	%r3,0(%r3)
385	cmpdi	%r3,0
386	beq	cpu_reset_handler
387
388	/* Turn on MMU after return from interrupt */
389	mfsrr1	%r3
390	ori	%r3,%r3,(PSL_IR | PSL_DR)
391	mtsrr1	%r3
392
393	/* Turn on MMU (needed to access PCB) */
394	mfmsr	%r3
395	ori	%r3,%r3,(PSL_IR | PSL_DR)
396	mtmsr	%r3
397	isync
398
399	mfsprg0	%r3
400
401	ld	%r3,PC_CURTHREAD(%r3)	/* Get current thread */
402	ld	%r3,TD_PCB(%r3)		/* Get PCB of current thread */
403	ld	%r12,PCB_CONTEXT(%r3)	/* Load the non-volatile GP regs. */
404	ld	%r13,PCB_CONTEXT+1*8(%r3)
405	ld	%r14,PCB_CONTEXT+2*8(%r3)
406	ld	%r15,PCB_CONTEXT+3*8(%r3)
407	ld	%r16,PCB_CONTEXT+4*8(%r3)
408	ld	%r17,PCB_CONTEXT+5*8(%r3)
409	ld	%r18,PCB_CONTEXT+6*8(%r3)
410	ld	%r19,PCB_CONTEXT+7*8(%r3)
411	ld	%r20,PCB_CONTEXT+8*8(%r3)
412	ld	%r21,PCB_CONTEXT+9*8(%r3)
413	ld	%r22,PCB_CONTEXT+10*8(%r3)
414	ld	%r23,PCB_CONTEXT+11*8(%r3)
415	ld	%r24,PCB_CONTEXT+12*8(%r3)
416	ld	%r25,PCB_CONTEXT+13*8(%r3)
417	ld	%r26,PCB_CONTEXT+14*8(%r3)
418	ld	%r27,PCB_CONTEXT+15*8(%r3)
419	ld	%r28,PCB_CONTEXT+16*8(%r3)
420	ld	%r29,PCB_CONTEXT+17*8(%r3)
421	ld	%r30,PCB_CONTEXT+18*8(%r3)
422	ld	%r31,PCB_CONTEXT+19*8(%r3)
423	ld	%r5,PCB_CR(%r3)		/* Load the condition register */
424	mtcr	%r5
425	ld	%r5,PCB_LR(%r3)		/* Load the link register */
426	mtsrr0	%r5
427	ld	%r1,PCB_SP(%r3)		/* Load the stack pointer */
428	ld	%r2,PCB_TOC(%r3)	/* Load the TOC pointer */
429
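	/*
	 * The rfid below returns to the saved PCB link register with the
	 * MMU enabled (SRR1 was patched above), resuming the context just
	 * restored from the PCB.
	 */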
430	rfid
431
432/*
433 * This code gets copied to all the trap vectors
434 * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions!
435 */
436
437	.globl	CNAME(trapcode),CNAME(trapcodeend)
438	.p2align 3
439CNAME(trapcode):
440	mtsprg1	%r1			/* save SP */
441	mflr	%r1			/* Save the old LR in r1 */
442	mtsprg2 %r1			/* And then in SPRG2 */
443	ld	%r1,TRAP_GENTRAP(0)
444	mtlr	%r1
445	li	%r1, 0xe0		/* How to get the vector from LR */
446	blrl				/* Branch to generictrap */
447CNAME(trapcodeend):
448
449/* Same thing for traps setting HSRR0/HSRR1 */
450	.globl	CNAME(hypertrapcode),CNAME(hypertrapcodeend)
451	.p2align 3
452CNAME(hypertrapcode):
453	mtsprg1	%r1			/* save SP */
454	mflr	%r1			/* Save the old LR in r1 */
455	mtsprg2 %r1			/* And then in SPRG2 */
456	ld	%r1,TRAP_GENTRAP(0)
457	addi	%r1,%r1,(generichypertrap-generictrap)
458	mtlr	%r1
459	li	%r1, 0xe0		/* How to get the vector from LR */
460	blrl				/* Branch to generichypertrap */
461CNAME(hypertrapcodeend):
462
463/*
464 * For SLB misses: do special things for the kernel
465 *
466 * Note: SPRG1 is always safe to overwrite any time the MMU is on, which is
467 * the only time this can be called.
468 */
469	.globl	CNAME(slbtrap),CNAME(slbtrapend)
470	.p2align 3
471CNAME(slbtrap):
472	mtsprg1	%r1			/* save SP */
473	GET_CPUINFO(%r1)
474	std	%r2,(PC_SLBSAVE+16)(%r1)
475	mfcr	%r2			/* save CR */
476	std	%r2,(PC_SLBSAVE+104)(%r1)
477	mfsrr1	%r2			/* test kernel mode */
478	mtcr	%r2
479	bf	17,2f			/* branch if PSL_PR is false */
480	/* User mode */
481	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
482	mtcr	%r2
483	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2 */
484	mflr	%r1			/* Save the old LR in r1 */
485	mtsprg2 %r1			/* And then in SPRG2 */
486					/* 52 bytes so far */
487	bl	1f
488	.llong	generictrap
4891:	mflr	%r1
490	ld	%r1,0(%r1)
491	mtlr	%r1
492	li	%r1, 0x80		/* How to get the vector from LR */
493	blrl				/* Branch to generictrap */
494					/* 84 bytes */
4952:	mflr	%r2			/* Save the old LR in r2 */
496	nop
497	bl	3f			/* Begin dance to jump to kern_slbtrap*/
498	.llong	kern_slbtrap
4993:	mflr	%r1
500	ld	%r1,0(%r1)
501	mtlr	%r1
502	GET_CPUINFO(%r1)
503	blrl				/* 124 bytes -- 4 to spare */
504CNAME(slbtrapend):
505
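/*
 * Scratch layout in PC_SLBSAVE, as used by slbtrap and kern_slbtrap:
 *   0 r0, 8 old SP, 16 r2, 24 r3, 32..96 r4-r12, 104 CR, 112 XER,
 *   120 LR at the time of the handler call, 128 CTR, 136 pre-trap LR.
 * Offsets 64-88 are briefly reused to stash r28-r31 around the
 * restore_kernsrs call once r8-r11 have been reloaded from them.
 */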
506kern_slbtrap:
507	std	%r2,(PC_SLBSAVE+136)(%r1) /* old LR */
508	std	%r3,(PC_SLBSAVE+24)(%r1) /* save R3 */
509
510	/* Check if this needs to be handled as a regular trap (userseg miss) */
511	mflr	%r2
512	andi.	%r2,%r2,0xff80
513	cmpwi	%r2,0x380
514	bne	1f
515	mfdar	%r2
516	b	2f
5171:	mfsrr0	%r2
5182:	/* r2 now contains the fault address */
519	lis	%r3,SEGMENT_MASK@highesta
520	ori	%r3,%r3,SEGMENT_MASK@highera
521	sldi	%r3,%r3,32
522	oris	%r3,%r3,SEGMENT_MASK@ha
523	ori	%r3,%r3,SEGMENT_MASK@l
524	and	%r2,%r2,%r3	/* R2 = segment base address */
525	lis	%r3,USER_ADDR@highesta
526	ori	%r3,%r3,USER_ADDR@highera
527	sldi	%r3,%r3,32
528	oris	%r3,%r3,USER_ADDR@ha
529	ori	%r3,%r3,USER_ADDR@l
530	cmpd	%r2,%r3		/* Compare fault base to USER_ADDR */
531	bne	3f
532
533	/* User seg miss, handle as a regular trap */
534	ld	%r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
535	mtcr	%r2
536	ld	%r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */
537	ld	%r3,(PC_SLBSAVE+24)(%r1)
538	ld	%r1,(PC_SLBSAVE+136)(%r1) /* Save the old LR in r1 */
539	mtsprg2 %r1			/* And then in SPRG2 */
540	li	%r1, 0x80		/* How to get the vector from LR */
541	b	generictrap		/* Retain old LR using b */
542
5433:	/* Real kernel SLB miss */
544	std	%r0,(PC_SLBSAVE+0)(%r1)	/* free all volatile regs */
545	mfsprg1	%r2			/* Old R1 */
546	std	%r2,(PC_SLBSAVE+8)(%r1)
547	/* R2,R3 already saved */
548	std	%r4,(PC_SLBSAVE+32)(%r1)
549	std	%r5,(PC_SLBSAVE+40)(%r1)
550	std	%r6,(PC_SLBSAVE+48)(%r1)
551	std	%r7,(PC_SLBSAVE+56)(%r1)
552	std	%r8,(PC_SLBSAVE+64)(%r1)
553	std	%r9,(PC_SLBSAVE+72)(%r1)
554	std	%r10,(PC_SLBSAVE+80)(%r1)
555	std	%r11,(PC_SLBSAVE+88)(%r1)
556	std	%r12,(PC_SLBSAVE+96)(%r1)
557	/* CR already saved */
558	mfxer	%r2			/* save XER */
559	std	%r2,(PC_SLBSAVE+112)(%r1)
560	mflr	%r2			/* save LR (SP already saved) */
561	std	%r2,(PC_SLBSAVE+120)(%r1)
562	mfctr	%r2			/* save CTR */
563	std	%r2,(PC_SLBSAVE+128)(%r1)
564
565	/* Call handler */
566	addi	%r1,%r1,PC_SLBSTACK-48+1024
567	li	%r2,~15
568	and	%r1,%r1,%r2
569	GET_TOCBASE(%r2)
570	mflr	%r3
571	andi.	%r3,%r3,0xff80
572	mfdar	%r4
573	mfsrr0	%r5
574	bl	handle_kernel_slb_spill
575	nop
576
577	/* Save r28-31, restore r4-r12 */
578	GET_CPUINFO(%r1)
579	ld	%r4,(PC_SLBSAVE+32)(%r1)
580	ld	%r5,(PC_SLBSAVE+40)(%r1)
581	ld	%r6,(PC_SLBSAVE+48)(%r1)
582	ld	%r7,(PC_SLBSAVE+56)(%r1)
583	ld	%r8,(PC_SLBSAVE+64)(%r1)
584	ld	%r9,(PC_SLBSAVE+72)(%r1)
585	ld	%r10,(PC_SLBSAVE+80)(%r1)
586	ld	%r11,(PC_SLBSAVE+88)(%r1)
587	ld	%r12,(PC_SLBSAVE+96)(%r1)
588	std	%r28,(PC_SLBSAVE+64)(%r1)
589	std	%r29,(PC_SLBSAVE+72)(%r1)
590	std	%r30,(PC_SLBSAVE+80)(%r1)
591	std	%r31,(PC_SLBSAVE+88)(%r1)
592
593	/* Restore kernel mapping */
594	bl	restore_kernsrs
595
596	/* Restore remaining registers */
597	ld	%r28,(PC_SLBSAVE+64)(%r1)
598	ld	%r29,(PC_SLBSAVE+72)(%r1)
599	ld	%r30,(PC_SLBSAVE+80)(%r1)
600	ld	%r31,(PC_SLBSAVE+88)(%r1)
601
602	ld	%r2,(PC_SLBSAVE+104)(%r1)
603	mtcr	%r2
604	ld	%r2,(PC_SLBSAVE+112)(%r1)
605	mtxer	%r2
606	ld	%r2,(PC_SLBSAVE+120)(%r1)
607	mtlr	%r2
608	ld	%r2,(PC_SLBSAVE+128)(%r1)
609	mtctr	%r2
610	ld	%r2,(PC_SLBSAVE+136)(%r1)
611	mtlr	%r2
612
613	/* Restore r0-r3 */
614	ld	%r0,(PC_SLBSAVE+0)(%r1)
615	ld	%r2,(PC_SLBSAVE+16)(%r1)
616	ld	%r3,(PC_SLBSAVE+24)(%r1)
617	mfsprg1	%r1
618
619	/* Back to whatever we were doing */
620	rfid
621
622/*
623 * For ALI: has to save DSISR and DAR
624 */
625	.globl	CNAME(alitrap),CNAME(aliend)
626CNAME(alitrap):
627	mtsprg1	%r1			/* save SP */
628	GET_CPUINFO(%r1)
629	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
630	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
631	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
632	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
633	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
634	mfdar	%r30
635	mfdsisr	%r31
636	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
637	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
638	mfsprg1	%r1			/* restore SP, in case of branch */
639	mflr	%r28			/* save LR */
640	mfcr	%r29			/* save CR */
641
642	/* Begin dance to branch to s_trap in a bit */
643	b	1f
644	.p2align 3
6451:	nop
646	bl	1f
647	.llong	s_trap
6481:	mflr	%r31
649	ld	%r31,0(%r31)
650	mtlr	%r31
651
652	/* Put our exception vector in SPRG3 */
653	li	%r31, EXC_ALI
654	mtsprg3	%r31
655
656	/* Test whether we already had PR set */
657	mfsrr1	%r31
658	mtcr	%r31
659	blrl
660CNAME(aliend):
661
662/*
663 * Similar to the above for DSI
664 * Has to handle standard pagetable spills
665 */
666	.globl	CNAME(dsitrap),CNAME(dsiend)
667	.p2align 3
668CNAME(dsitrap):
669	mtsprg1	%r1			/* save SP */
670	GET_CPUINFO(%r1)
671	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
672	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
673	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
674	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
675	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
676	mfcr	%r29			/* save CR */
677	mfxer	%r30			/* save XER */
678	mtsprg2	%r30			/* in SPRG2 */
679	mfsrr1	%r31			/* test kernel mode */
680	mtcr	%r31
681	mflr	%r28			/* save LR (SP already saved) */
682	bl	1f			/* Begin branching to disitrap */
683	.llong	disitrap
6841:	mflr	%r1
685	ld	%r1,0(%r1)
686	mtlr	%r1
687	blrl				/* Branch to disitrap */
688CNAME(dsiend):
689
690/*
691 * Preamble code for DSI/ISI traps
692 */
693disitrap:
694	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
695	mflr	%r1
696	andi.	%r1,%r1,0xff00
697	mtsprg3	%r1
698
699	GET_CPUINFO(%r1)
700	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
701	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
702	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
703	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
704	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
705	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
706	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
707	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
708	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
709	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
710	mfdar	%r30
711	mfdsisr	%r31
712	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
713	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
714
715#ifdef KDB
716	/* Try to detect a kernel stack overflow */
717	mfsrr1	%r31
718	mtcr	%r31
719	bt	17,realtrap		/* branch if user mode */
720	mfsprg1	%r31			/* get old SP */
721	clrrdi	%r31,%r31,12		/* Round SP down to nearest page */
722	sub.	%r30,%r31,%r30		/* SP - DAR */
723	bge	1f
724	neg	%r30,%r30		/* modulo value */
7251:	cmpldi	%cr0,%r30,4096		/* is DAR within a page of SP? */
726	bge	%cr0,realtrap		/* no, too far away. */
727
728	/* Now convert this DSI into a DDB trap.  */
729	GET_CPUINFO(%r1)
730	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */
731	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DAR)(%r1) /* save DAR */
732	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
733	std	%r30,(PC_DBSAVE  +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
734	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get  r27 */
735	std	%r31,(PC_DBSAVE  +CPUSAVE_R27)(%r1) /* save r27 */
736	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get  r28 */
737	std	%r30,(PC_DBSAVE  +CPUSAVE_R28)(%r1) /* save r28 */
738	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get  r29 */
739	std	%r31,(PC_DBSAVE  +CPUSAVE_R29)(%r1) /* save r29 */
740	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get  r30 */
741	std	%r30,(PC_DBSAVE  +CPUSAVE_R30)(%r1) /* save r30 */
742	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get  r31 */
743	std	%r31,(PC_DBSAVE  +CPUSAVE_R31)(%r1) /* save r31 */
744	b	dbtrap
745#endif
746
747	/* XXX need stack probe here */
748realtrap:
749/* Test whether we already had PR set */
750	mfsrr1	%r1
751	mtcr	%r1
752	mfsprg1	%r1			/* restore SP (might have been
753					   overwritten) */
754	bf	17,k_trap		/* branch if PSL_PR is false */
755	GET_CPUINFO(%r1)
756	ld	%r1,PC_CURPCB(%r1)
757	mr	%r27,%r28		/* Save LR, r29 */
758	mtsprg2	%r29
759	bl	restore_kernsrs		/* enable kernel mapping */
760	mfsprg2	%r29
761	mr	%r28,%r27
762	b	s_trap
763
764/*
765 * generictrap does some standard setup for trap handling to minimize
766 * the code that needs to be installed in the actual vectors. It expects
767 * the following conditions.
768 *
769 * R1 - Trap vector = LR & (0xff00 | R1)
770 * SPRG1 - Original R1 contents
771 * SPRG2 - Original LR
772 */
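/*
 * Worked example: a copy of trapcode installed at, say, the 0x700 program
 * interrupt vector loads 0xe0 into r1 and reaches its blrl at offset 0x18,
 * so LR ends up as 0x71c.  generictrap backs LR up by 4 and masks it with
 * (0xff00 | 0xe0) = 0xffe0, recovering 0x700 as the trap type for SPRG3.
 */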
773
774generichypertrap:
775	mtsprg3 %r1
776	mfspr	%r1, SPR_HSRR0
777	mtsrr0	%r1
778	mfspr	%r1, SPR_HSRR1
779	mtsrr1	%r1
780	mfsprg3	%r1
781	.globl	CNAME(generictrap)
782generictrap:
783	/* Save R1 for computing the exception vector */
784	mtsprg3 %r1
785
786	/* Save interesting registers */
787	GET_CPUINFO(%r1)
788	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
789	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
790	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
791	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
792	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
793	mfdar	%r30
794	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
795	mfsprg1	%r1			/* restore SP, in case of branch */
796	mfsprg2	%r28			/* save LR */
797	mfcr	%r29			/* save CR */
798
799	/* Compute the exception vector from the link register */
800	mfsprg3 %r31
801	ori	%r31,%r31,0xff00
802	mflr	%r30
803	addi	%r30,%r30,-4 /* The branch instruction, not the next */
804	and	%r30,%r30,%r31
805	mtsprg3	%r30
806
807	/* Test whether we already had PR set */
808	mfsrr1	%r31
809	mtcr	%r31
810
811s_trap:
812	bf	17,k_trap		/* branch if PSL_PR is false */
813	GET_CPUINFO(%r1)
814u_trap:
815	ld	%r1,PC_CURPCB(%r1)
816	mr	%r27,%r28		/* Save LR, r29 */
817	mtsprg2	%r29
818	bl	restore_kernsrs		/* enable kernel mapping */
819	mfsprg2	%r29
820	mr	%r28,%r27
821
822/*
823 * Now the common trap catching code.
824 */
825k_trap:
826	FRAME_SETUP(PC_TEMPSAVE)
827/* Call C interrupt dispatcher: */
828trapagain:
829	GET_TOCBASE(%r2)
830	addi	%r3,%r1,48
831	bl	CNAME(powerpc_interrupt)
832	nop
833
834	.globl	CNAME(trapexit)	/* backtrace code sentinel */
835CNAME(trapexit):
836/* Disable interrupts: */
837	mfmsr	%r3
838	andi.	%r3,%r3,~PSL_EE@l
839	mtmsr	%r3
840	isync
841/* Test AST pending: */
842	ld	%r5,FRAME_SRR1+48(%r1)
843	mtcr	%r5
844	bf	17,1f			/* branch if PSL_PR is false */
845
846	GET_CPUINFO(%r3)		/* get per-CPU pointer */
847	lwz	%r4, TD_FLAGS(%r13)	/* get thread flags value */
848	lis	%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
849	ori	%r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
850	and.	%r4,%r4,%r5
851	beq	1f
852	mfmsr	%r3			/* re-enable interrupts */
853	ori	%r3,%r3,PSL_EE@l
854	mtmsr	%r3
855	isync
856	GET_TOCBASE(%r2)
857	addi	%r3,%r1,48
858	bl	CNAME(ast)
859	nop
860	.globl	CNAME(asttrapexit)	/* backtrace code sentinel #2 */
861CNAME(asttrapexit):
862	b	trapexit		/* test ast ret value ? */
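
/*
 * Branching back to trapexit re-tests the AST and reschedule flags with
 * interrupts disabled, so nothing posted while ast() ran is missed before
 * the final FRAME_LEAVE/rfid.
 */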
8631:
864	FRAME_LEAVE(PC_TEMPSAVE)
865	rfid
866
867#if defined(KDB)
868/*
869 * Deliberate entry to dbtrap
870 */
871ASENTRY_NOPROF(breakpoint)
872	mtsprg1	%r1
873	mfmsr	%r3
874	mtsrr1	%r3
875	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
876	mtmsr	%r3			/* disable interrupts */
877	isync
878	GET_CPUINFO(%r3)
879	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
880	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
881	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
882	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
883	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
884	mflr	%r28
885	li	%r29,EXC_BPT
886	mtlr	%r29
887	mfcr	%r29
888	mtsrr0	%r28
889
890/*
891 * Now the kdb trap catching code.
892 */
893dbtrap:
894	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
895	mflr	%r1
896	andi.	%r1,%r1,0xff00
897	mtsprg3	%r1
898
899	GET_TOCBASE(%r1)			/* get new SP */
900	ld	%r1,TOC_REF(tmpstk)(%r1)
901	addi	%r1,%r1,(TMPSTKSZ-48)
902
903	FRAME_SETUP(PC_DBSAVE)
904/* Call C trap code: */
905	GET_TOCBASE(%r2)
906	addi	%r3,%r1,48
907	bl	CNAME(db_trap_glue)
908	nop
909	or.	%r3,%r3,%r3
910	bne	dbleave
911/* This wasn't for KDB, so switch to real trap: */
912	ld	%r3,FRAME_EXC+48(%r1)	/* save exception */
913	GET_CPUINFO(%r4)
914	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
915	FRAME_LEAVE(PC_DBSAVE)
916	mtsprg1	%r1			/* prepare for entrance to realtrap */
917	GET_CPUINFO(%r1)
918	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
919	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
920	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
921	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
922	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
923	mflr	%r28
924	mfcr	%r29
925	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
926	mtsprg3	%r31			/* SPRG3 was clobbered by FRAME_LEAVE */
927	mfsprg1	%r1
928	b	realtrap
929dbleave:
930	FRAME_LEAVE(PC_DBSAVE)
931	rfid
932
933/*
934 * In case of KDB we want a separate trap catcher for it
935 */
936	.globl	CNAME(dblow),CNAME(dbend)
937	.p2align 3
938CNAME(dblow):
939	mtsprg1	%r1			/* save SP */
940	mtsprg2	%r29			/* save r29 */
941	mfcr	%r29			/* save CR in r29 */
942	mfsrr1	%r1
943	mtcr	%r1
944	bf	17,1f			/* branch if privileged */
945
946	/* Unprivileged case */
947	mtcr	%r29			/* put the condition register back */
948        mfsprg2	%r29			/* ... and r29 */
949        mflr	%r1			/* save LR */
950	mtsprg2 %r1			/* And then in SPRG2 */
951
952	ld	%r1, TRAP_GENTRAP(0)	/* Get branch address */
953	mtlr	%r1
954	li	%r1, 0	 		/* How to get the vector from LR */
955	blrl				/* Branch to generictrap */
956
9571:
958	GET_CPUINFO(%r1)
959	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
960	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
961        mfsprg2	%r28				/* r29 holds cr...  */
962        std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
963        std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
964        std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
965        mflr	%r28					/* save LR */
966	nop						/* alignment */
967	bl	9f					/* Begin branch */
968	.llong	dbtrap
9699:	mflr	%r1
970	ld	%r1,0(%r1)
971	mtlr	%r1
972	blrl				/* Branch to dbtrap */
973CNAME(dbend):
974#endif /* KDB */
975