/* arch/powerpc/kernel/interrupt_64.S */
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

	.align 7

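/*
 * With CONFIG_PPC_RFI_SRR_DEBUG, verify that SRR0/SRR1 (or HSRR0/HSRR1,
 * when invoked with an argument other than "srr") still match the NIP and
 * MSR saved in pt_regs, warning once on any mismatch. The low two bits of
 * the return address are masked off before the compare because they are
 * not part of the instruction address.
 */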
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm

#ifdef CONFIG_PPC_BOOK3S
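/*
 * Entry for system calls made with the scv instruction. After the
 * SCV_INTERRUPT_TO_KERNEL entry fixup we switch to the kernel stack
 * (PACAKSAVE), build a pt_regs frame from the volatile state (r11/r12
 * arrive holding the caller's NIP and MSR), and call
 * system_call_exception(). The fast exit returns with rfscv, which takes
 * its NIA from LR and MSR from CTR.
 */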
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,-16(r3)		/* "regshere" marker */

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off() which means
	 * interrupts could already have been blocked before trace_hardirqs_off,
	 * but this is the best we can do.
	 */

	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
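	/*
	 * The code from here to _rst_end below can be restarted by the
	 * masked-interrupt machinery: if anything other than
	 * PACA_IRQ_HARD_DIS became pending while we were soft-masked,
	 * branch out and redo the exit preparation so the event is not
	 * lost.
	 */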
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/* Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done. */
	REST_GPR(0, r1)
	REST_GPRS(4, 8, r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPRS(9, 12)
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	REST_GPR(0, r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

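/*
 * The SOFT_MASK_TABLE entry makes the window above behave as soft-masked
 * text, and the RESTART_TABLE entry sends any interrupt that hits between
 * rst_start and rst_end back through the restart handler instead of
 * letting the pending event be lost.
 */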
SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

#endif /* CONFIG_PPC_BOOK3S */

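/*
 * Real-mode entry for sc system calls: turn the MMU back on by loading
 * the kernel MSR value (PACAKMSR) into the machine state register, then
 * fall through to the virtual-mode handler below.
 */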
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_E500
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,-16(r3)		/* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif
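	/*
	 * PACASRR_VALID (Book3S) records that SRR0/SRR1 still hold this
	 * syscall's NIP and MSR, letting the exit path skip the mtspr
	 * reload when nothing has clobbered them in the meantime.
	 */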

	/*
	 * We always enter kernel from userspace with irq soft-mask enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1 /* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif

	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPR(0)
	ZEROIZE_GPRS(4, 12)
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	REST_GPR(0, r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	. /* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif

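/*
 * Common interrupt exit, instantiated below for SRR and (on Book3S) HSRR.
 * The saved MSR's PR bit selects the user or kernel return path; the \srr
 * argument selects which pair of save/restore registers the final rfid or
 * hrfid consumes.
 */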
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
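	/*
	 * If PACA(H)SRR_VALID is still set, (H)SRR0/1 were not clobbered
	 * since entry and the reload from pt_regs below can be skipped.
	 * The flag is cleared either way, since the rfid/hrfid consumes
	 * the values.
	 */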
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_GPRS(7, 13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	beq	.Linterrupt_return_\srr\()_soft_enabled

	/*
	 * Returning to soft-disabled context.
	 * Check if a MUST_HARD_MASK interrupt has become pending, in which
	 * case we need to disable MSR[EE] in the return context.
	 */
	ld	r12,_MSR(r1)
	andi.	r10,r12,MSR_EE
	beq	.Lfast_kernel_interrupt_return_\srr\() // EE already disabled
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r10,r11,PACA_IRQ_MUST_HARD_MASK
	beq	.Lfast_kernel_interrupt_return_\srr\() // No HARD_MASK pending

	/* Must clear MSR_EE from _MSR */
#ifdef CONFIG_PPC_BOOK3S
	li	r10,0
	/* Clear valid before changing _MSR */
	.ifc \srr,srr
	stb	r10,PACASRR_VALID(r13)
	.else
	stb	r10,PACAHSRR_VALID(r13)
	.endif
#endif
	xori	r12,r12,MSR_EE
	std	r12,_MSR(r1)
	b	.Lfast_kernel_interrupt_return_\srr\()

.Linterrupt_return_\srr\()_soft_enabled:
	/*
	 * In the soft-enabled case, need to double-check that we have no
	 * pending interrupts that might have come in before we reached the
	 * restart section of code, and restart the exit so those can be
	 * handled.
	 *
	 * If there are none, it is possible that the interrupt still
	 * has PACA_IRQ_HARD_DIS set, which needs to be cleared for the
	 * interrupted context. This clear will not clobber a new pending
	 * interrupt coming in, because we're in the restart section, so
	 * any such interrupt would return to the restart location.
	 */
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS

.Lfast_kernel_interrupt_return_\srr\():
	cmpdi	cr1,r3,0
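	/*
	 * r3 from interrupt_exit_kernel_prepare() is nonzero when a stack
	 * store must be emulated (see the fixup at 1: below). The test is
	 * kept in cr1 because the stdcx. used below to clear the
	 * reservation overwrites cr0.
	 */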
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

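/* Instantiate for SRR; the HSRR flavour only exists on Book3S. */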
interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

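/*
 * __end_soft_masked marks the end of the text that the soft-mask table
 * machinery may treat as implicitly soft-masked.
 */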
	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

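/*
 * The child of fork/clone resumes here when it is first scheduled:
 * schedule_tail() finishes the context switch, the non-volatile GPRs
 * saved at switch time are restored, and the child returns 0 through
 * the matching syscall exit path (scv or sc).
 */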
#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

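/*
 * Kernel threads start here: the thread function and its argument were
 * stashed in the non-volatile r14/r15 at thread-creation time. On ELFv2,
 * r12 must also carry the entry address so the callee's global entry
 * point can derive its TOC pointer. If the function returns, fall into
 * the syscall exit path with a 0 return value.
 */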
_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit
