xref: /linux/arch/powerpc/kernel/trace/ftrace_entry.S (revision db10cb9b)
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Split from ftrace_64.S
4 */
5
6#include <linux/export.h>
7#include <linux/magic.h>
8#include <asm/ppc_asm.h>
9#include <asm/asm-offsets.h>
10#include <asm/ftrace.h>
11#include <asm/ppc-opcode.h>
12#include <asm/thread_info.h>
13#include <asm/bug.h>
14#include <asm/ptrace.h>
15
16/*
17 *
18 * ftrace_caller()/ftrace_regs_caller() is the function that replaces _mcount()
19 * when ftrace is active.
20 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter, r1 points to A's stack frame and B has not
 * yet had a chance to allocate one.
24 *
25 * Additionally r2 may point either to the TOC for A, or B, depending on
26 * whether B did a TOC setup sequence before calling us.
27 *
28 * On entry the LR points back to the _mcount() call site, and r0 holds the
29 * saved LR as it was on entry to B, ie. the original return address at the
30 * call site in A.
31 *
32 * Our job is to save the register state into a struct pt_regs (on the stack)
33 * and then arrange for the ftrace function to be called.
34 */
/*
 * ftrace_regs_entry: build a pt_regs frame on the stack for the traced
 * function B and set up the arguments for the tracer call.
 *
 * \allregs == 1 (ftrace_regs_caller): save the complete GPR set plus
 * CTR/XER/CCR/MSR so the tracer may inspect or modify any register.
 * \allregs == 0 (ftrace_caller): save only the volatile argument
 * registers, and store MSR == 0 in pt_regs to flag the light variant.
 *
 * On exit: r3 = traced function ip (call site - MCOUNT_INSN_SIZE),
 * r4 = parent ip, r5 = ftrace_ops, r6 = &pt_regs — the arguments for
 * the ftrace_call()/ftrace_regs_call() patch site.
 */
.macro	ftrace_regs_entry allregs
	/* Create a minimal stack frame for representing B */
	PPC_STLU	r1, -STACK_FRAME_MIN_SIZE(r1)

	/* Create our stack frame + pt_regs */
	PPC_STLU	r1,-SWITCH_FRAME_SIZE(r1)

	/* Save all gprs to pt_regs */
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 10, r1)

#ifdef CONFIG_PPC64
	/* Save the original return address in A's stack frame */
	std	r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
	/* Ok to continue? Bail out early if ftrace is disabled on this CPU */
	lbz	r3, PACA_FTRACE_ENABLED(r13)
	cmpdi	r3, 0
	beq	ftrace_no_trace
#endif

	.if \allregs == 1
	SAVE_GPR(2, r1)
	SAVE_GPRS(11, 31, r1)
	.else
#ifdef CONFIG_LIVEPATCH_64
	/* r14 is clobbered below to remember the old NIP for livepatch */
	SAVE_GPR(14, r1)
#endif
	.endif

	/* Save previous stack pointer (r1) */
	addi	r8, r1, SWITCH_FRAME_SIZE
	PPC_STL	r8, GPR1(r1)

	.if \allregs == 1
	/* Load special regs for save below */
	mfmsr   r8
	mfctr   r9
	mfxer   r10
	mfcr	r11
	.else
	/* Clear MSR to flag as ftrace_caller versus ftrace_regs_caller */
	li	r8, 0
	.endif

	/* Get the _mcount() call site out of LR */
	mflr	r7
	/* Save it as pt_regs->nip */
	PPC_STL	r7, _NIP(r1)
	/* Also save it in B's stackframe header for proper unwind */
	PPC_STL	r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
	/* Save the original LR (r0, as read on entry to B) in pt_regs->link */
	PPC_STL	r0, _LINK(r1)

#ifdef CONFIG_PPC64
	/* Save callee's TOC in the ABI compliant location */
	std	r2, STK_GOT(r1)
	LOAD_PACA_TOC()		/* get kernel TOC in r2 */
	LOAD_REG_ADDR(r3, function_trace_op)
	ld	r5,0(r3)
#else
	lis	r3,function_trace_op@ha
	lwz	r5,function_trace_op@l(r3)
#endif

#ifdef CONFIG_LIVEPATCH_64
	mr	r14, r7		/* remember old NIP */
#endif

	/* Calculate ip from nip-4 into r3 for call below */
	subi    r3, r7, MCOUNT_INSN_SIZE

	/* Put the original return address in r4 as parent_ip */
	mr	r4, r0

	/* Save special regs (r8 is MSR, or 0 when \allregs == 0) */
	PPC_STL	r8, _MSR(r1)
	.if \allregs == 1
	PPC_STL	r9, _CTR(r1)
	PPC_STL	r10, _XER(r1)
	PPC_STL	r11, _CCR(r1)
	.endif

	/* Load &pt_regs in r6 for call below */
	addi    r6, r1, STACK_INT_FRAME_REGS
.endm
120
/*
 * ftrace_regs_exit: tear down the frames built by ftrace_regs_entry and
 * branch to the (possibly tracer-modified) NIP.  \allregs must match the
 * value passed to ftrace_regs_entry so the same register set is restored.
 */
.macro	ftrace_regs_exit allregs
	/* Load ctr with the possibly modified NIP */
	PPC_LL	r3, _NIP(r1)
	mtctr	r3

#ifdef CONFIG_LIVEPATCH_64
	cmpd	r14, r3		/* has NIP been altered? (r14 = old NIP) */
#endif

	/* Restore gprs */
	.if \allregs == 1
	REST_GPRS(2, 31, r1)
	.else
	REST_GPRS(3, 10, r1)
#ifdef CONFIG_LIVEPATCH_64
	REST_GPR(14, r1)
#endif
	.endif

	/* Restore possibly modified LR */
	PPC_LL	r0, _LINK(r1)
	mtlr	r0

#ifdef CONFIG_PPC64
	/* Restore callee's TOC */
	ld	r2, STK_GOT(r1)
#endif

	/* Pop our stack frame: both the pt_regs frame and B's minimal frame */
	addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE

#ifdef CONFIG_LIVEPATCH_64
        /* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne-	livepatch_handler
#endif
	bctr			/* jump after _mcount site */
.endm
158
/*
 * Full-state trace trampoline (allregs == 1): saves every GPR plus
 * CTR/XER/CCR/MSR.  ftrace_regs_call is the patchable call site; the
 * bl below defaults to ftrace_stub and is runtime-patched by ftrace
 * to the active trace function.
 */
_GLOBAL(ftrace_regs_caller)
	ftrace_regs_entry 1
	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_regs_call
ftrace_regs_call:
	bl	ftrace_stub
	nop
	ftrace_regs_exit 1
167
/*
 * Light trace trampoline (allregs == 0): saves only the volatile
 * argument registers (pt_regs->msr is set to 0 to flag this variant).
 * ftrace_call is the patchable call site; the bl below defaults to
 * ftrace_stub and is runtime-patched by ftrace to the active tracer.
 */
_GLOBAL(ftrace_caller)
	ftrace_regs_entry 0
	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	ftrace_regs_exit 0
176
/* Default no-op target of the patchable ftrace call sites above */
_GLOBAL(ftrace_stub)
	blr
179
#ifdef CONFIG_PPC64
/*
 * Early bail-out taken from ftrace_regs_entry when PACA_FTRACE_ENABLED
 * is clear for this CPU.  At that point BOTH frames set up by
 * ftrace_regs_entry have been allocated: the STACK_FRAME_MIN_SIZE frame
 * for B and our SWITCH_FRAME_SIZE pt_regs frame.  Tear down both
 * (mirroring ftrace_regs_exit) — popping only SWITCH_FRAME_SIZE would
 * leave r1 pointing STACK_FRAME_MIN_SIZE below B's real frame and
 * corrupt the stack.  Then return to the _mcount() call site (LR, via
 * ctr) with the original LR restored from r0.
 */
ftrace_no_trace:
	mflr	r3
	mtctr	r3
	REST_GPR(3, r1)
	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	mtlr	r0
	bctr
#endif
189
#ifdef CONFIG_LIVEPATCH_64
	/*
	 * This function runs in the mcount context, between two functions. As
	 * such it can only clobber registers which are volatile and used in
	 * function linkage.
	 *
	 * We get here when a function A, calls another function B, but B has
	 * been live patched with a new function C.
	 *
	 * On entry:
	 *  - we have no stack frame and can not allocate one
	 *  - LR points back to the original caller (in A)
	 *  - CTR holds the new NIP in C
	 *  - r0, r11 & r12 are free
	 *
	 * The real LR and TOC are parked on the per-thread livepatch shadow
	 * stack (TI_livepatch_sp) across the call into C, since C will
	 * clobber both and we have no regular stack frame to hold them.
	 */
livepatch_handler:
	ld	r12, PACA_THREAD_INFO(r13)

	/* Allocate 3 x 8 bytes on the livepatch stack: TOC, LR, marker */
	ld	r11, TI_livepatch_sp(r12)
	addi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)

	/* Save toc & real LR on livepatch stack */
	std	r2,  -24(r11)
	mflr	r12
	std	r12, -16(r11)

	/* Store stack end marker (checked on return for overflow/corruption) */
	lis     r12, STACK_END_MAGIC@h
	ori     r12, r12, STACK_END_MAGIC@l
	std	r12, -8(r11)

	/* Put ctr in r12 for global entry and branch there */
	mfctr	r12
	bctrl

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r11, r12 and we can use r2 until we
	 * restore it.
	 */

	ld	r12, PACA_THREAD_INFO(r13)

	ld	r11, TI_livepatch_sp(r12)

	/* Check stack marker hasn't been trashed */
	lis     r2,  STACK_END_MAGIC@h
	ori     r2,  r2, STACK_END_MAGIC@l
	ld	r12, -8(r11)
1:	tdne	r12, r2		/* trap (BUG) if the marker was overwritten */
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld	r12, -16(r11)
	mtlr	r12
	ld	r2,  -24(r11)

	/* Pop livepatch stack frame */
	ld	r12, PACA_THREAD_INFO(r13)
	subi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)

	/* Return to original caller of live patched function */
	blr
#endif /* CONFIG_LIVEPATCH_64 */
257
#ifndef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
/*
 * Plain _mcount stub used while tracing is inactive: restore the
 * original LR from r0 and return to the call site (old LR, via ctr),
 * i.e. a no-op with correct linkage.  ftrace patches call sites to
 * ftrace_caller/ftrace_regs_caller instead when tracing is enabled.
 */
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
	mflr	r12
	mtctr	r12
	mtlr	r0
	bctr
#endif
267
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Trampoline substituted for a traced function's return address by the
 * function-graph tracer.  Preserves the function's return values
 * (r3/r4) and TOC across the call to ftrace_return_to_handler(), which
 * hands back the real return address; we then branch to it via LR.
 */
_GLOBAL(return_to_handler)
	/* need to save return values */
#ifdef CONFIG_PPC64
	/* Stash r3/r4/r2/r31 below r1, then anchor the old r1 in r31 */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	LOAD_PACA_TOC()
#else
	stwu	r1, -16(r1)
	stw	r3, 8(r1)
	stw	r4, 12(r1)
#endif

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

#ifdef CONFIG_PPC64
	/* Pop our frame (back-chain load) and restore the saved regs */
	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)
#else
	lwz	r3, 8(r1)
	lwz	r4, 12(r1)
	addi	r1, r1, 16
#endif

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
312
/*
 * 32 bytes reserved in kernel text — presumably scratch space for
 * ftrace long-branch trampolines; see the arch ftrace C code (TODO
 * confirm against arch/powerpc/kernel/trace/ftrace.c).
 */
.pushsection ".tramp.ftrace.text","aw",@progbits;
.globl ftrace_tramp_text
ftrace_tramp_text:
	.space 32
.popsection
318
/*
 * 32 bytes reserved in init text — presumably the init-text counterpart
 * of ftrace_tramp_text, discarded after boot (TODO confirm against the
 * arch ftrace C code).
 */
.pushsection ".tramp.ftrace.init","aw",@progbits;
.globl ftrace_tramp_init
ftrace_tramp_init:
	.space 32
.popsection
324