// SPDX-License-Identifier: GPL-2.0
/*
 * Based on arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */

#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>

#include <asm/inst.h>
#include <asm/module.h>

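/*
 * Patch a single instruction: replace the u32 at @pc with @new. When
 * @validate is set, read the current instruction back first and refuse
 * the update unless it still matches @old, so an unexpected concurrent
 * modification is detected rather than silently overwritten.
 */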
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
	u32 replaced;

	if (validate) {
		if (larch_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (larch_insn_patch_text((void *)pc, new))
		return -EPERM;

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

#ifdef CONFIG_MODULES
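/*
 * Look up the module whose text contains @addr. __module_text_address()
 * must be called with preemption disabled; the ftrace core holds
 * ftrace_lock around these updates, which keeps the module alive after
 * preemption is re-enabled.
 */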
static inline int __get_mod(struct module **mod, unsigned long addr)
{
	preempt_disable();
	*mod = __module_text_address(addr);
	preempt_enable();

	if (WARN_ON(!(*mod)))
		return -EINVAL;

	return 0;
}

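/*
 * Return the per-module trampoline PLT entry that branches to @addr:
 * one slot for ftrace_caller, and one for ftrace_regs_caller when
 * DYNAMIC_FTRACE_WITH_REGS is enabled.
 */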
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
			IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];

	return NULL;
}

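/*
 * Translate a core-kernel trampoline address into the address of the
 * matching PLT entry in @mod, which lies within 'bl' range of the
 * module's text.
 */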
static unsigned long get_plt_addr(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt;

	plt = get_ftrace_plt(mod, addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
		return -EINVAL;
	}

	return (unsigned long)plt;
}
#endif

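/*
 * Retarget the call at a patch site: rewrite 'bl old_addr' into 'bl addr'.
 * The patch site is the second of the two NOPs reserved at function entry,
 * i.e. rec->ip + LOONGARCH_INSN_SIZE. 'bl' has a +/-128M range, so when a
 * module is too far from the trampoline the branch goes via the module's
 * ftrace PLT instead.
 */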
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;
	long offset __maybe_unused;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

#ifdef CONFIG_MODULES
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);
		old_addr = get_plt_addr(mod, old_addr);
	}
#endif

	new = larch_insn_gen_bl(pc, addr);
	old = larch_insn_gen_bl(pc, old_addr);

	return ftrace_modify_code(pc, old, new, true);
}

#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

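/*
 * Point the 'bl' at the ftrace_call site inside the ftrace trampoline at
 * @func. No validation: the site legitimately contains whichever callback
 * was installed before.
 */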
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	u32 new;
	unsigned long pc;

	pc = (unsigned long)&ftrace_call;
	new = larch_insn_gen_bl(pc, (unsigned long)func);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * The compiler inserts 2 NOPs before the regular function prologue. The
 * T-series registers are available and safe to clobber there because of
 * LoongArch's psABI.
 *
 * At runtime, we can replace a NOP with a bl to enable the ftrace call and
 * replace the bl with a NOP to disable it. The bl clobbers RA, so the
 * original RA value must be saved in t0 first.
 *
 * Details are:
 *
 * | Compiled   |       Disabled         |        Enabled         |
 * +------------+------------------------+------------------------+
 * | nop        | move     t0, ra        | move     t0, ra        |
 * | nop        | nop                    | bl       ftrace_caller |
 * | func_body  | func_body              | func_body              |
 *
 * The RA value is recovered from t0 by ftrace_regs_entry and restored into
 * RA before returning to the regular function prologue. When a function is
 * not being traced, the "move t0, ra" is harmless.
 */

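/*
 * Called once per patch site when the site is first recorded: turn the
 * first NOP into 'move t0, ra' so RA survives the later 'bl'.
 */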
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip;
	old = larch_insn_gen_nop();
	new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);

	return ftrace_modify_code(pc, old, new, true);
}

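/*
 * Enable tracing for @rec: patch the second NOP at the function entry
 * into 'bl addr', going via the module PLT when the trampoline is out of
 * 'bl' range.
 */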
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;
	long offset __maybe_unused;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

#ifdef CONFIG_MODULES
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);
	}
#endif

	old = larch_insn_gen_nop();
	new = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

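/*
 * Disable tracing for @rec: validate that the patch site still contains
 * 'bl addr', then replace it with a NOP. The leading 'move t0, ra' is
 * left in place, which is harmless.
 */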
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;
	long offset __maybe_unused;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

#ifdef CONFIG_MODULES
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);
	}
#endif

	new = larch_insn_gen_nop();
	old = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

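/*
 * The update patches every record one instruction at a time, so let
 * ftrace_modify_all_code() reschedule between records instead of
 * monopolizing the CPU on large kernels.
 */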
void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
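/*
 * Hook a function's return: @parent points at the traced function's saved
 * return address. If the graph tracer accepts this entry, redirect the
 * return to return_to_handler so the exit can be recorded as well.
 */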
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
	unsigned long old;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
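/*
 * With DYNAMIC_FTRACE_WITH_ARGS, the graph tracer rides on the regular
 * ftrace callback: regs->regs[1] holds the traced function's return
 * address, placed there from t0 by ftrace_regs_entry (see the comment
 * above ftrace_init_nop()).
 */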
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *parent = (unsigned long *)&regs->regs[1];

	prepare_ftrace_return(ip, parent);
}
#else
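/*
 * Without DYNAMIC_FTRACE_WITH_ARGS, the graph tracer is switched on and
 * off by patching the ftrace_graph_call site inside the ftrace trampoline
 * between a NOP and 'b ftrace_graph_caller'.
 */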
static int ftrace_modify_graph_caller(bool enable)
{
	u32 branch, nop;
	unsigned long pc, func;
	extern void ftrace_graph_call(void);

	pc = (unsigned long)&ftrace_graph_call;
	func = (unsigned long)&ftrace_graph_caller;

	nop = larch_insn_gen_nop();
	branch = larch_insn_gen_b(pc, func);

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	int bit;
	struct pt_regs *regs;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	regs = ftrace_get_regs(fregs);
	if (!regs)
		goto out;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_ip = instruction_pointer(regs);

		instruction_pointer_set(regs, ip);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->csr_era)
			 * as if there were a NOP at the probed address.
			 */
			instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
			instruction_pointer_set(regs, orig_ip);
		}

		/*
		 * If pre_handler returns !0, it changed regs->csr_era, so we
		 * have to skip emulating the single-step and the post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

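/*
 * No out-of-line single-step slot is needed: the probed instruction is
 * ftrace's patch-site instruction, which kprobe_ftrace_handler emulates
 * by simply advancing past it.
 */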
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */
338