// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>

#include "common.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)regs->sp)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 * This is non-const and volatile to keep gcc from statically
 * optimizing it out, as variable_test_bit makes gcc think only
 * *(unsigned long*) is used.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
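
/*
 * For example, a lookup in the table above: probing "rdtsc" (0x0f 0x31)
 * makes can_boost() test bit 0x31 of this bitmap; row 0x30, column 1 is
 * set, so rdtsc is boostable. "ud2" (0x0f 0x0b) hits row 0x00, column b,
 * which is clear, so it is not.
 */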

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", },	/* This function switches only the current
				   task, but doesn't switch kernel stack. */
	{NULL, NULL}		/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/*
 * Insert a jump instruction at 'dest', encoded as if it were placed at
 * 'from', which jumps to address 'to'.
 */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/*
 * Insert a call instruction at 'dest', encoded as if it were placed at
 * 'from', which calls address 'to'.
 */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
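
/*
 * For example (addresses are illustrative only), synthesize_reljump(buf,
 * (void *)0x1000, (void *)0x2000) stores op = 0xe9 and raddr = 0x2000 -
 * (0x1000 + 5) = 0xffb, i.e. the bytes e9 fb 0f 00 00: a near jump that
 * lands on 0x2000 when executed at 0x1000.
 */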

/*
 * Returns non-zero if INSN is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;
	insn_byte_t prefix;
	int i;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	for_each_insn_prefix(insn, i, prefix) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(prefix);
		/* Can't boost an address-size override or a CS override prefix */
		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
			return 0;
	}

	opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0x62:		/* bound */
	case 0x70 ... 0x7f:	/* Conditional jumps */
	case 0x9a:		/* Call far */
	case 0xc0 ... 0xc1:	/* Grp2 */
	case 0xcc ... 0xce:	/* software exceptions */
	case 0xd0 ... 0xd3:	/* Grp2 */
	case 0xd6:		/* (UD) */
	case 0xd8 ... 0xdf:	/* ESC */
	case 0xe0 ... 0xe3:	/* LOOP*, JCXZ */
	case 0xe8 ... 0xe9:	/* near Call, JMP */
	case 0xeb:		/* Short JMP */
	case 0xf0 ... 0xf4:	/* LOCK/REP, HLT */
	case 0xf6 ... 0xf7:	/* Grp3 */
	case 0xfe:		/* Grp4 */
		/* ... are not boostable */
		return 0;
	case 0xff:		/* Grp5 */
		/* Only indirect jmp is boostable */
		return X86_MODRM_REG(insn->modrm.bytes[0]) == 4;
	default:
		return 1;
	}
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by Kprobe
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped
	 * at a different place, so __copy_instruction() tweaks the
	 * displacement of that instruction. In that case, we can't
	 * recover the instruction from kp->ainsn.insn.
	 *
	 * On the other hand, in case of a normal Kprobe, kp->opcode has
	 * a copy of the first byte of the probed instruction, which was
	 * overwritten by int3. Since the instruction at kp->addr is not
	 * modified by kprobes except for the first byte, we can recover
	 * the original instruction from it and kp->opcode.
	 *
	 * In case of Kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent
	 * state. Fortunately, we know that the original code is the
	 * ideal 5-byte long NOP.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, x86_nops[5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex, or disable preemption, to
 * prevent the kprobes it references from being released.
 * Returns zero if the instruction cannot be recovered (or the access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		int ret;

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump. Since a relative jump is itself a normal
		 * instruction, we simply decode through it if there is no
		 * kprobe there.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;

		ret = insn_decode_kernel(&insn, (void *)__addr);
		if (ret < 0)
			return 0;

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}
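
/*
 * For example (offsets are illustrative only): probing symbol+7 in a
 * function that starts with a 5-byte and then a 2-byte instruction
 * decodes symbol+0 and symbol+5, stops exactly at symbol+7 and is
 * accepted; probing symbol+6 would decode past the target (ending at
 * symbol+7 != symbol+6) and be rejected as a mid-instruction address.
 */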

/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted by @real, not @dest.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src);
	int ret;

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if the given address is not recovered */
	if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
			MAX_INSN_SIZE))
		return 0;

	ret = insn_decode_kernel(insn, dest);
	if (ret < 0)
		return 0;

	/* We cannot probe instructions that carry an emulate prefix */
	if (insn_has_emulate_prefix(insn))
		return 0;

	/* Another subsystem put a breakpoint here; we failed to recover it */
	if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
		return 0;

	/* We should not singlestep on exception-masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP-relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
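
/*
 * To illustrate the fixup arithmetic with made-up addresses: an
 * instruction at src = 0xffffffff81000100 with displacement 0x100
 * references src + insn->length + 0x100. If the copy runs at real =
 * 0xffffffffc0000000, newdisp = src + 0x100 - real, so that real +
 * insn->length + newdisp still resolves to the original target.
 */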

/* Prepare a reljump or an int3 right after the copied instruction */
static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
			      struct insn *insn)
{
	int len = insn->length;

	if (!IS_ENABLED(CONFIG_PREEMPTION) &&
	    !p->post_handler && can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
		/*
		 * This instruction can be executed directly if the copy
		 * jumps back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += JMP32_INSN_SIZE;
		p->ainsn.boostable = 1;
	} else {
		/* Otherwise, put an int3 for trapping singlestep */
		if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
			return -ENOSPC;

		buf[len] = INT3_INSN_OPCODE;
		len += INT3_INSN_SIZE;
	}

	return len;
}
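
/*
 * The single-step buffer built above thus has one of two layouts:
 *
 *	boosted:	[ copied insn ][ e9 <rel32 back to p->addr + len> ]
 *	trapped:	[ copied insn ][ cc ]
 *
 * The boosted form resumes the probed code without a second int3 trap;
 * the trapped form re-enters the int3 handler, which fixes up regs->ip
 * in resume_singlestep() below.
 */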

/* Allocate a page and make it read-only and executable (ROX) */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;

	set_vm_flush_reset_perms(page);
	/*
	 * First make the page read-only, and only then make it executable to
	 * prevent it from being W+X in between.
	 */
	set_memory_ro((unsigned long)page, 1);

	/*
	 * TODO: Once additional kernel code protection mechanisms are set, ensure
	 * that the page was not maliciously altered and it is still zeroed.
	 */
	set_memory_x((unsigned long)page, 1);

	return page;
}

/* Free the page; permissions are reset via VM_FLUSH_RESET_PERMS */
void free_insn_page(void *page)
{
	module_memfree(page);
}

/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */

static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{
	switch (p->ainsn.opcode) {
	case 0xfa:	/* cli */
		regs->flags &= ~(X86_EFLAGS_IF);
		break;
	case 0xfb:	/* sti */
		regs->flags |= X86_EFLAGS_IF;
		break;
	case 0x9c:	/* pushf */
		int3_emulate_push(regs, regs->flags);
		break;
	case 0x9d:	/* popf */
		regs->flags = int3_emulate_pop(regs);
		break;
	}
	regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
}
NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);

static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
{
	int3_emulate_ret(regs);
}
NOKPROBE_SYMBOL(kprobe_emulate_ret);

static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	func += p->ainsn.rel32;
	int3_emulate_call(regs, func);
}
NOKPROBE_SYMBOL(kprobe_emulate_call);

static nokprobe_inline
void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	if (cond)
		ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}

static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
	__kprobe_emulate_jmp(p, regs, true);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);

static const unsigned long jcc_mask[6] = {
	[0] = X86_EFLAGS_OF,
	[1] = X86_EFLAGS_CF,
	[2] = X86_EFLAGS_ZF,
	[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
	[4] = X86_EFLAGS_SF,
	[5] = X86_EFLAGS_PF,
};
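
/*
 * Jcc condition codes come in inverted pairs: the low bit of the type
 * selects the inverted sense, and the remaining bits index jcc_mask[].
 * For example, type 0x4 (JZ) tests jcc_mask[2] == X86_EFLAGS_ZF and
 * jumps when ZF is set, while type 0x5 (JNZ) tests the same flag with
 * invert set. Types 0xc-0xf (JL/JGE/JLE/JG) have no single-flag mask
 * and are computed from SF ^ OF (or'd with ZF for JLE/JG) in
 * kprobe_emulate_jcc() below.
 */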

static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
	bool invert = p->ainsn.jcc.type & 1;
	bool match;

	if (p->ainsn.jcc.type < 0xc) {
		match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
	} else {
		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
		if (p->ainsn.jcc.type >= 0xe)
			/* JLE/JG also branch on ZF, so OR it in */
			match = match || (regs->flags & X86_EFLAGS_ZF);
	}
	__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);

static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
	bool match;

	if (p->ainsn.loop.type != 3) {	/* LOOP* */
		/* LOOP* decrements the counter first, then tests the result */
		if (p->ainsn.loop.asize == 32)
			match = (--(*(u32 *)&regs->cx)) != 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = (--(*(u64 *)&regs->cx)) != 0;
#endif
		else
			match = (--(*(u16 *)&regs->cx)) != 0;
	} else {			/* JCXZ */
		if (p->ainsn.loop.asize == 32)
			match = *(u32 *)(&regs->cx) == 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = *(u64 *)(&regs->cx) == 0;
#endif
		else
			match = *(u16 *)(&regs->cx) == 0;
	}

	if (p->ainsn.loop.type == 0)	/* LOOPNE */
		match = match && !(regs->flags & X86_EFLAGS_ZF);
	else if (p->ainsn.loop.type == 1)	/* LOOPE */
		match = match && (regs->flags & X86_EFLAGS_ZF);

	__kprobe_emulate_jmp(p, regs, match);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);

static const int addrmode_regoffs[] = {
	offsetof(struct pt_regs, ax),
	offsetof(struct pt_regs, cx),
	offsetof(struct pt_regs, dx),
	offsetof(struct pt_regs, bx),
	offsetof(struct pt_regs, sp),
	offsetof(struct pt_regs, bp),
	offsetof(struct pt_regs, si),
	offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
	offsetof(struct pt_regs, r8),
	offsetof(struct pt_regs, r9),
	offsetof(struct pt_regs, r10),
	offsetof(struct pt_regs, r11),
	offsetof(struct pt_regs, r12),
	offsetof(struct pt_regs, r13),
	offsetof(struct pt_regs, r14),
	offsetof(struct pt_regs, r15),
#endif
};
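
/*
 * The table above is indexed by the ModRM.rm field, plus 8 when REX.B
 * is set (see the 0xff case in prepare_emulation() below). For example,
 * "jmp *%r10" encodes as 41 ff e2: ModRM 0xe2 has rm = 2, REX.B adds 8,
 * so index 10 yields offsetof(struct pt_regs, r10).
 */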

static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_call(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);

static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);

static int prepare_emulation(struct kprobe *p, struct insn *insn)
{
	insn_byte_t opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0x9c:		/* pushfl */
	case 0x9d:		/* popf/popfd */
		/*
		 * IF modifiers must be emulated, since single-stepping them
		 * with int3 would otherwise run with interrupts enabled.
		 */
		p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
		p->ainsn.opcode = opcode;
		break;
	case 0xc2:	/* ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
		p->ainsn.emulate_op = kprobe_emulate_ret;
		break;
	case 0x9a:	/* far call absolute -- segment is not supported */
	case 0xea:	/* far jmp absolute -- segment is not supported */
	case 0xcc:	/* int3 */
	case 0xcf:	/* iret -- in-kernel IRET is not supported */
		return -EOPNOTSUPP;
	case 0xe8:	/* near call relative */
		p->ainsn.emulate_op = kprobe_emulate_call;
		if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0xeb:	/* short jump relative */
	case 0xe9:	/* near jump relative */
		p->ainsn.emulate_op = kprobe_emulate_jmp;
		if (insn->immediate.nbytes == 1)
			p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		else if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0x70 ... 0x7f:
		/* 1-byte conditional jump */
		p->ainsn.emulate_op = kprobe_emulate_jcc;
		p->ainsn.jcc.type = opcode & 0xf;
		p->ainsn.rel32 = *(char *)insn->immediate.bytes;
		break;
	case 0x0f:
		opcode = insn->opcode.bytes[1];
		if ((opcode & 0xf0) == 0x80) {
			/* 2-byte conditional jump */
			p->ainsn.emulate_op = kprobe_emulate_jcc;
			p->ainsn.jcc.type = opcode & 0xf;
			if (insn->immediate.nbytes == 2)
				p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
			else
				p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		} else if (opcode == 0x01 &&
			   X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
			   X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
			/* VM extensions - not supported */
			return -EOPNOTSUPP;
		}
		break;
	case 0xe0:	/* Loop NZ */
	case 0xe1:	/* Loop */
	case 0xe2:	/* Loop */
	case 0xe3:	/* J*CXZ */
		p->ainsn.emulate_op = kprobe_emulate_loop;
		p->ainsn.loop.type = opcode & 0x3;
		p->ainsn.loop.asize = insn->addr_bytes * 8;
		p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		break;
	case 0xff:
		/*
		 * Since 0xff is an extended group opcode, the instruction
		 * is determined by the MOD/RM byte.
		 */
		opcode = insn->modrm.bytes[0];
		if ((opcode & 0x30) == 0x10) {
			if ((opcode & 0x8) == 0x8)
				return -EOPNOTSUPP;	/* far call */
			/* call absolute, indirect */
			p->ainsn.emulate_op = kprobe_emulate_call_indirect;
		} else if ((opcode & 0x30) == 0x20) {
			if ((opcode & 0x8) == 0x8)
				return -EOPNOTSUPP;	/* far jmp */
			/* jmp near absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
		} else
			break;

		if (insn->addr_bytes != sizeof(unsigned long))
			return -EOPNOTSUPP;	/* Don't support a different size */
		if (X86_MODRM_MOD(opcode) != 3)
			return -EOPNOTSUPP;	/* TODO: support memory addressing */

		p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
#ifdef CONFIG_X86_64
		if (X86_REX_B(insn->rex_prefix.value))
			p->ainsn.indirect.reg += 8;
#endif
		break;
	default:
		break;
	}
	p->ainsn.size = insn->length;

	return 0;
}

static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int ret, len;

	/* Copy the instruction, recovering it if another optprobe modified it */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/* Analyze the opcode and set up the emulate functions */
	ret = prepare_emulation(p, &insn);
	if (ret < 0)
		return ret;

	/* Add an int3 for single-stepping or a booster jmp */
	len = prepare_singlestep(buf, p, &insn);
	if (len < 0)
		return len;

	/* Also, a displacement change doesn't affect the first byte */
	p->opcode = buf[0];

	p->ainsn.tp_len = len;
	perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len);

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;

	memset(&p->ainsn, 0, sizeof(p->ainsn));

	/* insn: must be on a special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}

void arch_arm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	text_poke(p->addr, &int3, 1);
	text_poke_sync();
	perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
	text_poke(p->addr, &p->opcode, 1);
	text_poke_sync();
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* Record the perf event before freeing the slot */
		perf_event_text_poke(p->ainsn.insn, p->ainsn.insn,
				     p->ainsn.tp_len, NULL, 0);
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & X86_EFLAGS_IF);
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;
	ri->fp = sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
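
/*
 * At this point the top of the probed function's stack has been
 * rewritten:
 *
 *	before:	[ return address to caller ]	<- regs->sp
 *	after:	[ &kretprobe_trampoline    ]	<- regs->sp
 *
 * so the function "returns" into the trampoline below, and
 * trampoline_handler() recovers the real return address from the
 * ri->ret_addr saved above.
 */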

static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore the original saved kprobes variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER)
		restore_previous_kprobe(kcb);
	else
		reset_current_kprobe();
}
NOKPROBE_SYMBOL(kprobe_post_process);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPTION)
	if (p->ainsn.boostable) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	if (p->ainsn.emulate_op) {
		p->ainsn.emulate_op(p, regs);
		kprobe_post_process(p, regs, kcb);
		return;
	}

	/* Disable interrupts, and set the ip register to the trampoline */
	regs->flags &= ~X86_EFLAGS_IF;
	regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "int3"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn. Instead of the trap flag, we use another
 * "int3" placed right after the copied instruction.
 * Unlike trap single-stepping, "int3" single-stepping cannot handle
 * instructions that change the ip register, e.g. jmp, call and
 * conditional jmp, nor instructions that change the IF flag, because
 * interrupts must stay disabled around the single-stepping.
 * Such instructions are software-emulated; all others are
 * single-stepped using "int3".
 *
 * When the 2nd "int3" is handled, regs->ip and regs->flags need to
 * be adjusted so that we can resume execution of the correct code.
 */
static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;

	/* Restore the saved interrupt flag and ip register */
	regs->flags |= kcb->kprobe_saved_flags;
	/* regs->ip points just past the executed int3, so step back over it */
	regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
}
NOKPROBE_SYMBOL(resume_singlestep);
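
/*
 * Working through the fixup arithmetic: after a copied instruction of
 * length len traps into the trailing int3, regs->ip == copy_ip + len +
 * INT3_INSN_SIZE. Adding (orig_ip - copy_ip) - INT3_INSN_SIZE leaves
 * regs->ip == orig_ip + len, i.e. the instruction following the probed
 * one in the original text.
 */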

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/*
		 * A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG, or we'll continue in an endless reentering loop
		 * and eventually overflow the stack.
		 */
		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
{
	return (kcb->kprobe_status == KPROBE_HIT_SS ||
		kcb->kprobe_status == KPROBE_REENTER);
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire duration of kprobe
	 * processing. Since int3 and the debug trap disable irqs and we
	 * clear IF while single-stepping, we must not be preemptible here.
	 */

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, that means
			 * the user handler set up registers to exit to
			 * another instruction, and we must skip the single
			 * stepping.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
			return 1;
		}
	} else if (kprobe_is_ss(kcb)) {
		p = kprobe_running();
		if ((unsigned long)p->ainsn.insn < regs->ip &&
		    (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
			/* Most probably this is the second int3 for singlestep */
			resume_singlestep(p, regs, kcb);
			kprobe_post_process(p, regs, kcb);
			return 1;
		}
	}

	if (*addr != INT3_INSN_OPCODE) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address. In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	} /* else: not a kprobe fault; let the kernel handle it */

	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
asm(
	".text\n"
	".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	/* We don't bother saving the ss register */
#ifdef CONFIG_X86_64
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rdi\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movq %rax, 19*8(%rsp)\n"
	RESTORE_REGS_STRING
	"	popfq\n"
#else
	"	pushl %esp\n"
	"	pushfl\n"
	SAVE_REGS_STRING
	"	movl %esp, %eax\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movl %eax, 15*4(%esp)\n"
	RESTORE_REGS_STRING
	"	popfl\n"
#endif
	"	ret\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
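
/*
 * A rough sketch of the x86_64 trampoline stack while trampoline_handler()
 * runs (the i386 variant is analogous, with 15*4 in place of 19*8):
 *
 *	[ pushed %rsp ]	<- 19*8(%rsp); overwritten with the real return
 *	[ flags       ]	   address (returned in %rax by the handler), so
 *	[ pt_regs ... ]	   that the final "ret" resumes in the caller
 *	[ ...         ]	<- %rsp, passed to trampoline_handler() in %rdi
 */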

/*
 * Called from kretprobe_trampoline
 */
__used __visible void *trampoline_handler(struct pt_regs *regs)
{
	/* fixup registers */
	regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
	regs->gs = 0;
#endif
	regs->ip = (unsigned long)&kretprobe_trampoline;
	regs->orig_ax = ~0UL;

	return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, &regs->sp);
}
NOKPROBE_SYMBOL(trampoline_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;

		/*
		 * If the IF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}