// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/paravirt.h>

int __read_mostly alternatives_patched;
EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

const unsigned char x86nops[] =
{
	BYTES_NOP1,
	BYTES_NOP2,
	BYTES_NOP3,
	BYTES_NOP4,
	BYTES_NOP5,
	BYTES_NOP6,
	BYTES_NOP7,
	BYTES_NOP8,
};

const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
{
	NULL,
	x86nops,
	x86nops + 1,
	x86nops + 1 + 2,
	x86nops + 1 + 2 + 3,
	x86nops + 1 + 2 + 3 + 4,
	x86nops + 1 + 2 + 3 + 4 + 5,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, x86_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
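
/*
 * A worked example of the padding logic above (hypothetical buffer, not
 * used anywhere in this file): each loop iteration consumes
 * min(len, ASM_NOP_MAX) bytes, so a 10-byte region becomes one 8-byte NOP
 * followed by one 2-byte NOP:
 *
 *	u8 buf[10];
 *
 *	add_nops(buf, sizeof(buf));
 *	// buf[0..7] == x86_nops[8][0..7], buf[8..9] == x86_nops[2][0..1]
 */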

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement.
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insn_buff[0] = 0xeb;
	insn_buff[1] = (s8)n_dspl;
	add_nops(insn_buff + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insn_buff[0] = 0xe9;
	*(s32 *)&insn_buff[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
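
/*
 * A worked example of the displacement math above, with made-up addresses:
 * say the original site is at 0x1000 and the 5-byte replacement JMP at
 * 0x2000 targets 0x1010.  Then o_dspl = 0x1010 - 0x2005 = 0xfffff00b,
 * next_rip = 0x2005, tgt_rip = 0x1010 and n_dspl = tgt_rip - orig_insn =
 * 0x10.  Since n_dspl - 2 = 0xe fits in a signed byte, the site gets the
 * two-byte form: 0xeb 0x0e, followed by three one-byte NOPs.
 */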

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	struct insn insn;
	int nop, i = 0;

	/*
	 * Jump over the non-NOP insns, the remaining bytes must be single-byte
	 * NOPs, optimize them.
	 */
	for (;;) {
		if (insn_decode_kernel(&insn, &instr[i]))
			return;

		if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
			break;

		if ((i += insn.length) >= a->instrlen)
			return;
	}

	for (nop = i; i < a->instrlen; i++) {
		if (WARN_ONCE(instr[i] != 0x90, "Not a NOP at 0x%px\n", &instr[i]))
			return;
	}

	local_irq_save(flags);
	add_nops(instr + nop, i - nop);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
		   instr, nop, a->instrlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;
		/* Mask away "NOT" flag bit for feature to test. */
		u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);

		/*
		 * Patch if either:
		 * - feature is present
		 * - feature not present but ALTINSTR_FLAG_INV is set to mean,
		 *   patch if feature is *NOT* present.
		 */
		if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
			goto next;

		DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
			(a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
			feature >> 5,
			feature & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
			*(s32 *)(insn_buff + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insn_buff + 1),
				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insn_buff);

		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
			insn_buff[insn_buff_sz] = 0x90;

		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);

next:
		optimize_nops(a, instr);
	}
}
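
/*
 * A minimal sketch of how entries land in the table consumed above: the
 * ALTERNATIVE() macro records the original instruction, the replacement and
 * the required feature bit in the .altinstructions section.  The feature and
 * callee below are purely illustrative, not real kernel symbols:
 *
 *	asm volatile (ALTERNATIVE("rep; movsb",			// oldinstr
 *				  "call fancy_memmove",		// hypothetical
 *				  X86_FEATURE_FANCY));		// hypothetical
 *
 * At boot, apply_alternatives() copies the replacement over the original
 * iff boot_cpu_has(X86_FEATURE_FANCY), fixing up the CALL displacement and
 * padding any remaining bytes with NOPs.
 */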

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}
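
/*
 * Sketch of the byte-level effect, assuming a LOCK_PREFIX'd RMW such as:
 *
 *	lock; addl $1, (%rdi)		// f0 83 07 01
 *	ds;   addl $1, (%rdi)		// 3e 83 07 01 after smp_unlock
 *
 * LOCK_PREFIX emits the 0xf0 byte and records its address in .smp_locks;
 * on a UP configuration the prefix is rewritten to the functionally-neutral
 * DS override (0x3e) above, and back to 0xf0 when more CPUs come online.
 */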

struct smp_alt_module {
	/* the module owning these lock prefixes; NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insn_buff[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insn_buff, p->instr, p->len);
		used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
"	movl	$1, (%" _ASM_ARG1 ")\n"
"	ret\n"
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);

extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}

static void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * Stick the address of the INT3 instruction into int3_selftest_ip,
	 * then trigger the INT3, padded with NOPs to match a CALL instruction
	 * length.
	 */
	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
		      ".pushsection .init.data,\"aw\"\n\t"
		      ".align " __ASM_SEL(4, 8) "\n\t"
		      ".type int3_selftest_ip, @object\n\t"
		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
		      "int3_selftest_ip:\n\t"
		      __ASM_SEL(.long, .quad) " 1b\n\t"
		      ".popsection\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}

void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the to be patched code.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	/*
	 * Paravirt patching and alternative patching can be combined to
	 * replace a function call with a short direct code sequence (e.g.
	 * by setting a constant return value instead of doing that in an
	 * external function).
	 * In order to make this work the following sequence is required:
	 * 1. set (artificial) features depending on used paravirt
	 *    functions which can later influence alternative patching
	 * 2. apply paravirt patching (generally replacing an indirect
	 *    function call with a direct one)
	 * 3. apply alternative patching (e.g. replacing a direct function
	 *    call with a custom code sequence)
	 * Doing paravirt patching after alternative patching would clobber
	 * the optimization of the custom code with a function call again.
	 */
	paravirt_set_cap();

	/*
	 * First patch paravirt functions, such that we overwrite the indirect
	 * call with the direct call.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	/*
	 * Then patch alternatives, such that those paravirt calls that are in
	 * alternatives can be overwritten by their immediate fragments.
	 */
	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}
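
/*
 * A minimal usage sketch (the patch site is hypothetical): while a module
 * is loading and its text is still non-executable, a 5-byte NOP can simply
 * be copied over a call site, no INT3 dance required:
 *
 *	text_poke_early(site_addr, x86_nops[5], 5);	// site_addr: hypothetical
 */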

typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows setting temporary mappings that are not
 * accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids
 * TLB shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();

	/*
	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
	 * with a stale address space WITHOUT being in lazy mode after
	 * restoring the previous mm.
	 */
	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
		leave_mm(smp_processor_id());

	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}
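
/*
 * Sketch of the intended pairing, as used by __text_poke() below; the
 * caller must have IRQs disabled around the whole sequence:
 *
 *	temp_mm_state_t prev;
 *
 *	prev = use_temporary_mm(poking_mm);
 *	// ... write through the temporary mapping ...
 *	unuse_temporary_mm(prev);
 */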

__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but this avoids open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	local_irq_save(flags);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs. No IPIs are needed here, though, since the temporary mm is
	 * no longer in use by any CPU at this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	local_irq_restore(flags);
	pte_unmap_unlock(ptep, ptl);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}
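
/*
 * A minimal usage sketch: atomically flip a single byte under text_mutex,
 * e.g. turning a DS prefix back into LOCK the way alternatives_smp_lock()
 * does above:
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(ptr, ((unsigned char []){0xf0}), 1);
 *	mutex_unlock(&text_mutex);
 */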

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}

static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

struct text_poke_loc {
	s32 rel_addr; /* addr := _stext + rel_addr */
	s32 rel32;
	u8 opcode;
	const u8 text[POKE_MAX_OPCODE_SIZE];
	u8 old;
};

struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
	atomic_t refs;
};

static struct bp_patching_desc *bp_desc;

static __always_inline
struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */

	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
		return NULL;

	return desc;
}

static __always_inline void put_desc(struct bp_patching_desc *desc)
{
	smp_mb__before_atomic();
	arch_atomic_dec(&desc->refs);
}

static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
	return _stext + tp->rel_addr;
}

static __always_inline int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < text_poke_addr(tp))
		return -1;
	if (key > text_poke_addr(tp))
		return 1;
	return 0;
}

noinstr int poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
	int len, ret = 0;
	void *ip;

	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc:
	 *
	 *	bp_desc = desc			INT3
	 *	WMB				RMB
	 *	write INT3			if (desc)
	 */
	smp_rmb();

	desc = try_get_desc(&bp_desc);
	if (!desc)
		return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(desc->nr_entries > 1)) {
		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
				      sizeof(struct text_poke_loc),
				      patch_cmp);
		if (!tp)
			goto out_put;
	} else {
		tp = desc->vec;
		if (text_poke_addr(tp) != ip)
			goto out_put;
	}

	len = text_opcode_size(tp->opcode);
	ip += len;

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
		goto out_put;

	case RET_INSN_OPCODE:
		int3_emulate_ret(regs);
		break;

	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (long)ip + tp->rel32);
		break;

	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (long)ip + tp->rel32);
		break;

	default:
		BUG();
	}

	ret = 1;

out_put:
	put_desc(desc);
	return ret;
}
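
/*
 * A worked example of the emulation math above, with made-up numbers: for
 * a 5-byte CALL at 0x1000 with rel32 = 0x100, the INT3 traps with
 * regs->ip = 0x1001, so ip = 0x1000; ip += len then yields 0x1005, and
 * int3_emulate_call() pushes 0x1005 as the return address and sets
 * regs->ip = 0x1005 + 0x100 = 0x1105, exactly what the real CALL would
 * have done.
 */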

#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:			vector of instructions to patch
 * @nr_entries:		number of entries in the vector
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of
 *		  replacing opcode
 *	- sync cores
 */
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	struct bp_patching_desc desc = {
		.vec = tp,
		.nr_entries = nr_entries,
		.refs = ATOMIC_INIT(1),
	};
	unsigned char int3 = INT3_INSN_OPCODE;
	unsigned int i;
	int do_sync;

	lockdep_assert_held(&text_mutex);

	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add an int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++) {
		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
	}

	text_poke_sync();

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
		int len = text_opcode_size(tp[i].opcode);

		if (len - INT3_INSN_SIZE > 0) {
			memcpy(old + INT3_INSN_SIZE,
			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
			       len - INT3_INSN_SIZE);
			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
				  (const char *)tp[i].text + INT3_INSN_SIZE,
				  len - INT3_INSN_SIZE);
			do_sync++;
		}

		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding which must walk the executable code
		 * to reconstruct the trace. The flow up to here is:
		 *   - write INT3 byte
		 *   - IPI-SYNC
		 *   - write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *   - emit RECORD_TEXT_POKE with the new instruction
		 *   - IPI-SYNC
		 *   - write first byte
		 *   - IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * executable code.
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
				     tp[i].text, len);
	}

	if (do_sync) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		text_poke_sync();
	}

	/*
	 * Third step: replace the first byte (int3) by the first byte of
	 * replacing opcode.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		if (tp[i].text[0] == INT3_INSN_OPCODE)
			continue;

		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
		do_sync++;
	}

	if (do_sync)
		text_poke_sync();

	/*
	 * Remove and synchronize_rcu(), except we have a very primitive
	 * refcount based completion.
	 */
	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
	if (!atomic_dec_and_test(&desc.refs))
		atomic_cond_read_acquire(&desc.refs, !VAL);
}

static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
{
	struct insn insn;
	int ret;

	memcpy((void *)tp->text, opcode, len);
	if (!emulate)
		emulate = opcode;

	ret = insn_decode_kernel(&insn, emulate);

	BUG_ON(ret < 0);
	BUG_ON(len != insn.length);

	tp->rel_addr = addr - (void *)_stext;
	tp->opcode = insn.opcode.bytes[0];

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
	case RET_INSN_OPCODE:
		break;

	case CALL_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		tp->rel32 = insn.immediate.value;
		break;

	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		case 5: /* NOP5 -- emulate as JMP32+0 */
			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		default: /* unknown instruction */
			BUG();
		}
		break;
	}
}
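
/*
 * Sketch of the NOP-emulation rule above: while a 5-byte NOP is being
 * replaced, a CPU hitting the transient INT3 must still skip the whole
 * instruction, so the NOP is emulated as a JMP32 to the next instruction:
 *
 *	tp->opcode = JMP32_INSN_OPCODE;	// 0xe9
 *	tp->rel32  = 0;			// target == ip + len: fall through
 *
 * poke_int3_handler() then resumes at ip + len + 0, which is precisely the
 * NOP semantics.
 */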

/*
 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
{
	struct text_poke_loc *tp;

	if (!tp_vec_nr)
		return false;

	if (!addr) /* force */
		return true;

	tp = &tp_vec[tp_vec_nr - 1];
	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
		return true;

	return false;
}

static void text_poke_flush(void *addr)
{
	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
		text_poke_bp_batch(tp_vec, tp_vec_nr);
		tp_vec_nr = 0;
	}
}

void text_poke_finish(void)
{
	text_poke_flush(NULL);
}

void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc *tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_flush(addr);

	tp = &tp_vec[tp_vec_nr++];
	text_poke_loc_init(tp, addr, opcode, len, emulate);
}
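
/*
 * A minimal usage sketch (hypothetical sites): callers such as the jump
 * label code queue pokes in ascending address order and flush once, so an
 * entire batch is serialized with a single INT3 dance:
 *
 *	mutex_lock(&text_mutex);
 *	text_poke_queue(site1, insn1, len1, NULL);	// site1 < site2
 *	text_poke_queue(site2, insn2, len2, NULL);
 *	text_poke_finish();
 *	mutex_unlock(&text_mutex);
 */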

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @emulate:	instruction to be emulated
 *
 * Update a single instruction with the vector in the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}