1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Kernel Probes (KProbes) 4 * kernel/kprobes.c 5 * 6 * Copyright (C) IBM Corporation, 2002, 2004 7 * 8 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel 9 * Probes initial implementation (includes suggestions from 10 * Rusty Russell). 11 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with 12 * hlists and exceptions notifier as suggested by Andi Kleen. 13 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes 14 * interface to access function arguments. 15 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes 16 * exceptions notifier to be first on the priority list. 17 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston 18 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi 19 * <prasanna@in.ibm.com> added function-return probes. 20 */ 21 22 #define pr_fmt(fmt) "kprobes: " fmt 23 24 #include <linux/kprobes.h> 25 #include <linux/hash.h> 26 #include <linux/init.h> 27 #include <linux/slab.h> 28 #include <linux/stddef.h> 29 #include <linux/export.h> 30 #include <linux/moduleloader.h> 31 #include <linux/kallsyms.h> 32 #include <linux/freezer.h> 33 #include <linux/seq_file.h> 34 #include <linux/debugfs.h> 35 #include <linux/sysctl.h> 36 #include <linux/kdebug.h> 37 #include <linux/memory.h> 38 #include <linux/ftrace.h> 39 #include <linux/cpu.h> 40 #include <linux/jump_label.h> 41 #include <linux/static_call.h> 42 #include <linux/perf_event.h> 43 44 #include <asm/sections.h> 45 #include <asm/cacheflush.h> 46 #include <asm/errno.h> 47 #include <linux/uaccess.h> 48 49 #define KPROBE_HASH_BITS 6 50 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) 51 52 53 static int kprobes_initialized; 54 /* kprobe_table can be accessed by 55 * - Normal hlist traversal and RCU add/del under kprobe_mutex is held. 56 * Or 57 * - RCU hlist traversal under disabling preempt (breakpoint handlers) 58 */ 59 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 60 61 /* NOTE: change this value only with kprobe_mutex held */ 62 static bool kprobes_all_disarmed; 63 64 /* This protects kprobe_table and optimizing_list */ 65 static DEFINE_MUTEX(kprobe_mutex); 66 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 67 68 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name, 69 unsigned int __unused) 70 { 71 return ((kprobe_opcode_t *)(kallsyms_lookup_name(name))); 72 } 73 74 /* Blacklist -- list of struct kprobe_blacklist_entry */ 75 static LIST_HEAD(kprobe_blacklist); 76 77 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT 78 /* 79 * kprobe->ainsn.insn points to the copy of the instruction to be 80 * single-stepped. 
x86_64, POWER4 and above have no-exec support and 81 * stepping on the instruction on a vmalloced/kmalloced/data page 82 * is a recipe for disaster 83 */ 84 struct kprobe_insn_page { 85 struct list_head list; 86 kprobe_opcode_t *insns; /* Page of instruction slots */ 87 struct kprobe_insn_cache *cache; 88 int nused; 89 int ngarbage; 90 char slot_used[]; 91 }; 92 93 #define KPROBE_INSN_PAGE_SIZE(slots) \ 94 (offsetof(struct kprobe_insn_page, slot_used) + \ 95 (sizeof(char) * (slots))) 96 97 static int slots_per_page(struct kprobe_insn_cache *c) 98 { 99 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); 100 } 101 102 enum kprobe_slot_state { 103 SLOT_CLEAN = 0, 104 SLOT_DIRTY = 1, 105 SLOT_USED = 2, 106 }; 107 108 void __weak *alloc_insn_page(void) 109 { 110 return module_alloc(PAGE_SIZE); 111 } 112 113 static void free_insn_page(void *page) 114 { 115 module_memfree(page); 116 } 117 118 struct kprobe_insn_cache kprobe_insn_slots = { 119 .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), 120 .alloc = alloc_insn_page, 121 .free = free_insn_page, 122 .sym = KPROBE_INSN_PAGE_SYM, 123 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), 124 .insn_size = MAX_INSN_SIZE, 125 .nr_garbage = 0, 126 }; 127 static int collect_garbage_slots(struct kprobe_insn_cache *c); 128 129 /** 130 * __get_insn_slot() - Find a slot on an executable page for an instruction. 131 * We allocate an executable page if there's no room on existing ones. 132 */ 133 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) 134 { 135 struct kprobe_insn_page *kip; 136 kprobe_opcode_t *slot = NULL; 137 138 /* Since the slot array is not protected by rcu, we need a mutex */ 139 mutex_lock(&c->mutex); 140 retry: 141 rcu_read_lock(); 142 list_for_each_entry_rcu(kip, &c->pages, list) { 143 if (kip->nused < slots_per_page(c)) { 144 int i; 145 for (i = 0; i < slots_per_page(c); i++) { 146 if (kip->slot_used[i] == SLOT_CLEAN) { 147 kip->slot_used[i] = SLOT_USED; 148 kip->nused++; 149 slot = kip->insns + (i * c->insn_size); 150 rcu_read_unlock(); 151 goto out; 152 } 153 } 154 /* kip->nused is broken. Fix it. */ 155 kip->nused = slots_per_page(c); 156 WARN_ON(1); 157 } 158 } 159 rcu_read_unlock(); 160 161 /* If there are any garbage slots, collect it and try again. */ 162 if (c->nr_garbage && collect_garbage_slots(c) == 0) 163 goto retry; 164 165 /* All out of space. Need to allocate a new page. */ 166 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); 167 if (!kip) 168 goto out; 169 170 /* 171 * Use module_alloc so this page is within +/- 2GB of where the 172 * kernel image and loaded module images reside. This is required 173 * so x86_64 can correctly handle the %rip-relative fixups. 174 */ 175 kip->insns = c->alloc(); 176 if (!kip->insns) { 177 kfree(kip); 178 goto out; 179 } 180 INIT_LIST_HEAD(&kip->list); 181 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); 182 kip->slot_used[0] = SLOT_USED; 183 kip->nused = 1; 184 kip->ngarbage = 0; 185 kip->cache = c; 186 list_add_rcu(&kip->list, &c->pages); 187 slot = kip->insns; 188 189 /* Record the perf ksymbol register event after adding the page */ 190 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, 191 PAGE_SIZE, false, c->sym); 192 out: 193 mutex_unlock(&c->mutex); 194 return slot; 195 } 196 197 /* Return 1 if all garbages are collected, otherwise 0. 
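 */

/*
 * For illustration only (hypothetical caller, not code from this file):
 * arch code typically consumes this cache through the thin
 * get_insn_slot()/free_insn_slot() wrappers generated by
 * DEFINE_INSN_CACHE_OPS() in <linux/kprobes.h>.  In terms of the helpers
 * defined here, the lifecycle of a slot looks roughly like:
 *
 *	kprobe_opcode_t *slot;
 *
 *	slot = __get_insn_slot(&kprobe_insn_slots);	// slot becomes SLOT_USED
 *	if (!slot)
 *		return -ENOMEM;
 *	// ... copy the probed instruction into *slot for single-stepping ...
 *	__free_insn_slot(&kprobe_insn_slots, slot, 1);	// dirty: SLOT_DIRTY until
 *							// collect_garbage_slots()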
*/ 198 static int collect_one_slot(struct kprobe_insn_page *kip, int idx) 199 { 200 kip->slot_used[idx] = SLOT_CLEAN; 201 kip->nused--; 202 if (kip->nused == 0) { 203 /* 204 * Page is no longer in use. Free it unless 205 * it's the last one. We keep the last one 206 * so as not to have to set it up again the 207 * next time somebody inserts a probe. 208 */ 209 if (!list_is_singular(&kip->list)) { 210 /* 211 * Record perf ksymbol unregister event before removing 212 * the page. 213 */ 214 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 215 (unsigned long)kip->insns, PAGE_SIZE, true, 216 kip->cache->sym); 217 list_del_rcu(&kip->list); 218 synchronize_rcu(); 219 kip->cache->free(kip->insns); 220 kfree(kip); 221 } 222 return 1; 223 } 224 return 0; 225 } 226 227 static int collect_garbage_slots(struct kprobe_insn_cache *c) 228 { 229 struct kprobe_insn_page *kip, *next; 230 231 /* Ensure no-one is interrupted on the garbages */ 232 synchronize_rcu(); 233 234 list_for_each_entry_safe(kip, next, &c->pages, list) { 235 int i; 236 if (kip->ngarbage == 0) 237 continue; 238 kip->ngarbage = 0; /* we will collect all garbages */ 239 for (i = 0; i < slots_per_page(c); i++) { 240 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i)) 241 break; 242 } 243 } 244 c->nr_garbage = 0; 245 return 0; 246 } 247 248 void __free_insn_slot(struct kprobe_insn_cache *c, 249 kprobe_opcode_t *slot, int dirty) 250 { 251 struct kprobe_insn_page *kip; 252 long idx; 253 254 mutex_lock(&c->mutex); 255 rcu_read_lock(); 256 list_for_each_entry_rcu(kip, &c->pages, list) { 257 idx = ((long)slot - (long)kip->insns) / 258 (c->insn_size * sizeof(kprobe_opcode_t)); 259 if (idx >= 0 && idx < slots_per_page(c)) 260 goto out; 261 } 262 /* Could not find this slot. */ 263 WARN_ON(1); 264 kip = NULL; 265 out: 266 rcu_read_unlock(); 267 /* Mark and sweep: this may sleep */ 268 if (kip) { 269 /* Check double free */ 270 WARN_ON(kip->slot_used[idx] != SLOT_USED); 271 if (dirty) { 272 kip->slot_used[idx] = SLOT_DIRTY; 273 kip->ngarbage++; 274 if (++c->nr_garbage > slots_per_page(c)) 275 collect_garbage_slots(c); 276 } else { 277 collect_one_slot(kip, idx); 278 } 279 } 280 mutex_unlock(&c->mutex); 281 } 282 283 /* 284 * Check given address is on the page of kprobe instruction slots. 285 * This will be used for checking whether the address on a stack 286 * is on a text area or not. 
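 * For example, a stack-walk sanity check might do (illustrative only):
 *
 *	if (__is_insn_slot_addr(&kprobe_insn_slots, addr))
 *		// 'addr' points into a single-step slot; treat it as text
 *		// even though it is not covered by core_kernel_text().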
287 */ 288 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) 289 { 290 struct kprobe_insn_page *kip; 291 bool ret = false; 292 293 rcu_read_lock(); 294 list_for_each_entry_rcu(kip, &c->pages, list) { 295 if (addr >= (unsigned long)kip->insns && 296 addr < (unsigned long)kip->insns + PAGE_SIZE) { 297 ret = true; 298 break; 299 } 300 } 301 rcu_read_unlock(); 302 303 return ret; 304 } 305 306 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, 307 unsigned long *value, char *type, char *sym) 308 { 309 struct kprobe_insn_page *kip; 310 int ret = -ERANGE; 311 312 rcu_read_lock(); 313 list_for_each_entry_rcu(kip, &c->pages, list) { 314 if ((*symnum)--) 315 continue; 316 strlcpy(sym, c->sym, KSYM_NAME_LEN); 317 *type = 't'; 318 *value = (unsigned long)kip->insns; 319 ret = 0; 320 break; 321 } 322 rcu_read_unlock(); 323 324 return ret; 325 } 326 327 #ifdef CONFIG_OPTPROBES 328 void __weak *alloc_optinsn_page(void) 329 { 330 return alloc_insn_page(); 331 } 332 333 void __weak free_optinsn_page(void *page) 334 { 335 free_insn_page(page); 336 } 337 338 /* For optimized_kprobe buffer */ 339 struct kprobe_insn_cache kprobe_optinsn_slots = { 340 .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), 341 .alloc = alloc_optinsn_page, 342 .free = free_optinsn_page, 343 .sym = KPROBE_OPTINSN_PAGE_SYM, 344 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), 345 /* .insn_size is initialized later */ 346 .nr_garbage = 0, 347 }; 348 #endif 349 #endif 350 351 /* We have preemption disabled.. so it is safe to use __ versions */ 352 static inline void set_kprobe_instance(struct kprobe *kp) 353 { 354 __this_cpu_write(kprobe_instance, kp); 355 } 356 357 static inline void reset_kprobe_instance(void) 358 { 359 __this_cpu_write(kprobe_instance, NULL); 360 } 361 362 /* 363 * This routine is called either: 364 * - under the kprobe_mutex - during kprobe_[un]register() 365 * OR 366 * - with preemption disabled - from arch/xxx/kernel/kprobes.c 367 */ 368 struct kprobe *get_kprobe(void *addr) 369 { 370 struct hlist_head *head; 371 struct kprobe *p; 372 373 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; 374 hlist_for_each_entry_rcu(p, head, hlist, 375 lockdep_is_held(&kprobe_mutex)) { 376 if (p->addr == addr) 377 return p; 378 } 379 380 return NULL; 381 } 382 NOKPROBE_SYMBOL(get_kprobe); 383 384 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); 385 386 /* Return true if the kprobe is an aggregator */ 387 static inline int kprobe_aggrprobe(struct kprobe *p) 388 { 389 return p->pre_handler == aggr_pre_handler; 390 } 391 392 /* Return true(!0) if the kprobe is unused */ 393 static inline int kprobe_unused(struct kprobe *p) 394 { 395 return kprobe_aggrprobe(p) && kprobe_disabled(p) && 396 list_empty(&p->list); 397 } 398 399 /* 400 * Keep all fields in the kprobe consistent 401 */ 402 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) 403 { 404 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); 405 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); 406 } 407 408 #ifdef CONFIG_OPTPROBES 409 /* NOTE: change this value only with kprobe_mutex held */ 410 static bool kprobes_allow_optimization; 411 412 /* 413 * Call all pre_handler on the list, but ignores its return value. 414 * This must be called from arch-dep optimized caller. 
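 * Roughly, an arch's optprobe trampoline callback is expected to do
 * something like the following (illustrative sketch; the real callbacks
 * live under arch/ and also disable preemption and fix up regs to look
 * like a breakpoint hit):
 *
 *	if (kprobe_running()) {
 *		kprobes_inc_nmissed_count(&op->kp);
 *	} else {
 *		__this_cpu_write(current_kprobe, &op->kp);
 *		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 *		opt_pre_handler(&op->kp, regs);
 *		__this_cpu_write(current_kprobe, NULL);
 *	}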
415 */ 416 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) 417 { 418 struct kprobe *kp; 419 420 list_for_each_entry_rcu(kp, &p->list, list) { 421 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 422 set_kprobe_instance(kp); 423 kp->pre_handler(kp, regs); 424 } 425 reset_kprobe_instance(); 426 } 427 } 428 NOKPROBE_SYMBOL(opt_pre_handler); 429 430 /* Free optimized instructions and optimized_kprobe */ 431 static void free_aggr_kprobe(struct kprobe *p) 432 { 433 struct optimized_kprobe *op; 434 435 op = container_of(p, struct optimized_kprobe, kp); 436 arch_remove_optimized_kprobe(op); 437 arch_remove_kprobe(p); 438 kfree(op); 439 } 440 441 /* Return true(!0) if the kprobe is ready for optimization. */ 442 static inline int kprobe_optready(struct kprobe *p) 443 { 444 struct optimized_kprobe *op; 445 446 if (kprobe_aggrprobe(p)) { 447 op = container_of(p, struct optimized_kprobe, kp); 448 return arch_prepared_optinsn(&op->optinsn); 449 } 450 451 return 0; 452 } 453 454 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */ 455 static inline int kprobe_disarmed(struct kprobe *p) 456 { 457 struct optimized_kprobe *op; 458 459 /* If kprobe is not aggr/opt probe, just return kprobe is disabled */ 460 if (!kprobe_aggrprobe(p)) 461 return kprobe_disabled(p); 462 463 op = container_of(p, struct optimized_kprobe, kp); 464 465 return kprobe_disabled(p) && list_empty(&op->list); 466 } 467 468 /* Return true(!0) if the probe is queued on (un)optimizing lists */ 469 static int kprobe_queued(struct kprobe *p) 470 { 471 struct optimized_kprobe *op; 472 473 if (kprobe_aggrprobe(p)) { 474 op = container_of(p, struct optimized_kprobe, kp); 475 if (!list_empty(&op->list)) 476 return 1; 477 } 478 return 0; 479 } 480 481 /* 482 * Return an optimized kprobe whose optimizing code replaces 483 * instructions including addr (exclude breakpoint). 484 */ 485 static struct kprobe *get_optimized_kprobe(unsigned long addr) 486 { 487 int i; 488 struct kprobe *p = NULL; 489 struct optimized_kprobe *op; 490 491 /* Don't check i == 0, since that is a breakpoint case. */ 492 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++) 493 p = get_kprobe((void *)(addr - i)); 494 495 if (p && kprobe_optready(p)) { 496 op = container_of(p, struct optimized_kprobe, kp); 497 if (arch_within_optimized_kprobe(op, addr)) 498 return p; 499 } 500 501 return NULL; 502 } 503 504 /* Optimization staging list, protected by kprobe_mutex */ 505 static LIST_HEAD(optimizing_list); 506 static LIST_HEAD(unoptimizing_list); 507 static LIST_HEAD(freeing_list); 508 509 static void kprobe_optimizer(struct work_struct *work); 510 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 511 #define OPTIMIZE_DELAY 5 512 513 /* 514 * Optimize (replace a breakpoint with a jump) kprobes listed on 515 * optimizing_list. 516 */ 517 static void do_optimize_kprobes(void) 518 { 519 lockdep_assert_held(&text_mutex); 520 /* 521 * The optimization/unoptimization refers online_cpus via 522 * stop_machine() and cpu-hotplug modifies online_cpus. 523 * And same time, text_mutex will be held in cpu-hotplug and here. 524 * This combination can cause a deadlock (cpu-hotplug try to lock 525 * text_mutex but stop_machine can not be done because online_cpus 526 * has been changed) 527 * To avoid this deadlock, caller must have locked cpu hotplug 528 * for preventing cpu-hotplug outside of text_mutex locking. 
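 * In practice the only caller, kprobe_optimizer() below, nests the locks
 * in exactly this order (all under kprobe_mutex):
 *
 *	cpus_read_lock();
 *	mutex_lock(&text_mutex);
 *	... do_unoptimize_kprobes() / do_optimize_kprobes() ...
 *	mutex_unlock(&text_mutex);
 *	cpus_read_unlock();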
 */
	lockdep_assert_cpus_held();

	/* Optimization is never done while disarmed. */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	arch_optimize_kprobes(&optimizing_list);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	lockdep_assert_held(&text_mutex);
	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done at any time, even while disarmed. */
	if (list_empty(&unoptimizing_list))
		return;

	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Switching from detour code to origin */
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from the hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
}

/* Reclaim all kprobes on the freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on the kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for a quiescence period to ensure that all potentially
	 * preempted tasks have been scheduled normally. Because an optprobe
	 * may modify multiple instructions, there is a chance that the Nth
	 * instruction is preempted. In that case, such tasks can return
	 * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
	 * Note that on a non-preemptive kernel, this is transparently converted
	 * to synchronize_sched() to wait for all interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();

	mutex_unlock(&kprobe_mutex);
}

/* Wait for optimization and unoptimization to complete */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* This will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

static bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
	struct optimized_kprobe *_op;

	list_for_each_entry(_op, &unoptimizing_list, list) {
		if (op == _op)
			return true;
	}

	return false;
}

/* Optimize the kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* kprobes with a post_handler cannot be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes within the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
		if (optprobe_queued_unopt(op)) {
			/* This is being unoptimized. Just dequeue the probe */
			list_del_init(&op->list);
		}
		return;
	}
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	/* On the unoptimizing/optimizing_list, op must have the OPTIMIZED flag */
	if (WARN_ON_ONCE(!list_empty(&op->list)))
		return;

	list_add(&op->list, &optimizing_list);
	kick_kprobe_optimizer();
}

/* Shortcut for direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is neither an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p))
		return;

	if (!list_empty(&op->list)) {
		if (optprobe_queued_unopt(op)) {
			/* Queued on the unoptimizing queue */
			if (force) {
				/*
				 * Forcibly unoptimize the kprobe here, and queue it
				 * in the freeing list for release afterwards.
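				 * (The 'force' path is taken, for instance, by
				 * __arm_kprobe() below when a new breakpoint would
				 * land inside an already-optimized region, and by
				 * __disarm_kprobe() while all kprobes are being
				 * disarmed.)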
736 */ 737 force_unoptimize_kprobe(op); 738 list_move(&op->list, &freeing_list); 739 } 740 } else { 741 /* Dequeue from the optimizing queue */ 742 list_del_init(&op->list); 743 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 744 } 745 return; 746 } 747 748 /* Optimized kprobe case */ 749 if (force) { 750 /* Forcibly update the code: this is a special case */ 751 force_unoptimize_kprobe(op); 752 } else { 753 list_add(&op->list, &unoptimizing_list); 754 kick_kprobe_optimizer(); 755 } 756 } 757 758 /* Cancel unoptimizing for reusing */ 759 static int reuse_unused_kprobe(struct kprobe *ap) 760 { 761 struct optimized_kprobe *op; 762 763 /* 764 * Unused kprobe MUST be on the way of delayed unoptimizing (means 765 * there is still a relative jump) and disabled. 766 */ 767 op = container_of(ap, struct optimized_kprobe, kp); 768 WARN_ON_ONCE(list_empty(&op->list)); 769 /* Enable the probe again */ 770 ap->flags &= ~KPROBE_FLAG_DISABLED; 771 /* Optimize it again (remove from op->list) */ 772 if (!kprobe_optready(ap)) 773 return -EINVAL; 774 775 optimize_kprobe(ap); 776 return 0; 777 } 778 779 /* Remove optimized instructions */ 780 static void kill_optimized_kprobe(struct kprobe *p) 781 { 782 struct optimized_kprobe *op; 783 784 op = container_of(p, struct optimized_kprobe, kp); 785 if (!list_empty(&op->list)) 786 /* Dequeue from the (un)optimization queue */ 787 list_del_init(&op->list); 788 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 789 790 if (kprobe_unused(p)) { 791 /* Enqueue if it is unused */ 792 list_add(&op->list, &freeing_list); 793 /* 794 * Remove unused probes from the hash list. After waiting 795 * for synchronization, this probe is reclaimed. 796 * (reclaiming is done by do_free_cleaned_kprobes().) 797 */ 798 hlist_del_rcu(&op->kp.hlist); 799 } 800 801 /* Don't touch the code, because it is already freed. 
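	 * (This is only reached via kill_kprobe(), i.e. when the code that
	 *  contains the probed instruction is itself going away, typically
	 *  on module unload.)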
*/ 802 arch_remove_optimized_kprobe(op); 803 } 804 805 static inline 806 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) 807 { 808 if (!kprobe_ftrace(p)) 809 arch_prepare_optimized_kprobe(op, p); 810 } 811 812 /* Try to prepare optimized instructions */ 813 static void prepare_optimized_kprobe(struct kprobe *p) 814 { 815 struct optimized_kprobe *op; 816 817 op = container_of(p, struct optimized_kprobe, kp); 818 __prepare_optimized_kprobe(op, p); 819 } 820 821 /* Allocate new optimized_kprobe and try to prepare optimized instructions */ 822 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 823 { 824 struct optimized_kprobe *op; 825 826 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); 827 if (!op) 828 return NULL; 829 830 INIT_LIST_HEAD(&op->list); 831 op->kp.addr = p->addr; 832 __prepare_optimized_kprobe(op, p); 833 834 return &op->kp; 835 } 836 837 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); 838 839 /* 840 * Prepare an optimized_kprobe and optimize it 841 * NOTE: p must be a normal registered kprobe 842 */ 843 static void try_to_optimize_kprobe(struct kprobe *p) 844 { 845 struct kprobe *ap; 846 struct optimized_kprobe *op; 847 848 /* Impossible to optimize ftrace-based kprobe */ 849 if (kprobe_ftrace(p)) 850 return; 851 852 /* For preparing optimization, jump_label_text_reserved() is called */ 853 cpus_read_lock(); 854 jump_label_lock(); 855 mutex_lock(&text_mutex); 856 857 ap = alloc_aggr_kprobe(p); 858 if (!ap) 859 goto out; 860 861 op = container_of(ap, struct optimized_kprobe, kp); 862 if (!arch_prepared_optinsn(&op->optinsn)) { 863 /* If failed to setup optimizing, fallback to kprobe */ 864 arch_remove_optimized_kprobe(op); 865 kfree(op); 866 goto out; 867 } 868 869 init_aggr_kprobe(ap, p); 870 optimize_kprobe(ap); /* This just kicks optimizer thread */ 871 872 out: 873 mutex_unlock(&text_mutex); 874 jump_label_unlock(); 875 cpus_read_unlock(); 876 } 877 878 static void optimize_all_kprobes(void) 879 { 880 struct hlist_head *head; 881 struct kprobe *p; 882 unsigned int i; 883 884 mutex_lock(&kprobe_mutex); 885 /* If optimization is already allowed, just return */ 886 if (kprobes_allow_optimization) 887 goto out; 888 889 cpus_read_lock(); 890 kprobes_allow_optimization = true; 891 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 892 head = &kprobe_table[i]; 893 hlist_for_each_entry(p, head, hlist) 894 if (!kprobe_disabled(p)) 895 optimize_kprobe(p); 896 } 897 cpus_read_unlock(); 898 pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n"); 899 out: 900 mutex_unlock(&kprobe_mutex); 901 } 902 903 #ifdef CONFIG_SYSCTL 904 static void unoptimize_all_kprobes(void) 905 { 906 struct hlist_head *head; 907 struct kprobe *p; 908 unsigned int i; 909 910 mutex_lock(&kprobe_mutex); 911 /* If optimization is already prohibited, just return */ 912 if (!kprobes_allow_optimization) { 913 mutex_unlock(&kprobe_mutex); 914 return; 915 } 916 917 cpus_read_lock(); 918 kprobes_allow_optimization = false; 919 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 920 head = &kprobe_table[i]; 921 hlist_for_each_entry(p, head, hlist) { 922 if (!kprobe_disabled(p)) 923 unoptimize_kprobe(p, false); 924 } 925 } 926 cpus_read_unlock(); 927 mutex_unlock(&kprobe_mutex); 928 929 /* Wait for unoptimizing completion */ 930 wait_for_kprobe_optimizer(); 931 pr_info("kprobe jump-optimization is disabled. 
All kprobes are based on software breakpoint.\n"); 932 } 933 934 static DEFINE_MUTEX(kprobe_sysctl_mutex); 935 int sysctl_kprobes_optimization; 936 int proc_kprobes_optimization_handler(struct ctl_table *table, int write, 937 void *buffer, size_t *length, 938 loff_t *ppos) 939 { 940 int ret; 941 942 mutex_lock(&kprobe_sysctl_mutex); 943 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; 944 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 945 946 if (sysctl_kprobes_optimization) 947 optimize_all_kprobes(); 948 else 949 unoptimize_all_kprobes(); 950 mutex_unlock(&kprobe_sysctl_mutex); 951 952 return ret; 953 } 954 #endif /* CONFIG_SYSCTL */ 955 956 /* Put a breakpoint for a probe. Must be called with text_mutex locked */ 957 static void __arm_kprobe(struct kprobe *p) 958 { 959 struct kprobe *_p; 960 961 /* Check collision with other optimized kprobes */ 962 _p = get_optimized_kprobe((unsigned long)p->addr); 963 if (unlikely(_p)) 964 /* Fallback to unoptimized kprobe */ 965 unoptimize_kprobe(_p, true); 966 967 arch_arm_kprobe(p); 968 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ 969 } 970 971 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */ 972 static void __disarm_kprobe(struct kprobe *p, bool reopt) 973 { 974 struct kprobe *_p; 975 976 /* Try to unoptimize */ 977 unoptimize_kprobe(p, kprobes_all_disarmed); 978 979 if (!kprobe_queued(p)) { 980 arch_disarm_kprobe(p); 981 /* If another kprobe was blocked, optimize it. */ 982 _p = get_optimized_kprobe((unsigned long)p->addr); 983 if (unlikely(_p) && reopt) 984 optimize_kprobe(_p); 985 } 986 /* TODO: reoptimize others after unoptimized this probe */ 987 } 988 989 #else /* !CONFIG_OPTPROBES */ 990 991 #define optimize_kprobe(p) do {} while (0) 992 #define unoptimize_kprobe(p, f) do {} while (0) 993 #define kill_optimized_kprobe(p) do {} while (0) 994 #define prepare_optimized_kprobe(p) do {} while (0) 995 #define try_to_optimize_kprobe(p) do {} while (0) 996 #define __arm_kprobe(p) arch_arm_kprobe(p) 997 #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) 998 #define kprobe_disarmed(p) kprobe_disabled(p) 999 #define wait_for_kprobe_optimizer() do {} while (0) 1000 1001 static int reuse_unused_kprobe(struct kprobe *ap) 1002 { 1003 /* 1004 * If the optimized kprobe is NOT supported, the aggr kprobe is 1005 * released at the same time that the last aggregated kprobe is 1006 * unregistered. 1007 * Thus there should be no chance to reuse unused kprobe. 
 */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;

/* Caller must lock kprobe_mutex */
static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
{
	int ret = 0;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
	if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
		return ret;

	if (*cnt == 0) {
		ret = register_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
			goto err_ftrace;
	}

	(*cnt)++;
	return ret;

err_ftrace:
	/*
	 * At this point, since ops is not registered, we should be safe from
	 * registering an empty filter.
	 */
	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

static int arm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __arm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}

/* Caller must lock kprobe_mutex */
static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
				  int *cnt)
{
	int ret = 0;

	if (*cnt == 1) {
		ret = unregister_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret))
			return ret;
	}

	(*cnt)--;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n",
		  p->addr, ret);
	return ret;
}

static int disarm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __disarm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? 
&kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); 1103 } 1104 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1105 static inline int arm_kprobe_ftrace(struct kprobe *p) 1106 { 1107 return -ENODEV; 1108 } 1109 1110 static inline int disarm_kprobe_ftrace(struct kprobe *p) 1111 { 1112 return -ENODEV; 1113 } 1114 #endif 1115 1116 static int prepare_kprobe(struct kprobe *p) 1117 { 1118 /* Must ensure p->addr is really on ftrace */ 1119 if (kprobe_ftrace(p)) 1120 return arch_prepare_kprobe_ftrace(p); 1121 1122 return arch_prepare_kprobe(p); 1123 } 1124 1125 /* Arm a kprobe with text_mutex */ 1126 static int arm_kprobe(struct kprobe *kp) 1127 { 1128 if (unlikely(kprobe_ftrace(kp))) 1129 return arm_kprobe_ftrace(kp); 1130 1131 cpus_read_lock(); 1132 mutex_lock(&text_mutex); 1133 __arm_kprobe(kp); 1134 mutex_unlock(&text_mutex); 1135 cpus_read_unlock(); 1136 1137 return 0; 1138 } 1139 1140 /* Disarm a kprobe with text_mutex */ 1141 static int disarm_kprobe(struct kprobe *kp, bool reopt) 1142 { 1143 if (unlikely(kprobe_ftrace(kp))) 1144 return disarm_kprobe_ftrace(kp); 1145 1146 cpus_read_lock(); 1147 mutex_lock(&text_mutex); 1148 __disarm_kprobe(kp, reopt); 1149 mutex_unlock(&text_mutex); 1150 cpus_read_unlock(); 1151 1152 return 0; 1153 } 1154 1155 /* 1156 * Aggregate handlers for multiple kprobes support - these handlers 1157 * take care of invoking the individual kprobe handlers on p->list 1158 */ 1159 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 1160 { 1161 struct kprobe *kp; 1162 1163 list_for_each_entry_rcu(kp, &p->list, list) { 1164 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 1165 set_kprobe_instance(kp); 1166 if (kp->pre_handler(kp, regs)) 1167 return 1; 1168 } 1169 reset_kprobe_instance(); 1170 } 1171 return 0; 1172 } 1173 NOKPROBE_SYMBOL(aggr_pre_handler); 1174 1175 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 1176 unsigned long flags) 1177 { 1178 struct kprobe *kp; 1179 1180 list_for_each_entry_rcu(kp, &p->list, list) { 1181 if (kp->post_handler && likely(!kprobe_disabled(kp))) { 1182 set_kprobe_instance(kp); 1183 kp->post_handler(kp, regs, flags); 1184 reset_kprobe_instance(); 1185 } 1186 } 1187 } 1188 NOKPROBE_SYMBOL(aggr_post_handler); 1189 1190 /* Walks the list and increments nmissed count for multiprobe case */ 1191 void kprobes_inc_nmissed_count(struct kprobe *p) 1192 { 1193 struct kprobe *kp; 1194 if (!kprobe_aggrprobe(p)) { 1195 p->nmissed++; 1196 } else { 1197 list_for_each_entry_rcu(kp, &p->list, list) 1198 kp->nmissed++; 1199 } 1200 return; 1201 } 1202 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); 1203 1204 static void free_rp_inst_rcu(struct rcu_head *head) 1205 { 1206 struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu); 1207 1208 if (refcount_dec_and_test(&ri->rph->ref)) 1209 kfree(ri->rph); 1210 kfree(ri); 1211 } 1212 NOKPROBE_SYMBOL(free_rp_inst_rcu); 1213 1214 static void recycle_rp_inst(struct kretprobe_instance *ri) 1215 { 1216 struct kretprobe *rp = get_kretprobe(ri); 1217 1218 if (likely(rp)) { 1219 freelist_add(&ri->freelist, &rp->freelist); 1220 } else 1221 call_rcu(&ri->rcu, free_rp_inst_rcu); 1222 } 1223 NOKPROBE_SYMBOL(recycle_rp_inst); 1224 1225 static struct kprobe kprobe_busy = { 1226 .addr = (void *) get_kprobe, 1227 }; 1228 1229 void kprobe_busy_begin(void) 1230 { 1231 struct kprobe_ctlblk *kcb; 1232 1233 preempt_disable(); 1234 __this_cpu_write(current_kprobe, &kprobe_busy); 1235 kcb = get_kprobe_ctlblk(); 1236 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 1237 } 1238 1239 void 
kprobe_busy_end(void) 1240 { 1241 __this_cpu_write(current_kprobe, NULL); 1242 preempt_enable(); 1243 } 1244 1245 /* 1246 * This function is called from finish_task_switch when task tk becomes dead, 1247 * so that we can recycle any function-return probe instances associated 1248 * with this task. These left over instances represent probed functions 1249 * that have been called but will never return. 1250 */ 1251 void kprobe_flush_task(struct task_struct *tk) 1252 { 1253 struct kretprobe_instance *ri; 1254 struct llist_node *node; 1255 1256 /* Early boot, not yet initialized. */ 1257 if (unlikely(!kprobes_initialized)) 1258 return; 1259 1260 kprobe_busy_begin(); 1261 1262 node = __llist_del_all(&tk->kretprobe_instances); 1263 while (node) { 1264 ri = container_of(node, struct kretprobe_instance, llist); 1265 node = node->next; 1266 1267 recycle_rp_inst(ri); 1268 } 1269 1270 kprobe_busy_end(); 1271 } 1272 NOKPROBE_SYMBOL(kprobe_flush_task); 1273 1274 static inline void free_rp_inst(struct kretprobe *rp) 1275 { 1276 struct kretprobe_instance *ri; 1277 struct freelist_node *node; 1278 int count = 0; 1279 1280 node = rp->freelist.head; 1281 while (node) { 1282 ri = container_of(node, struct kretprobe_instance, freelist); 1283 node = node->next; 1284 1285 kfree(ri); 1286 count++; 1287 } 1288 1289 if (refcount_sub_and_test(count, &rp->rph->ref)) { 1290 kfree(rp->rph); 1291 rp->rph = NULL; 1292 } 1293 } 1294 1295 /* Add the new probe to ap->list */ 1296 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) 1297 { 1298 if (p->post_handler) 1299 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ 1300 1301 list_add_rcu(&p->list, &ap->list); 1302 if (p->post_handler && !ap->post_handler) 1303 ap->post_handler = aggr_post_handler; 1304 1305 return 0; 1306 } 1307 1308 /* 1309 * Fill in the required fields of the "manager kprobe". Replace the 1310 * earlier kprobe in the hlist with the manager kprobe 1311 */ 1312 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 1313 { 1314 /* Copy p's insn slot to ap */ 1315 copy_kprobe(p, ap); 1316 flush_insn_slot(ap); 1317 ap->addr = p->addr; 1318 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; 1319 ap->pre_handler = aggr_pre_handler; 1320 /* We don't care the kprobe which has gone. */ 1321 if (p->post_handler && !kprobe_gone(p)) 1322 ap->post_handler = aggr_post_handler; 1323 1324 INIT_LIST_HEAD(&ap->list); 1325 INIT_HLIST_NODE(&ap->hlist); 1326 1327 list_add_rcu(&p->list, &ap->list); 1328 hlist_replace_rcu(&p->hlist, &ap->hlist); 1329 } 1330 1331 /* 1332 * This is the second or subsequent kprobe at the address - handle 1333 * the intricacies 1334 */ 1335 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) 1336 { 1337 int ret = 0; 1338 struct kprobe *ap = orig_p; 1339 1340 cpus_read_lock(); 1341 1342 /* For preparing optimization, jump_label_text_reserved() is called */ 1343 jump_label_lock(); 1344 mutex_lock(&text_mutex); 1345 1346 if (!kprobe_aggrprobe(orig_p)) { 1347 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ 1348 ap = alloc_aggr_kprobe(orig_p); 1349 if (!ap) { 1350 ret = -ENOMEM; 1351 goto out; 1352 } 1353 init_aggr_kprobe(ap, orig_p); 1354 } else if (kprobe_unused(ap)) { 1355 /* This probe is going to die. Rescue it */ 1356 ret = reuse_unused_kprobe(ap); 1357 if (ret) 1358 goto out; 1359 } 1360 1361 if (kprobe_gone(ap)) { 1362 /* 1363 * Attempting to insert new probe at the same location that 1364 * had a probe in the module vaddr area which already 1365 * freed. 
So, the instruction slot has already been 1366 * released. We need a new slot for the new probe. 1367 */ 1368 ret = arch_prepare_kprobe(ap); 1369 if (ret) 1370 /* 1371 * Even if fail to allocate new slot, don't need to 1372 * free aggr_probe. It will be used next time, or 1373 * freed by unregister_kprobe. 1374 */ 1375 goto out; 1376 1377 /* Prepare optimized instructions if possible. */ 1378 prepare_optimized_kprobe(ap); 1379 1380 /* 1381 * Clear gone flag to prevent allocating new slot again, and 1382 * set disabled flag because it is not armed yet. 1383 */ 1384 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) 1385 | KPROBE_FLAG_DISABLED; 1386 } 1387 1388 /* Copy ap's insn slot to p */ 1389 copy_kprobe(ap, p); 1390 ret = add_new_kprobe(ap, p); 1391 1392 out: 1393 mutex_unlock(&text_mutex); 1394 jump_label_unlock(); 1395 cpus_read_unlock(); 1396 1397 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { 1398 ap->flags &= ~KPROBE_FLAG_DISABLED; 1399 if (!kprobes_all_disarmed) { 1400 /* Arm the breakpoint again. */ 1401 ret = arm_kprobe(ap); 1402 if (ret) { 1403 ap->flags |= KPROBE_FLAG_DISABLED; 1404 list_del_rcu(&p->list); 1405 synchronize_rcu(); 1406 } 1407 } 1408 } 1409 return ret; 1410 } 1411 1412 bool __weak arch_within_kprobe_blacklist(unsigned long addr) 1413 { 1414 /* The __kprobes marked functions and entry code must not be probed */ 1415 return addr >= (unsigned long)__kprobes_text_start && 1416 addr < (unsigned long)__kprobes_text_end; 1417 } 1418 1419 static bool __within_kprobe_blacklist(unsigned long addr) 1420 { 1421 struct kprobe_blacklist_entry *ent; 1422 1423 if (arch_within_kprobe_blacklist(addr)) 1424 return true; 1425 /* 1426 * If there exists a kprobe_blacklist, verify and 1427 * fail any probe registration in the prohibited area 1428 */ 1429 list_for_each_entry(ent, &kprobe_blacklist, list) { 1430 if (addr >= ent->start_addr && addr < ent->end_addr) 1431 return true; 1432 } 1433 return false; 1434 } 1435 1436 bool within_kprobe_blacklist(unsigned long addr) 1437 { 1438 char symname[KSYM_NAME_LEN], *p; 1439 1440 if (__within_kprobe_blacklist(addr)) 1441 return true; 1442 1443 /* Check if the address is on a suffixed-symbol */ 1444 if (!lookup_symbol_name(addr, symname)) { 1445 p = strchr(symname, '.'); 1446 if (!p) 1447 return false; 1448 *p = '\0'; 1449 addr = (unsigned long)kprobe_lookup_name(symname, 0); 1450 if (addr) 1451 return __within_kprobe_blacklist(addr); 1452 } 1453 return false; 1454 } 1455 1456 /* 1457 * If we have a symbol_name argument, look it up and add the offset field 1458 * to it. This way, we can specify a relative address to a symbol. 1459 * This returns encoded errors if it fails to look up symbol or invalid 1460 * combination of parameters. 1461 */ 1462 static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr, 1463 const char *symbol_name, unsigned int offset) 1464 { 1465 if ((symbol_name && addr) || (!symbol_name && !addr)) 1466 goto invalid; 1467 1468 if (symbol_name) { 1469 addr = kprobe_lookup_name(symbol_name, offset); 1470 if (!addr) 1471 return ERR_PTR(-ENOENT); 1472 } 1473 1474 addr = (kprobe_opcode_t *)(((char *)addr) + offset); 1475 if (addr) 1476 return addr; 1477 1478 invalid: 1479 return ERR_PTR(-EINVAL); 1480 } 1481 1482 static kprobe_opcode_t *kprobe_addr(struct kprobe *p) 1483 { 1484 return _kprobe_addr(p->addr, p->symbol_name, p->offset); 1485 } 1486 1487 /* Check passed kprobe is valid and return kprobe in kprobe_table. 
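 */

/*
 * For illustration only (hypothetical example, not code that exists in the
 * tree): the addressing rules implemented by _kprobe_addr() above mean that
 * a user names the probe point either by symbol (+offset) or by raw address,
 * never both.  A minimal user of register_kprobe() looks roughly like:
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	err = register_kprobe(&my_kp);	// -EINVAL if both (or neither of)
 *					// .addr and .symbol_name are set
 *	...
 *	unregister_kprobe(&my_kp);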
*/ 1488 static struct kprobe *__get_valid_kprobe(struct kprobe *p) 1489 { 1490 struct kprobe *ap, *list_p; 1491 1492 lockdep_assert_held(&kprobe_mutex); 1493 1494 ap = get_kprobe(p->addr); 1495 if (unlikely(!ap)) 1496 return NULL; 1497 1498 if (p != ap) { 1499 list_for_each_entry(list_p, &ap->list, list) 1500 if (list_p == p) 1501 /* kprobe p is a valid probe */ 1502 goto valid; 1503 return NULL; 1504 } 1505 valid: 1506 return ap; 1507 } 1508 1509 /* 1510 * Warn and return error if the kprobe is being re-registered since 1511 * there must be a software bug. 1512 */ 1513 static inline int warn_kprobe_rereg(struct kprobe *p) 1514 { 1515 int ret = 0; 1516 1517 mutex_lock(&kprobe_mutex); 1518 if (WARN_ON_ONCE(__get_valid_kprobe(p))) 1519 ret = -EINVAL; 1520 mutex_unlock(&kprobe_mutex); 1521 1522 return ret; 1523 } 1524 1525 static int check_ftrace_location(struct kprobe *p) 1526 { 1527 unsigned long ftrace_addr; 1528 1529 ftrace_addr = ftrace_location((unsigned long)p->addr); 1530 if (ftrace_addr) { 1531 #ifdef CONFIG_KPROBES_ON_FTRACE 1532 /* Given address is not on the instruction boundary */ 1533 if ((unsigned long)p->addr != ftrace_addr) 1534 return -EILSEQ; 1535 p->flags |= KPROBE_FLAG_FTRACE; 1536 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1537 return -EINVAL; 1538 #endif 1539 } 1540 return 0; 1541 } 1542 1543 static int check_kprobe_address_safe(struct kprobe *p, 1544 struct module **probed_mod) 1545 { 1546 int ret; 1547 1548 ret = check_ftrace_location(p); 1549 if (ret) 1550 return ret; 1551 jump_label_lock(); 1552 preempt_disable(); 1553 1554 /* Ensure it is not in reserved area nor out of text */ 1555 if (!kernel_text_address((unsigned long) p->addr) || 1556 within_kprobe_blacklist((unsigned long) p->addr) || 1557 jump_label_text_reserved(p->addr, p->addr) || 1558 static_call_text_reserved(p->addr, p->addr) || 1559 find_bug((unsigned long)p->addr)) { 1560 ret = -EINVAL; 1561 goto out; 1562 } 1563 1564 /* Check if are we probing a module */ 1565 *probed_mod = __module_text_address((unsigned long) p->addr); 1566 if (*probed_mod) { 1567 /* 1568 * We must hold a refcount of the probed module while updating 1569 * its code to prohibit unexpected unloading. 1570 */ 1571 if (unlikely(!try_module_get(*probed_mod))) { 1572 ret = -ENOENT; 1573 goto out; 1574 } 1575 1576 /* 1577 * If the module freed .init.text, we couldn't insert 1578 * kprobes in there. 1579 */ 1580 if (within_module_init((unsigned long)p->addr, *probed_mod) && 1581 (*probed_mod)->state != MODULE_STATE_COMING) { 1582 module_put(*probed_mod); 1583 *probed_mod = NULL; 1584 ret = -ENOENT; 1585 } 1586 } 1587 out: 1588 preempt_enable(); 1589 jump_label_unlock(); 1590 1591 return ret; 1592 } 1593 1594 int register_kprobe(struct kprobe *p) 1595 { 1596 int ret; 1597 struct kprobe *old_p; 1598 struct module *probed_mod; 1599 kprobe_opcode_t *addr; 1600 1601 /* Adjust probe address from symbol */ 1602 addr = kprobe_addr(p); 1603 if (IS_ERR(addr)) 1604 return PTR_ERR(addr); 1605 p->addr = addr; 1606 1607 ret = warn_kprobe_rereg(p); 1608 if (ret) 1609 return ret; 1610 1611 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ 1612 p->flags &= KPROBE_FLAG_DISABLED; 1613 p->nmissed = 0; 1614 INIT_LIST_HEAD(&p->list); 1615 1616 ret = check_kprobe_address_safe(p, &probed_mod); 1617 if (ret) 1618 return ret; 1619 1620 mutex_lock(&kprobe_mutex); 1621 1622 old_p = get_kprobe(p->addr); 1623 if (old_p) { 1624 /* Since this may unoptimize old_p, locking text_mutex. 
*/ 1625 ret = register_aggr_kprobe(old_p, p); 1626 goto out; 1627 } 1628 1629 cpus_read_lock(); 1630 /* Prevent text modification */ 1631 mutex_lock(&text_mutex); 1632 ret = prepare_kprobe(p); 1633 mutex_unlock(&text_mutex); 1634 cpus_read_unlock(); 1635 if (ret) 1636 goto out; 1637 1638 INIT_HLIST_NODE(&p->hlist); 1639 hlist_add_head_rcu(&p->hlist, 1640 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1641 1642 if (!kprobes_all_disarmed && !kprobe_disabled(p)) { 1643 ret = arm_kprobe(p); 1644 if (ret) { 1645 hlist_del_rcu(&p->hlist); 1646 synchronize_rcu(); 1647 goto out; 1648 } 1649 } 1650 1651 /* Try to optimize kprobe */ 1652 try_to_optimize_kprobe(p); 1653 out: 1654 mutex_unlock(&kprobe_mutex); 1655 1656 if (probed_mod) 1657 module_put(probed_mod); 1658 1659 return ret; 1660 } 1661 EXPORT_SYMBOL_GPL(register_kprobe); 1662 1663 /* Check if all probes on the aggrprobe are disabled */ 1664 static int aggr_kprobe_disabled(struct kprobe *ap) 1665 { 1666 struct kprobe *kp; 1667 1668 lockdep_assert_held(&kprobe_mutex); 1669 1670 list_for_each_entry(kp, &ap->list, list) 1671 if (!kprobe_disabled(kp)) 1672 /* 1673 * There is an active probe on the list. 1674 * We can't disable this ap. 1675 */ 1676 return 0; 1677 1678 return 1; 1679 } 1680 1681 /* Disable one kprobe: Make sure called under kprobe_mutex is locked */ 1682 static struct kprobe *__disable_kprobe(struct kprobe *p) 1683 { 1684 struct kprobe *orig_p; 1685 int ret; 1686 1687 /* Get an original kprobe for return */ 1688 orig_p = __get_valid_kprobe(p); 1689 if (unlikely(orig_p == NULL)) 1690 return ERR_PTR(-EINVAL); 1691 1692 if (!kprobe_disabled(p)) { 1693 /* Disable probe if it is a child probe */ 1694 if (p != orig_p) 1695 p->flags |= KPROBE_FLAG_DISABLED; 1696 1697 /* Try to disarm and disable this/parent probe */ 1698 if (p == orig_p || aggr_kprobe_disabled(orig_p)) { 1699 /* 1700 * If kprobes_all_disarmed is set, orig_p 1701 * should have already been disarmed, so 1702 * skip unneed disarming process. 1703 */ 1704 if (!kprobes_all_disarmed) { 1705 ret = disarm_kprobe(orig_p, true); 1706 if (ret) { 1707 p->flags &= ~KPROBE_FLAG_DISABLED; 1708 return ERR_PTR(ret); 1709 } 1710 } 1711 orig_p->flags |= KPROBE_FLAG_DISABLED; 1712 } 1713 } 1714 1715 return orig_p; 1716 } 1717 1718 /* 1719 * Unregister a kprobe without a scheduler synchronization. 1720 */ 1721 static int __unregister_kprobe_top(struct kprobe *p) 1722 { 1723 struct kprobe *ap, *list_p; 1724 1725 /* Disable kprobe. This will disarm it if needed. */ 1726 ap = __disable_kprobe(p); 1727 if (IS_ERR(ap)) 1728 return PTR_ERR(ap); 1729 1730 if (ap == p) 1731 /* 1732 * This probe is an independent(and non-optimized) kprobe 1733 * (not an aggrprobe). Remove from the hash list. 1734 */ 1735 goto disarmed; 1736 1737 /* Following process expects this probe is an aggrprobe */ 1738 WARN_ON(!kprobe_aggrprobe(ap)); 1739 1740 if (list_is_singular(&ap->list) && kprobe_disarmed(ap)) 1741 /* 1742 * !disarmed could be happen if the probe is under delayed 1743 * unoptimizing. 1744 */ 1745 goto disarmed; 1746 else { 1747 /* If disabling probe has special handlers, update aggrprobe */ 1748 if (p->post_handler && !kprobe_gone(p)) { 1749 list_for_each_entry(list_p, &ap->list, list) { 1750 if ((list_p != p) && (list_p->post_handler)) 1751 goto noclean; 1752 } 1753 ap->post_handler = NULL; 1754 } 1755 noclean: 1756 /* 1757 * Remove from the aggrprobe: this path will do nothing in 1758 * __unregister_kprobe_bottom(). 
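		 * (The full sequence, as unregister_kprobes() below implements it,
		 *  is: __unregister_kprobe_top() under kprobe_mutex, then
		 *  synchronize_rcu(), then __unregister_kprobe_bottom() for every
		 *  probe whose ->addr was not cleared by a failing top half.)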
1759 */ 1760 list_del_rcu(&p->list); 1761 if (!kprobe_disabled(ap) && !kprobes_all_disarmed) 1762 /* 1763 * Try to optimize this probe again, because post 1764 * handler may have been changed. 1765 */ 1766 optimize_kprobe(ap); 1767 } 1768 return 0; 1769 1770 disarmed: 1771 hlist_del_rcu(&ap->hlist); 1772 return 0; 1773 } 1774 1775 static void __unregister_kprobe_bottom(struct kprobe *p) 1776 { 1777 struct kprobe *ap; 1778 1779 if (list_empty(&p->list)) 1780 /* This is an independent kprobe */ 1781 arch_remove_kprobe(p); 1782 else if (list_is_singular(&p->list)) { 1783 /* This is the last child of an aggrprobe */ 1784 ap = list_entry(p->list.next, struct kprobe, list); 1785 list_del(&p->list); 1786 free_aggr_kprobe(ap); 1787 } 1788 /* Otherwise, do nothing. */ 1789 } 1790 1791 int register_kprobes(struct kprobe **kps, int num) 1792 { 1793 int i, ret = 0; 1794 1795 if (num <= 0) 1796 return -EINVAL; 1797 for (i = 0; i < num; i++) { 1798 ret = register_kprobe(kps[i]); 1799 if (ret < 0) { 1800 if (i > 0) 1801 unregister_kprobes(kps, i); 1802 break; 1803 } 1804 } 1805 return ret; 1806 } 1807 EXPORT_SYMBOL_GPL(register_kprobes); 1808 1809 void unregister_kprobe(struct kprobe *p) 1810 { 1811 unregister_kprobes(&p, 1); 1812 } 1813 EXPORT_SYMBOL_GPL(unregister_kprobe); 1814 1815 void unregister_kprobes(struct kprobe **kps, int num) 1816 { 1817 int i; 1818 1819 if (num <= 0) 1820 return; 1821 mutex_lock(&kprobe_mutex); 1822 for (i = 0; i < num; i++) 1823 if (__unregister_kprobe_top(kps[i]) < 0) 1824 kps[i]->addr = NULL; 1825 mutex_unlock(&kprobe_mutex); 1826 1827 synchronize_rcu(); 1828 for (i = 0; i < num; i++) 1829 if (kps[i]->addr) 1830 __unregister_kprobe_bottom(kps[i]); 1831 } 1832 EXPORT_SYMBOL_GPL(unregister_kprobes); 1833 1834 int __weak kprobe_exceptions_notify(struct notifier_block *self, 1835 unsigned long val, void *data) 1836 { 1837 return NOTIFY_DONE; 1838 } 1839 NOKPROBE_SYMBOL(kprobe_exceptions_notify); 1840 1841 static struct notifier_block kprobe_exceptions_nb = { 1842 .notifier_call = kprobe_exceptions_notify, 1843 .priority = 0x7fffffff /* we need to be notified first */ 1844 }; 1845 1846 unsigned long __weak arch_deref_entry_point(void *entry) 1847 { 1848 return (unsigned long)entry; 1849 } 1850 1851 #ifdef CONFIG_KRETPROBES 1852 1853 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, 1854 void *trampoline_address, 1855 void *frame_pointer) 1856 { 1857 kprobe_opcode_t *correct_ret_addr = NULL; 1858 struct kretprobe_instance *ri = NULL; 1859 struct llist_node *first, *node; 1860 struct kretprobe *rp; 1861 1862 /* Find all nodes for this frame. */ 1863 first = node = current->kretprobe_instances.first; 1864 while (node) { 1865 ri = container_of(node, struct kretprobe_instance, llist); 1866 1867 BUG_ON(ri->fp != frame_pointer); 1868 1869 if (ri->ret_addr != trampoline_address) { 1870 correct_ret_addr = ri->ret_addr; 1871 /* 1872 * This is the real return address. Any other 1873 * instances associated with this task are for 1874 * other calls deeper on the call stack 1875 */ 1876 goto found; 1877 } 1878 1879 node = node->next; 1880 } 1881 pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n"); 1882 BUG_ON(1); 1883 1884 found: 1885 /* Unlink all nodes for this frame. */ 1886 current->kretprobe_instances.first = node->next; 1887 node->next = NULL; 1888 1889 /* Run them.. 
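	 * Each handler observes the real return address via ri->ret_addr, and
	 * each instance is recycled to its kretprobe's freelist (or freed via
	 * RCU if the kretprobe is already being unregistered) right after its
	 * handler returns.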
	 */
	while (first) {
		ri = container_of(first, struct kretprobe_instance, llist);
		first = first->next;

		rp = get_kretprobe(ri);
		if (rp && rp->handler) {
			struct kprobe *prev = kprobe_running();

			__this_cpu_write(current_kprobe, &rp->kp);
			ri->ret_addr = correct_ret_addr;
			rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, prev);
		}

		recycle_rp_inst(ri);
	}

	return (unsigned long)correct_ret_addr;
}
NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	struct kretprobe_instance *ri;
	struct freelist_node *fn;

	fn = freelist_try_get(&rp->freelist);
	if (!fn) {
		rp->nmissed++;
		return 0;
	}

	ri = container_of(fn, struct kretprobe_instance, freelist);

	if (rp->entry_handler && rp->entry_handler(ri, regs)) {
		freelist_add(&ri->freelist, &rp->freelist);
		return 0;
	}

	arch_prepare_kretprobe(ri, regs);

	__llist_add(&ri->llist, &current->kretprobe_instances);

	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

bool __weak arch_kprobe_on_func_entry(unsigned long offset)
{
	return !offset;
}

/**
 * kprobe_on_func_entry() -- check whether the given address is a function entry
 * @addr: Target address
 * @sym: Target symbol name
 * @offset: The offset from the symbol or the address
 *
 * This checks whether the given @addr+@offset or @sym+@offset is on the
 * function entry address or not.
 * This returns 0 if it is the function entry, or -EINVAL if it is not.
 * It also returns -ENOENT if the symbol or address lookup fails.
 * The caller must pass @addr or @sym (either one must be NULL), or this
 * returns -EINVAL.
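 *
 * Example (illustrative):
 *
 *	kprobe_on_func_entry(NULL, "vfs_read", 0)	-> 0 (function entry)
 *	kprobe_on_func_entry(NULL, "vfs_read", 0x10)	-> usually -EINVAL, unless
 *							   the architecture accepts
 *							   that offset as an entry
 *	kprobe_on_func_entry(NULL, "no_such_symbol", 0)	-> -ENOENT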
1959 */ 1960 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) 1961 { 1962 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); 1963 1964 if (IS_ERR(kp_addr)) 1965 return PTR_ERR(kp_addr); 1966 1967 if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset)) 1968 return -ENOENT; 1969 1970 if (!arch_kprobe_on_func_entry(offset)) 1971 return -EINVAL; 1972 1973 return 0; 1974 } 1975 1976 int register_kretprobe(struct kretprobe *rp) 1977 { 1978 int ret; 1979 struct kretprobe_instance *inst; 1980 int i; 1981 void *addr; 1982 1983 ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset); 1984 if (ret) 1985 return ret; 1986 1987 /* If only rp->kp.addr is specified, check reregistering kprobes */ 1988 if (rp->kp.addr && warn_kprobe_rereg(&rp->kp)) 1989 return -EINVAL; 1990 1991 if (kretprobe_blacklist_size) { 1992 addr = kprobe_addr(&rp->kp); 1993 if (IS_ERR(addr)) 1994 return PTR_ERR(addr); 1995 1996 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 1997 if (kretprobe_blacklist[i].addr == addr) 1998 return -EINVAL; 1999 } 2000 } 2001 2002 rp->kp.pre_handler = pre_handler_kretprobe; 2003 rp->kp.post_handler = NULL; 2004 2005 /* Pre-allocate memory for max kretprobe instances */ 2006 if (rp->maxactive <= 0) { 2007 #ifdef CONFIG_PREEMPTION 2008 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus()); 2009 #else 2010 rp->maxactive = num_possible_cpus(); 2011 #endif 2012 } 2013 rp->freelist.head = NULL; 2014 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL); 2015 if (!rp->rph) 2016 return -ENOMEM; 2017 2018 rp->rph->rp = rp; 2019 for (i = 0; i < rp->maxactive; i++) { 2020 inst = kzalloc(sizeof(struct kretprobe_instance) + 2021 rp->data_size, GFP_KERNEL); 2022 if (inst == NULL) { 2023 refcount_set(&rp->rph->ref, i); 2024 free_rp_inst(rp); 2025 return -ENOMEM; 2026 } 2027 inst->rph = rp->rph; 2028 freelist_add(&inst->freelist, &rp->freelist); 2029 } 2030 refcount_set(&rp->rph->ref, i); 2031 2032 rp->nmissed = 0; 2033 /* Establish function entry probe point */ 2034 ret = register_kprobe(&rp->kp); 2035 if (ret != 0) 2036 free_rp_inst(rp); 2037 return ret; 2038 } 2039 EXPORT_SYMBOL_GPL(register_kretprobe); 2040 2041 int register_kretprobes(struct kretprobe **rps, int num) 2042 { 2043 int ret = 0, i; 2044 2045 if (num <= 0) 2046 return -EINVAL; 2047 for (i = 0; i < num; i++) { 2048 ret = register_kretprobe(rps[i]); 2049 if (ret < 0) { 2050 if (i > 0) 2051 unregister_kretprobes(rps, i); 2052 break; 2053 } 2054 } 2055 return ret; 2056 } 2057 EXPORT_SYMBOL_GPL(register_kretprobes); 2058 2059 void unregister_kretprobe(struct kretprobe *rp) 2060 { 2061 unregister_kretprobes(&rp, 1); 2062 } 2063 EXPORT_SYMBOL_GPL(unregister_kretprobe); 2064 2065 void unregister_kretprobes(struct kretprobe **rps, int num) 2066 { 2067 int i; 2068 2069 if (num <= 0) 2070 return; 2071 mutex_lock(&kprobe_mutex); 2072 for (i = 0; i < num; i++) { 2073 if (__unregister_kprobe_top(&rps[i]->kp) < 0) 2074 rps[i]->kp.addr = NULL; 2075 rps[i]->rph->rp = NULL; 2076 } 2077 mutex_unlock(&kprobe_mutex); 2078 2079 synchronize_rcu(); 2080 for (i = 0; i < num; i++) { 2081 if (rps[i]->kp.addr) { 2082 __unregister_kprobe_bottom(&rps[i]->kp); 2083 free_rp_inst(rps[i]); 2084 } 2085 } 2086 } 2087 EXPORT_SYMBOL_GPL(unregister_kretprobes); 2088 2089 #else /* CONFIG_KRETPROBES */ 2090 int register_kretprobe(struct kretprobe *rp) 2091 { 2092 return -ENOSYS; 2093 } 2094 EXPORT_SYMBOL_GPL(register_kretprobe); 2095 2096 int register_kretprobes(struct kretprobe 
#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	lockdep_assert_held(&kprobe_mutex);

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);

	/*
	 * The module is going away. We should disarm the kprobe which
	 * is using ftrace, because the ftrace framework is still available
	 * at the MODULE_STATE_GOING notification.
	 */
	if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
		disarm_kprobe_ftrace(p);
}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	p = __disable_kprobe(kp);
	if (IS_ERR(p))
		ret = PTR_ERR(p);

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		ret = arm_kprobe(p);
		if (ret)
			p->flags |= KPROBE_FLAG_DISABLED;
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
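/*
 * disable_kprobe()/enable_kprobe() let a probe owner pause and resume a
 * probe without tearing down its resources.  A minimal sketch; the kprobe
 * 'my_kp', its target symbol and my_pre_handler are hypothetical, not part
 * of this file:
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "vfs_read",	// example target only
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	register_kprobe(&my_kp);	// probe is armed
 *	...
 *	disable_kprobe(&my_kp);		// disarmed (unless another enabled
 *					// probe shares the address)
 *	...
 *	enable_kprobe(&my_kp);		// re-armed via arm_kprobe()
 *	...
 *	unregister_kprobe(&my_kp);	// full teardown
 *
 * Disabling is much cheaper than an unregister/register cycle because the
 * aggregated probe and its instruction slot stay in place.
 */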
/*
 * The caller must NOT call this in the usual path; it is only for
 * critical cases.
 */
void dump_kprobe(struct kprobe *kp)
{
	pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n",
	       kp->symbol_name, kp->offset, kp->addr);
}
NOKPROBE_SYMBOL(dump_kprobe);

int kprobe_add_ksym_blacklist(unsigned long entry)
{
	struct kprobe_blacklist_entry *ent;
	unsigned long offset = 0, size = 0;

	if (!kernel_text_address(entry) ||
	    !kallsyms_lookup_size_offset(entry, &size, &offset))
		return -EINVAL;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->start_addr = entry;
	ent->end_addr = entry + size;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, &kprobe_blacklist);

	return (int)size;
}

/* Add all symbols in given area into kprobe blacklist */
int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
{
	unsigned long entry;
	int ret = 0;

	for (entry = start; entry < end; entry += ret) {
		ret = kprobe_add_ksym_blacklist(entry);
		if (ret < 0)
			return ret;
		if (ret == 0)	/* In case of alias symbol */
			ret = 1;
	}
	return 0;
}

/* Remove all symbols in given area from kprobe blacklist */
static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
{
	struct kprobe_blacklist_entry *ent, *n;

	list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
		if (ent->start_addr < start || ent->start_addr >= end)
			continue;
		list_del(&ent->list);
		kfree(ent);
	}
}

static void kprobe_remove_ksym_blacklist(unsigned long entry)
{
	kprobe_remove_area_blacklist(entry, entry + 1);
}

int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
				   char *type, char *sym)
{
	return -ERANGE;
}

int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *sym)
{
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
	if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
		return 0;
#ifdef CONFIG_OPTPROBES
	if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
		return 0;
#endif
#endif
	if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
		return 0;
	return -ERANGE;
}

int __init __weak arch_populate_kprobe_blacklist(void)
{
	return 0;
}
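/*
 * Architectures override the weak hook above when they have text that the
 * generic blacklist cannot see.  A minimal sketch of such an override,
 * assuming the architecture exposes its low-level entry text through the
 * usual __entry_text_start/__entry_text_end section markers (x86 does
 * something very similar in arch/x86/kernel/kprobes/core.c):
 *
 *	int __init arch_populate_kprobe_blacklist(void)
 *	{
 *		return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
 *						 (unsigned long)__entry_text_end);
 *	}
 *
 * A negative return from the arch hook propagates out of
 * populate_kprobe_blacklist() below.
 */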
/*
 * Lookup and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to the said functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					    unsigned long *end)
{
	unsigned long entry;
	unsigned long *iter;
	int ret;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);
		ret = kprobe_add_ksym_blacklist(entry);
		if (ret == -EINVAL)
			continue;
		if (ret < 0)
			return ret;
	}

	/* Symbols in __kprobes_text are blacklisted */
	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
					(unsigned long)__kprobes_text_end);
	if (ret)
		return ret;

	/* Symbols in noinstr section are blacklisted */
	ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
					(unsigned long)__noinstr_text_end);

	return ret ? : arch_populate_kprobe_blacklist();
}

static void add_module_kprobe_blacklist(struct module *mod)
{
	unsigned long start, end;
	int i;

	if (mod->kprobe_blacklist) {
		for (i = 0; i < mod->num_kprobe_blacklist; i++)
			kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
	}

	start = (unsigned long)mod->kprobes_text_start;
	if (start) {
		end = start + mod->kprobes_text_size;
		kprobe_add_area_blacklist(start, end);
	}

	start = (unsigned long)mod->noinstr_text_start;
	if (start) {
		end = start + mod->noinstr_text_size;
		kprobe_add_area_blacklist(start, end);
	}
}

static void remove_module_kprobe_blacklist(struct module *mod)
{
	unsigned long start, end;
	int i;

	if (mod->kprobe_blacklist) {
		for (i = 0; i < mod->num_kprobe_blacklist; i++)
			kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
	}

	start = (unsigned long)mod->kprobes_text_start;
	if (start) {
		end = start + mod->kprobes_text_size;
		kprobe_remove_area_blacklist(start, end);
	}

	start = (unsigned long)mod->noinstr_text_start;
	if (start) {
		end = start + mod->noinstr_text_size;
		kprobe_remove_area_blacklist(start, end);
	}
}
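/*
 * The tables walked by populate_kprobe_blacklist() are what NOKPROBE_SYMBOL()
 * and the __kprobes annotation feed.  A hypothetical driver that must keep
 * its own low-level handler off-limits to kprobes would do:
 *
 *	static int my_nmi_like_handler(struct pt_regs *regs)
 *	{
 *		// ... code that must never be probed ...
 *		return 0;
 *	}
 *	NOKPROBE_SYMBOL(my_nmi_like_handler);	// entry lands in _kprobe_blacklist
 *
 * or, equivalently for whole functions placed in .kprobes.text:
 *
 *	static int __kprobes my_other_handler(struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 * Module-local entries are handled by add_module_kprobe_blacklist() above,
 * driven from the module notifier below at MODULE_STATE_COMING time.
 */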
2414 * 2415 * Note, this will also move any optimized probes 2416 * that are pending to be removed from their 2417 * corresponding lists to the freeing_list and 2418 * will not be touched by the delayed 2419 * kprobe_optimizer work handler. 2420 */ 2421 kill_kprobe(p); 2422 } 2423 } 2424 if (val == MODULE_STATE_GOING) 2425 remove_module_kprobe_blacklist(mod); 2426 mutex_unlock(&kprobe_mutex); 2427 return NOTIFY_DONE; 2428 } 2429 2430 static struct notifier_block kprobe_module_nb = { 2431 .notifier_call = kprobes_module_callback, 2432 .priority = 0 2433 }; 2434 2435 /* Markers of _kprobe_blacklist section */ 2436 extern unsigned long __start_kprobe_blacklist[]; 2437 extern unsigned long __stop_kprobe_blacklist[]; 2438 2439 void kprobe_free_init_mem(void) 2440 { 2441 void *start = (void *)(&__init_begin); 2442 void *end = (void *)(&__init_end); 2443 struct hlist_head *head; 2444 struct kprobe *p; 2445 int i; 2446 2447 mutex_lock(&kprobe_mutex); 2448 2449 /* Kill all kprobes on initmem */ 2450 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2451 head = &kprobe_table[i]; 2452 hlist_for_each_entry(p, head, hlist) { 2453 if (start <= (void *)p->addr && (void *)p->addr < end) 2454 kill_kprobe(p); 2455 } 2456 } 2457 2458 mutex_unlock(&kprobe_mutex); 2459 } 2460 2461 static int __init init_kprobes(void) 2462 { 2463 int i, err = 0; 2464 2465 /* FIXME allocate the probe table, currently defined statically */ 2466 /* initialize all list heads */ 2467 for (i = 0; i < KPROBE_TABLE_SIZE; i++) 2468 INIT_HLIST_HEAD(&kprobe_table[i]); 2469 2470 err = populate_kprobe_blacklist(__start_kprobe_blacklist, 2471 __stop_kprobe_blacklist); 2472 if (err) { 2473 pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err); 2474 } 2475 2476 if (kretprobe_blacklist_size) { 2477 /* lookup the function address from its name */ 2478 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 2479 kretprobe_blacklist[i].addr = 2480 kprobe_lookup_name(kretprobe_blacklist[i].name, 0); 2481 if (!kretprobe_blacklist[i].addr) 2482 pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n", 2483 kretprobe_blacklist[i].name); 2484 } 2485 } 2486 2487 /* By default, kprobes are armed */ 2488 kprobes_all_disarmed = false; 2489 2490 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT) 2491 /* Init kprobe_optinsn_slots for allocation */ 2492 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; 2493 #endif 2494 2495 err = arch_init_kprobes(); 2496 if (!err) 2497 err = register_die_notifier(&kprobe_exceptions_nb); 2498 if (!err) 2499 err = register_module_notifier(&kprobe_module_nb); 2500 2501 kprobes_initialized = (err == 0); 2502 2503 if (!err) 2504 init_test_probes(); 2505 return err; 2506 } 2507 early_initcall(init_kprobes); 2508 2509 #if defined(CONFIG_OPTPROBES) 2510 static int __init init_optprobes(void) 2511 { 2512 /* 2513 * Enable kprobe optimization - this kicks the optimizer which 2514 * depends on synchronize_rcu_tasks() and ksoftirqd, that is 2515 * not spawned in early initcall. So delay the optimization. 
2516 */ 2517 optimize_all_kprobes(); 2518 2519 return 0; 2520 } 2521 subsys_initcall(init_optprobes); 2522 #endif 2523 2524 #ifdef CONFIG_DEBUG_FS 2525 static void report_probe(struct seq_file *pi, struct kprobe *p, 2526 const char *sym, int offset, char *modname, struct kprobe *pp) 2527 { 2528 char *kprobe_type; 2529 void *addr = p->addr; 2530 2531 if (p->pre_handler == pre_handler_kretprobe) 2532 kprobe_type = "r"; 2533 else 2534 kprobe_type = "k"; 2535 2536 if (!kallsyms_show_value(pi->file->f_cred)) 2537 addr = NULL; 2538 2539 if (sym) 2540 seq_printf(pi, "%px %s %s+0x%x %s ", 2541 addr, kprobe_type, sym, offset, 2542 (modname ? modname : " ")); 2543 else /* try to use %pS */ 2544 seq_printf(pi, "%px %s %pS ", 2545 addr, kprobe_type, p->addr); 2546 2547 if (!pp) 2548 pp = p; 2549 seq_printf(pi, "%s%s%s%s\n", 2550 (kprobe_gone(p) ? "[GONE]" : ""), 2551 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), 2552 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), 2553 (kprobe_ftrace(pp) ? "[FTRACE]" : "")); 2554 } 2555 2556 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) 2557 { 2558 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 2559 } 2560 2561 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 2562 { 2563 (*pos)++; 2564 if (*pos >= KPROBE_TABLE_SIZE) 2565 return NULL; 2566 return pos; 2567 } 2568 2569 static void kprobe_seq_stop(struct seq_file *f, void *v) 2570 { 2571 /* Nothing to do */ 2572 } 2573 2574 static int show_kprobe_addr(struct seq_file *pi, void *v) 2575 { 2576 struct hlist_head *head; 2577 struct kprobe *p, *kp; 2578 const char *sym = NULL; 2579 unsigned int i = *(loff_t *) v; 2580 unsigned long offset = 0; 2581 char *modname, namebuf[KSYM_NAME_LEN]; 2582 2583 head = &kprobe_table[i]; 2584 preempt_disable(); 2585 hlist_for_each_entry_rcu(p, head, hlist) { 2586 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 2587 &offset, &modname, namebuf); 2588 if (kprobe_aggrprobe(p)) { 2589 list_for_each_entry_rcu(kp, &p->list, list) 2590 report_probe(pi, kp, sym, offset, modname, p); 2591 } else 2592 report_probe(pi, p, sym, offset, modname, NULL); 2593 } 2594 preempt_enable(); 2595 return 0; 2596 } 2597 2598 static const struct seq_operations kprobes_sops = { 2599 .start = kprobe_seq_start, 2600 .next = kprobe_seq_next, 2601 .stop = kprobe_seq_stop, 2602 .show = show_kprobe_addr 2603 }; 2604 2605 DEFINE_SEQ_ATTRIBUTE(kprobes); 2606 2607 /* kprobes/blacklist -- shows which functions can not be probed */ 2608 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos) 2609 { 2610 mutex_lock(&kprobe_mutex); 2611 return seq_list_start(&kprobe_blacklist, *pos); 2612 } 2613 2614 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos) 2615 { 2616 return seq_list_next(v, &kprobe_blacklist, pos); 2617 } 2618 2619 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) 2620 { 2621 struct kprobe_blacklist_entry *ent = 2622 list_entry(v, struct kprobe_blacklist_entry, list); 2623 2624 /* 2625 * If /proc/kallsyms is not showing kernel address, we won't 2626 * show them here either. 
2627 */ 2628 if (!kallsyms_show_value(m->file->f_cred)) 2629 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, 2630 (void *)ent->start_addr); 2631 else 2632 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, 2633 (void *)ent->end_addr, (void *)ent->start_addr); 2634 return 0; 2635 } 2636 2637 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v) 2638 { 2639 mutex_unlock(&kprobe_mutex); 2640 } 2641 2642 static const struct seq_operations kprobe_blacklist_sops = { 2643 .start = kprobe_blacklist_seq_start, 2644 .next = kprobe_blacklist_seq_next, 2645 .stop = kprobe_blacklist_seq_stop, 2646 .show = kprobe_blacklist_seq_show, 2647 }; 2648 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist); 2649 2650 static int arm_all_kprobes(void) 2651 { 2652 struct hlist_head *head; 2653 struct kprobe *p; 2654 unsigned int i, total = 0, errors = 0; 2655 int err, ret = 0; 2656 2657 mutex_lock(&kprobe_mutex); 2658 2659 /* If kprobes are armed, just return */ 2660 if (!kprobes_all_disarmed) 2661 goto already_enabled; 2662 2663 /* 2664 * optimize_kprobe() called by arm_kprobe() checks 2665 * kprobes_all_disarmed, so set kprobes_all_disarmed before 2666 * arm_kprobe. 2667 */ 2668 kprobes_all_disarmed = false; 2669 /* Arming kprobes doesn't optimize kprobe itself */ 2670 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2671 head = &kprobe_table[i]; 2672 /* Arm all kprobes on a best-effort basis */ 2673 hlist_for_each_entry(p, head, hlist) { 2674 if (!kprobe_disabled(p)) { 2675 err = arm_kprobe(p); 2676 if (err) { 2677 errors++; 2678 ret = err; 2679 } 2680 total++; 2681 } 2682 } 2683 } 2684 2685 if (errors) 2686 pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n", 2687 errors, total); 2688 else 2689 pr_info("Kprobes globally enabled\n"); 2690 2691 already_enabled: 2692 mutex_unlock(&kprobe_mutex); 2693 return ret; 2694 } 2695 2696 static int disarm_all_kprobes(void) 2697 { 2698 struct hlist_head *head; 2699 struct kprobe *p; 2700 unsigned int i, total = 0, errors = 0; 2701 int err, ret = 0; 2702 2703 mutex_lock(&kprobe_mutex); 2704 2705 /* If kprobes are already disarmed, just return */ 2706 if (kprobes_all_disarmed) { 2707 mutex_unlock(&kprobe_mutex); 2708 return 0; 2709 } 2710 2711 kprobes_all_disarmed = true; 2712 2713 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2714 head = &kprobe_table[i]; 2715 /* Disarm all kprobes on a best-effort basis */ 2716 hlist_for_each_entry(p, head, hlist) { 2717 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { 2718 err = disarm_kprobe(p, false); 2719 if (err) { 2720 errors++; 2721 ret = err; 2722 } 2723 total++; 2724 } 2725 } 2726 } 2727 2728 if (errors) 2729 pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n", 2730 errors, total); 2731 else 2732 pr_info("Kprobes globally disabled\n"); 2733 2734 mutex_unlock(&kprobe_mutex); 2735 2736 /* Wait for disarming all kprobes by optimizer */ 2737 wait_for_kprobe_optimizer(); 2738 2739 return ret; 2740 } 2741 2742 /* 2743 * XXX: The debugfs bool file interface doesn't allow for callbacks 2744 * when the bool state is switched. 
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	bool enable;
	int ret;

	ret = kstrtobool_from_user(user_buf, count, &enable);
	if (ret)
		return ret;

	ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
	if (ret)
		return ret;

	return count;
}

static const struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
	.llseek =	default_llseek,
};

static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir;

	dir = debugfs_create_dir("kprobes", NULL);

	debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);

	debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);

	debugfs_create_file("blacklist", 0400, dir, NULL,
			    &kprobe_blacklist_fops);

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */