1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Kernel Probes (KProbes) 4 * kernel/kprobes.c 5 * 6 * Copyright (C) IBM Corporation, 2002, 2004 7 * 8 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel 9 * Probes initial implementation (includes suggestions from 10 * Rusty Russell). 11 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with 12 * hlists and exceptions notifier as suggested by Andi Kleen. 13 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes 14 * interface to access function arguments. 15 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes 16 * exceptions notifier to be first on the priority list. 17 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston 18 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi 19 * <prasanna@in.ibm.com> added function-return probes. 20 */ 21 #include <linux/kprobes.h> 22 #include <linux/hash.h> 23 #include <linux/init.h> 24 #include <linux/slab.h> 25 #include <linux/stddef.h> 26 #include <linux/export.h> 27 #include <linux/moduleloader.h> 28 #include <linux/kallsyms.h> 29 #include <linux/freezer.h> 30 #include <linux/seq_file.h> 31 #include <linux/debugfs.h> 32 #include <linux/sysctl.h> 33 #include <linux/kdebug.h> 34 #include <linux/memory.h> 35 #include <linux/ftrace.h> 36 #include <linux/cpu.h> 37 #include <linux/jump_label.h> 38 #include <linux/static_call.h> 39 #include <linux/perf_event.h> 40 41 #include <asm/sections.h> 42 #include <asm/cacheflush.h> 43 #include <asm/errno.h> 44 #include <linux/uaccess.h> 45 46 #define KPROBE_HASH_BITS 6 47 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) 48 49 50 static int kprobes_initialized; 51 /* kprobe_table can be accessed by 52 * - Normal hlist traversal and RCU add/del under kprobe_mutex is held. 53 * Or 54 * - RCU hlist traversal under disabling preempt (breakpoint handlers) 55 */ 56 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 57 58 /* NOTE: change this value only with kprobe_mutex held */ 59 static bool kprobes_all_disarmed; 60 61 /* This protects kprobe_table and optimizing_list */ 62 static DEFINE_MUTEX(kprobe_mutex); 63 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 64 65 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name, 66 unsigned int __unused) 67 { 68 return ((kprobe_opcode_t *)(kallsyms_lookup_name(name))); 69 } 70 71 /* Blacklist -- list of struct kprobe_blacklist_entry */ 72 static LIST_HEAD(kprobe_blacklist); 73 74 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT 75 /* 76 * kprobe->ainsn.insn points to the copy of the instruction to be 77 * single-stepped. 
x86_64, POWER4 and above have no-exec support and 78 * stepping on the instruction on a vmalloced/kmalloced/data page 79 * is a recipe for disaster 80 */ 81 struct kprobe_insn_page { 82 struct list_head list; 83 kprobe_opcode_t *insns; /* Page of instruction slots */ 84 struct kprobe_insn_cache *cache; 85 int nused; 86 int ngarbage; 87 char slot_used[]; 88 }; 89 90 #define KPROBE_INSN_PAGE_SIZE(slots) \ 91 (offsetof(struct kprobe_insn_page, slot_used) + \ 92 (sizeof(char) * (slots))) 93 94 static int slots_per_page(struct kprobe_insn_cache *c) 95 { 96 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); 97 } 98 99 enum kprobe_slot_state { 100 SLOT_CLEAN = 0, 101 SLOT_DIRTY = 1, 102 SLOT_USED = 2, 103 }; 104 105 void __weak *alloc_insn_page(void) 106 { 107 return module_alloc(PAGE_SIZE); 108 } 109 110 static void free_insn_page(void *page) 111 { 112 module_memfree(page); 113 } 114 115 struct kprobe_insn_cache kprobe_insn_slots = { 116 .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), 117 .alloc = alloc_insn_page, 118 .free = free_insn_page, 119 .sym = KPROBE_INSN_PAGE_SYM, 120 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), 121 .insn_size = MAX_INSN_SIZE, 122 .nr_garbage = 0, 123 }; 124 static int collect_garbage_slots(struct kprobe_insn_cache *c); 125 126 /** 127 * __get_insn_slot() - Find a slot on an executable page for an instruction. 128 * We allocate an executable page if there's no room on existing ones. 129 */ 130 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) 131 { 132 struct kprobe_insn_page *kip; 133 kprobe_opcode_t *slot = NULL; 134 135 /* Since the slot array is not protected by rcu, we need a mutex */ 136 mutex_lock(&c->mutex); 137 retry: 138 rcu_read_lock(); 139 list_for_each_entry_rcu(kip, &c->pages, list) { 140 if (kip->nused < slots_per_page(c)) { 141 int i; 142 for (i = 0; i < slots_per_page(c); i++) { 143 if (kip->slot_used[i] == SLOT_CLEAN) { 144 kip->slot_used[i] = SLOT_USED; 145 kip->nused++; 146 slot = kip->insns + (i * c->insn_size); 147 rcu_read_unlock(); 148 goto out; 149 } 150 } 151 /* kip->nused is broken. Fix it. */ 152 kip->nused = slots_per_page(c); 153 WARN_ON(1); 154 } 155 } 156 rcu_read_unlock(); 157 158 /* If there are any garbage slots, collect it and try again. */ 159 if (c->nr_garbage && collect_garbage_slots(c) == 0) 160 goto retry; 161 162 /* All out of space. Need to allocate a new page. */ 163 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); 164 if (!kip) 165 goto out; 166 167 /* 168 * Use module_alloc so this page is within +/- 2GB of where the 169 * kernel image and loaded module images reside. This is required 170 * so x86_64 can correctly handle the %rip-relative fixups. 171 */ 172 kip->insns = c->alloc(); 173 if (!kip->insns) { 174 kfree(kip); 175 goto out; 176 } 177 INIT_LIST_HEAD(&kip->list); 178 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); 179 kip->slot_used[0] = SLOT_USED; 180 kip->nused = 1; 181 kip->ngarbage = 0; 182 kip->cache = c; 183 list_add_rcu(&kip->list, &c->pages); 184 slot = kip->insns; 185 186 /* Record the perf ksymbol register event after adding the page */ 187 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, 188 PAGE_SIZE, false, c->sym); 189 out: 190 mutex_unlock(&c->mutex); 191 return slot; 192 } 193 194 /* Return 1 if all garbages are collected, otherwise 0. 
*/ 195 static int collect_one_slot(struct kprobe_insn_page *kip, int idx) 196 { 197 kip->slot_used[idx] = SLOT_CLEAN; 198 kip->nused--; 199 if (kip->nused == 0) { 200 /* 201 * Page is no longer in use. Free it unless 202 * it's the last one. We keep the last one 203 * so as not to have to set it up again the 204 * next time somebody inserts a probe. 205 */ 206 if (!list_is_singular(&kip->list)) { 207 /* 208 * Record perf ksymbol unregister event before removing 209 * the page. 210 */ 211 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 212 (unsigned long)kip->insns, PAGE_SIZE, true, 213 kip->cache->sym); 214 list_del_rcu(&kip->list); 215 synchronize_rcu(); 216 kip->cache->free(kip->insns); 217 kfree(kip); 218 } 219 return 1; 220 } 221 return 0; 222 } 223 224 static int collect_garbage_slots(struct kprobe_insn_cache *c) 225 { 226 struct kprobe_insn_page *kip, *next; 227 228 /* Ensure no-one is interrupted on the garbages */ 229 synchronize_rcu(); 230 231 list_for_each_entry_safe(kip, next, &c->pages, list) { 232 int i; 233 if (kip->ngarbage == 0) 234 continue; 235 kip->ngarbage = 0; /* we will collect all garbages */ 236 for (i = 0; i < slots_per_page(c); i++) { 237 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i)) 238 break; 239 } 240 } 241 c->nr_garbage = 0; 242 return 0; 243 } 244 245 void __free_insn_slot(struct kprobe_insn_cache *c, 246 kprobe_opcode_t *slot, int dirty) 247 { 248 struct kprobe_insn_page *kip; 249 long idx; 250 251 mutex_lock(&c->mutex); 252 rcu_read_lock(); 253 list_for_each_entry_rcu(kip, &c->pages, list) { 254 idx = ((long)slot - (long)kip->insns) / 255 (c->insn_size * sizeof(kprobe_opcode_t)); 256 if (idx >= 0 && idx < slots_per_page(c)) 257 goto out; 258 } 259 /* Could not find this slot. */ 260 WARN_ON(1); 261 kip = NULL; 262 out: 263 rcu_read_unlock(); 264 /* Mark and sweep: this may sleep */ 265 if (kip) { 266 /* Check double free */ 267 WARN_ON(kip->slot_used[idx] != SLOT_USED); 268 if (dirty) { 269 kip->slot_used[idx] = SLOT_DIRTY; 270 kip->ngarbage++; 271 if (++c->nr_garbage > slots_per_page(c)) 272 collect_garbage_slots(c); 273 } else { 274 collect_one_slot(kip, idx); 275 } 276 } 277 mutex_unlock(&c->mutex); 278 } 279 280 /* 281 * Check given address is on the page of kprobe instruction slots. 282 * This will be used for checking whether the address on a stack 283 * is on a text area or not. 
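 * For example, kernel_text_address() can use this (via is_kprobe_insn_slot())
 * so that a return address that sits in an instruction slot is still treated
 * as kernel text, even though it lies outside the core kernel and module text.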
284 */ 285 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) 286 { 287 struct kprobe_insn_page *kip; 288 bool ret = false; 289 290 rcu_read_lock(); 291 list_for_each_entry_rcu(kip, &c->pages, list) { 292 if (addr >= (unsigned long)kip->insns && 293 addr < (unsigned long)kip->insns + PAGE_SIZE) { 294 ret = true; 295 break; 296 } 297 } 298 rcu_read_unlock(); 299 300 return ret; 301 } 302 303 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, 304 unsigned long *value, char *type, char *sym) 305 { 306 struct kprobe_insn_page *kip; 307 int ret = -ERANGE; 308 309 rcu_read_lock(); 310 list_for_each_entry_rcu(kip, &c->pages, list) { 311 if ((*symnum)--) 312 continue; 313 strlcpy(sym, c->sym, KSYM_NAME_LEN); 314 *type = 't'; 315 *value = (unsigned long)kip->insns; 316 ret = 0; 317 break; 318 } 319 rcu_read_unlock(); 320 321 return ret; 322 } 323 324 #ifdef CONFIG_OPTPROBES 325 void __weak *alloc_optinsn_page(void) 326 { 327 return alloc_insn_page(); 328 } 329 330 void __weak free_optinsn_page(void *page) 331 { 332 free_insn_page(page); 333 } 334 335 /* For optimized_kprobe buffer */ 336 struct kprobe_insn_cache kprobe_optinsn_slots = { 337 .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), 338 .alloc = alloc_optinsn_page, 339 .free = free_optinsn_page, 340 .sym = KPROBE_OPTINSN_PAGE_SYM, 341 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), 342 /* .insn_size is initialized later */ 343 .nr_garbage = 0, 344 }; 345 #endif 346 #endif 347 348 /* We have preemption disabled.. so it is safe to use __ versions */ 349 static inline void set_kprobe_instance(struct kprobe *kp) 350 { 351 __this_cpu_write(kprobe_instance, kp); 352 } 353 354 static inline void reset_kprobe_instance(void) 355 { 356 __this_cpu_write(kprobe_instance, NULL); 357 } 358 359 /* 360 * This routine is called either: 361 * - under the kprobe_mutex - during kprobe_[un]register() 362 * OR 363 * - with preemption disabled - from arch/xxx/kernel/kprobes.c 364 */ 365 struct kprobe *get_kprobe(void *addr) 366 { 367 struct hlist_head *head; 368 struct kprobe *p; 369 370 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; 371 hlist_for_each_entry_rcu(p, head, hlist, 372 lockdep_is_held(&kprobe_mutex)) { 373 if (p->addr == addr) 374 return p; 375 } 376 377 return NULL; 378 } 379 NOKPROBE_SYMBOL(get_kprobe); 380 381 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); 382 383 /* Return true if the kprobe is an aggregator */ 384 static inline int kprobe_aggrprobe(struct kprobe *p) 385 { 386 return p->pre_handler == aggr_pre_handler; 387 } 388 389 /* Return true(!0) if the kprobe is unused */ 390 static inline int kprobe_unused(struct kprobe *p) 391 { 392 return kprobe_aggrprobe(p) && kprobe_disabled(p) && 393 list_empty(&p->list); 394 } 395 396 /* 397 * Keep all fields in the kprobe consistent 398 */ 399 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) 400 { 401 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); 402 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); 403 } 404 405 #ifdef CONFIG_OPTPROBES 406 /* NOTE: change this value only with kprobe_mutex held */ 407 static bool kprobes_allow_optimization; 408 409 /* 410 * Call all pre_handler on the list, but ignores its return value. 411 * This must be called from arch-dep optimized caller. 
412 */ 413 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) 414 { 415 struct kprobe *kp; 416 417 list_for_each_entry_rcu(kp, &p->list, list) { 418 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 419 set_kprobe_instance(kp); 420 kp->pre_handler(kp, regs); 421 } 422 reset_kprobe_instance(); 423 } 424 } 425 NOKPROBE_SYMBOL(opt_pre_handler); 426 427 /* Free optimized instructions and optimized_kprobe */ 428 static void free_aggr_kprobe(struct kprobe *p) 429 { 430 struct optimized_kprobe *op; 431 432 op = container_of(p, struct optimized_kprobe, kp); 433 arch_remove_optimized_kprobe(op); 434 arch_remove_kprobe(p); 435 kfree(op); 436 } 437 438 /* Return true(!0) if the kprobe is ready for optimization. */ 439 static inline int kprobe_optready(struct kprobe *p) 440 { 441 struct optimized_kprobe *op; 442 443 if (kprobe_aggrprobe(p)) { 444 op = container_of(p, struct optimized_kprobe, kp); 445 return arch_prepared_optinsn(&op->optinsn); 446 } 447 448 return 0; 449 } 450 451 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */ 452 static inline int kprobe_disarmed(struct kprobe *p) 453 { 454 struct optimized_kprobe *op; 455 456 /* If kprobe is not aggr/opt probe, just return kprobe is disabled */ 457 if (!kprobe_aggrprobe(p)) 458 return kprobe_disabled(p); 459 460 op = container_of(p, struct optimized_kprobe, kp); 461 462 return kprobe_disabled(p) && list_empty(&op->list); 463 } 464 465 /* Return true(!0) if the probe is queued on (un)optimizing lists */ 466 static int kprobe_queued(struct kprobe *p) 467 { 468 struct optimized_kprobe *op; 469 470 if (kprobe_aggrprobe(p)) { 471 op = container_of(p, struct optimized_kprobe, kp); 472 if (!list_empty(&op->list)) 473 return 1; 474 } 475 return 0; 476 } 477 478 /* 479 * Return an optimized kprobe whose optimizing code replaces 480 * instructions including addr (exclude breakpoint). 481 */ 482 static struct kprobe *get_optimized_kprobe(unsigned long addr) 483 { 484 int i; 485 struct kprobe *p = NULL; 486 struct optimized_kprobe *op; 487 488 /* Don't check i == 0, since that is a breakpoint case. */ 489 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++) 490 p = get_kprobe((void *)(addr - i)); 491 492 if (p && kprobe_optready(p)) { 493 op = container_of(p, struct optimized_kprobe, kp); 494 if (arch_within_optimized_kprobe(op, addr)) 495 return p; 496 } 497 498 return NULL; 499 } 500 501 /* Optimization staging list, protected by kprobe_mutex */ 502 static LIST_HEAD(optimizing_list); 503 static LIST_HEAD(unoptimizing_list); 504 static LIST_HEAD(freeing_list); 505 506 static void kprobe_optimizer(struct work_struct *work); 507 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 508 #define OPTIMIZE_DELAY 5 509 510 /* 511 * Optimize (replace a breakpoint with a jump) kprobes listed on 512 * optimizing_list. 513 */ 514 static void do_optimize_kprobes(void) 515 { 516 lockdep_assert_held(&text_mutex); 517 /* 518 * The optimization/unoptimization refers online_cpus via 519 * stop_machine() and cpu-hotplug modifies online_cpus. 520 * And same time, text_mutex will be held in cpu-hotplug and here. 521 * This combination can cause a deadlock (cpu-hotplug try to lock 522 * text_mutex but stop_machine can not be done because online_cpus 523 * has been changed) 524 * To avoid this deadlock, caller must have locked cpu hotplug 525 * for preventing cpu-hotplug outside of text_mutex locking. 
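	 *
	 * For example, kprobe_optimizer() below takes the locks in this
	 * order before getting here:
	 *	mutex_lock(&kprobe_mutex);
	 *	cpus_read_lock();
	 *	mutex_lock(&text_mutex);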
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done while kprobes are disarmed. */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	arch_optimize_kprobes(&optimizing_list);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	lockdep_assert_held(&text_mutex);
	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done even while kprobes are disarmed. */
	if (list_empty(&unoptimizing_list))
		return;

	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over the freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Switching from detour code to origin */
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
}

/* Reclaim all kprobes on the freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on the kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure that all potentially
	 * preempted tasks have been scheduled normally. Because an optprobe
	 * may modify multiple instructions, there is a chance that the Nth
	 * instruction is preempted. In that case, such tasks can return
	 * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
	 * Note that on a non-preemptive kernel, this is transparently converted
	 * to synchronize_sched() to wait for all interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();

	mutex_unlock(&kprobe_mutex);
}

/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* This will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

static bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
	struct optimized_kprobe *_op;

	list_for_each_entry(_op, &unoptimizing_list, list) {
		if (op == _op)
			return true;
	}

	return false;
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* kprobes with a post_handler cannot be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
		if (optprobe_queued_unopt(op)) {
			/* This is under unoptimizing. Just dequeue the probe */
			list_del_init(&op->list);
		}
		return;
	}
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	/* On the unoptimizing/optimizing_list, op must have the OPTIMIZED flag */
	if (WARN_ON_ONCE(!list_empty(&op->list)))
		return;

	list_add(&op->list, &optimizing_list);
	kick_kprobe_optimizer();
}

/* Shortcut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is neither an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p))
		return;

	if (!list_empty(&op->list)) {
		if (optprobe_queued_unopt(op)) {
			/* Queued in the unoptimizing queue */
			if (force) {
				/*
				 * Forcibly unoptimize the kprobe here, and queue it
				 * in the freeing list for release afterwards.
733 */ 734 force_unoptimize_kprobe(op); 735 list_move(&op->list, &freeing_list); 736 } 737 } else { 738 /* Dequeue from the optimizing queue */ 739 list_del_init(&op->list); 740 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 741 } 742 return; 743 } 744 745 /* Optimized kprobe case */ 746 if (force) { 747 /* Forcibly update the code: this is a special case */ 748 force_unoptimize_kprobe(op); 749 } else { 750 list_add(&op->list, &unoptimizing_list); 751 kick_kprobe_optimizer(); 752 } 753 } 754 755 /* Cancel unoptimizing for reusing */ 756 static int reuse_unused_kprobe(struct kprobe *ap) 757 { 758 struct optimized_kprobe *op; 759 760 /* 761 * Unused kprobe MUST be on the way of delayed unoptimizing (means 762 * there is still a relative jump) and disabled. 763 */ 764 op = container_of(ap, struct optimized_kprobe, kp); 765 WARN_ON_ONCE(list_empty(&op->list)); 766 /* Enable the probe again */ 767 ap->flags &= ~KPROBE_FLAG_DISABLED; 768 /* Optimize it again (remove from op->list) */ 769 if (!kprobe_optready(ap)) 770 return -EINVAL; 771 772 optimize_kprobe(ap); 773 return 0; 774 } 775 776 /* Remove optimized instructions */ 777 static void kill_optimized_kprobe(struct kprobe *p) 778 { 779 struct optimized_kprobe *op; 780 781 op = container_of(p, struct optimized_kprobe, kp); 782 if (!list_empty(&op->list)) 783 /* Dequeue from the (un)optimization queue */ 784 list_del_init(&op->list); 785 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 786 787 if (kprobe_unused(p)) { 788 /* Enqueue if it is unused */ 789 list_add(&op->list, &freeing_list); 790 /* 791 * Remove unused probes from the hash list. After waiting 792 * for synchronization, this probe is reclaimed. 793 * (reclaiming is done by do_free_cleaned_kprobes().) 794 */ 795 hlist_del_rcu(&op->kp.hlist); 796 } 797 798 /* Don't touch the code, because it is already freed. 
*/ 799 arch_remove_optimized_kprobe(op); 800 } 801 802 static inline 803 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) 804 { 805 if (!kprobe_ftrace(p)) 806 arch_prepare_optimized_kprobe(op, p); 807 } 808 809 /* Try to prepare optimized instructions */ 810 static void prepare_optimized_kprobe(struct kprobe *p) 811 { 812 struct optimized_kprobe *op; 813 814 op = container_of(p, struct optimized_kprobe, kp); 815 __prepare_optimized_kprobe(op, p); 816 } 817 818 /* Allocate new optimized_kprobe and try to prepare optimized instructions */ 819 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 820 { 821 struct optimized_kprobe *op; 822 823 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); 824 if (!op) 825 return NULL; 826 827 INIT_LIST_HEAD(&op->list); 828 op->kp.addr = p->addr; 829 __prepare_optimized_kprobe(op, p); 830 831 return &op->kp; 832 } 833 834 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); 835 836 /* 837 * Prepare an optimized_kprobe and optimize it 838 * NOTE: p must be a normal registered kprobe 839 */ 840 static void try_to_optimize_kprobe(struct kprobe *p) 841 { 842 struct kprobe *ap; 843 struct optimized_kprobe *op; 844 845 /* Impossible to optimize ftrace-based kprobe */ 846 if (kprobe_ftrace(p)) 847 return; 848 849 /* For preparing optimization, jump_label_text_reserved() is called */ 850 cpus_read_lock(); 851 jump_label_lock(); 852 mutex_lock(&text_mutex); 853 854 ap = alloc_aggr_kprobe(p); 855 if (!ap) 856 goto out; 857 858 op = container_of(ap, struct optimized_kprobe, kp); 859 if (!arch_prepared_optinsn(&op->optinsn)) { 860 /* If failed to setup optimizing, fallback to kprobe */ 861 arch_remove_optimized_kprobe(op); 862 kfree(op); 863 goto out; 864 } 865 866 init_aggr_kprobe(ap, p); 867 optimize_kprobe(ap); /* This just kicks optimizer thread */ 868 869 out: 870 mutex_unlock(&text_mutex); 871 jump_label_unlock(); 872 cpus_read_unlock(); 873 } 874 875 static void optimize_all_kprobes(void) 876 { 877 struct hlist_head *head; 878 struct kprobe *p; 879 unsigned int i; 880 881 mutex_lock(&kprobe_mutex); 882 /* If optimization is already allowed, just return */ 883 if (kprobes_allow_optimization) 884 goto out; 885 886 cpus_read_lock(); 887 kprobes_allow_optimization = true; 888 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 889 head = &kprobe_table[i]; 890 hlist_for_each_entry(p, head, hlist) 891 if (!kprobe_disabled(p)) 892 optimize_kprobe(p); 893 } 894 cpus_read_unlock(); 895 printk(KERN_INFO "Kprobes globally optimized\n"); 896 out: 897 mutex_unlock(&kprobe_mutex); 898 } 899 900 #ifdef CONFIG_SYSCTL 901 static void unoptimize_all_kprobes(void) 902 { 903 struct hlist_head *head; 904 struct kprobe *p; 905 unsigned int i; 906 907 mutex_lock(&kprobe_mutex); 908 /* If optimization is already prohibited, just return */ 909 if (!kprobes_allow_optimization) { 910 mutex_unlock(&kprobe_mutex); 911 return; 912 } 913 914 cpus_read_lock(); 915 kprobes_allow_optimization = false; 916 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 917 head = &kprobe_table[i]; 918 hlist_for_each_entry(p, head, hlist) { 919 if (!kprobe_disabled(p)) 920 unoptimize_kprobe(p, false); 921 } 922 } 923 cpus_read_unlock(); 924 mutex_unlock(&kprobe_mutex); 925 926 /* Wait for unoptimizing completion */ 927 wait_for_kprobe_optimizer(); 928 printk(KERN_INFO "Kprobes globally unoptimized\n"); 929 } 930 931 static DEFINE_MUTEX(kprobe_sysctl_mutex); 932 int sysctl_kprobes_optimization; 933 int proc_kprobes_optimization_handler(struct ctl_table *table, int write, 
934 void *buffer, size_t *length, 935 loff_t *ppos) 936 { 937 int ret; 938 939 mutex_lock(&kprobe_sysctl_mutex); 940 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; 941 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 942 943 if (sysctl_kprobes_optimization) 944 optimize_all_kprobes(); 945 else 946 unoptimize_all_kprobes(); 947 mutex_unlock(&kprobe_sysctl_mutex); 948 949 return ret; 950 } 951 #endif /* CONFIG_SYSCTL */ 952 953 /* Put a breakpoint for a probe. Must be called with text_mutex locked */ 954 static void __arm_kprobe(struct kprobe *p) 955 { 956 struct kprobe *_p; 957 958 /* Check collision with other optimized kprobes */ 959 _p = get_optimized_kprobe((unsigned long)p->addr); 960 if (unlikely(_p)) 961 /* Fallback to unoptimized kprobe */ 962 unoptimize_kprobe(_p, true); 963 964 arch_arm_kprobe(p); 965 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ 966 } 967 968 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */ 969 static void __disarm_kprobe(struct kprobe *p, bool reopt) 970 { 971 struct kprobe *_p; 972 973 /* Try to unoptimize */ 974 unoptimize_kprobe(p, kprobes_all_disarmed); 975 976 if (!kprobe_queued(p)) { 977 arch_disarm_kprobe(p); 978 /* If another kprobe was blocked, optimize it. */ 979 _p = get_optimized_kprobe((unsigned long)p->addr); 980 if (unlikely(_p) && reopt) 981 optimize_kprobe(_p); 982 } 983 /* TODO: reoptimize others after unoptimized this probe */ 984 } 985 986 #else /* !CONFIG_OPTPROBES */ 987 988 #define optimize_kprobe(p) do {} while (0) 989 #define unoptimize_kprobe(p, f) do {} while (0) 990 #define kill_optimized_kprobe(p) do {} while (0) 991 #define prepare_optimized_kprobe(p) do {} while (0) 992 #define try_to_optimize_kprobe(p) do {} while (0) 993 #define __arm_kprobe(p) arch_arm_kprobe(p) 994 #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) 995 #define kprobe_disarmed(p) kprobe_disabled(p) 996 #define wait_for_kprobe_optimizer() do {} while (0) 997 998 static int reuse_unused_kprobe(struct kprobe *ap) 999 { 1000 /* 1001 * If the optimized kprobe is NOT supported, the aggr kprobe is 1002 * released at the same time that the last aggregated kprobe is 1003 * unregistered. 1004 * Thus there should be no chance to reuse unused kprobe. 
	 */
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;

/* Caller must lock kprobe_mutex */
static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
{
	int ret = 0;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
	if (ret) {
		pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
			 p->addr, ret);
		return ret;
	}

	if (*cnt == 0) {
		ret = register_ftrace_function(ops);
		if (ret) {
			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
			goto err_ftrace;
		}
	}

	(*cnt)++;
	return ret;

err_ftrace:
	/*
	 * At this point, since ops is not registered, we should be safe from
	 * registering an empty filter.
	 */
	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

static int arm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __arm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}

/* Caller must lock kprobe_mutex */
static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
				  int *cnt)
{
	int ret = 0;

	if (*cnt == 1) {
		ret = unregister_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
			return ret;
	}

	(*cnt)--;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
		  p->addr, ret);
	return ret;
}

static int disarm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __disarm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ?
&kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); 1105 } 1106 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1107 static inline int arm_kprobe_ftrace(struct kprobe *p) 1108 { 1109 return -ENODEV; 1110 } 1111 1112 static inline int disarm_kprobe_ftrace(struct kprobe *p) 1113 { 1114 return -ENODEV; 1115 } 1116 #endif 1117 1118 static int prepare_kprobe(struct kprobe *p) 1119 { 1120 /* Must ensure p->addr is really on ftrace */ 1121 if (kprobe_ftrace(p)) 1122 return arch_prepare_kprobe_ftrace(p); 1123 1124 return arch_prepare_kprobe(p); 1125 } 1126 1127 /* Arm a kprobe with text_mutex */ 1128 static int arm_kprobe(struct kprobe *kp) 1129 { 1130 if (unlikely(kprobe_ftrace(kp))) 1131 return arm_kprobe_ftrace(kp); 1132 1133 cpus_read_lock(); 1134 mutex_lock(&text_mutex); 1135 __arm_kprobe(kp); 1136 mutex_unlock(&text_mutex); 1137 cpus_read_unlock(); 1138 1139 return 0; 1140 } 1141 1142 /* Disarm a kprobe with text_mutex */ 1143 static int disarm_kprobe(struct kprobe *kp, bool reopt) 1144 { 1145 if (unlikely(kprobe_ftrace(kp))) 1146 return disarm_kprobe_ftrace(kp); 1147 1148 cpus_read_lock(); 1149 mutex_lock(&text_mutex); 1150 __disarm_kprobe(kp, reopt); 1151 mutex_unlock(&text_mutex); 1152 cpus_read_unlock(); 1153 1154 return 0; 1155 } 1156 1157 /* 1158 * Aggregate handlers for multiple kprobes support - these handlers 1159 * take care of invoking the individual kprobe handlers on p->list 1160 */ 1161 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 1162 { 1163 struct kprobe *kp; 1164 1165 list_for_each_entry_rcu(kp, &p->list, list) { 1166 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 1167 set_kprobe_instance(kp); 1168 if (kp->pre_handler(kp, regs)) 1169 return 1; 1170 } 1171 reset_kprobe_instance(); 1172 } 1173 return 0; 1174 } 1175 NOKPROBE_SYMBOL(aggr_pre_handler); 1176 1177 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 1178 unsigned long flags) 1179 { 1180 struct kprobe *kp; 1181 1182 list_for_each_entry_rcu(kp, &p->list, list) { 1183 if (kp->post_handler && likely(!kprobe_disabled(kp))) { 1184 set_kprobe_instance(kp); 1185 kp->post_handler(kp, regs, flags); 1186 reset_kprobe_instance(); 1187 } 1188 } 1189 } 1190 NOKPROBE_SYMBOL(aggr_post_handler); 1191 1192 /* Walks the list and increments nmissed count for multiprobe case */ 1193 void kprobes_inc_nmissed_count(struct kprobe *p) 1194 { 1195 struct kprobe *kp; 1196 if (!kprobe_aggrprobe(p)) { 1197 p->nmissed++; 1198 } else { 1199 list_for_each_entry_rcu(kp, &p->list, list) 1200 kp->nmissed++; 1201 } 1202 return; 1203 } 1204 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); 1205 1206 static void free_rp_inst_rcu(struct rcu_head *head) 1207 { 1208 struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu); 1209 1210 if (refcount_dec_and_test(&ri->rph->ref)) 1211 kfree(ri->rph); 1212 kfree(ri); 1213 } 1214 NOKPROBE_SYMBOL(free_rp_inst_rcu); 1215 1216 static void recycle_rp_inst(struct kretprobe_instance *ri) 1217 { 1218 struct kretprobe *rp = get_kretprobe(ri); 1219 1220 if (likely(rp)) { 1221 freelist_add(&ri->freelist, &rp->freelist); 1222 } else 1223 call_rcu(&ri->rcu, free_rp_inst_rcu); 1224 } 1225 NOKPROBE_SYMBOL(recycle_rp_inst); 1226 1227 static struct kprobe kprobe_busy = { 1228 .addr = (void *) get_kprobe, 1229 }; 1230 1231 void kprobe_busy_begin(void) 1232 { 1233 struct kprobe_ctlblk *kcb; 1234 1235 preempt_disable(); 1236 __this_cpu_write(current_kprobe, &kprobe_busy); 1237 kcb = get_kprobe_ctlblk(); 1238 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 1239 } 1240 1241 void 
kprobe_busy_end(void) 1242 { 1243 __this_cpu_write(current_kprobe, NULL); 1244 preempt_enable(); 1245 } 1246 1247 /* 1248 * This function is called from finish_task_switch when task tk becomes dead, 1249 * so that we can recycle any function-return probe instances associated 1250 * with this task. These left over instances represent probed functions 1251 * that have been called but will never return. 1252 */ 1253 void kprobe_flush_task(struct task_struct *tk) 1254 { 1255 struct kretprobe_instance *ri; 1256 struct llist_node *node; 1257 1258 /* Early boot, not yet initialized. */ 1259 if (unlikely(!kprobes_initialized)) 1260 return; 1261 1262 kprobe_busy_begin(); 1263 1264 node = __llist_del_all(&tk->kretprobe_instances); 1265 while (node) { 1266 ri = container_of(node, struct kretprobe_instance, llist); 1267 node = node->next; 1268 1269 recycle_rp_inst(ri); 1270 } 1271 1272 kprobe_busy_end(); 1273 } 1274 NOKPROBE_SYMBOL(kprobe_flush_task); 1275 1276 static inline void free_rp_inst(struct kretprobe *rp) 1277 { 1278 struct kretprobe_instance *ri; 1279 struct freelist_node *node; 1280 int count = 0; 1281 1282 node = rp->freelist.head; 1283 while (node) { 1284 ri = container_of(node, struct kretprobe_instance, freelist); 1285 node = node->next; 1286 1287 kfree(ri); 1288 count++; 1289 } 1290 1291 if (refcount_sub_and_test(count, &rp->rph->ref)) { 1292 kfree(rp->rph); 1293 rp->rph = NULL; 1294 } 1295 } 1296 1297 /* Add the new probe to ap->list */ 1298 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) 1299 { 1300 if (p->post_handler) 1301 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ 1302 1303 list_add_rcu(&p->list, &ap->list); 1304 if (p->post_handler && !ap->post_handler) 1305 ap->post_handler = aggr_post_handler; 1306 1307 return 0; 1308 } 1309 1310 /* 1311 * Fill in the required fields of the "manager kprobe". Replace the 1312 * earlier kprobe in the hlist with the manager kprobe 1313 */ 1314 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 1315 { 1316 /* Copy p's insn slot to ap */ 1317 copy_kprobe(p, ap); 1318 flush_insn_slot(ap); 1319 ap->addr = p->addr; 1320 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; 1321 ap->pre_handler = aggr_pre_handler; 1322 /* We don't care the kprobe which has gone. */ 1323 if (p->post_handler && !kprobe_gone(p)) 1324 ap->post_handler = aggr_post_handler; 1325 1326 INIT_LIST_HEAD(&ap->list); 1327 INIT_HLIST_NODE(&ap->hlist); 1328 1329 list_add_rcu(&p->list, &ap->list); 1330 hlist_replace_rcu(&p->hlist, &ap->hlist); 1331 } 1332 1333 /* 1334 * This is the second or subsequent kprobe at the address - handle 1335 * the intricacies 1336 */ 1337 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) 1338 { 1339 int ret = 0; 1340 struct kprobe *ap = orig_p; 1341 1342 cpus_read_lock(); 1343 1344 /* For preparing optimization, jump_label_text_reserved() is called */ 1345 jump_label_lock(); 1346 mutex_lock(&text_mutex); 1347 1348 if (!kprobe_aggrprobe(orig_p)) { 1349 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ 1350 ap = alloc_aggr_kprobe(orig_p); 1351 if (!ap) { 1352 ret = -ENOMEM; 1353 goto out; 1354 } 1355 init_aggr_kprobe(ap, orig_p); 1356 } else if (kprobe_unused(ap)) { 1357 /* This probe is going to die. Rescue it */ 1358 ret = reuse_unused_kprobe(ap); 1359 if (ret) 1360 goto out; 1361 } 1362 1363 if (kprobe_gone(ap)) { 1364 /* 1365 * Attempting to insert new probe at the same location that 1366 * had a probe in the module vaddr area which already 1367 * freed. 
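		 * (That is, KPROBE_FLAG_GONE is set because the module that
		 * contained the probed address has been unloaded.)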
So, the instruction slot has already been 1368 * released. We need a new slot for the new probe. 1369 */ 1370 ret = arch_prepare_kprobe(ap); 1371 if (ret) 1372 /* 1373 * Even if fail to allocate new slot, don't need to 1374 * free aggr_probe. It will be used next time, or 1375 * freed by unregister_kprobe. 1376 */ 1377 goto out; 1378 1379 /* Prepare optimized instructions if possible. */ 1380 prepare_optimized_kprobe(ap); 1381 1382 /* 1383 * Clear gone flag to prevent allocating new slot again, and 1384 * set disabled flag because it is not armed yet. 1385 */ 1386 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) 1387 | KPROBE_FLAG_DISABLED; 1388 } 1389 1390 /* Copy ap's insn slot to p */ 1391 copy_kprobe(ap, p); 1392 ret = add_new_kprobe(ap, p); 1393 1394 out: 1395 mutex_unlock(&text_mutex); 1396 jump_label_unlock(); 1397 cpus_read_unlock(); 1398 1399 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { 1400 ap->flags &= ~KPROBE_FLAG_DISABLED; 1401 if (!kprobes_all_disarmed) { 1402 /* Arm the breakpoint again. */ 1403 ret = arm_kprobe(ap); 1404 if (ret) { 1405 ap->flags |= KPROBE_FLAG_DISABLED; 1406 list_del_rcu(&p->list); 1407 synchronize_rcu(); 1408 } 1409 } 1410 } 1411 return ret; 1412 } 1413 1414 bool __weak arch_within_kprobe_blacklist(unsigned long addr) 1415 { 1416 /* The __kprobes marked functions and entry code must not be probed */ 1417 return addr >= (unsigned long)__kprobes_text_start && 1418 addr < (unsigned long)__kprobes_text_end; 1419 } 1420 1421 static bool __within_kprobe_blacklist(unsigned long addr) 1422 { 1423 struct kprobe_blacklist_entry *ent; 1424 1425 if (arch_within_kprobe_blacklist(addr)) 1426 return true; 1427 /* 1428 * If there exists a kprobe_blacklist, verify and 1429 * fail any probe registration in the prohibited area 1430 */ 1431 list_for_each_entry(ent, &kprobe_blacklist, list) { 1432 if (addr >= ent->start_addr && addr < ent->end_addr) 1433 return true; 1434 } 1435 return false; 1436 } 1437 1438 bool within_kprobe_blacklist(unsigned long addr) 1439 { 1440 char symname[KSYM_NAME_LEN], *p; 1441 1442 if (__within_kprobe_blacklist(addr)) 1443 return true; 1444 1445 /* Check if the address is on a suffixed-symbol */ 1446 if (!lookup_symbol_name(addr, symname)) { 1447 p = strchr(symname, '.'); 1448 if (!p) 1449 return false; 1450 *p = '\0'; 1451 addr = (unsigned long)kprobe_lookup_name(symname, 0); 1452 if (addr) 1453 return __within_kprobe_blacklist(addr); 1454 } 1455 return false; 1456 } 1457 1458 /* 1459 * If we have a symbol_name argument, look it up and add the offset field 1460 * to it. This way, we can specify a relative address to a symbol. 1461 * This returns encoded errors if it fails to look up symbol or invalid 1462 * combination of parameters. 1463 */ 1464 static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr, 1465 const char *symbol_name, unsigned int offset) 1466 { 1467 if ((symbol_name && addr) || (!symbol_name && !addr)) 1468 goto invalid; 1469 1470 if (symbol_name) { 1471 addr = kprobe_lookup_name(symbol_name, offset); 1472 if (!addr) 1473 return ERR_PTR(-ENOENT); 1474 } 1475 1476 addr = (kprobe_opcode_t *)(((char *)addr) + offset); 1477 if (addr) 1478 return addr; 1479 1480 invalid: 1481 return ERR_PTR(-EINVAL); 1482 } 1483 1484 static kprobe_opcode_t *kprobe_addr(struct kprobe *p) 1485 { 1486 return _kprobe_addr(p->addr, p->symbol_name, p->offset); 1487 } 1488 1489 /* Check passed kprobe is valid and return kprobe in kprobe_table. 
*/ 1490 static struct kprobe *__get_valid_kprobe(struct kprobe *p) 1491 { 1492 struct kprobe *ap, *list_p; 1493 1494 lockdep_assert_held(&kprobe_mutex); 1495 1496 ap = get_kprobe(p->addr); 1497 if (unlikely(!ap)) 1498 return NULL; 1499 1500 if (p != ap) { 1501 list_for_each_entry(list_p, &ap->list, list) 1502 if (list_p == p) 1503 /* kprobe p is a valid probe */ 1504 goto valid; 1505 return NULL; 1506 } 1507 valid: 1508 return ap; 1509 } 1510 1511 /* 1512 * Warn and return error if the kprobe is being re-registered since 1513 * there must be a software bug. 1514 */ 1515 static inline int warn_kprobe_rereg(struct kprobe *p) 1516 { 1517 int ret = 0; 1518 1519 mutex_lock(&kprobe_mutex); 1520 if (WARN_ON_ONCE(__get_valid_kprobe(p))) 1521 ret = -EINVAL; 1522 mutex_unlock(&kprobe_mutex); 1523 1524 return ret; 1525 } 1526 1527 int __weak arch_check_ftrace_location(struct kprobe *p) 1528 { 1529 unsigned long ftrace_addr; 1530 1531 ftrace_addr = ftrace_location((unsigned long)p->addr); 1532 if (ftrace_addr) { 1533 #ifdef CONFIG_KPROBES_ON_FTRACE 1534 /* Given address is not on the instruction boundary */ 1535 if ((unsigned long)p->addr != ftrace_addr) 1536 return -EILSEQ; 1537 p->flags |= KPROBE_FLAG_FTRACE; 1538 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1539 return -EINVAL; 1540 #endif 1541 } 1542 return 0; 1543 } 1544 1545 static int check_kprobe_address_safe(struct kprobe *p, 1546 struct module **probed_mod) 1547 { 1548 int ret; 1549 1550 ret = arch_check_ftrace_location(p); 1551 if (ret) 1552 return ret; 1553 jump_label_lock(); 1554 preempt_disable(); 1555 1556 /* Ensure it is not in reserved area nor out of text */ 1557 if (!kernel_text_address((unsigned long) p->addr) || 1558 within_kprobe_blacklist((unsigned long) p->addr) || 1559 jump_label_text_reserved(p->addr, p->addr) || 1560 static_call_text_reserved(p->addr, p->addr) || 1561 find_bug((unsigned long)p->addr)) { 1562 ret = -EINVAL; 1563 goto out; 1564 } 1565 1566 /* Check if are we probing a module */ 1567 *probed_mod = __module_text_address((unsigned long) p->addr); 1568 if (*probed_mod) { 1569 /* 1570 * We must hold a refcount of the probed module while updating 1571 * its code to prohibit unexpected unloading. 1572 */ 1573 if (unlikely(!try_module_get(*probed_mod))) { 1574 ret = -ENOENT; 1575 goto out; 1576 } 1577 1578 /* 1579 * If the module freed .init.text, we couldn't insert 1580 * kprobes in there. 1581 */ 1582 if (within_module_init((unsigned long)p->addr, *probed_mod) && 1583 (*probed_mod)->state != MODULE_STATE_COMING) { 1584 module_put(*probed_mod); 1585 *probed_mod = NULL; 1586 ret = -ENOENT; 1587 } 1588 } 1589 out: 1590 preempt_enable(); 1591 jump_label_unlock(); 1592 1593 return ret; 1594 } 1595 1596 int register_kprobe(struct kprobe *p) 1597 { 1598 int ret; 1599 struct kprobe *old_p; 1600 struct module *probed_mod; 1601 kprobe_opcode_t *addr; 1602 1603 /* Adjust probe address from symbol */ 1604 addr = kprobe_addr(p); 1605 if (IS_ERR(addr)) 1606 return PTR_ERR(addr); 1607 p->addr = addr; 1608 1609 ret = warn_kprobe_rereg(p); 1610 if (ret) 1611 return ret; 1612 1613 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ 1614 p->flags &= KPROBE_FLAG_DISABLED; 1615 p->nmissed = 0; 1616 INIT_LIST_HEAD(&p->list); 1617 1618 ret = check_kprobe_address_safe(p, &probed_mod); 1619 if (ret) 1620 return ret; 1621 1622 mutex_lock(&kprobe_mutex); 1623 1624 old_p = get_kprobe(p->addr); 1625 if (old_p) { 1626 /* Since this may unoptimize old_p, locking text_mutex. 
*/ 1627 ret = register_aggr_kprobe(old_p, p); 1628 goto out; 1629 } 1630 1631 cpus_read_lock(); 1632 /* Prevent text modification */ 1633 mutex_lock(&text_mutex); 1634 ret = prepare_kprobe(p); 1635 mutex_unlock(&text_mutex); 1636 cpus_read_unlock(); 1637 if (ret) 1638 goto out; 1639 1640 INIT_HLIST_NODE(&p->hlist); 1641 hlist_add_head_rcu(&p->hlist, 1642 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1643 1644 if (!kprobes_all_disarmed && !kprobe_disabled(p)) { 1645 ret = arm_kprobe(p); 1646 if (ret) { 1647 hlist_del_rcu(&p->hlist); 1648 synchronize_rcu(); 1649 goto out; 1650 } 1651 } 1652 1653 /* Try to optimize kprobe */ 1654 try_to_optimize_kprobe(p); 1655 out: 1656 mutex_unlock(&kprobe_mutex); 1657 1658 if (probed_mod) 1659 module_put(probed_mod); 1660 1661 return ret; 1662 } 1663 EXPORT_SYMBOL_GPL(register_kprobe); 1664 1665 /* Check if all probes on the aggrprobe are disabled */ 1666 static int aggr_kprobe_disabled(struct kprobe *ap) 1667 { 1668 struct kprobe *kp; 1669 1670 lockdep_assert_held(&kprobe_mutex); 1671 1672 list_for_each_entry(kp, &ap->list, list) 1673 if (!kprobe_disabled(kp)) 1674 /* 1675 * There is an active probe on the list. 1676 * We can't disable this ap. 1677 */ 1678 return 0; 1679 1680 return 1; 1681 } 1682 1683 /* Disable one kprobe: Make sure called under kprobe_mutex is locked */ 1684 static struct kprobe *__disable_kprobe(struct kprobe *p) 1685 { 1686 struct kprobe *orig_p; 1687 int ret; 1688 1689 /* Get an original kprobe for return */ 1690 orig_p = __get_valid_kprobe(p); 1691 if (unlikely(orig_p == NULL)) 1692 return ERR_PTR(-EINVAL); 1693 1694 if (!kprobe_disabled(p)) { 1695 /* Disable probe if it is a child probe */ 1696 if (p != orig_p) 1697 p->flags |= KPROBE_FLAG_DISABLED; 1698 1699 /* Try to disarm and disable this/parent probe */ 1700 if (p == orig_p || aggr_kprobe_disabled(orig_p)) { 1701 /* 1702 * If kprobes_all_disarmed is set, orig_p 1703 * should have already been disarmed, so 1704 * skip unneed disarming process. 1705 */ 1706 if (!kprobes_all_disarmed) { 1707 ret = disarm_kprobe(orig_p, true); 1708 if (ret) { 1709 p->flags &= ~KPROBE_FLAG_DISABLED; 1710 return ERR_PTR(ret); 1711 } 1712 } 1713 orig_p->flags |= KPROBE_FLAG_DISABLED; 1714 } 1715 } 1716 1717 return orig_p; 1718 } 1719 1720 /* 1721 * Unregister a kprobe without a scheduler synchronization. 1722 */ 1723 static int __unregister_kprobe_top(struct kprobe *p) 1724 { 1725 struct kprobe *ap, *list_p; 1726 1727 /* Disable kprobe. This will disarm it if needed. */ 1728 ap = __disable_kprobe(p); 1729 if (IS_ERR(ap)) 1730 return PTR_ERR(ap); 1731 1732 if (ap == p) 1733 /* 1734 * This probe is an independent(and non-optimized) kprobe 1735 * (not an aggrprobe). Remove from the hash list. 1736 */ 1737 goto disarmed; 1738 1739 /* Following process expects this probe is an aggrprobe */ 1740 WARN_ON(!kprobe_aggrprobe(ap)); 1741 1742 if (list_is_singular(&ap->list) && kprobe_disarmed(ap)) 1743 /* 1744 * !disarmed could be happen if the probe is under delayed 1745 * unoptimizing. 1746 */ 1747 goto disarmed; 1748 else { 1749 /* If disabling probe has special handlers, update aggrprobe */ 1750 if (p->post_handler && !kprobe_gone(p)) { 1751 list_for_each_entry(list_p, &ap->list, list) { 1752 if ((list_p != p) && (list_p->post_handler)) 1753 goto noclean; 1754 } 1755 ap->post_handler = NULL; 1756 } 1757 noclean: 1758 /* 1759 * Remove from the aggrprobe: this path will do nothing in 1760 * __unregister_kprobe_bottom(). 
1761 */ 1762 list_del_rcu(&p->list); 1763 if (!kprobe_disabled(ap) && !kprobes_all_disarmed) 1764 /* 1765 * Try to optimize this probe again, because post 1766 * handler may have been changed. 1767 */ 1768 optimize_kprobe(ap); 1769 } 1770 return 0; 1771 1772 disarmed: 1773 hlist_del_rcu(&ap->hlist); 1774 return 0; 1775 } 1776 1777 static void __unregister_kprobe_bottom(struct kprobe *p) 1778 { 1779 struct kprobe *ap; 1780 1781 if (list_empty(&p->list)) 1782 /* This is an independent kprobe */ 1783 arch_remove_kprobe(p); 1784 else if (list_is_singular(&p->list)) { 1785 /* This is the last child of an aggrprobe */ 1786 ap = list_entry(p->list.next, struct kprobe, list); 1787 list_del(&p->list); 1788 free_aggr_kprobe(ap); 1789 } 1790 /* Otherwise, do nothing. */ 1791 } 1792 1793 int register_kprobes(struct kprobe **kps, int num) 1794 { 1795 int i, ret = 0; 1796 1797 if (num <= 0) 1798 return -EINVAL; 1799 for (i = 0; i < num; i++) { 1800 ret = register_kprobe(kps[i]); 1801 if (ret < 0) { 1802 if (i > 0) 1803 unregister_kprobes(kps, i); 1804 break; 1805 } 1806 } 1807 return ret; 1808 } 1809 EXPORT_SYMBOL_GPL(register_kprobes); 1810 1811 void unregister_kprobe(struct kprobe *p) 1812 { 1813 unregister_kprobes(&p, 1); 1814 } 1815 EXPORT_SYMBOL_GPL(unregister_kprobe); 1816 1817 void unregister_kprobes(struct kprobe **kps, int num) 1818 { 1819 int i; 1820 1821 if (num <= 0) 1822 return; 1823 mutex_lock(&kprobe_mutex); 1824 for (i = 0; i < num; i++) 1825 if (__unregister_kprobe_top(kps[i]) < 0) 1826 kps[i]->addr = NULL; 1827 mutex_unlock(&kprobe_mutex); 1828 1829 synchronize_rcu(); 1830 for (i = 0; i < num; i++) 1831 if (kps[i]->addr) 1832 __unregister_kprobe_bottom(kps[i]); 1833 } 1834 EXPORT_SYMBOL_GPL(unregister_kprobes); 1835 1836 int __weak kprobe_exceptions_notify(struct notifier_block *self, 1837 unsigned long val, void *data) 1838 { 1839 return NOTIFY_DONE; 1840 } 1841 NOKPROBE_SYMBOL(kprobe_exceptions_notify); 1842 1843 static struct notifier_block kprobe_exceptions_nb = { 1844 .notifier_call = kprobe_exceptions_notify, 1845 .priority = 0x7fffffff /* we need to be notified first */ 1846 }; 1847 1848 unsigned long __weak arch_deref_entry_point(void *entry) 1849 { 1850 return (unsigned long)entry; 1851 } 1852 1853 #ifdef CONFIG_KRETPROBES 1854 1855 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, 1856 void *trampoline_address, 1857 void *frame_pointer) 1858 { 1859 kprobe_opcode_t *correct_ret_addr = NULL; 1860 struct kretprobe_instance *ri = NULL; 1861 struct llist_node *first, *node; 1862 struct kretprobe *rp; 1863 1864 /* Find all nodes for this frame. */ 1865 first = node = current->kretprobe_instances.first; 1866 while (node) { 1867 ri = container_of(node, struct kretprobe_instance, llist); 1868 1869 BUG_ON(ri->fp != frame_pointer); 1870 1871 if (ri->ret_addr != trampoline_address) { 1872 correct_ret_addr = ri->ret_addr; 1873 /* 1874 * This is the real return address. Any other 1875 * instances associated with this task are for 1876 * other calls deeper on the call stack 1877 */ 1878 goto found; 1879 } 1880 1881 node = node->next; 1882 } 1883 pr_err("Oops! Kretprobe fails to find correct return address.\n"); 1884 BUG_ON(1); 1885 1886 found: 1887 /* Unlink all nodes for this frame. */ 1888 current->kretprobe_instances.first = node->next; 1889 node->next = NULL; 1890 1891 /* Run them.. 
*/ 1892 while (first) { 1893 ri = container_of(first, struct kretprobe_instance, llist); 1894 first = first->next; 1895 1896 rp = get_kretprobe(ri); 1897 if (rp && rp->handler) { 1898 struct kprobe *prev = kprobe_running(); 1899 1900 __this_cpu_write(current_kprobe, &rp->kp); 1901 ri->ret_addr = correct_ret_addr; 1902 rp->handler(ri, regs); 1903 __this_cpu_write(current_kprobe, prev); 1904 } 1905 1906 recycle_rp_inst(ri); 1907 } 1908 1909 return (unsigned long)correct_ret_addr; 1910 } 1911 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler) 1912 1913 /* 1914 * This kprobe pre_handler is registered with every kretprobe. When probe 1915 * hits it will set up the return probe. 1916 */ 1917 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 1918 { 1919 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 1920 struct kretprobe_instance *ri; 1921 struct freelist_node *fn; 1922 1923 fn = freelist_try_get(&rp->freelist); 1924 if (!fn) { 1925 rp->nmissed++; 1926 return 0; 1927 } 1928 1929 ri = container_of(fn, struct kretprobe_instance, freelist); 1930 1931 if (rp->entry_handler && rp->entry_handler(ri, regs)) { 1932 freelist_add(&ri->freelist, &rp->freelist); 1933 return 0; 1934 } 1935 1936 arch_prepare_kretprobe(ri, regs); 1937 1938 __llist_add(&ri->llist, ¤t->kretprobe_instances); 1939 1940 return 0; 1941 } 1942 NOKPROBE_SYMBOL(pre_handler_kretprobe); 1943 1944 bool __weak arch_kprobe_on_func_entry(unsigned long offset) 1945 { 1946 return !offset; 1947 } 1948 1949 /** 1950 * kprobe_on_func_entry() -- check whether given address is function entry 1951 * @addr: Target address 1952 * @sym: Target symbol name 1953 * @offset: The offset from the symbol or the address 1954 * 1955 * This checks whether the given @addr+@offset or @sym+@offset is on the 1956 * function entry address or not. 1957 * This returns 0 if it is the function entry, or -EINVAL if it is not. 1958 * And also it returns -ENOENT if it fails the symbol or address lookup. 1959 * Caller must pass @addr or @sym (either one must be NULL), or this 1960 * returns -EINVAL. 
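 *
 * For example (a sketch; 'vfs_read' is only an illustrative symbol):
 *	kprobe_on_func_entry(NULL, "vfs_read", 0) returns 0, while
 *	kprobe_on_func_entry(NULL, "vfs_read", 0x10) typically returns -EINVAL,
 *	unless the architecture accepts that offset as a function entry.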
1961 */ 1962 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) 1963 { 1964 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset); 1965 1966 if (IS_ERR(kp_addr)) 1967 return PTR_ERR(kp_addr); 1968 1969 if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset)) 1970 return -ENOENT; 1971 1972 if (!arch_kprobe_on_func_entry(offset)) 1973 return -EINVAL; 1974 1975 return 0; 1976 } 1977 1978 int register_kretprobe(struct kretprobe *rp) 1979 { 1980 int ret; 1981 struct kretprobe_instance *inst; 1982 int i; 1983 void *addr; 1984 1985 ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset); 1986 if (ret) 1987 return ret; 1988 1989 /* If only rp->kp.addr is specified, check reregistering kprobes */ 1990 if (rp->kp.addr && warn_kprobe_rereg(&rp->kp)) 1991 return -EINVAL; 1992 1993 if (kretprobe_blacklist_size) { 1994 addr = kprobe_addr(&rp->kp); 1995 if (IS_ERR(addr)) 1996 return PTR_ERR(addr); 1997 1998 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 1999 if (kretprobe_blacklist[i].addr == addr) 2000 return -EINVAL; 2001 } 2002 } 2003 2004 rp->kp.pre_handler = pre_handler_kretprobe; 2005 rp->kp.post_handler = NULL; 2006 2007 /* Pre-allocate memory for max kretprobe instances */ 2008 if (rp->maxactive <= 0) { 2009 #ifdef CONFIG_PREEMPTION 2010 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus()); 2011 #else 2012 rp->maxactive = num_possible_cpus(); 2013 #endif 2014 } 2015 rp->freelist.head = NULL; 2016 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL); 2017 if (!rp->rph) 2018 return -ENOMEM; 2019 2020 rp->rph->rp = rp; 2021 for (i = 0; i < rp->maxactive; i++) { 2022 inst = kzalloc(sizeof(struct kretprobe_instance) + 2023 rp->data_size, GFP_KERNEL); 2024 if (inst == NULL) { 2025 refcount_set(&rp->rph->ref, i); 2026 free_rp_inst(rp); 2027 return -ENOMEM; 2028 } 2029 inst->rph = rp->rph; 2030 freelist_add(&inst->freelist, &rp->freelist); 2031 } 2032 refcount_set(&rp->rph->ref, i); 2033 2034 rp->nmissed = 0; 2035 /* Establish function entry probe point */ 2036 ret = register_kprobe(&rp->kp); 2037 if (ret != 0) 2038 free_rp_inst(rp); 2039 return ret; 2040 } 2041 EXPORT_SYMBOL_GPL(register_kretprobe); 2042 2043 int register_kretprobes(struct kretprobe **rps, int num) 2044 { 2045 int ret = 0, i; 2046 2047 if (num <= 0) 2048 return -EINVAL; 2049 for (i = 0; i < num; i++) { 2050 ret = register_kretprobe(rps[i]); 2051 if (ret < 0) { 2052 if (i > 0) 2053 unregister_kretprobes(rps, i); 2054 break; 2055 } 2056 } 2057 return ret; 2058 } 2059 EXPORT_SYMBOL_GPL(register_kretprobes); 2060 2061 void unregister_kretprobe(struct kretprobe *rp) 2062 { 2063 unregister_kretprobes(&rp, 1); 2064 } 2065 EXPORT_SYMBOL_GPL(unregister_kretprobe); 2066 2067 void unregister_kretprobes(struct kretprobe **rps, int num) 2068 { 2069 int i; 2070 2071 if (num <= 0) 2072 return; 2073 mutex_lock(&kprobe_mutex); 2074 for (i = 0; i < num; i++) { 2075 if (__unregister_kprobe_top(&rps[i]->kp) < 0) 2076 rps[i]->kp.addr = NULL; 2077 rps[i]->rph->rp = NULL; 2078 } 2079 mutex_unlock(&kprobe_mutex); 2080 2081 synchronize_rcu(); 2082 for (i = 0; i < num; i++) { 2083 if (rps[i]->kp.addr) { 2084 __unregister_kprobe_bottom(&rps[i]->kp); 2085 free_rp_inst(rps[i]); 2086 } 2087 } 2088 } 2089 EXPORT_SYMBOL_GPL(unregister_kretprobes); 2090 2091 #else /* CONFIG_KRETPROBES */ 2092 int register_kretprobe(struct kretprobe *rp) 2093 { 2094 return -ENOSYS; 2095 } 2096 EXPORT_SYMBOL_GPL(register_kretprobe); 2097 2098 int register_kretprobes(struct kretprobe 
int register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++) {
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
		rps[i]->rph->rp = NULL;
	}
	mutex_unlock(&kprobe_mutex);

	synchronize_rcu();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			free_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	lockdep_assert_held(&kprobe_mutex);

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);

	/*
	 * The module is going away. We should disarm the kprobe which is
	 * using ftrace, because the ftrace framework is still available at
	 * the MODULE_STATE_GOING notification.
	 */
	if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
		disarm_kprobe_ftrace(p);
}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	p = __disable_kprobe(kp);
	if (IS_ERR(p))
		ret = PTR_ERR(p);

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		ret = arm_kprobe(p);
		if (ret)
			p->flags |= KPROBE_FLAG_DISABLED;
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

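/*
 * Illustrative sketch (not part of this file): disable_kprobe() and
 * enable_kprobe() let a caller mute and re-arm a probe without dropping its
 * registration. The "vfs_read" target and the handler below are assumptions
 * made only for this example.
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at %pS\n", (void *)instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "vfs_read",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	register_kprobe(&my_kp);	// insert and arm the probe
 *	disable_kprobe(&my_kp);		// keep it registered but inactive
 *	enable_kprobe(&my_kp);		// re-arm it
 *	unregister_kprobe(&my_kp);	// finally remove it
 */
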
/*
 * The caller must NOT call this in the usual path; it is only for
 * critical cases.
 */
void dump_kprobe(struct kprobe *kp)
{
	pr_err("Dumping kprobe:\n");
	pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
	       kp->symbol_name, kp->offset, kp->addr);
}
NOKPROBE_SYMBOL(dump_kprobe);

int kprobe_add_ksym_blacklist(unsigned long entry)
{
	struct kprobe_blacklist_entry *ent;
	unsigned long offset = 0, size = 0;

	if (!kernel_text_address(entry) ||
	    !kallsyms_lookup_size_offset(entry, &size, &offset))
		return -EINVAL;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->start_addr = entry;
	ent->end_addr = entry + size;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, &kprobe_blacklist);

	return (int)size;
}

/* Add all symbols in given area into kprobe blacklist */
int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
{
	unsigned long entry;
	int ret = 0;

	for (entry = start; entry < end; entry += ret) {
		ret = kprobe_add_ksym_blacklist(entry);
		if (ret < 0)
			return ret;
		if (ret == 0)	/* In case of alias symbol */
			ret = 1;
	}
	return 0;
}

/* Remove all symbols in given area from kprobe blacklist */
static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
{
	struct kprobe_blacklist_entry *ent, *n;

	list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
		if (ent->start_addr < start || ent->start_addr >= end)
			continue;
		list_del(&ent->list);
		kfree(ent);
	}
}

static void kprobe_remove_ksym_blacklist(unsigned long entry)
{
	kprobe_remove_area_blacklist(entry, entry + 1);
}

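/*
 * Illustrative sketch (not part of this file): how a function typically ends
 * up on the blacklist maintained above. 'my_handler_core' is an assumed name
 * used only for this example.
 *
 *	// Tag the function so it is placed in the .kprobes.text section,
 *	// which populate_kprobe_blacklist() later covers via
 *	// kprobe_add_area_blacklist():
 *	static int __kprobes my_handler_core(void) { ... }
 *
 *	// ...or keep it in normal .text and record its address in the
 *	// _kprobe_blacklist section, which populate_kprobe_blacklist()
 *	// (and add_module_kprobe_blacklist() for modules) walks:
 *	static int my_handler_core(void) { ... }
 *	NOKPROBE_SYMBOL(my_handler_core);
 */
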
int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
				   char *type, char *sym)
{
	return -ERANGE;
}

int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *sym)
{
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
	if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
		return 0;
#ifdef CONFIG_OPTPROBES
	if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
		return 0;
#endif
#endif
	if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
		return 0;
	return -ERANGE;
}

int __init __weak arch_populate_kprobe_blacklist(void)
{
	return 0;
}

/*
 * Lookup and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to those functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					    unsigned long *end)
{
	unsigned long entry;
	unsigned long *iter;
	int ret;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);
		ret = kprobe_add_ksym_blacklist(entry);
		if (ret == -EINVAL)
			continue;
		if (ret < 0)
			return ret;
	}

	/* Symbols in __kprobes_text are blacklisted */
	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
					(unsigned long)__kprobes_text_end);
	if (ret)
		return ret;

	/* Symbols in noinstr section are blacklisted */
	ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
					(unsigned long)__noinstr_text_end);

	return ret ? : arch_populate_kprobe_blacklist();
}

static void add_module_kprobe_blacklist(struct module *mod)
{
	unsigned long start, end;
	int i;

	if (mod->kprobe_blacklist) {
		for (i = 0; i < mod->num_kprobe_blacklist; i++)
			kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
	}

	start = (unsigned long)mod->kprobes_text_start;
	if (start) {
		end = start + mod->kprobes_text_size;
		kprobe_add_area_blacklist(start, end);
	}

	start = (unsigned long)mod->noinstr_text_start;
	if (start) {
		end = start + mod->noinstr_text_size;
		kprobe_add_area_blacklist(start, end);
	}
}

static void remove_module_kprobe_blacklist(struct module *mod)
{
	unsigned long start, end;
	int i;

	if (mod->kprobe_blacklist) {
		for (i = 0; i < mod->num_kprobe_blacklist; i++)
			kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
	}

	start = (unsigned long)mod->kprobes_text_start;
	if (start) {
		end = start + mod->kprobes_text_size;
		kprobe_remove_area_blacklist(start, end);
	}

	start = (unsigned long)mod->noinstr_text_start;
	if (start) {
		end = start + mod->noinstr_text_size;
		kprobe_remove_area_blacklist(start, end);
	}
}

/* Module notifier callback, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val == MODULE_STATE_COMING) {
		mutex_lock(&kprobe_mutex);
		add_module_kprobe_blacklist(mod);
		mutex_unlock(&kprobe_mutex);
	}
	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections are about to be freed. When MODULE_STATE_LIVE
	 * is notified, only the .init.text section is freed. We need to
	 * disable the kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't needed.
2417 * 2418 * Note, this will also move any optimized probes 2419 * that are pending to be removed from their 2420 * corresponding lists to the freeing_list and 2421 * will not be touched by the delayed 2422 * kprobe_optimizer work handler. 2423 */ 2424 kill_kprobe(p); 2425 } 2426 } 2427 if (val == MODULE_STATE_GOING) 2428 remove_module_kprobe_blacklist(mod); 2429 mutex_unlock(&kprobe_mutex); 2430 return NOTIFY_DONE; 2431 } 2432 2433 static struct notifier_block kprobe_module_nb = { 2434 .notifier_call = kprobes_module_callback, 2435 .priority = 0 2436 }; 2437 2438 /* Markers of _kprobe_blacklist section */ 2439 extern unsigned long __start_kprobe_blacklist[]; 2440 extern unsigned long __stop_kprobe_blacklist[]; 2441 2442 void kprobe_free_init_mem(void) 2443 { 2444 void *start = (void *)(&__init_begin); 2445 void *end = (void *)(&__init_end); 2446 struct hlist_head *head; 2447 struct kprobe *p; 2448 int i; 2449 2450 mutex_lock(&kprobe_mutex); 2451 2452 /* Kill all kprobes on initmem */ 2453 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2454 head = &kprobe_table[i]; 2455 hlist_for_each_entry(p, head, hlist) { 2456 if (start <= (void *)p->addr && (void *)p->addr < end) 2457 kill_kprobe(p); 2458 } 2459 } 2460 2461 mutex_unlock(&kprobe_mutex); 2462 } 2463 2464 static int __init init_kprobes(void) 2465 { 2466 int i, err = 0; 2467 2468 /* FIXME allocate the probe table, currently defined statically */ 2469 /* initialize all list heads */ 2470 for (i = 0; i < KPROBE_TABLE_SIZE; i++) 2471 INIT_HLIST_HEAD(&kprobe_table[i]); 2472 2473 err = populate_kprobe_blacklist(__start_kprobe_blacklist, 2474 __stop_kprobe_blacklist); 2475 if (err) { 2476 pr_err("kprobes: failed to populate blacklist: %d\n", err); 2477 pr_err("Please take care of using kprobes.\n"); 2478 } 2479 2480 if (kretprobe_blacklist_size) { 2481 /* lookup the function address from its name */ 2482 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 2483 kretprobe_blacklist[i].addr = 2484 kprobe_lookup_name(kretprobe_blacklist[i].name, 0); 2485 if (!kretprobe_blacklist[i].addr) 2486 printk("kretprobe: lookup failed: %s\n", 2487 kretprobe_blacklist[i].name); 2488 } 2489 } 2490 2491 /* By default, kprobes are armed */ 2492 kprobes_all_disarmed = false; 2493 2494 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT) 2495 /* Init kprobe_optinsn_slots for allocation */ 2496 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; 2497 #endif 2498 2499 err = arch_init_kprobes(); 2500 if (!err) 2501 err = register_die_notifier(&kprobe_exceptions_nb); 2502 if (!err) 2503 err = register_module_notifier(&kprobe_module_nb); 2504 2505 kprobes_initialized = (err == 0); 2506 2507 if (!err) 2508 init_test_probes(); 2509 return err; 2510 } 2511 early_initcall(init_kprobes); 2512 2513 #if defined(CONFIG_OPTPROBES) 2514 static int __init init_optprobes(void) 2515 { 2516 /* 2517 * Enable kprobe optimization - this kicks the optimizer which 2518 * depends on synchronize_rcu_tasks() and ksoftirqd, that is 2519 * not spawned in early initcall. So delay the optimization. 
2520 */ 2521 optimize_all_kprobes(); 2522 2523 return 0; 2524 } 2525 subsys_initcall(init_optprobes); 2526 #endif 2527 2528 #ifdef CONFIG_DEBUG_FS 2529 static void report_probe(struct seq_file *pi, struct kprobe *p, 2530 const char *sym, int offset, char *modname, struct kprobe *pp) 2531 { 2532 char *kprobe_type; 2533 void *addr = p->addr; 2534 2535 if (p->pre_handler == pre_handler_kretprobe) 2536 kprobe_type = "r"; 2537 else 2538 kprobe_type = "k"; 2539 2540 if (!kallsyms_show_value(pi->file->f_cred)) 2541 addr = NULL; 2542 2543 if (sym) 2544 seq_printf(pi, "%px %s %s+0x%x %s ", 2545 addr, kprobe_type, sym, offset, 2546 (modname ? modname : " ")); 2547 else /* try to use %pS */ 2548 seq_printf(pi, "%px %s %pS ", 2549 addr, kprobe_type, p->addr); 2550 2551 if (!pp) 2552 pp = p; 2553 seq_printf(pi, "%s%s%s%s\n", 2554 (kprobe_gone(p) ? "[GONE]" : ""), 2555 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), 2556 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), 2557 (kprobe_ftrace(pp) ? "[FTRACE]" : "")); 2558 } 2559 2560 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) 2561 { 2562 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 2563 } 2564 2565 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 2566 { 2567 (*pos)++; 2568 if (*pos >= KPROBE_TABLE_SIZE) 2569 return NULL; 2570 return pos; 2571 } 2572 2573 static void kprobe_seq_stop(struct seq_file *f, void *v) 2574 { 2575 /* Nothing to do */ 2576 } 2577 2578 static int show_kprobe_addr(struct seq_file *pi, void *v) 2579 { 2580 struct hlist_head *head; 2581 struct kprobe *p, *kp; 2582 const char *sym = NULL; 2583 unsigned int i = *(loff_t *) v; 2584 unsigned long offset = 0; 2585 char *modname, namebuf[KSYM_NAME_LEN]; 2586 2587 head = &kprobe_table[i]; 2588 preempt_disable(); 2589 hlist_for_each_entry_rcu(p, head, hlist) { 2590 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 2591 &offset, &modname, namebuf); 2592 if (kprobe_aggrprobe(p)) { 2593 list_for_each_entry_rcu(kp, &p->list, list) 2594 report_probe(pi, kp, sym, offset, modname, p); 2595 } else 2596 report_probe(pi, p, sym, offset, modname, NULL); 2597 } 2598 preempt_enable(); 2599 return 0; 2600 } 2601 2602 static const struct seq_operations kprobes_sops = { 2603 .start = kprobe_seq_start, 2604 .next = kprobe_seq_next, 2605 .stop = kprobe_seq_stop, 2606 .show = show_kprobe_addr 2607 }; 2608 2609 DEFINE_SEQ_ATTRIBUTE(kprobes); 2610 2611 /* kprobes/blacklist -- shows which functions can not be probed */ 2612 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos) 2613 { 2614 mutex_lock(&kprobe_mutex); 2615 return seq_list_start(&kprobe_blacklist, *pos); 2616 } 2617 2618 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos) 2619 { 2620 return seq_list_next(v, &kprobe_blacklist, pos); 2621 } 2622 2623 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) 2624 { 2625 struct kprobe_blacklist_entry *ent = 2626 list_entry(v, struct kprobe_blacklist_entry, list); 2627 2628 /* 2629 * If /proc/kallsyms is not showing kernel address, we won't 2630 * show them here either. 
2631 */ 2632 if (!kallsyms_show_value(m->file->f_cred)) 2633 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, 2634 (void *)ent->start_addr); 2635 else 2636 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, 2637 (void *)ent->end_addr, (void *)ent->start_addr); 2638 return 0; 2639 } 2640 2641 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v) 2642 { 2643 mutex_unlock(&kprobe_mutex); 2644 } 2645 2646 static const struct seq_operations kprobe_blacklist_sops = { 2647 .start = kprobe_blacklist_seq_start, 2648 .next = kprobe_blacklist_seq_next, 2649 .stop = kprobe_blacklist_seq_stop, 2650 .show = kprobe_blacklist_seq_show, 2651 }; 2652 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist); 2653 2654 static int arm_all_kprobes(void) 2655 { 2656 struct hlist_head *head; 2657 struct kprobe *p; 2658 unsigned int i, total = 0, errors = 0; 2659 int err, ret = 0; 2660 2661 mutex_lock(&kprobe_mutex); 2662 2663 /* If kprobes are armed, just return */ 2664 if (!kprobes_all_disarmed) 2665 goto already_enabled; 2666 2667 /* 2668 * optimize_kprobe() called by arm_kprobe() checks 2669 * kprobes_all_disarmed, so set kprobes_all_disarmed before 2670 * arm_kprobe. 2671 */ 2672 kprobes_all_disarmed = false; 2673 /* Arming kprobes doesn't optimize kprobe itself */ 2674 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2675 head = &kprobe_table[i]; 2676 /* Arm all kprobes on a best-effort basis */ 2677 hlist_for_each_entry(p, head, hlist) { 2678 if (!kprobe_disabled(p)) { 2679 err = arm_kprobe(p); 2680 if (err) { 2681 errors++; 2682 ret = err; 2683 } 2684 total++; 2685 } 2686 } 2687 } 2688 2689 if (errors) 2690 pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n", 2691 errors, total); 2692 else 2693 pr_info("Kprobes globally enabled\n"); 2694 2695 already_enabled: 2696 mutex_unlock(&kprobe_mutex); 2697 return ret; 2698 } 2699 2700 static int disarm_all_kprobes(void) 2701 { 2702 struct hlist_head *head; 2703 struct kprobe *p; 2704 unsigned int i, total = 0, errors = 0; 2705 int err, ret = 0; 2706 2707 mutex_lock(&kprobe_mutex); 2708 2709 /* If kprobes are already disarmed, just return */ 2710 if (kprobes_all_disarmed) { 2711 mutex_unlock(&kprobe_mutex); 2712 return 0; 2713 } 2714 2715 kprobes_all_disarmed = true; 2716 2717 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2718 head = &kprobe_table[i]; 2719 /* Disarm all kprobes on a best-effort basis */ 2720 hlist_for_each_entry(p, head, hlist) { 2721 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { 2722 err = disarm_kprobe(p, false); 2723 if (err) { 2724 errors++; 2725 ret = err; 2726 } 2727 total++; 2728 } 2729 } 2730 } 2731 2732 if (errors) 2733 pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n", 2734 errors, total); 2735 else 2736 pr_info("Kprobes globally disabled\n"); 2737 2738 mutex_unlock(&kprobe_mutex); 2739 2740 /* Wait for disarming all kprobes by optimizer */ 2741 wait_for_kprobe_optimizer(); 2742 2743 return ret; 2744 } 2745 2746 /* 2747 * XXX: The debugfs bool file interface doesn't allow for callbacks 2748 * when the bool state is switched. 
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * it becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	bool enable;
	int ret;

	ret = kstrtobool_from_user(user_buf, count, &enable);
	if (ret)
		return ret;

	ret = enable ? arm_all_kprobes() : disarm_all_kprobes();
	if (ret)
		return ret;

	return count;
}

static const struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
	.llseek =	default_llseek,
};

static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir;

	dir = debugfs_create_dir("kprobes", NULL);

	debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);

	debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);

	debugfs_create_file("blacklist", 0400, dir, NULL,
			    &kprobe_blacklist_fops);

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
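
/*
 * Illustrative usage note (not part of this file): with CONFIG_DEBUG_FS, the
 * files created above are typically reachable under /sys/kernel/debug/kprobes/
 * (assuming debugfs is mounted at /sys/kernel/debug), e.g.:
 *
 *	cat /sys/kernel/debug/kprobes/list		# dump registered probes
 *	cat /sys/kernel/debug/kprobes/blacklist		# dump non-probeable ranges
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	# calls disarm_all_kprobes()
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	# calls arm_all_kprobes()
 */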