/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int		(*is_in_guest)(void);
	int		(*is_user_mode)(void);
	unsigned long	(*get_guest_ip)(void);
	void		(*handle_intel_pt_intr)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64	nr;
	__u64	ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;
	u32				max_stack;
	u32				nr;
	short				contexts;
	bool				contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};
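
/*
 * Illustrative sketch only (not a required initialization pattern): a raw
 * record is usually described by a single fragment whose bytes sit in one
 * buffer, and is then hung off perf_sample_data::raw before the sample is
 * emitted; fragments may be chained via ->next when the payload is split,
 * and a ->copy() callback, when set, is used instead of a plain copy to
 * pull the fragment's bytes into the ring buffer:
 *
 *	struct perf_raw_record raw = {
 *		.frag = {
 *			.data = buf,
 *			.size = buf_size,
 *		},
 *	};
 *
 *	data.raw = &raw;
 */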

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
		struct { /* amd_iommu */
			u8	iommu_bank;
			u8	iommu_cntr;
			u16	padding;
			u64	conf;
			u64	conf1;
		};
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	/*
	 * The period we started this sample with.
	 */
	u64				last_period;

	/*
	 * However much is left of the current period; note that this is
	 * a full 64bit value and allows for generation of periods longer
	 * than hardware might allow.
	 */
	local64_t			period_left;

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};
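
/*
 * Illustrative sketch, assuming a hypothetical my_pmu_read_counter() helper:
 * a PMU's counter-update path typically folds the hardware delta into
 * event->count with a cmpxchg loop on prev_count, so that a nested
 * pmu::read() (e.g. from NMI) cannot lose an update:
 *
 *	u64 prev, now;
 *
 *	do {
 *		prev = local64_read(&hwc->prev_count);
 *		now  = my_pmu_read_counter(event);
 *	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
 *
 *	local64_add(now - prev, &event->count);
 *	local64_sub(now - prev, &hwc->period_left);
 */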

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD	0x1	/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ	0x2	/* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01
#define PERF_PMU_CAP_NO_NMI			0x02
#define PERF_PMU_CAP_AUX_NO_SG			0x04
#define PERF_PMU_CAP_EXTENDED_REGS		0x08
#define PERF_PMU_CAP_EXCLUSIVE			0x10
#define PERF_PMU_CAP_ITRACE			0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40
#define PERF_PMU_CAP_NO_EXCLUDE			0x80
#define PERF_PMU_CAP_AUX_OUTPUT			0x100

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const struct attribute_group	**attr_update;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int __percpu			*pmu_disable_count;
	struct perf_cpu_context __percpu *pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)	(struct pmu *pmu); /* optional */
	void (*pmu_disable)	(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)	(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)	(struct perf_event *event, struct mm_struct *mm); /* optional */
	void (*event_unmapped)	(struct perf_event *event, struct mm_struct *mm); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01	/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02	/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04	/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event; this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always PERF_EF_UPDATE stop an event.
	 * If it calls ->stop() that must deal with already being stopped
	 * without PERF_EF_UPDATE.
	 */
	int  (*add)		(struct perf_event *event, int flags);
	void (*del)		(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)		(struct perf_event *event, int flags);
	void (*stop)		(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)		(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)	(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)	(struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)	(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)	(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)	(struct perf_event_context *ctx,
				 bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t			task_ctx_size;


	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)	(struct perf_event *event, void **pages,
				 int nr_pages, bool overwrite);
				/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)	(void *aux); /* optional */

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as part of the filter sync sequence that is done in the
	 * ->start() callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Check if event can be used for aux_output purposes for
	 * events of this PMU.
	 *
	 * Runs from perf_event_open(). Should return 0 for "no match"
	 * or non-zero for "match".
	 */
	int (*aux_output_match)		(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
};
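
/*
 * Minimal usage sketch, not a real driver: my_pmu and the my_* callbacks are
 * hypothetical.  A PMU driver fills in the mandatory callbacks and registers
 * itself; passing -1 as the type asks for a dynamically allocated type:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * Group scheduling then nests inside an optional transaction:
 * ->start_txn(PERF_PMU_TXN_ADD), one ->add() per group member, and either
 * ->commit_txn() on success or ->cancel_txn() after undoing the ->add()s.
 */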

enum perf_addr_filter_action_t {
	PERF_ADDR_FILTER_ACTION_STOP = 0,
	PERF_ADDR_FILTER_ACTION_START,
	PERF_ADDR_FILTER_ACTION_FILTER,
};

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @path:	object file's path for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size (size==0 means single address trigger)
 * @action:	filter/start/stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct path		path;
	unsigned long		offset;
	unsigned long		size;
	enum perf_addr_filter_action_t	action;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	unsigned int		nr_file_filters;
};

struct perf_addr_filter_range {
	unsigned long		start;
	unsigned long		size;
};

/**
 * enum perf_event_state - the states of an event:
 */
enum perf_event_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and groups caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head	heads[SWEVENT_HLIST_SIZE];
	struct rcu_head		rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08
#define PERF_ATTACH_ITRACE	0x10

struct perf_cgroup;
struct ring_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

#define for_each_sibling_event(sibling, event)			\
	if ((event)->group_leader == (event))			\
		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
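
/*
 * Illustrative only: with ctx->mutex or ctx->lock held (see sibling_list
 * below), summing a whole group could look like this; the iteration is a
 * no-op when @leader is not its own group leader:
 *
 *	u64 sum = local64_read(&leader->count);
 *
 *	for_each_sibling_event(sibling, leader)
 *		sum += local64_read(&sibling->count);
 */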

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		sibling_list;
	struct list_head		active_list;
	/*
	 * Node on the pinned or flexible tree located at the event context;
	 */
	struct rb_node			group_node;
	u64				group_index;
	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int				event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_state		state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 */
	u64				total_time_enabled;
	u64				total_time_running;
	u64				tstamp;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	struct perf_addr_filter_range	*addr_filter_ranges;
	unsigned long			addr_filters_gen;

	/* for aux_output events */
	struct perf_event		*aux_event;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t		orig_overflow_handler;
	struct bpf_prog			*prog;
#endif

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
#endif

	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};


struct perf_event_groups {
	struct rb_root	tree;
	u64		index;
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct perf_event_groups	pinned_groups;
	struct perf_event_groups	flexible_groups;
	struct list_head		event_list;

	struct list_head		pinned_active;
	struct list_head		flexible_active;

	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	/*
	 * Set when nr_events != nr_active, except it is tolerant of events
	 * that need not be active due to scheduling constraints, such as
	 * cgroups.
	 */
	int				rotate_necessary;
	refcount_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	/* cgroup evts */
#endif
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
	struct list_head		cgrp_cpuctx_entry;
#endif

	struct list_head		sched_cb_entry;
	int				sched_cb_usage;

	int				online;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	u64				aux_flags;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

struct bpf_perf_event_data_kern {
	bpf_user_pt_regs_t	*regs;
	struct perf_sample_data *data;
	struct perf_event	*event;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64	time;
	u64	timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ?
					   lockdep_is_held(&ctx->lock) : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);

extern void perf_pmu_resched(struct pmu *pmu);

extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
int perf_event_read_local(struct perf_event *event, u64 *value,
			  u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
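
/*
 * Kernel-internal usage sketch (illustrative; my_overflow_handler is
 * hypothetical): create a pinned, disabled cycle counter on @cpu and enable
 * it once the caller is ready:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *		.disabled	= 1,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *
 *	perf_event_enable(event);
 *	...
 *	perf_event_release_kernel(event);
 */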

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;

	u64				phys_addr;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		    PERF_MEM_S(LVL, NA)   |\
		    PERF_MEM_S(SNOOP, NA) |\
		    PERF_MEM_S(LOCK, NA)  |\
		    PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}
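
/*
 * Illustrative sketch of the common overflow path in a PMU interrupt
 * handler, where @regs are the interrupted registers:
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *
 *	if (perf_event_overflow(event, &data, regs))
 *		event->pmu->stop(event, 0);
 *
 * A non-zero return from perf_event_overflow() means the event is throttled
 * or has hit its limit and should be stopped until ->start() runs again.
 */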

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern int perf_event_output(struct perf_event *event,
			     struct perf_sample_data *data,
			     struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool event_has_any_exclude_flag(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;

	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv ||
	       attr->exclude_guest || attr->exclude_host;
}

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

/*
 * Return 1 for event in sw context, 0 for event in hw context
 */
static inline int in_software_context(struct perf_event *event)
{
	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
}

static inline int is_exclusive_pmu(struct pmu *pmu)
{
	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * When generating a perf sample in-line, instead of from an interrupt /
 * exception, we lack a pt_regs. This is typically used from software events
 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
 *
 * We typically don't need a full set, but (for x86) do require:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - sp for PERF_SAMPLE_CALLCHAIN
 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
 *
 * NOTE: assumes @regs is otherwise already 0 filled; this is important for
 * things like PERF_SAMPLE_REGS_INTR.
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}
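
/*
 * Typical call site sketch: a kernel path counts a software event with
 * perf_sw_event(); the static key keeps the call effectively free when no
 * such event is active, e.g. from the page fault path:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */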

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
		return true;
	return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (perf_sw_migrate_enabled())
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (perf_sw_migrate_enabled() && task->sched_migrated) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
			       bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
				 enum perf_bpf_event_type type,
				 u16 flags);

extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
					     void __user *buffer, size_t *lenp,
					     loff_t *ppos);

int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? \
		 PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = &event->addr_filters;

	if (event->parent)
		ifh = &event->parent->addr_filters;

	return ifh;
}

extern void perf_event_addr_filters_sync(struct perf_event *event);

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				     struct perf_event *event,
				     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
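
/*
 * Illustrative, heavily simplified sketch of emitting a record into the ring
 * buffer: the copies are bracketed by perf_output_begin()/perf_output_end(),
 * and the record shown is deliberately truncated to just its header:
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_THROTTLE,
 *		.size = sizeof(header),
 *	};
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *
 *	perf_output_put(&handle, header);
 *	perf_output_end(&handle);
 */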

#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)	{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) { }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size) { return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle) { return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task) { }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline const struct perf_event *perf_get_event(struct file *file)
{
	return ERR_PTR(-EINVAL);
}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline int perf_event_read_local(struct perf_event *event, u64 *value,
					u64 *enabled, u64 *running)
{
	return -EINVAL;
}
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma) { }

typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
				      bool unregister, const char *sym) { }
static inline void perf_event_bpf_event(struct bpf_prog *prog,
					enum perf_bpf_event_type type,
					u16 flags) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void) { }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

struct perf_pmu_events_attr {
	struct device_attribute	attr;
	u64			id;
	const char		*event_str;
};

struct perf_pmu_events_ht_attr {
	struct device_attribute	attr;
	u64			id;
	const char		*event_str_ht;
	const char		*event_str_noht;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   = _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						\
	.event_str	= _str,						\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
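
/*
 * Illustrative driver-side usage (names are hypothetical): expose a format
 * description and a named event string through the PMU's sysfs groups,
 * which is what pmu::attr_groups ultimately points at:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cycles, my_attr_cycles, "event=0x3c");
 *
 *	static struct attribute *my_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 */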

/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#endif

#endif /* _LINUX_PERF_EVENT_H */