xref: /linux/include/linux/perf_event.h (revision 4a013980)
1 /*
2  * Performance events:
3  *
4  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
7  *
8  * Data type definitions, declarations, prototypes.
9  *
10  *    Started by: Thomas Gleixner and Ingo Molnar
11  *
12  * For licencing details see kernel-base/COPYING
13  */
14 #ifndef _LINUX_PERF_EVENT_H
15 #define _LINUX_PERF_EVENT_H
16 
17 #include <uapi/linux/perf_event.h>
18 #include <uapi/linux/bpf_perf_event.h>
19 
20 /*
21  * Kernel-internal data types and definitions:
22  */
23 
24 #ifdef CONFIG_PERF_EVENTS
25 # include <asm/perf_event.h>
26 # include <asm/local64.h>
27 #endif
28 
29 #define PERF_GUEST_ACTIVE	0x01
30 #define PERF_GUEST_USER	0x02
31 
32 struct perf_guest_info_callbacks {
33 	unsigned int			(*state)(void);
34 	unsigned long			(*get_ip)(void);
35 	unsigned int			(*handle_intel_pt_intr)(void);
36 };
37 
38 #ifdef CONFIG_HAVE_HW_BREAKPOINT
39 #include <linux/rhashtable-types.h>
40 #include <asm/hw_breakpoint.h>
41 #endif
42 
43 #include <linux/list.h>
44 #include <linux/mutex.h>
45 #include <linux/rculist.h>
46 #include <linux/rcupdate.h>
47 #include <linux/spinlock.h>
48 #include <linux/hrtimer.h>
49 #include <linux/fs.h>
50 #include <linux/pid_namespace.h>
51 #include <linux/workqueue.h>
52 #include <linux/ftrace.h>
53 #include <linux/cpu.h>
54 #include <linux/irq_work.h>
55 #include <linux/static_key.h>
56 #include <linux/jump_label_ratelimit.h>
57 #include <linux/atomic.h>
58 #include <linux/sysfs.h>
59 #include <linux/perf_regs.h>
60 #include <linux/cgroup.h>
61 #include <linux/refcount.h>
62 #include <linux/security.h>
63 #include <linux/static_call.h>
64 #include <linux/lockdep.h>
65 #include <asm/local.h>
66 
67 struct perf_callchain_entry {
68 	__u64				nr;
69 	__u64				ip[]; /* /proc/sys/kernel/perf_event_max_stack */
70 };
71 
72 struct perf_callchain_entry_ctx {
73 	struct perf_callchain_entry *entry;
74 	u32			    max_stack;
75 	u32			    nr;
76 	short			    contexts;
77 	bool			    contexts_maxed;
78 };
79 
80 typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
81 				     unsigned long off, unsigned long len);
82 
83 struct perf_raw_frag {
84 	union {
85 		struct perf_raw_frag	*next;
86 		unsigned long		pad;
87 	};
88 	perf_copy_f			copy;
89 	void				*data;
90 	u32				size;
91 } __packed;
92 
93 struct perf_raw_record {
94 	struct perf_raw_frag		frag;
95 	u32				size;
96 };
97 
perf_raw_frag_last(const struct perf_raw_frag * frag)98 static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
99 {
100 	return frag->pad < sizeof(u64);
101 }
102 
103 /*
104  * branch stack layout:
105  *  nr: number of taken branches stored in entries[]
106  *  hw_idx: The low level index of raw branch records
107  *          for the most recent branch.
108  *          -1ULL means invalid/unknown.
109  *
110  * Note that nr can vary from sample to sample
111  * branches (to, from) are stored from most recent
112  * to least recent, i.e., entries[0] contains the most
113  * recent branch.
114  * The entries[] is an abstraction of raw branch records,
115  * which may not be stored in age order in HW, e.g. Intel LBR.
116  * The hw_idx is to expose the low level index of raw
117  * branch record for the most recent branch aka entries[0].
118  * The hw_idx index is between -1 (unknown) and max depth,
119  * which can be retrieved in /sys/devices/cpu/caps/branches.
120  * For the architectures whose raw branch records are
121  * already stored in age order, the hw_idx should be 0.
122  */
123 struct perf_branch_stack {
124 	__u64				nr;
125 	__u64				hw_idx;
126 	struct perf_branch_entry	entries[];
127 };
128 
129 struct task_struct;
130 
131 /*
132  * extra PMU register associated with an event
133  */
134 struct hw_perf_event_extra {
135 	u64		config;	/* register value */
136 	unsigned int	reg;	/* register address or index */
137 	int		alloc;	/* extra register already allocated */
138 	int		idx;	/* index in shared_regs->regs[] */
139 };
140 
141 /**
142  * hw_perf_event::flag values
143  *
144  * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
145  * usage.
146  */
147 #define PERF_EVENT_FLAG_ARCH			0x000fffff
148 #define PERF_EVENT_FLAG_USER_READ_CNT		0x80000000
149 
150 static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
151 
152 /**
153  * struct hw_perf_event - performance event hardware details:
154  */
155 struct hw_perf_event {
156 #ifdef CONFIG_PERF_EVENTS
157 	union {
158 		struct { /* hardware */
159 			u64		config;
160 			u64		last_tag;
161 			unsigned long	config_base;
162 			unsigned long	event_base;
163 			int		event_base_rdpmc;
164 			int		idx;
165 			int		last_cpu;
166 			int		flags;
167 
168 			struct hw_perf_event_extra extra_reg;
169 			struct hw_perf_event_extra branch_reg;
170 		};
171 		struct { /* software */
172 			struct hrtimer	hrtimer;
173 		};
174 		struct { /* tracepoint */
175 			/* for tp_event->class */
176 			struct list_head	tp_list;
177 		};
178 		struct { /* amd_power */
179 			u64	pwr_acc;
180 			u64	ptsc;
181 		};
182 #ifdef CONFIG_HAVE_HW_BREAKPOINT
183 		struct { /* breakpoint */
184 			/*
185 			 * Crufty hack to avoid the chicken and egg
186 			 * problem hw_breakpoint has with context
187 			 * creation and event initalization.
188 			 */
189 			struct arch_hw_breakpoint	info;
190 			struct rhlist_head		bp_list;
191 		};
192 #endif
193 		struct { /* amd_iommu */
194 			u8	iommu_bank;
195 			u8	iommu_cntr;
196 			u16	padding;
197 			u64	conf;
198 			u64	conf1;
199 		};
200 	};
201 	/*
202 	 * If the event is a per task event, this will point to the task in
203 	 * question. See the comment in perf_event_alloc().
204 	 */
205 	struct task_struct		*target;
206 
207 	/*
208 	 * PMU would store hardware filter configuration
209 	 * here.
210 	 */
211 	void				*addr_filters;
212 
213 	/* Last sync'ed generation of filters */
214 	unsigned long			addr_filters_gen;
215 
216 /*
217  * hw_perf_event::state flags; used to track the PERF_EF_* state.
218  */
219 #define PERF_HES_STOPPED	0x01 /* the counter is stopped */
220 #define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
221 #define PERF_HES_ARCH		0x04
222 
223 	int				state;
224 
225 	/*
226 	 * The last observed hardware counter value, updated with a
227 	 * local64_cmpxchg() such that pmu::read() can be called nested.
228 	 */
229 	local64_t			prev_count;
230 
231 	/*
232 	 * The period to start the next sample with.
233 	 */
234 	u64				sample_period;
235 
236 	union {
237 		struct { /* Sampling */
238 			/*
239 			 * The period we started this sample with.
240 			 */
241 			u64				last_period;
242 
243 			/*
244 			 * However much is left of the current period;
245 			 * note that this is a full 64bit value and
246 			 * allows for generation of periods longer
247 			 * than hardware might allow.
248 			 */
249 			local64_t			period_left;
250 		};
251 		struct { /* Topdown events counting for context switch */
252 			u64				saved_metric;
253 			u64				saved_slots;
254 		};
255 	};
256 
257 	/*
258 	 * State for throttling the event, see __perf_event_overflow() and
259 	 * perf_adjust_freq_unthr_context().
260 	 */
261 	u64                             interrupts_seq;
262 	u64				interrupts;
263 
264 	/*
265 	 * State for freq target events, see __perf_event_overflow() and
266 	 * perf_adjust_freq_unthr_context().
267 	 */
268 	u64				freq_time_stamp;
269 	u64				freq_count_stamp;
270 #endif
271 };
272 
273 struct perf_event;
274 struct perf_event_pmu_context;
275 
276 /*
277  * Common implementation detail of pmu::{start,commit,cancel}_txn
278  */
279 #define PERF_PMU_TXN_ADD  0x1		/* txn to add/schedule event on PMU */
280 #define PERF_PMU_TXN_READ 0x2		/* txn to read event group from PMU */
281 
282 /**
283  * pmu::capabilities flags
284  */
285 #define PERF_PMU_CAP_NO_INTERRUPT		0x0001
286 #define PERF_PMU_CAP_NO_NMI			0x0002
287 #define PERF_PMU_CAP_AUX_NO_SG			0x0004
288 #define PERF_PMU_CAP_EXTENDED_REGS		0x0008
289 #define PERF_PMU_CAP_EXCLUSIVE			0x0010
290 #define PERF_PMU_CAP_ITRACE			0x0020
291 #define PERF_PMU_CAP_NO_EXCLUDE			0x0040
292 #define PERF_PMU_CAP_AUX_OUTPUT			0x0080
293 #define PERF_PMU_CAP_EXTENDED_HW_TYPE		0x0100
294 
295 struct perf_output_handle;
296 
297 #define PMU_NULL_DEV	((void *)(~0UL))
298 
299 /**
300  * struct pmu - generic performance monitoring unit
301  */
302 struct pmu {
303 	struct list_head		entry;
304 
305 	struct module			*module;
306 	struct device			*dev;
307 	struct device			*parent;
308 	const struct attribute_group	**attr_groups;
309 	const struct attribute_group	**attr_update;
310 	const char			*name;
311 	int				type;
312 
313 	/*
314 	 * various common per-pmu feature flags
315 	 */
316 	int				capabilities;
317 
318 	int __percpu			*pmu_disable_count;
319 	struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
320 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
321 	int				task_ctx_nr;
322 	int				hrtimer_interval_ms;
323 
324 	/* number of address filters this PMU can do */
325 	unsigned int			nr_addr_filters;
326 
327 	/*
328 	 * Fully disable/enable this PMU, can be used to protect from the PMI
329 	 * as well as for lazy/batch writing of the MSRs.
330 	 */
331 	void (*pmu_enable)		(struct pmu *pmu); /* optional */
332 	void (*pmu_disable)		(struct pmu *pmu); /* optional */
333 
334 	/*
335 	 * Try and initialize the event for this PMU.
336 	 *
337 	 * Returns:
338 	 *  -ENOENT	-- @event is not for this PMU
339 	 *
340 	 *  -ENODEV	-- @event is for this PMU but PMU not present
341 	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
342 	 *  -EINVAL	-- @event is for this PMU but @event is not valid
343 	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
344 	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
345 	 *
346 	 *  0		-- @event is for this PMU and valid
347 	 *
348 	 * Other error return values are allowed.
349 	 */
350 	int (*event_init)		(struct perf_event *event);
351 
352 	/*
353 	 * Notification that the event was mapped or unmapped.  Called
354 	 * in the context of the mapping task.
355 	 */
356 	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
357 	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
358 
359 	/*
360 	 * Flags for ->add()/->del()/ ->start()/->stop(). There are
361 	 * matching hw_perf_event::state flags.
362 	 */
363 #define PERF_EF_START	0x01		/* start the counter when adding    */
364 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
365 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
366 
367 	/*
368 	 * Adds/Removes a counter to/from the PMU, can be done inside a
369 	 * transaction, see the ->*_txn() methods.
370 	 *
371 	 * The add/del callbacks will reserve all hardware resources required
372 	 * to service the event, this includes any counter constraint
373 	 * scheduling etc.
374 	 *
375 	 * Called with IRQs disabled and the PMU disabled on the CPU the event
376 	 * is on.
377 	 *
378 	 * ->add() called without PERF_EF_START should result in the same state
379 	 *  as ->add() followed by ->stop().
380 	 *
381 	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
382 	 *  ->stop() that must deal with already being stopped without
383 	 *  PERF_EF_UPDATE.
384 	 */
385 	int  (*add)			(struct perf_event *event, int flags);
386 	void (*del)			(struct perf_event *event, int flags);
387 
388 	/*
389 	 * Starts/Stops a counter present on the PMU.
390 	 *
391 	 * The PMI handler should stop the counter when perf_event_overflow()
392 	 * returns !0. ->start() will be used to continue.
393 	 *
394 	 * Also used to change the sample period.
395 	 *
396 	 * Called with IRQs disabled and the PMU disabled on the CPU the event
397 	 * is on -- will be called from NMI context with the PMU generates
398 	 * NMIs.
399 	 *
400 	 * ->stop() with PERF_EF_UPDATE will read the counter and update
401 	 *  period/count values like ->read() would.
402 	 *
403 	 * ->start() with PERF_EF_RELOAD will reprogram the counter
404 	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
405 	 */
406 	void (*start)			(struct perf_event *event, int flags);
407 	void (*stop)			(struct perf_event *event, int flags);
408 
409 	/*
410 	 * Updates the counter value of the event.
411 	 *
412 	 * For sampling capable PMUs this will also update the software period
413 	 * hw_perf_event::period_left field.
414 	 */
415 	void (*read)			(struct perf_event *event);
416 
417 	/*
418 	 * Group events scheduling is treated as a transaction, add
419 	 * group events as a whole and perform one schedulability test.
420 	 * If the test fails, roll back the whole group
421 	 *
422 	 * Start the transaction, after this ->add() doesn't need to
423 	 * do schedulability tests.
424 	 *
425 	 * Optional.
426 	 */
427 	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
428 	/*
429 	 * If ->start_txn() disabled the ->add() schedulability test
430 	 * then ->commit_txn() is required to perform one. On success
431 	 * the transaction is closed. On error the transaction is kept
432 	 * open until ->cancel_txn() is called.
433 	 *
434 	 * Optional.
435 	 */
436 	int  (*commit_txn)		(struct pmu *pmu);
437 	/*
438 	 * Will cancel the transaction, assumes ->del() is called
439 	 * for each successful ->add() during the transaction.
440 	 *
441 	 * Optional.
442 	 */
443 	void (*cancel_txn)		(struct pmu *pmu);
444 
445 	/*
446 	 * Will return the value for perf_event_mmap_page::index for this event,
447 	 * if no implementation is provided it will default to 0 (see
448 	 * perf_event_idx_default).
449 	 */
450 	int (*event_idx)		(struct perf_event *event); /*optional */
451 
452 	/*
453 	 * context-switches callback
454 	 */
455 	void (*sched_task)		(struct perf_event_pmu_context *pmu_ctx,
456 					bool sched_in);
457 
458 	/*
459 	 * Kmem cache of PMU specific data
460 	 */
461 	struct kmem_cache		*task_ctx_cache;
462 
463 	/*
464 	 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
465 	 * can be synchronized using this function. See Intel LBR callstack support
466 	 * implementation and Perf core context switch handling callbacks for usage
467 	 * examples.
468 	 */
469 	void (*swap_task_ctx)		(struct perf_event_pmu_context *prev_epc,
470 					 struct perf_event_pmu_context *next_epc);
471 					/* optional */
472 
473 	/*
474 	 * Set up pmu-private data structures for an AUX area
475 	 */
476 	void *(*setup_aux)		(struct perf_event *event, void **pages,
477 					 int nr_pages, bool overwrite);
478 					/* optional */
479 
480 	/*
481 	 * Free pmu-private AUX data structures
482 	 */
483 	void (*free_aux)		(void *aux); /* optional */
484 
485 	/*
486 	 * Take a snapshot of the AUX buffer without touching the event
487 	 * state, so that preempting ->start()/->stop() callbacks does
488 	 * not interfere with their logic. Called in PMI context.
489 	 *
490 	 * Returns the size of AUX data copied to the output handle.
491 	 *
492 	 * Optional.
493 	 */
494 	long (*snapshot_aux)		(struct perf_event *event,
495 					 struct perf_output_handle *handle,
496 					 unsigned long size);
497 
498 	/*
499 	 * Validate address range filters: make sure the HW supports the
500 	 * requested configuration and number of filters; return 0 if the
501 	 * supplied filters are valid, -errno otherwise.
502 	 *
503 	 * Runs in the context of the ioctl()ing process and is not serialized
504 	 * with the rest of the PMU callbacks.
505 	 */
506 	int (*addr_filters_validate)	(struct list_head *filters);
507 					/* optional */
508 
509 	/*
510 	 * Synchronize address range filter configuration:
511 	 * translate hw-agnostic filters into hardware configuration in
512 	 * event::hw::addr_filters.
513 	 *
514 	 * Runs as a part of filter sync sequence that is done in ->start()
515 	 * callback by calling perf_event_addr_filters_sync().
516 	 *
517 	 * May (and should) traverse event::addr_filters::list, for which its
518 	 * caller provides necessary serialization.
519 	 */
520 	void (*addr_filters_sync)	(struct perf_event *event);
521 					/* optional */
522 
523 	/*
524 	 * Check if event can be used for aux_output purposes for
525 	 * events of this PMU.
526 	 *
527 	 * Runs from perf_event_open(). Should return 0 for "no match"
528 	 * or non-zero for "match".
529 	 */
530 	int (*aux_output_match)		(struct perf_event *event);
531 					/* optional */
532 
533 	/*
534 	 * Skip programming this PMU on the given CPU. Typically needed for
535 	 * big.LITTLE things.
536 	 */
537 	bool (*filter)			(struct pmu *pmu, int cpu); /* optional */
538 
539 	/*
540 	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
541 	 */
542 	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
543 };
544 
545 enum perf_addr_filter_action_t {
546 	PERF_ADDR_FILTER_ACTION_STOP = 0,
547 	PERF_ADDR_FILTER_ACTION_START,
548 	PERF_ADDR_FILTER_ACTION_FILTER,
549 };
550 
551 /**
552  * struct perf_addr_filter - address range filter definition
553  * @entry:	event's filter list linkage
554  * @path:	object file's path for file-based filters
555  * @offset:	filter range offset
556  * @size:	filter range size (size==0 means single address trigger)
557  * @action:	filter/start/stop
558  *
559  * This is a hardware-agnostic filter configuration as specified by the user.
560  */
561 struct perf_addr_filter {
562 	struct list_head	entry;
563 	struct path		path;
564 	unsigned long		offset;
565 	unsigned long		size;
566 	enum perf_addr_filter_action_t	action;
567 };
568 
569 /**
570  * struct perf_addr_filters_head - container for address range filters
571  * @list:	list of filters for this event
572  * @lock:	spinlock that serializes accesses to the @list and event's
573  *		(and its children's) filter generations.
574  * @nr_file_filters:	number of file-based filters
575  *
576  * A child event will use parent's @list (and therefore @lock), so they are
577  * bundled together; see perf_event_addr_filters().
578  */
579 struct perf_addr_filters_head {
580 	struct list_head	list;
581 	raw_spinlock_t		lock;
582 	unsigned int		nr_file_filters;
583 };
584 
585 struct perf_addr_filter_range {
586 	unsigned long		start;
587 	unsigned long		size;
588 };
589 
590 /**
591  * enum perf_event_state - the states of an event:
592  */
593 enum perf_event_state {
594 	PERF_EVENT_STATE_DEAD		= -4,
595 	PERF_EVENT_STATE_EXIT		= -3,
596 	PERF_EVENT_STATE_ERROR		= -2,
597 	PERF_EVENT_STATE_OFF		= -1,
598 	PERF_EVENT_STATE_INACTIVE	=  0,
599 	PERF_EVENT_STATE_ACTIVE		=  1,
600 };
601 
602 struct file;
603 struct perf_sample_data;
604 
605 typedef void (*perf_overflow_handler_t)(struct perf_event *,
606 					struct perf_sample_data *,
607 					struct pt_regs *regs);
608 
609 /*
610  * Event capabilities. For event_caps and groups caps.
611  *
612  * PERF_EV_CAP_SOFTWARE: Is a software event.
613  * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
614  * from any CPU in the package where it is active.
615  * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
616  * cannot be a group leader. If an event with this flag is detached from the
617  * group it is scheduled out and moved into an unrecoverable ERROR state.
618  */
619 #define PERF_EV_CAP_SOFTWARE		BIT(0)
620 #define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
621 #define PERF_EV_CAP_SIBLING		BIT(2)
622 
623 #define SWEVENT_HLIST_BITS		8
624 #define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
625 
626 struct swevent_hlist {
627 	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
628 	struct rcu_head			rcu_head;
629 };
630 
631 #define PERF_ATTACH_CONTEXT	0x01
632 #define PERF_ATTACH_GROUP	0x02
633 #define PERF_ATTACH_TASK	0x04
634 #define PERF_ATTACH_TASK_DATA	0x08
635 #define PERF_ATTACH_ITRACE	0x10
636 #define PERF_ATTACH_SCHED_CB	0x20
637 #define PERF_ATTACH_CHILD	0x40
638 
639 struct bpf_prog;
640 struct perf_cgroup;
641 struct perf_buffer;
642 
643 struct pmu_event_list {
644 	raw_spinlock_t		lock;
645 	struct list_head	list;
646 };
647 
648 /*
649  * event->sibling_list is modified whole holding both ctx->lock and ctx->mutex
650  * as such iteration must hold either lock. However, since ctx->lock is an IRQ
651  * safe lock, and is only held by the CPU doing the modification, having IRQs
652  * disabled is sufficient since it will hold-off the IPIs.
653  */
654 #ifdef CONFIG_PROVE_LOCKING
655 #define lockdep_assert_event_ctx(event)				\
656 	WARN_ON_ONCE(__lockdep_enabled &&			\
657 		     (this_cpu_read(hardirqs_enabled) &&	\
658 		      lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
659 #else
660 #define lockdep_assert_event_ctx(event)
661 #endif
662 
663 #define for_each_sibling_event(sibling, event)			\
664 	lockdep_assert_event_ctx(event);			\
665 	if ((event)->group_leader == (event))			\
666 		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
667 
668 /**
669  * struct perf_event - performance event kernel representation:
670  */
671 struct perf_event {
672 #ifdef CONFIG_PERF_EVENTS
673 	/*
674 	 * entry onto perf_event_context::event_list;
675 	 *   modifications require ctx->lock
676 	 *   RCU safe iterations.
677 	 */
678 	struct list_head		event_entry;
679 
680 	/*
681 	 * Locked for modification by both ctx->mutex and ctx->lock; holding
682 	 * either sufficies for read.
683 	 */
684 	struct list_head		sibling_list;
685 	struct list_head		active_list;
686 	/*
687 	 * Node on the pinned or flexible tree located at the event context;
688 	 */
689 	struct rb_node			group_node;
690 	u64				group_index;
691 	/*
692 	 * We need storage to track the entries in perf_pmu_migrate_context; we
693 	 * cannot use the event_entry because of RCU and we want to keep the
694 	 * group in tact which avoids us using the other two entries.
695 	 */
696 	struct list_head		migrate_entry;
697 
698 	struct hlist_node		hlist_entry;
699 	struct list_head		active_entry;
700 	int				nr_siblings;
701 
702 	/* Not serialized. Only written during event initialization. */
703 	int				event_caps;
704 	/* The cumulative AND of all event_caps for events in this group. */
705 	int				group_caps;
706 
707 	unsigned int			group_generation;
708 	struct perf_event		*group_leader;
709 	/*
710 	 * event->pmu will always point to pmu in which this event belongs.
711 	 * Whereas event->pmu_ctx->pmu may point to other pmu when group of
712 	 * different pmu events is created.
713 	 */
714 	struct pmu			*pmu;
715 	void				*pmu_private;
716 
717 	enum perf_event_state		state;
718 	unsigned int			attach_state;
719 	local64_t			count;
720 	atomic64_t			child_count;
721 
722 	/*
723 	 * These are the total time in nanoseconds that the event
724 	 * has been enabled (i.e. eligible to run, and the task has
725 	 * been scheduled in, if this is a per-task event)
726 	 * and running (scheduled onto the CPU), respectively.
727 	 */
728 	u64				total_time_enabled;
729 	u64				total_time_running;
730 	u64				tstamp;
731 
732 	struct perf_event_attr		attr;
733 	u16				header_size;
734 	u16				id_header_size;
735 	u16				read_size;
736 	struct hw_perf_event		hw;
737 
738 	struct perf_event_context	*ctx;
739 	/*
740 	 * event->pmu_ctx points to perf_event_pmu_context in which the event
741 	 * is added. This pmu_ctx can be of other pmu for sw event when that
742 	 * sw event is part of a group which also contains non-sw events.
743 	 */
744 	struct perf_event_pmu_context	*pmu_ctx;
745 	atomic_long_t			refcount;
746 
747 	/*
748 	 * These accumulate total time (in nanoseconds) that children
749 	 * events have been enabled and running, respectively.
750 	 */
751 	atomic64_t			child_total_time_enabled;
752 	atomic64_t			child_total_time_running;
753 
754 	/*
755 	 * Protect attach/detach and child_list:
756 	 */
757 	struct mutex			child_mutex;
758 	struct list_head		child_list;
759 	struct perf_event		*parent;
760 
761 	int				oncpu;
762 	int				cpu;
763 
764 	struct list_head		owner_entry;
765 	struct task_struct		*owner;
766 
767 	/* mmap bits */
768 	struct mutex			mmap_mutex;
769 	atomic_t			mmap_count;
770 
771 	struct perf_buffer		*rb;
772 	struct list_head		rb_entry;
773 	unsigned long			rcu_batches;
774 	int				rcu_pending;
775 
776 	/* poll related */
777 	wait_queue_head_t		waitq;
778 	struct fasync_struct		*fasync;
779 
780 	/* delayed work for NMIs and such */
781 	unsigned int			pending_wakeup;
782 	unsigned int			pending_kill;
783 	unsigned int			pending_disable;
784 	unsigned int			pending_sigtrap;
785 	unsigned long			pending_addr;	/* SIGTRAP */
786 	struct irq_work			pending_irq;
787 	struct callback_head		pending_task;
788 	unsigned int			pending_work;
789 
790 	atomic_t			event_limit;
791 
792 	/* address range filters */
793 	struct perf_addr_filters_head	addr_filters;
794 	/* vma address array for file-based filders */
795 	struct perf_addr_filter_range	*addr_filter_ranges;
796 	unsigned long			addr_filters_gen;
797 
798 	/* for aux_output events */
799 	struct perf_event		*aux_event;
800 
801 	void (*destroy)(struct perf_event *);
802 	struct rcu_head			rcu_head;
803 
804 	struct pid_namespace		*ns;
805 	u64				id;
806 
807 	atomic64_t			lost_samples;
808 
809 	u64				(*clock)(void);
810 	perf_overflow_handler_t		overflow_handler;
811 	void				*overflow_handler_context;
812 	struct bpf_prog			*prog;
813 	u64				bpf_cookie;
814 
815 #ifdef CONFIG_EVENT_TRACING
816 	struct trace_event_call		*tp_event;
817 	struct event_filter		*filter;
818 #ifdef CONFIG_FUNCTION_TRACER
819 	struct ftrace_ops               ftrace_ops;
820 #endif
821 #endif
822 
823 #ifdef CONFIG_CGROUP_PERF
824 	struct perf_cgroup		*cgrp; /* cgroup event is attach to */
825 #endif
826 
827 #ifdef CONFIG_SECURITY
828 	void *security;
829 #endif
830 	struct list_head		sb_list;
831 
832 	/*
833 	 * Certain events gets forwarded to another pmu internally by over-
834 	 * writing kernel copy of event->attr.type without user being aware
835 	 * of it. event->orig_type contains original 'type' requested by
836 	 * user.
837 	 */
838 	__u32				orig_type;
839 #endif /* CONFIG_PERF_EVENTS */
840 };
841 
842 /*
843  *           ,-----------------------[1:n]------------------------.
844  *           V                                                    V
845  * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
846  *                                        |                       |
847  *                                        `--[n:1]-> pmu <-[1:n]--'
848  *
849  *
850  * struct perf_event_pmu_context  lifetime is refcount based and RCU freed
851  * (similar to perf_event_context). Locking is as if it were a member of
852  * perf_event_context; specifically:
853  *
854  *   modification, both: ctx->mutex && ctx->lock
855  *   reading, either:    ctx->mutex || ctx->lock
856  *
857  * There is one exception to this; namely put_pmu_ctx() isn't always called
858  * with ctx->mutex held; this means that as long as we can guarantee the epc
859  * has events the above rules hold.
860  *
861  * Specificially, sys_perf_event_open()'s group_leader case depends on
862  * ctx->mutex pinning the configuration. Since we hold a reference on
863  * group_leader (through the filedesc) it can't go away, therefore it's
864  * associated pmu_ctx must exist and cannot change due to ctx->mutex.
865  *
866  * perf_event holds a refcount on perf_event_context
867  * perf_event holds a refcount on perf_event_pmu_context
868  */
869 struct perf_event_pmu_context {
870 	struct pmu			*pmu;
871 	struct perf_event_context       *ctx;
872 
873 	struct list_head		pmu_ctx_entry;
874 
875 	struct list_head		pinned_active;
876 	struct list_head		flexible_active;
877 
878 	/* Used to avoid freeing per-cpu perf_event_pmu_context */
879 	unsigned int			embedded : 1;
880 
881 	unsigned int			nr_events;
882 	unsigned int			nr_cgroups;
883 	unsigned int			nr_freq;
884 
885 	atomic_t			refcount; /* event <-> epc */
886 	struct rcu_head			rcu_head;
887 
888 	void				*task_ctx_data; /* pmu specific data */
889 	/*
890 	 * Set when one or more (plausibly active) event can't be scheduled
891 	 * due to pmu overcommit or pmu constraints, except tolerant to
892 	 * events not necessary to be active due to scheduling constraints,
893 	 * such as cgroups.
894 	 */
895 	int				rotate_necessary;
896 };
897 
perf_pmu_ctx_is_active(struct perf_event_pmu_context * epc)898 static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc)
899 {
900 	return !list_empty(&epc->flexible_active) || !list_empty(&epc->pinned_active);
901 }
902 
903 struct perf_event_groups {
904 	struct rb_root	tree;
905 	u64		index;
906 };
907 
908 
909 /**
910  * struct perf_event_context - event context structure
911  *
912  * Used as a container for task events and CPU events as well:
913  */
914 struct perf_event_context {
915 	/*
916 	 * Protect the states of the events in the list,
917 	 * nr_active, and the list:
918 	 */
919 	raw_spinlock_t			lock;
920 	/*
921 	 * Protect the list of events.  Locking either mutex or lock
922 	 * is sufficient to ensure the list doesn't change; to change
923 	 * the list you need to lock both the mutex and the spinlock.
924 	 */
925 	struct mutex			mutex;
926 
927 	struct list_head		pmu_ctx_list;
928 	struct perf_event_groups	pinned_groups;
929 	struct perf_event_groups	flexible_groups;
930 	struct list_head		event_list;
931 
932 	int				nr_events;
933 	int				nr_user;
934 	int				is_active;
935 
936 	int				nr_task_data;
937 	int				nr_stat;
938 	int				nr_freq;
939 	int				rotate_disable;
940 
941 	refcount_t			refcount; /* event <-> ctx */
942 	struct task_struct		*task;
943 
944 	/*
945 	 * Context clock, runs when context enabled.
946 	 */
947 	u64				time;
948 	u64				timestamp;
949 	u64				timeoffset;
950 
951 	/*
952 	 * These fields let us detect when two contexts have both
953 	 * been cloned (inherited) from a common ancestor.
954 	 */
955 	struct perf_event_context	*parent_ctx;
956 	u64				parent_gen;
957 	u64				generation;
958 	int				pin_count;
959 #ifdef CONFIG_CGROUP_PERF
960 	int				nr_cgroups;	 /* cgroup evts */
961 #endif
962 	struct rcu_head			rcu_head;
963 
964 	/*
965 	 * Sum (event->pending_sigtrap + event->pending_work)
966 	 *
967 	 * The SIGTRAP is targeted at ctx->task, as such it won't do changing
968 	 * that until the signal is delivered.
969 	 */
970 	local_t				nr_pending;
971 };
972 
973 /*
974  * Number of contexts where an event can trigger:
975  *	task, softirq, hardirq, nmi.
976  */
977 #define PERF_NR_CONTEXTS	4
978 
979 struct perf_cpu_pmu_context {
980 	struct perf_event_pmu_context	epc;
981 	struct perf_event_pmu_context	*task_epc;
982 
983 	struct list_head		sched_cb_entry;
984 	int				sched_cb_usage;
985 
986 	int				active_oncpu;
987 	int				exclusive;
988 
989 	raw_spinlock_t			hrtimer_lock;
990 	struct hrtimer			hrtimer;
991 	ktime_t				hrtimer_interval;
992 	unsigned int			hrtimer_active;
993 };
994 
995 /**
996  * struct perf_event_cpu_context - per cpu event context structure
997  */
998 struct perf_cpu_context {
999 	struct perf_event_context	ctx;
1000 	struct perf_event_context	*task_ctx;
1001 	int				online;
1002 
1003 #ifdef CONFIG_CGROUP_PERF
1004 	struct perf_cgroup		*cgrp;
1005 #endif
1006 
1007 	/*
1008 	 * Per-CPU storage for iterators used in visit_groups_merge. The default
1009 	 * storage is of size 2 to hold the CPU and any CPU event iterators.
1010 	 */
1011 	int				heap_size;
1012 	struct perf_event		**heap;
1013 	struct perf_event		*heap_default[2];
1014 };
1015 
1016 struct perf_output_handle {
1017 	struct perf_event		*event;
1018 	struct perf_buffer		*rb;
1019 	unsigned long			wakeup;
1020 	unsigned long			size;
1021 	u64				aux_flags;
1022 	union {
1023 		void			*addr;
1024 		unsigned long		head;
1025 	};
1026 	int				page;
1027 };
1028 
1029 struct bpf_perf_event_data_kern {
1030 	bpf_user_pt_regs_t *regs;
1031 	struct perf_sample_data *data;
1032 	struct perf_event *event;
1033 };
1034 
1035 #ifdef CONFIG_CGROUP_PERF
1036 
1037 /*
1038  * perf_cgroup_info keeps track of time_enabled for a cgroup.
1039  * This is a per-cpu dynamically allocated data structure.
1040  */
1041 struct perf_cgroup_info {
1042 	u64				time;
1043 	u64				timestamp;
1044 	u64				timeoffset;
1045 	int				active;
1046 };
1047 
1048 struct perf_cgroup {
1049 	struct cgroup_subsys_state	css;
1050 	struct perf_cgroup_info	__percpu *info;
1051 };
1052 
1053 /*
1054  * Must ensure cgroup is pinned (css_get) before calling
1055  * this function. In other words, we cannot call this function
1056  * if there is no cgroup event for the current CPU context.
1057  */
1058 static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct * task,struct perf_event_context * ctx)1059 perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
1060 {
1061 	return container_of(task_css_check(task, perf_event_cgrp_id,
1062 					   ctx ? lockdep_is_held(&ctx->lock)
1063 					       : true),
1064 			    struct perf_cgroup, css);
1065 }
1066 #endif /* CONFIG_CGROUP_PERF */
1067 
1068 #ifdef CONFIG_PERF_EVENTS
1069 
1070 extern struct perf_event_context *perf_cpu_task_ctx(void);
1071 
1072 extern void *perf_aux_output_begin(struct perf_output_handle *handle,
1073 				   struct perf_event *event);
1074 extern void perf_aux_output_end(struct perf_output_handle *handle,
1075 				unsigned long size);
1076 extern int perf_aux_output_skip(struct perf_output_handle *handle,
1077 				unsigned long size);
1078 extern void *perf_get_aux(struct perf_output_handle *handle);
1079 extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
1080 extern void perf_event_itrace_started(struct perf_event *event);
1081 
1082 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
1083 extern void perf_pmu_unregister(struct pmu *pmu);
1084 
1085 extern void __perf_event_task_sched_in(struct task_struct *prev,
1086 				       struct task_struct *task);
1087 extern void __perf_event_task_sched_out(struct task_struct *prev,
1088 					struct task_struct *next);
1089 extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
1090 extern void perf_event_exit_task(struct task_struct *child);
1091 extern void perf_event_free_task(struct task_struct *task);
1092 extern void perf_event_delayed_put(struct task_struct *task);
1093 extern struct file *perf_event_get(unsigned int fd);
1094 extern const struct perf_event *perf_get_event(struct file *file);
1095 extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
1096 extern void perf_event_print_debug(void);
1097 extern void perf_pmu_disable(struct pmu *pmu);
1098 extern void perf_pmu_enable(struct pmu *pmu);
1099 extern void perf_sched_cb_dec(struct pmu *pmu);
1100 extern void perf_sched_cb_inc(struct pmu *pmu);
1101 extern int perf_event_task_disable(void);
1102 extern int perf_event_task_enable(void);
1103 
1104 extern void perf_pmu_resched(struct pmu *pmu);
1105 
1106 extern int perf_event_refresh(struct perf_event *event, int refresh);
1107 extern void perf_event_update_userpage(struct perf_event *event);
1108 extern int perf_event_release_kernel(struct perf_event *event);
1109 extern struct perf_event *
1110 perf_event_create_kernel_counter(struct perf_event_attr *attr,
1111 				int cpu,
1112 				struct task_struct *task,
1113 				perf_overflow_handler_t callback,
1114 				void *context);
1115 extern void perf_pmu_migrate_context(struct pmu *pmu,
1116 				int src_cpu, int dst_cpu);
1117 int perf_event_read_local(struct perf_event *event, u64 *value,
1118 			  u64 *enabled, u64 *running);
1119 extern u64 perf_event_read_value(struct perf_event *event,
1120 				 u64 *enabled, u64 *running);
1121 
1122 extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
1123 
branch_sample_no_flags(const struct perf_event * event)1124 static inline bool branch_sample_no_flags(const struct perf_event *event)
1125 {
1126 	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
1127 }
1128 
branch_sample_no_cycles(const struct perf_event * event)1129 static inline bool branch_sample_no_cycles(const struct perf_event *event)
1130 {
1131 	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
1132 }
1133 
branch_sample_type(const struct perf_event * event)1134 static inline bool branch_sample_type(const struct perf_event *event)
1135 {
1136 	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
1137 }
1138 
branch_sample_hw_index(const struct perf_event * event)1139 static inline bool branch_sample_hw_index(const struct perf_event *event)
1140 {
1141 	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
1142 }
1143 
branch_sample_priv(const struct perf_event * event)1144 static inline bool branch_sample_priv(const struct perf_event *event)
1145 {
1146 	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
1147 }
1148 
branch_sample_counters(const struct perf_event * event)1149 static inline bool branch_sample_counters(const struct perf_event *event)
1150 {
1151 	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS;
1152 }
1153 
branch_sample_call_stack(const struct perf_event * event)1154 static inline bool branch_sample_call_stack(const struct perf_event *event)
1155 {
1156 	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
1157 }
1158 
1159 struct perf_sample_data {
1160 	/*
1161 	 * Fields set by perf_sample_data_init() unconditionally,
1162 	 * group so as to minimize the cachelines touched.
1163 	 */
1164 	u64				sample_flags;
1165 	u64				period;
1166 	u64				dyn_size;
1167 
1168 	/*
1169 	 * Fields commonly set by __perf_event_header__init_id(),
1170 	 * group so as to minimize the cachelines touched.
1171 	 */
1172 	u64				type;
1173 	struct {
1174 		u32	pid;
1175 		u32	tid;
1176 	}				tid_entry;
1177 	u64				time;
1178 	u64				id;
1179 	struct {
1180 		u32	cpu;
1181 		u32	reserved;
1182 	}				cpu_entry;
1183 
1184 	/*
1185 	 * The other fields, optionally {set,used} by
1186 	 * perf_{prepare,output}_sample().
1187 	 */
1188 	u64				ip;
1189 	struct perf_callchain_entry	*callchain;
1190 	struct perf_raw_record		*raw;
1191 	struct perf_branch_stack	*br_stack;
1192 	u64				*br_stack_cntr;
1193 	union perf_sample_weight	weight;
1194 	union  perf_mem_data_src	data_src;
1195 	u64				txn;
1196 
1197 	struct perf_regs		regs_user;
1198 	struct perf_regs		regs_intr;
1199 	u64				stack_user_size;
1200 
1201 	u64				stream_id;
1202 	u64				cgroup;
1203 	u64				addr;
1204 	u64				phys_addr;
1205 	u64				data_page_size;
1206 	u64				code_page_size;
1207 	u64				aux_size;
1208 } ____cacheline_aligned;
1209 
1210 /* default value for data source */
1211 #define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
1212 		    PERF_MEM_S(LVL, NA)   |\
1213 		    PERF_MEM_S(SNOOP, NA) |\
1214 		    PERF_MEM_S(LOCK, NA)  |\
1215 		    PERF_MEM_S(TLB, NA)   |\
1216 		    PERF_MEM_S(LVLNUM, NA))
1217 
perf_sample_data_init(struct perf_sample_data * data,u64 addr,u64 period)1218 static inline void perf_sample_data_init(struct perf_sample_data *data,
1219 					 u64 addr, u64 period)
1220 {
1221 	/* remaining struct members initialized in perf_prepare_sample() */
1222 	data->sample_flags = PERF_SAMPLE_PERIOD;
1223 	data->period = period;
1224 	data->dyn_size = 0;
1225 
1226 	if (addr) {
1227 		data->addr = addr;
1228 		data->sample_flags |= PERF_SAMPLE_ADDR;
1229 	}
1230 }
1231 
perf_sample_save_callchain(struct perf_sample_data * data,struct perf_event * event,struct pt_regs * regs)1232 static inline void perf_sample_save_callchain(struct perf_sample_data *data,
1233 					      struct perf_event *event,
1234 					      struct pt_regs *regs)
1235 {
1236 	int size = 1;
1237 
1238 	data->callchain = perf_callchain(event, regs);
1239 	size += data->callchain->nr;
1240 
1241 	data->dyn_size += size * sizeof(u64);
1242 	data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
1243 }
1244 
perf_sample_save_raw_data(struct perf_sample_data * data,struct perf_raw_record * raw)1245 static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
1246 					     struct perf_raw_record *raw)
1247 {
1248 	struct perf_raw_frag *frag = &raw->frag;
1249 	u32 sum = 0;
1250 	int size;
1251 
1252 	do {
1253 		sum += frag->size;
1254 		if (perf_raw_frag_last(frag))
1255 			break;
1256 		frag = frag->next;
1257 	} while (1);
1258 
1259 	size = round_up(sum + sizeof(u32), sizeof(u64));
1260 	raw->size = size - sizeof(u32);
1261 	frag->pad = raw->size - sum;
1262 
1263 	data->raw = raw;
1264 	data->dyn_size += size;
1265 	data->sample_flags |= PERF_SAMPLE_RAW;
1266 }
1267 
perf_sample_save_brstack(struct perf_sample_data * data,struct perf_event * event,struct perf_branch_stack * brs,u64 * brs_cntr)1268 static inline void perf_sample_save_brstack(struct perf_sample_data *data,
1269 					    struct perf_event *event,
1270 					    struct perf_branch_stack *brs,
1271 					    u64 *brs_cntr)
1272 {
1273 	int size = sizeof(u64); /* nr */
1274 
1275 	if (branch_sample_hw_index(event))
1276 		size += sizeof(u64);
1277 	size += brs->nr * sizeof(struct perf_branch_entry);
1278 
1279 	/*
1280 	 * The extension space for counters is appended after the
1281 	 * struct perf_branch_stack. It is used to store the occurrences
1282 	 * of events of each branch.
1283 	 */
1284 	if (brs_cntr)
1285 		size += brs->nr * sizeof(u64);
1286 
1287 	data->br_stack = brs;
1288 	data->br_stack_cntr = brs_cntr;
1289 	data->dyn_size += size;
1290 	data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
1291 }
1292 
perf_sample_data_size(struct perf_sample_data * data,struct perf_event * event)1293 static inline u32 perf_sample_data_size(struct perf_sample_data *data,
1294 					struct perf_event *event)
1295 {
1296 	u32 size = sizeof(struct perf_event_header);
1297 
1298 	size += event->header_size + event->id_header_size;
1299 	size += data->dyn_size;
1300 
1301 	return size;
1302 }
1303 
1304 /*
1305  * Clear all bitfields in the perf_branch_entry.
1306  * The to and from fields are not cleared because they are
1307  * systematically modified by caller.
1308  */
perf_clear_branch_entry_bitfields(struct perf_branch_entry * br)1309 static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
1310 {
1311 	br->mispred = 0;
1312 	br->predicted = 0;
1313 	br->in_tx = 0;
1314 	br->abort = 0;
1315 	br->cycles = 0;
1316 	br->type = 0;
1317 	br->spec = PERF_BR_SPEC_NA;
1318 	br->reserved = 0;
1319 }
1320 
1321 extern void perf_output_sample(struct perf_output_handle *handle,
1322 			       struct perf_event_header *header,
1323 			       struct perf_sample_data *data,
1324 			       struct perf_event *event);
1325 extern void perf_prepare_sample(struct perf_sample_data *data,
1326 				struct perf_event *event,
1327 				struct pt_regs *regs);
1328 extern void perf_prepare_header(struct perf_event_header *header,
1329 				struct perf_sample_data *data,
1330 				struct perf_event *event,
1331 				struct pt_regs *regs);
1332 
1333 extern int perf_event_overflow(struct perf_event *event,
1334 				 struct perf_sample_data *data,
1335 				 struct pt_regs *regs);
1336 
1337 extern void perf_event_output_forward(struct perf_event *event,
1338 				     struct perf_sample_data *data,
1339 				     struct pt_regs *regs);
1340 extern void perf_event_output_backward(struct perf_event *event,
1341 				       struct perf_sample_data *data,
1342 				       struct pt_regs *regs);
1343 extern int perf_event_output(struct perf_event *event,
1344 			     struct perf_sample_data *data,
1345 			     struct pt_regs *regs);
1346 
1347 static inline bool
is_default_overflow_handler(struct perf_event * event)1348 is_default_overflow_handler(struct perf_event *event)
1349 {
1350 	perf_overflow_handler_t overflow_handler = event->overflow_handler;
1351 
1352 	if (likely(overflow_handler == perf_event_output_forward))
1353 		return true;
1354 	if (unlikely(overflow_handler == perf_event_output_backward))
1355 		return true;
1356 	return false;
1357 }
1358 
1359 extern void
1360 perf_event_header__init_id(struct perf_event_header *header,
1361 			   struct perf_sample_data *data,
1362 			   struct perf_event *event);
1363 extern void
1364 perf_event__output_id_sample(struct perf_event *event,
1365 			     struct perf_output_handle *handle,
1366 			     struct perf_sample_data *sample);
1367 
1368 extern void
1369 perf_log_lost_samples(struct perf_event *event, u64 lost);
1370 
event_has_any_exclude_flag(struct perf_event * event)1371 static inline bool event_has_any_exclude_flag(struct perf_event *event)
1372 {
1373 	struct perf_event_attr *attr = &event->attr;
1374 
1375 	return attr->exclude_idle || attr->exclude_user ||
1376 	       attr->exclude_kernel || attr->exclude_hv ||
1377 	       attr->exclude_guest || attr->exclude_host;
1378 }
1379 
is_sampling_event(struct perf_event * event)1380 static inline bool is_sampling_event(struct perf_event *event)
1381 {
1382 	return event->attr.sample_period != 0;
1383 }
1384 
1385 /*
1386  * Return 1 for a software event, 0 for a hardware event
1387  */
is_software_event(struct perf_event * event)1388 static inline int is_software_event(struct perf_event *event)
1389 {
1390 	return event->event_caps & PERF_EV_CAP_SOFTWARE;
1391 }
1392 
1393 /*
1394  * Return 1 for event in sw context, 0 for event in hw context
1395  */
in_software_context(struct perf_event * event)1396 static inline int in_software_context(struct perf_event *event)
1397 {
1398 	return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context;
1399 }
1400 
is_exclusive_pmu(struct pmu * pmu)1401 static inline int is_exclusive_pmu(struct pmu *pmu)
1402 {
1403 	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
1404 }
1405 
1406 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1407 
1408 extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
1409 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1410 
1411 #ifndef perf_arch_fetch_caller_regs
perf_arch_fetch_caller_regs(struct pt_regs * regs,unsigned long ip)1412 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1413 #endif
1414 
1415 /*
1416  * When generating a perf sample in-line, instead of from an interrupt /
1417  * exception, we lack a pt_regs. This is typically used from software events
1418  * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
1419  *
1420  * We typically don't need a full set, but (for x86) do require:
1421  * - ip for PERF_SAMPLE_IP
1422  * - cs for user_mode() tests
1423  * - sp for PERF_SAMPLE_CALLCHAIN
1424  * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
1425  *
1426  * NOTE: assumes @regs is otherwise already 0 filled; this is important for
1427  * things like PERF_SAMPLE_REGS_INTR.
1428  */
perf_fetch_caller_regs(struct pt_regs * regs)1429 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1430 {
1431 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1432 }
1433 
1434 static __always_inline void
perf_sw_event(u32 event_id,u64 nr,struct pt_regs * regs,u64 addr)1435 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1436 {
1437 	if (static_key_false(&perf_swevent_enabled[event_id]))
1438 		__perf_sw_event(event_id, nr, regs, addr);
1439 }
1440 
1441 DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
1442 
1443 /*
1444  * 'Special' version for the scheduler, it hard assumes no recursion,
1445  * which is guaranteed by us not actually scheduling inside other swevents
1446  * because those disable preemption.
1447  */
__perf_sw_event_sched(u32 event_id,u64 nr,u64 addr)1448 static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
1449 {
1450 	struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1451 
1452 	perf_fetch_caller_regs(regs);
1453 	___perf_sw_event(event_id, nr, regs, addr);
1454 }
1455 
1456 extern struct static_key_false perf_sched_events;
1457 
__perf_sw_enabled(int swevt)1458 static __always_inline bool __perf_sw_enabled(int swevt)
1459 {
1460 	return static_key_false(&perf_swevent_enabled[swevt]);
1461 }
1462 
perf_event_task_migrate(struct task_struct * task)1463 static inline void perf_event_task_migrate(struct task_struct *task)
1464 {
1465 	if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
1466 		task->sched_migrated = 1;
1467 }
1468 
perf_event_task_sched_in(struct task_struct * prev,struct task_struct * task)1469 static inline void perf_event_task_sched_in(struct task_struct *prev,
1470 					    struct task_struct *task)
1471 {
1472 	if (static_branch_unlikely(&perf_sched_events))
1473 		__perf_event_task_sched_in(prev, task);
1474 
1475 	if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
1476 	    task->sched_migrated) {
1477 		__perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
1478 		task->sched_migrated = 0;
1479 	}
1480 }
1481 
perf_event_task_sched_out(struct task_struct * prev,struct task_struct * next)1482 static inline void perf_event_task_sched_out(struct task_struct *prev,
1483 					     struct task_struct *next)
1484 {
1485 	if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
1486 		__perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
1487 
1488 #ifdef CONFIG_CGROUP_PERF
1489 	if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) &&
1490 	    perf_cgroup_from_task(prev, NULL) !=
1491 	    perf_cgroup_from_task(next, NULL))
1492 		__perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0);
1493 #endif
1494 
1495 	if (static_branch_unlikely(&perf_sched_events))
1496 		__perf_event_task_sched_out(prev, next);
1497 }
1498 
1499 extern void perf_event_mmap(struct vm_area_struct *vma);
1500 
1501 extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1502 			       bool unregister, const char *sym);
1503 extern void perf_event_bpf_event(struct bpf_prog *prog,
1504 				 enum perf_bpf_event_type type,
1505 				 u16 flags);
1506 
1507 #ifdef CONFIG_GUEST_PERF_EVENTS
1508 extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
1509 
1510 DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
1511 DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
1512 DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
1513 
perf_guest_state(void)1514 static inline unsigned int perf_guest_state(void)
1515 {
1516 	return static_call(__perf_guest_state)();
1517 }
perf_guest_get_ip(void)1518 static inline unsigned long perf_guest_get_ip(void)
1519 {
1520 	return static_call(__perf_guest_get_ip)();
1521 }
perf_guest_handle_intel_pt_intr(void)1522 static inline unsigned int perf_guest_handle_intel_pt_intr(void)
1523 {
1524 	return static_call(__perf_guest_handle_intel_pt_intr)();
1525 }
1526 extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
1527 extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
1528 #else
perf_guest_state(void)1529 static inline unsigned int perf_guest_state(void)		 { return 0; }
perf_guest_get_ip(void)1530 static inline unsigned long perf_guest_get_ip(void)		 { return 0; }
perf_guest_handle_intel_pt_intr(void)1531 static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
1532 #endif /* CONFIG_GUEST_PERF_EVENTS */
1533 
1534 extern void perf_event_exec(void);
1535 extern void perf_event_comm(struct task_struct *tsk, bool exec);
1536 extern void perf_event_namespaces(struct task_struct *tsk);
1537 extern void perf_event_fork(struct task_struct *tsk);
1538 extern void perf_event_text_poke(const void *addr,
1539 				 const void *old_bytes, size_t old_len,
1540 				 const void *new_bytes, size_t new_len);
1541 
1542 /* Callchains */
1543 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1544 
1545 extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1546 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1547 extern struct perf_callchain_entry *
1548 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1549 		   u32 max_stack, bool crosstask, bool add_mark);
1550 extern int get_callchain_buffers(int max_stack);
1551 extern void put_callchain_buffers(void);
1552 extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
1553 extern void put_callchain_entry(int rctx);
1554 
1555 extern int sysctl_perf_event_max_stack;
1556 extern int sysctl_perf_event_max_contexts_per_stack;
1557 
perf_callchain_store_context(struct perf_callchain_entry_ctx * ctx,u64 ip)1558 static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
1559 {
1560 	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
1561 		struct perf_callchain_entry *entry = ctx->entry;
1562 		entry->ip[entry->nr++] = ip;
1563 		++ctx->contexts;
1564 		return 0;
1565 	} else {
1566 		ctx->contexts_maxed = true;
1567 		return -1; /* no more room, stop walking the stack */
1568 	}
1569 }
1570 
perf_callchain_store(struct perf_callchain_entry_ctx * ctx,u64 ip)1571 static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
1572 {
1573 	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
1574 		struct perf_callchain_entry *entry = ctx->entry;
1575 		entry->ip[entry->nr++] = ip;
1576 		++ctx->nr;
1577 		return 0;
1578 	} else {
1579 		return -1; /* no more room, stop walking the stack */
1580 	}
1581 }
1582 
1583 extern int sysctl_perf_event_paranoid;
1584 extern int sysctl_perf_event_mlock;
1585 extern int sysctl_perf_event_sample_rate;
1586 extern int sysctl_perf_cpu_time_max_percent;
1587 
1588 extern void perf_sample_event_took(u64 sample_len_ns);
1589 
1590 int perf_event_max_sample_rate_handler(struct ctl_table *table, int write,
1591 		void *buffer, size_t *lenp, loff_t *ppos);
1592 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
1593 		void *buffer, size_t *lenp, loff_t *ppos);
1594 int perf_event_max_stack_handler(struct ctl_table *table, int write,
1595 		void *buffer, size_t *lenp, loff_t *ppos);
1596 
1597 /* Access to perf_event_open(2) syscall. */
1598 #define PERF_SECURITY_OPEN		0
1599 
1600 /* Finer grained perf_event_open(2) access control. */
1601 #define PERF_SECURITY_CPU		1
1602 #define PERF_SECURITY_KERNEL		2
1603 #define PERF_SECURITY_TRACEPOINT	3
1604 
perf_is_paranoid(void)1605 static inline int perf_is_paranoid(void)
1606 {
1607 	return sysctl_perf_event_paranoid > -1;
1608 }
1609 
perf_allow_kernel(struct perf_event_attr * attr)1610 static inline int perf_allow_kernel(struct perf_event_attr *attr)
1611 {
1612 	if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
1613 		return -EACCES;
1614 
1615 	return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
1616 }
1617 
perf_allow_cpu(struct perf_event_attr * attr)1618 static inline int perf_allow_cpu(struct perf_event_attr *attr)
1619 {
1620 	if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
1621 		return -EACCES;
1622 
1623 	return security_perf_event_open(attr, PERF_SECURITY_CPU);
1624 }
1625 
perf_allow_tracepoint(struct perf_event_attr * attr)1626 static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
1627 {
1628 	if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
1629 		return -EPERM;
1630 
1631 	return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
1632 }
1633 
1634 extern void perf_event_init(void);
1635 extern void perf_tp_event(u16 event_type, u64 count, void *record,
1636 			  int entry_size, struct pt_regs *regs,
1637 			  struct hlist_head *head, int rctx,
1638 			  struct task_struct *task);
1639 extern void perf_bp_event(struct perf_event *event, void *data);
1640 
1641 #ifndef perf_misc_flags
1642 # define perf_misc_flags(regs) \
1643 		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1644 # define perf_instruction_pointer(regs)	instruction_pointer(regs)
1645 #endif
1646 #ifndef perf_arch_bpf_user_pt_regs
1647 # define perf_arch_bpf_user_pt_regs(regs) regs
1648 #endif
1649 
has_branch_stack(struct perf_event * event)1650 static inline bool has_branch_stack(struct perf_event *event)
1651 {
1652 	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1653 }
1654 
needs_branch_stack(struct perf_event * event)1655 static inline bool needs_branch_stack(struct perf_event *event)
1656 {
1657 	return event->attr.branch_sample_type != 0;
1658 }
1659 
has_aux(struct perf_event * event)1660 static inline bool has_aux(struct perf_event *event)
1661 {
1662 	return event->pmu->setup_aux;
1663 }
1664 
is_write_backward(struct perf_event * event)1665 static inline bool is_write_backward(struct perf_event *event)
1666 {
1667 	return !!event->attr.write_backward;
1668 }
1669 
has_addr_filter(struct perf_event * event)1670 static inline bool has_addr_filter(struct perf_event *event)
1671 {
1672 	return event->pmu->nr_addr_filters;
1673 }
1674 
1675 /*
1676  * An inherited event uses parent's filters
1677  */
1678 static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event * event)1679 perf_event_addr_filters(struct perf_event *event)
1680 {
1681 	struct perf_addr_filters_head *ifh = &event->addr_filters;
1682 
1683 	if (event->parent)
1684 		ifh = &event->parent->addr_filters;
1685 
1686 	return ifh;
1687 }
1688 
perf_event_fasync(struct perf_event * event)1689 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
1690 {
1691 	/* Only the parent has fasync state */
1692 	if (event->parent)
1693 		event = event->parent;
1694 	return &event->fasync;
1695 }
1696 
1697 extern void perf_event_addr_filters_sync(struct perf_event *event);
1698 extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
1699 
1700 extern int perf_output_begin(struct perf_output_handle *handle,
1701 			     struct perf_sample_data *data,
1702 			     struct perf_event *event, unsigned int size);
1703 extern int perf_output_begin_forward(struct perf_output_handle *handle,
1704 				     struct perf_sample_data *data,
1705 				     struct perf_event *event,
1706 				     unsigned int size);
1707 extern int perf_output_begin_backward(struct perf_output_handle *handle,
1708 				      struct perf_sample_data *data,
1709 				      struct perf_event *event,
1710 				      unsigned int size);
1711 
1712 extern void perf_output_end(struct perf_output_handle *handle);
1713 extern unsigned int perf_output_copy(struct perf_output_handle *handle,
1714 			     const void *buf, unsigned int len);
1715 extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1716 				     unsigned int len);
1717 extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
1718 				 struct perf_output_handle *handle,
1719 				 unsigned long from, unsigned long to);
1720 extern int perf_swevent_get_recursion_context(void);
1721 extern void perf_swevent_put_recursion_context(int rctx);
1722 extern u64 perf_swevent_set_period(struct perf_event *event);
1723 extern void perf_event_enable(struct perf_event *event);
1724 extern void perf_event_disable(struct perf_event *event);
1725 extern void perf_event_disable_local(struct perf_event *event);
1726 extern void perf_event_disable_inatomic(struct perf_event *event);
1727 extern void perf_event_task_tick(void);
1728 extern int perf_event_account_interrupt(struct perf_event *event);
1729 extern int perf_event_period(struct perf_event *event, u64 value);
1730 extern u64 perf_event_pause(struct perf_event *event, bool reset);
1731 #else /* !CONFIG_PERF_EVENTS: */
1732 static inline void *
perf_aux_output_begin(struct perf_output_handle * handle,struct perf_event * event)1733 perf_aux_output_begin(struct perf_output_handle *handle,
1734 		      struct perf_event *event)				{ return NULL; }
1735 static inline void
perf_aux_output_end(struct perf_output_handle * handle,unsigned long size)1736 perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
1737 									{ }
1738 static inline int
perf_aux_output_skip(struct perf_output_handle * handle,unsigned long size)1739 perf_aux_output_skip(struct perf_output_handle *handle,
1740 		     unsigned long size)				{ return -EINVAL; }
1741 static inline void *
perf_get_aux(struct perf_output_handle * handle)1742 perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
1743 static inline void
perf_event_task_migrate(struct task_struct * task)1744 perf_event_task_migrate(struct task_struct *task)			{ }
1745 static inline void
perf_event_task_sched_in(struct task_struct * prev,struct task_struct * task)1746 perf_event_task_sched_in(struct task_struct *prev,
1747 			 struct task_struct *task)			{ }
1748 static inline void
perf_event_task_sched_out(struct task_struct * prev,struct task_struct * next)1749 perf_event_task_sched_out(struct task_struct *prev,
1750 			  struct task_struct *next)			{ }
perf_event_init_task(struct task_struct * child,u64 clone_flags)1751 static inline int perf_event_init_task(struct task_struct *child,
1752 				       u64 clone_flags)			{ return 0; }
perf_event_exit_task(struct task_struct * child)1753 static inline void perf_event_exit_task(struct task_struct *child)	{ }
perf_event_free_task(struct task_struct * task)1754 static inline void perf_event_free_task(struct task_struct *task)	{ }
perf_event_delayed_put(struct task_struct * task)1755 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
perf_event_get(unsigned int fd)1756 static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
perf_get_event(struct file * file)1757 static inline const struct perf_event *perf_get_event(struct file *file)
1758 {
1759 	return ERR_PTR(-EINVAL);
1760 }
perf_event_attrs(struct perf_event * event)1761 static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
1762 {
1763 	return ERR_PTR(-EINVAL);
1764 }
perf_event_read_local(struct perf_event * event,u64 * value,u64 * enabled,u64 * running)1765 static inline int perf_event_read_local(struct perf_event *event, u64 *value,
1766 					u64 *enabled, u64 *running)
1767 {
1768 	return -EINVAL;
1769 }
perf_event_print_debug(void)1770 static inline void perf_event_print_debug(void)				{ }
perf_event_task_disable(void)1771 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
perf_event_task_enable(void)1772 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
perf_event_refresh(struct perf_event * event,int refresh)1773 static inline int perf_event_refresh(struct perf_event *event, int refresh)
1774 {
1775 	return -EINVAL;
1776 }
1777 
1778 static inline void
perf_sw_event(u32 event_id,u64 nr,struct pt_regs * regs,u64 addr)1779 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
1780 static inline void
perf_bp_event(struct perf_event * event,void * data)1781 perf_bp_event(struct perf_event *event, void *data)			{ }
1782 
perf_event_mmap(struct vm_area_struct * vma)1783 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
1784 
1785 typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
perf_event_ksymbol(u16 ksym_type,u64 addr,u32 len,bool unregister,const char * sym)1786 static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1787 				      bool unregister, const char *sym)	{ }
perf_event_bpf_event(struct bpf_prog * prog,enum perf_bpf_event_type type,u16 flags)1788 static inline void perf_event_bpf_event(struct bpf_prog *prog,
1789 					enum perf_bpf_event_type type,
1790 					u16 flags)			{ }
perf_event_exec(void)1791 static inline void perf_event_exec(void)				{ }
perf_event_comm(struct task_struct * tsk,bool exec)1792 static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
perf_event_namespaces(struct task_struct * tsk)1793 static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
perf_event_fork(struct task_struct * tsk)1794 static inline void perf_event_fork(struct task_struct *tsk)		{ }
perf_event_text_poke(const void * addr,const void * old_bytes,size_t old_len,const void * new_bytes,size_t new_len)1795 static inline void perf_event_text_poke(const void *addr,
1796 					const void *old_bytes,
1797 					size_t old_len,
1798 					const void *new_bytes,
1799 					size_t new_len)			{ }
perf_event_init(void)1800 static inline void perf_event_init(void)				{ }
perf_swevent_get_recursion_context(void)1801 static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
perf_swevent_put_recursion_context(int rctx)1802 static inline void perf_swevent_put_recursion_context(int rctx)		{ }
perf_swevent_set_period(struct perf_event * event)1803 static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
perf_event_enable(struct perf_event * event)1804 static inline void perf_event_enable(struct perf_event *event)		{ }
perf_event_disable(struct perf_event * event)1805 static inline void perf_event_disable(struct perf_event *event)		{ }
__perf_event_disable(void * info)1806 static inline int __perf_event_disable(void *info)			{ return -1; }
perf_event_task_tick(void)1807 static inline void perf_event_task_tick(void)				{ }
perf_event_release_kernel(struct perf_event * event)1808 static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
perf_event_period(struct perf_event * event,u64 value)1809 static inline int perf_event_period(struct perf_event *event, u64 value)
1810 {
1811 	return -EINVAL;
1812 }
perf_event_pause(struct perf_event * event,bool reset)1813 static inline u64 perf_event_pause(struct perf_event *event, bool reset)
1814 {
1815 	return 0;
1816 }
1817 #endif
1818 
1819 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1820 extern void perf_restore_debug_store(void);
1821 #else
perf_restore_debug_store(void)1822 static inline void perf_restore_debug_store(void)			{ }
1823 #endif
1824 
1825 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
1826 
1827 struct perf_pmu_events_attr {
1828 	struct device_attribute attr;
1829 	u64 id;
1830 	const char *event_str;
1831 };
1832 
1833 struct perf_pmu_events_ht_attr {
1834 	struct device_attribute			attr;
1835 	u64					id;
1836 	const char				*event_str_ht;
1837 	const char				*event_str_noht;
1838 };
1839 
1840 struct perf_pmu_events_hybrid_attr {
1841 	struct device_attribute			attr;
1842 	u64					id;
1843 	const char				*event_str;
1844 	u64					pmu_type;
1845 };
1846 
1847 struct perf_pmu_format_hybrid_attr {
1848 	struct device_attribute			attr;
1849 	u64					pmu_type;
1850 };
1851 
1852 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1853 			      char *page);
1854 
1855 #define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
1856 static struct perf_pmu_events_attr _var = {				\
1857 	.attr = __ATTR(_name, 0444, _show, NULL),			\
1858 	.id   =  _id,							\
1859 };
1860 
1861 #define PMU_EVENT_ATTR_STRING(_name, _var, _str)			    \
1862 static struct perf_pmu_events_attr _var = {				    \
1863 	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
1864 	.id		= 0,						    \
1865 	.event_str	= _str,						    \
1866 };
1867 
1868 #define PMU_EVENT_ATTR_ID(_name, _show, _id)				\
1869 	(&((struct perf_pmu_events_attr[]) {				\
1870 		{ .attr = __ATTR(_name, 0444, _show, NULL),		\
1871 		  .id = _id, }						\
1872 	})[0].attr.attr)
1873 
1874 #define PMU_FORMAT_ATTR_SHOW(_name, _format)				\
1875 static ssize_t								\
1876 _name##_show(struct device *dev,					\
1877 			       struct device_attribute *attr,		\
1878 			       char *page)				\
1879 {									\
1880 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
1881 	return sprintf(page, _format "\n");				\
1882 }									\
1883 
1884 #define PMU_FORMAT_ATTR(_name, _format)					\
1885 	PMU_FORMAT_ATTR_SHOW(_name, _format)				\
1886 									\
1887 static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
1888 
1889 /* Performance counter hotplug functions */
1890 #ifdef CONFIG_PERF_EVENTS
1891 int perf_event_init_cpu(unsigned int cpu);
1892 int perf_event_exit_cpu(unsigned int cpu);
1893 #else
1894 #define perf_event_init_cpu	NULL
1895 #define perf_event_exit_cpu	NULL
1896 #endif
1897 
1898 extern void arch_perf_update_userpage(struct perf_event *event,
1899 				      struct perf_event_mmap_page *userpg,
1900 				      u64 now);
1901 
1902 /*
1903  * Snapshot branch stack on software events.
1904  *
1905  * Branch stack can be very useful in understanding software events. For
1906  * example, when a long function, e.g. sys_perf_event_open, returns an
1907  * errno, it is not obvious why the function failed. Branch stack could
1908  * provide very helpful information in this type of scenarios.
1909  *
1910  * On software event, it is necessary to stop the hardware branch recorder
1911  * fast. Otherwise, the hardware register/buffer will be flushed with
1912  * entries of the triggering event. Therefore, static call is used to
1913  * stop the hardware recorder.
1914  */
1915 
1916 /*
1917  * cnt is the number of entries allocated for entries.
1918  * Return number of entries copied to .
1919  */
1920 typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
1921 					   unsigned int cnt);
1922 DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
1923 
1924 #ifndef PERF_NEEDS_LOPWR_CB
perf_lopwr_cb(bool mode)1925 static inline void perf_lopwr_cb(bool mode)
1926 {
1927 }
1928 #endif
1929 
1930 #endif /* _LINUX_PERF_EVENT_H */
1931