xref: /linux/tools/perf/util/auxtrace.h (revision ee73fe99)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * auxtrace.h: AUX area trace support
4  * Copyright (c) 2013-2015, Intel Corporation.
5  */
6 
7 #ifndef __PERF_AUXTRACE_H
8 #define __PERF_AUXTRACE_H
9 
10 #include <sys/types.h>
11 #include <errno.h>
12 #include <stdbool.h>
13 #include <stddef.h>
14 #include <stdio.h> // FILE
15 #include <linux/list.h>
16 #include <linux/perf_event.h>
17 #include <linux/types.h>
18 #include <perf/cpumap.h>
19 #include <asm/bitsperlong.h>
20 #include <asm/barrier.h>
21 
22 union perf_event;
23 struct perf_session;
24 struct evlist;
25 struct evsel;
26 struct perf_tool;
27 struct mmap;
28 struct perf_sample;
29 struct option;
30 struct record_opts;
31 struct perf_record_auxtrace_error;
32 struct perf_record_auxtrace_info;
33 struct events_stats;
34 struct perf_pmu;
35 
/* Error classes carried in PERF_RECORD_AUXTRACE_ERROR events */
enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE  = 1,
	PERF_AUXTRACE_ERROR_MAX
};
40 
/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
43 
/* Identifies which decoder owns the AUX area data in a session */
enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
	PERF_AUXTRACE_HISI_PTT,
};
53 
/* Units in which the itrace synthesis period is expressed */
enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};
59 
/* --itrace 'e' (error) option flags: bit position is (letter - 'a') */
#define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))

/* --itrace 'd' (debug log) option flags: bit position is (letter - 'a') */
#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))
#define AUXTRACE_LOG_FLG_ON_ERROR	(1 << ('e' - 'a'))
#define AUXTRACE_LOG_FLG_USE_STDOUT	(1 << ('o' - 'a'))
66 
67 /**
68  * struct itrace_synth_opts - AUX area tracing synthesis options.
69  * @set: indicates whether or not options have been set
70  * @default_no_sample: Default to no sampling.
71  * @inject: indicates the event (not just the sample) must be fully synthesized
72  *          because 'perf inject' will write it out
73  * @instructions: whether to synthesize 'instructions' events
74  * @cycles: whether to synthesize 'cycles' events
75  *          (not fully accurate, since CYC packets are only emitted
76  *          together with other events, such as branches)
77  * @branches: whether to synthesize 'branches' events
78  *            (branch misses only for Arm SPE)
79  * @transactions: whether to synthesize events for transactions
80  * @ptwrites: whether to synthesize events for ptwrites
81  * @pwr_events: whether to synthesize power events
82  * @other_events: whether to synthesize other events recorded due to the use of
83  *                aux_output
84  * @intr_events: whether to synthesize interrupt events
85  * @errors: whether to synthesize decoder error events
86  * @dont_decode: whether to skip decoding entirely
87  * @log: write a decoding log
88  * @calls: limit branch samples to calls (can be combined with @returns)
89  * @returns: limit branch samples to returns (can be combined with @calls)
90  * @callchain: add callchain to 'instructions' events
91  * @add_callchain: add callchain to existing event records
92  * @thread_stack: feed branches to the thread_stack
93  * @last_branch: add branch context to 'instruction' events
94  * @add_last_branch: add branch context to existing event records
95  * @approx_ipc: approximate IPC
96  * @flc: whether to synthesize first level cache events
97  * @llc: whether to synthesize last level cache events
98  * @tlb: whether to synthesize TLB events
99  * @remote_access: whether to synthesize remote access events
100  * @mem: whether to synthesize memory events
101  * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
102  * @use_timestamp: use the timestamp trace as kernel time
103  * @vm_time_correlation: perform VM Time Correlation
104  * @vm_tm_corr_dry_run: VM Time Correlation dry-run
105  * @vm_tm_corr_args:  VM Time Correlation implementation-specific arguments
106  * @callchain_sz: maximum callchain size
107  * @last_branch_sz: branch context size
108  * @period: 'instructions' events period
109  * @period_type: 'instructions' events period type
110  * @initial_skip: skip N events at the beginning.
111  * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
112  * @ptime_range: time intervals to trace or NULL
113  * @range_num: number of time intervals to trace
114  * @error_plus_flags: flags to affect what errors are reported
115  * @error_minus_flags: flags to affect what errors are reported
116  * @log_plus_flags: flags to affect what is logged
117  * @log_minus_flags: flags to affect what is logged
118  * @quick: quicker (less detailed) decoding
119  * @log_on_error_size: size of log to keep for outputting log only on errors
120  */
121 struct itrace_synth_opts {
122 	bool			set;
123 	bool			default_no_sample;
124 	bool			inject;
125 	bool			instructions;
126 	bool			cycles;
127 	bool			branches;
128 	bool			transactions;
129 	bool			ptwrites;
130 	bool			pwr_events;
131 	bool			other_events;
132 	bool			intr_events;
133 	bool			errors;
134 	bool			dont_decode;
135 	bool			log;
136 	bool			calls;
137 	bool			returns;
138 	bool			callchain;
139 	bool			add_callchain;
140 	bool			thread_stack;
141 	bool			last_branch;
142 	bool			add_last_branch;
143 	bool			approx_ipc;
144 	bool			flc;
145 	bool			llc;
146 	bool			tlb;
147 	bool			remote_access;
148 	bool			mem;
149 	bool			timeless_decoding;
150 	bool			use_timestamp;
151 	bool			vm_time_correlation;
152 	bool			vm_tm_corr_dry_run;
153 	char			*vm_tm_corr_args;
154 	unsigned int		callchain_sz;
155 	unsigned int		last_branch_sz;
156 	unsigned long long	period;
157 	enum itrace_period_type	period_type;
158 	unsigned long		initial_skip;
159 	unsigned long		*cpu_bitmap;
160 	struct perf_time_interval *ptime_range;
161 	int			range_num;
162 	unsigned int		error_plus_flags;
163 	unsigned int		error_minus_flags;
164 	unsigned int		log_plus_flags;
165 	unsigned int		log_minus_flags;
166 	unsigned int		quick;
167 	unsigned int		log_on_error_size;
168 };
169 
170 /**
171  * struct auxtrace_index_entry - indexes a AUX area tracing event within a
172  *                               perf.data file.
173  * @file_offset: offset within the perf.data file
174  * @sz: size of the event
175  */
176 struct auxtrace_index_entry {
177 	u64			file_offset;
178 	u64			sz;
179 };
180 
181 #define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256
182 
183 /**
184  * struct auxtrace_index - index of AUX area tracing events within a perf.data
185  *                         file.
186  * @list: linking a number of arrays of entries
187  * @nr: number of entries
188  * @entries: array of entries
189  */
190 struct auxtrace_index {
191 	struct list_head	list;
192 	size_t			nr;
193 	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
194 };
195 
196 /**
197  * struct auxtrace - session callbacks to allow AUX area data decoding.
198  * @process_event: lets the decoder see all session events
199  * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
200  * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
201  *              processing
202  * @dump_auxtrace_sample: dump AUX area sample data
203  * @flush_events: process any remaining data
204  * @free_events: free resources associated with event processing
205  * @free: free resources associated with the session
206  */
207 struct auxtrace {
208 	int (*process_event)(struct perf_session *session,
209 			     union perf_event *event,
210 			     struct perf_sample *sample,
211 			     struct perf_tool *tool);
212 	int (*process_auxtrace_event)(struct perf_session *session,
213 				      union perf_event *event,
214 				      struct perf_tool *tool);
215 	int (*queue_data)(struct perf_session *session,
216 			  struct perf_sample *sample, union perf_event *event,
217 			  u64 data_offset);
218 	void (*dump_auxtrace_sample)(struct perf_session *session,
219 				     struct perf_sample *sample);
220 	int (*flush_events)(struct perf_session *session,
221 			    struct perf_tool *tool);
222 	void (*free_events)(struct perf_session *session);
223 	void (*free)(struct perf_session *session);
224 	bool (*evsel_is_auxtrace)(struct perf_session *session,
225 				  struct evsel *evsel);
226 };
227 
228 /**
229  * struct auxtrace_buffer - a buffer containing AUX area tracing data.
230  * @list: buffers are queued in a list held by struct auxtrace_queue
231  * @size: size of the buffer in bytes
232  * @pid: in per-thread mode, the pid this buffer is associated with
233  * @tid: in per-thread mode, the tid this buffer is associated with
234  * @cpu: in per-cpu mode, the cpu this buffer is associated with
235  * @data: actual buffer data (can be null if the data has not been loaded)
236  * @data_offset: file offset at which the buffer can be read
237  * @mmap_addr: mmap address at which the buffer can be read
238  * @mmap_size: size of the mmap at @mmap_addr
239  * @data_needs_freeing: @data was malloc'd so free it when it is no longer
240  *                      needed
241  * @consecutive: the original data was split up and this buffer is consecutive
242  *               to the previous buffer
243  * @offset: offset as determined by aux_head / aux_tail members of struct
244  *          perf_event_mmap_page
245  * @reference: an implementation-specific reference determined when the data is
246  *             recorded
247  * @buffer_nr: used to number each buffer
248  * @use_size: implementation actually only uses this number of bytes
249  * @use_data: implementation actually only uses data starting at this address
250  */
251 struct auxtrace_buffer {
252 	struct list_head	list;
253 	size_t			size;
254 	pid_t			pid;
255 	pid_t			tid;
256 	struct perf_cpu		cpu;
257 	void			*data;
258 	off_t			data_offset;
259 	void			*mmap_addr;
260 	size_t			mmap_size;
261 	bool			data_needs_freeing;
262 	bool			consecutive;
263 	u64			offset;
264 	u64			reference;
265 	u64			buffer_nr;
266 	size_t			use_size;
267 	void			*use_data;
268 };
269 
270 /**
271  * struct auxtrace_queue - a queue of AUX area tracing data buffers.
272  * @head: head of buffer list
273  * @tid: in per-thread mode, the tid this queue is associated with
274  * @cpu: in per-cpu mode, the cpu this queue is associated with
275  * @set: %true once this queue has been dedicated to a specific thread or cpu
276  * @priv: implementation-specific data
277  */
278 struct auxtrace_queue {
279 	struct list_head	head;
280 	pid_t			tid;
281 	int			cpu;
282 	bool			set;
283 	void			*priv;
284 };
285 
286 /**
287  * struct auxtrace_queues - an array of AUX area tracing queues.
288  * @queue_array: array of queues
289  * @nr_queues: number of queues
290  * @new_data: set whenever new data is queued
291  * @populated: queues have been fully populated using the auxtrace_index
292  * @next_buffer_nr: used to number each buffer
293  */
294 struct auxtrace_queues {
295 	struct auxtrace_queue	*queue_array;
296 	unsigned int		nr_queues;
297 	bool			new_data;
298 	bool			populated;
299 	u64			next_buffer_nr;
300 };
301 
302 /**
303  * struct auxtrace_heap_item - element of struct auxtrace_heap.
304  * @queue_nr: queue number
305  * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
306  *           to be a timestamp
307  */
308 struct auxtrace_heap_item {
309 	unsigned int		queue_nr;
310 	u64			ordinal;
311 };
312 
/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int		heap_cnt;
	unsigned int		heap_sz;
};
324 
325 /**
326  * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
327  * @base: address of mapped area
328  * @userpg: pointer to buffer's perf_event_mmap_page
329  * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
330  * @len: size of mapped area
331  * @prev: previous aux_head
332  * @idx: index of this mmap
333  * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
334  *       mmap) otherwise %0
335  * @cpu: cpu number for a per-cpu mmap otherwise %-1
336  */
337 struct auxtrace_mmap {
338 	void		*base;
339 	void		*userpg;
340 	size_t		mask;
341 	size_t		len;
342 	u64		prev;
343 	int		idx;
344 	pid_t		tid;
345 	int		cpu;
346 };
347 
348 /**
349  * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
350  * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
351  * @offset: file offset of mapped area
352  * @len: size of mapped area
353  * @prot: mmap memory protection
354  * @idx: index of this mmap
355  * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
356  *       mmap) otherwise %0
357  * @mmap_needed: set to %false for non-auxtrace events. This is needed because
358  *               auxtrace mmapping is done in the same code path as non-auxtrace
359  *               mmapping but not every evsel that needs non-auxtrace mmapping
360  *               also needs auxtrace mmapping.
361  * @cpu: cpu number for a per-cpu mmap otherwise %-1
362  */
363 struct auxtrace_mmap_params {
364 	size_t		mask;
365 	off_t		offset;
366 	size_t		len;
367 	int		prot;
368 	int		idx;
369 	pid_t		tid;
370 	bool		mmap_needed;
371 	struct perf_cpu	cpu;
372 };
373 
374 /**
375  * struct auxtrace_record - callbacks for recording AUX area data.
376  * @recording_options: validate and process recording options
377  * @info_priv_size: return the size of the private data in auxtrace_info_event
378  * @info_fill: fill-in the private data in auxtrace_info_event
379  * @free: free this auxtrace record structure
380  * @snapshot_start: starting a snapshot
381  * @snapshot_finish: finishing a snapshot
382  * @find_snapshot: find data to snapshot within auxtrace mmap
383  * @parse_snapshot_options: parse snapshot options
384  * @reference: provide a 64-bit reference number for auxtrace_event
385  * @read_finish: called after reading from an auxtrace mmap
386  * @alignment: alignment (if any) for AUX area data
387  * @default_aux_sample_size: default sample size for --aux sample option
388  * @pmu: associated pmu
389  * @evlist: selected events list
390  */
391 struct auxtrace_record {
392 	int (*recording_options)(struct auxtrace_record *itr,
393 				 struct evlist *evlist,
394 				 struct record_opts *opts);
395 	size_t (*info_priv_size)(struct auxtrace_record *itr,
396 				 struct evlist *evlist);
397 	int (*info_fill)(struct auxtrace_record *itr,
398 			 struct perf_session *session,
399 			 struct perf_record_auxtrace_info *auxtrace_info,
400 			 size_t priv_size);
401 	void (*free)(struct auxtrace_record *itr);
402 	int (*snapshot_start)(struct auxtrace_record *itr);
403 	int (*snapshot_finish)(struct auxtrace_record *itr);
404 	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
405 			     struct auxtrace_mmap *mm, unsigned char *data,
406 			     u64 *head, u64 *old);
407 	int (*parse_snapshot_options)(struct auxtrace_record *itr,
408 				      struct record_opts *opts,
409 				      const char *str);
410 	u64 (*reference)(struct auxtrace_record *itr);
411 	int (*read_finish)(struct auxtrace_record *itr, int idx);
412 	unsigned int alignment;
413 	unsigned int default_aux_sample_size;
414 	struct perf_pmu *pmu;
415 	struct evlist *evlist;
416 };
417 
418 /**
419  * struct addr_filter - address filter.
420  * @list: list node
421  * @range: true if it is a range filter
422  * @start: true if action is 'filter' or 'start'
423  * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
424  *          to 'stop')
425  * @sym_from: symbol name for the filter address
426  * @sym_to: symbol name that determines the filter size
427  * @sym_from_idx: selects n'th from symbols with the same name (0 means global
428  *                and less than 0 means symbol must be unique)
429  * @sym_to_idx: same as @sym_from_idx but for @sym_to
430  * @addr: filter address
431  * @size: filter region size (for range filters)
432  * @filename: DSO file name or NULL for the kernel
433  * @str: allocated string that contains the other string members
434  */
435 struct addr_filter {
436 	struct list_head	list;
437 	bool			range;
438 	bool			start;
439 	const char		*action;
440 	const char		*sym_from;
441 	const char		*sym_to;
442 	int			sym_from_idx;
443 	int			sym_to_idx;
444 	u64			addr;
445 	u64			size;
446 	const char		*filename;
447 	char			*str;
448 };
449 
450 /**
451  * struct addr_filters - list of address filters.
452  * @head: list of address filters
453  * @cnt: number of address filters
454  */
455 struct addr_filters {
456 	struct list_head	head;
457 	int			cnt;
458 };
459 
460 struct auxtrace_cache;
461 
462 #ifdef HAVE_AUXTRACE_SUPPORT
463 
464 u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
465 int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);
466 
auxtrace_mmap__read_head(struct auxtrace_mmap * mm,int kernel_is_64_bit __maybe_unused)467 static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm,
468 					   int kernel_is_64_bit __maybe_unused)
469 {
470 	struct perf_event_mmap_page *pc = mm->userpg;
471 	u64 head;
472 
473 #if BITS_PER_LONG == 32
474 	if (kernel_is_64_bit)
475 		return compat_auxtrace_mmap__read_head(mm);
476 #endif
477 	head = READ_ONCE(pc->aux_head);
478 
479 	/* Ensure all reads are done after we read the head */
480 	smp_rmb();
481 	return head;
482 }
483 
/*
 * Publish a new AUX buffer tail with release semantics.  On a 32-bit perf
 * running on a 64-bit kernel, delegate to compat_auxtrace_mmap__write_tail().
 * Returns 0 on success (the compat helper may return an error).
 */
static inline int auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail,
					    int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__write_tail(mm, tail);
#endif
	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}
498 
499 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
500 			struct auxtrace_mmap_params *mp,
501 			void *userpg, int fd);
502 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
503 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
504 				off_t auxtrace_offset,
505 				unsigned int auxtrace_pages,
506 				bool auxtrace_overwrite);
507 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
508 				   struct evlist *evlist,
509 				   struct evsel *evsel, int idx);
510 
511 typedef int (*process_auxtrace_t)(struct perf_tool *tool,
512 				  struct mmap *map,
513 				  union perf_event *event, void *data1,
514 				  size_t len1, void *data2, size_t len2);
515 
516 int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
517 			struct perf_tool *tool, process_auxtrace_t fn);
518 
519 int auxtrace_mmap__read_snapshot(struct mmap *map,
520 				 struct auxtrace_record *itr,
521 				 struct perf_tool *tool, process_auxtrace_t fn,
522 				 size_t snapshot_size);
523 
524 int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues);
525 int auxtrace_queues__init(struct auxtrace_queues *queues);
526 int auxtrace_queues__add_event(struct auxtrace_queues *queues,
527 			       struct perf_session *session,
528 			       union perf_event *event, off_t data_offset,
529 			       struct auxtrace_buffer **buffer_ptr);
530 struct auxtrace_queue *
531 auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
532 			      struct perf_sample *sample,
533 			      struct perf_session *session);
534 int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
535 				struct perf_session *session,
536 				struct perf_sample *sample, u64 data_offset,
537 				u64 reference);
538 void auxtrace_queues__free(struct auxtrace_queues *queues);
539 int auxtrace_queues__process_index(struct auxtrace_queues *queues,
540 				   struct perf_session *session);
541 int auxtrace_queue_data(struct perf_session *session, bool samples,
542 			bool events);
543 struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
544 					      struct auxtrace_buffer *buffer);
545 void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
/* Read-only convenience wrapper around auxtrace_buffer__get_data_rw() */
static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	return auxtrace_buffer__get_data_rw(buffer, fd, false);
}
550 void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
551 void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
552 void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
553 
554 int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
555 		       u64 ordinal);
556 void auxtrace_heap__pop(struct auxtrace_heap *heap);
557 void auxtrace_heap__free(struct auxtrace_heap *heap);
558 
559 struct auxtrace_cache_entry {
560 	struct hlist_node hash;
561 	u32 key;
562 };
563 
564 struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
565 					   unsigned int limit_percent);
566 void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
567 void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
568 void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
569 int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
570 			struct auxtrace_cache_entry *entry);
571 void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
572 void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
573 
574 struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
575 					      int *err);
576 
577 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
578 				    struct record_opts *opts,
579 				    const char *str);
580 int auxtrace_parse_sample_options(struct auxtrace_record *itr,
581 				  struct evlist *evlist,
582 				  struct record_opts *opts, const char *str);
583 void auxtrace_regroup_aux_output(struct evlist *evlist);
584 int auxtrace_record__options(struct auxtrace_record *itr,
585 			     struct evlist *evlist,
586 			     struct record_opts *opts);
587 size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
588 				       struct evlist *evlist);
589 int auxtrace_record__info_fill(struct auxtrace_record *itr,
590 			       struct perf_session *session,
591 			       struct perf_record_auxtrace_info *auxtrace_info,
592 			       size_t priv_size);
593 void auxtrace_record__free(struct auxtrace_record *itr);
594 int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
595 int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
596 int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
597 				   struct auxtrace_mmap *mm,
598 				   unsigned char *data, u64 *head, u64 *old);
599 u64 auxtrace_record__reference(struct auxtrace_record *itr);
600 int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);
601 
602 int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
603 				   off_t file_offset);
604 int auxtrace_index__write(int fd, struct list_head *head);
605 int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
606 			    bool needs_swap);
607 void auxtrace_index__free(struct list_head *head);
608 
609 void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
610 				int code, int cpu, pid_t pid, pid_t tid, u64 ip,
611 				const char *msg, u64 timestamp,
612 				pid_t machine_pid, int vcpu);
613 void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
614 			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
615 			  const char *msg, u64 timestamp);
616 
617 int perf_event__process_auxtrace_info(struct perf_session *session,
618 				      union perf_event *event);
619 s64 perf_event__process_auxtrace(struct perf_session *session,
620 				 union perf_event *event);
621 int perf_event__process_auxtrace_error(struct perf_session *session,
622 				       union perf_event *event);
623 int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
624 			       const char *str, int unset);
625 int itrace_parse_synth_opts(const struct option *opt, const char *str,
626 			    int unset);
627 void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
628 				    bool no_sample);
629 
630 size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
631 void perf_session__auxtrace_error_inc(struct perf_session *session,
632 				      union perf_event *event);
633 void events_stats__auxtrace_error_warn(const struct events_stats *stats);
634 
635 void addr_filters__init(struct addr_filters *filts);
636 void addr_filters__exit(struct addr_filters *filts);
637 int addr_filters__parse_bare_filter(struct addr_filters *filts,
638 				    const char *filter);
639 int auxtrace_parse_filters(struct evlist *evlist);
640 
641 int auxtrace__process_event(struct perf_session *session, union perf_event *event,
642 			    struct perf_sample *sample, struct perf_tool *tool);
643 void auxtrace__dump_auxtrace_sample(struct perf_session *session,
644 				    struct perf_sample *sample);
645 int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
646 void auxtrace__free_events(struct perf_session *session);
647 void auxtrace__free(struct perf_session *session);
648 bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
649 				 struct evsel *evsel);
650 
651 #define ITRACE_HELP \
652 "				i[period]:    		synthesize instructions events\n" \
653 "				y[period]:    		synthesize cycles events (same period as i)\n" \
654 "				b:	    		synthesize branches events (branch misses for Arm SPE)\n" \
655 "				c:	    		synthesize branches events (calls only)\n"	\
656 "				r:	    		synthesize branches events (returns only)\n" \
657 "				x:	    		synthesize transactions events\n"		\
658 "				w:	    		synthesize ptwrite events\n"		\
659 "				p:	    		synthesize power events\n"			\
660 "				o:			synthesize other events recorded due to the use\n" \
661 "							of aux-output (refer to perf record)\n"	\
662 "				I:			synthesize interrupt or similar (asynchronous) events\n" \
663 "							(e.g. Intel PT Event Trace)\n" \
664 "				e[flags]:		synthesize error events\n" \
665 "							each flag must be preceded by + or -\n" \
666 "							error flags are: o (overflow)\n" \
667 "									 l (data lost)\n" \
668 "				d[flags]:		create a debug log\n" \
669 "							each flag must be preceded by + or -\n" \
670 "							log flags are: a (all perf events)\n" \
671 "							               o (output to stdout)\n" \
672 "				f:	    		synthesize first level cache events\n" \
673 "				m:	    		synthesize last level cache events\n" \
674 "				t:	    		synthesize TLB events\n" \
675 "				a:	    		synthesize remote access events\n" \
676 "				g[len]:     		synthesize a call chain (use with i or x)\n" \
677 "				G[len]:			synthesize a call chain on existing event records\n" \
678 "				l[len]:     		synthesize last branch entries (use with i or x)\n" \
679 "				L[len]:			synthesize last branch entries on existing event records\n" \
680 "				sNUMBER:    		skip initial number of events\n"		\
681 "				q:			quicker (less detailed) decoding\n" \
682 "				A:			approximate IPC\n" \
683 "				Z:			prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
684 "				T:			use the timestamp trace as kernel time\n" \
685 "				PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
686 "				concatenate multiple options. Default is iybxwpe or cewp\n"
687 
688 static inline
itrace_synth_opts__set_time_range(struct itrace_synth_opts * opts,struct perf_time_interval * ptime_range,int range_num)689 void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
690 				       struct perf_time_interval *ptime_range,
691 				       int range_num)
692 {
693 	opts->ptime_range = ptime_range;
694 	opts->range_num = range_num;
695 }
696 
697 static inline
itrace_synth_opts__clear_time_range(struct itrace_synth_opts * opts)698 void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
699 {
700 	opts->ptime_range = NULL;
701 	opts->range_num = 0;
702 }
703 
704 #else
705 #include "debug.h"
706 
707 static inline struct auxtrace_record *
auxtrace_record__init(struct evlist * evlist __maybe_unused,int * err)708 auxtrace_record__init(struct evlist *evlist __maybe_unused,
709 		      int *err)
710 {
711 	*err = 0;
712 	return NULL;
713 }
714 
715 static inline
auxtrace_record__free(struct auxtrace_record * itr __maybe_unused)716 void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
717 {
718 }
719 
720 static inline
auxtrace_record__options(struct auxtrace_record * itr __maybe_unused,struct evlist * evlist __maybe_unused,struct record_opts * opts __maybe_unused)721 int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
722 			     struct evlist *evlist __maybe_unused,
723 			     struct record_opts *opts __maybe_unused)
724 {
725 	return 0;
726 }
727 
728 static inline
perf_event__process_auxtrace_info(struct perf_session * session __maybe_unused,union perf_event * event __maybe_unused)729 int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
730 				      union perf_event *event __maybe_unused)
731 {
732 	return 0;
733 }
734 
735 static inline
perf_event__process_auxtrace(struct perf_session * session __maybe_unused,union perf_event * event __maybe_unused)736 s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
737 				 union perf_event *event __maybe_unused)
738 {
739 	return 0;
740 }
741 
742 static inline
perf_event__process_auxtrace_error(struct perf_session * session __maybe_unused,union perf_event * event __maybe_unused)743 int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
744 				       union perf_event *event __maybe_unused)
745 {
746 	return 0;
747 }
748 
749 static inline
perf_session__auxtrace_error_inc(struct perf_session * session __maybe_unused,union perf_event * event __maybe_unused)750 void perf_session__auxtrace_error_inc(struct perf_session *session
751 				      __maybe_unused,
752 				      union perf_event *event
753 				      __maybe_unused)
754 {
755 }
756 
757 static inline
events_stats__auxtrace_error_warn(const struct events_stats * stats __maybe_unused)758 void events_stats__auxtrace_error_warn(const struct events_stats *stats
759 				       __maybe_unused)
760 {
761 }
762 
763 static inline
itrace_do_parse_synth_opts(struct itrace_synth_opts * synth_opts __maybe_unused,const char * str __maybe_unused,int unset __maybe_unused)764 int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
765 			       const char *str __maybe_unused, int unset __maybe_unused)
766 {
767 	pr_err("AUX area tracing not supported\n");
768 	return -EINVAL;
769 }
770 
771 static inline
itrace_parse_synth_opts(const struct option * opt __maybe_unused,const char * str __maybe_unused,int unset __maybe_unused)772 int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
773 			    const char *str __maybe_unused,
774 			    int unset __maybe_unused)
775 {
776 	pr_err("AUX area tracing not supported\n");
777 	return -EINVAL;
778 }
779 
780 static inline
auxtrace_parse_snapshot_options(struct auxtrace_record * itr __maybe_unused,struct record_opts * opts __maybe_unused,const char * str)781 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
782 				    struct record_opts *opts __maybe_unused,
783 				    const char *str)
784 {
785 	if (!str)
786 		return 0;
787 	pr_err("AUX area tracing not supported\n");
788 	return -EINVAL;
789 }
790 
791 static inline
auxtrace_parse_sample_options(struct auxtrace_record * itr __maybe_unused,struct evlist * evlist __maybe_unused,struct record_opts * opts __maybe_unused,const char * str)792 int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
793 				  struct evlist *evlist __maybe_unused,
794 				  struct record_opts *opts __maybe_unused,
795 				  const char *str)
796 {
797 	if (!str)
798 		return 0;
799 	pr_err("AUX area tracing not supported\n");
800 	return -EINVAL;
801 }
802 
803 static inline
auxtrace_regroup_aux_output(struct evlist * evlist __maybe_unused)804 void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
805 {
806 }
807 
808 static inline
auxtrace__process_event(struct perf_session * session __maybe_unused,union perf_event * event __maybe_unused,struct perf_sample * sample __maybe_unused,struct perf_tool * tool __maybe_unused)809 int auxtrace__process_event(struct perf_session *session __maybe_unused,
810 			    union perf_event *event __maybe_unused,
811 			    struct perf_sample *sample __maybe_unused,
812 			    struct perf_tool *tool __maybe_unused)
813 {
814 	return 0;
815 }
816 
817 static inline
auxtrace__dump_auxtrace_sample(struct perf_session * session __maybe_unused,struct perf_sample * sample __maybe_unused)818 void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
819 				    struct perf_sample *sample __maybe_unused)
820 {
821 }
822 
823 static inline
auxtrace__flush_events(struct perf_session * session __maybe_unused,struct perf_tool * tool __maybe_unused)824 int auxtrace__flush_events(struct perf_session *session __maybe_unused,
825 			   struct perf_tool *tool __maybe_unused)
826 {
827 	return 0;
828 }
829 
830 static inline
auxtrace__free_events(struct perf_session * session __maybe_unused)831 void auxtrace__free_events(struct perf_session *session __maybe_unused)
832 {
833 }
834 
835 static inline
auxtrace_cache__free(struct auxtrace_cache * auxtrace_cache __maybe_unused)836 void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
837 {
838 }
839 
840 static inline
auxtrace__free(struct perf_session * session __maybe_unused)841 void auxtrace__free(struct perf_session *session __maybe_unused)
842 {
843 }
844 
845 static inline
auxtrace_index__write(int fd __maybe_unused,struct list_head * head __maybe_unused)846 int auxtrace_index__write(int fd __maybe_unused,
847 			  struct list_head *head __maybe_unused)
848 {
849 	return -EINVAL;
850 }
851 
852 static inline
auxtrace_index__process(int fd __maybe_unused,u64 size __maybe_unused,struct perf_session * session __maybe_unused,bool needs_swap __maybe_unused)853 int auxtrace_index__process(int fd __maybe_unused,
854 			    u64 size __maybe_unused,
855 			    struct perf_session *session __maybe_unused,
856 			    bool needs_swap __maybe_unused)
857 {
858 	return -EINVAL;
859 }
860 
861 static inline
auxtrace_index__free(struct list_head * head __maybe_unused)862 void auxtrace_index__free(struct list_head *head __maybe_unused)
863 {
864 }
865 
866 static inline
auxtrace__evsel_is_auxtrace(struct perf_session * session __maybe_unused,struct evsel * evsel __maybe_unused)867 bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
868 				 struct evsel *evsel __maybe_unused)
869 {
870 	return false;
871 }
872 
873 static inline
auxtrace_parse_filters(struct evlist * evlist __maybe_unused)874 int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
875 {
876 	return 0;
877 }
878 
879 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
880 			struct auxtrace_mmap_params *mp,
881 			void *userpg, int fd);
882 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
883 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
884 				off_t auxtrace_offset,
885 				unsigned int auxtrace_pages,
886 				bool auxtrace_overwrite);
887 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
888 				   struct evlist *evlist,
889 				   struct evsel *evsel, int idx);
890 
891 #define ITRACE_HELP ""
892 
893 static inline
itrace_synth_opts__set_time_range(struct itrace_synth_opts * opts __maybe_unused,struct perf_time_interval * ptime_range __maybe_unused,int range_num __maybe_unused)894 void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
895 				       __maybe_unused,
896 				       struct perf_time_interval *ptime_range
897 				       __maybe_unused,
898 				       int range_num __maybe_unused)
899 {
900 }
901 
902 static inline
itrace_synth_opts__clear_time_range(struct itrace_synth_opts * opts __maybe_unused)903 void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
904 					 __maybe_unused)
905 {
906 }
907 
908 #endif
909 
910 #endif
911