xref: /linux/tools/perf/util/lock-contention.h (revision 4fd06bd2)
// SPDX-License-Identifier: GPL-2.0
2 #ifndef PERF_LOCK_CONTENTION_H
3 #define PERF_LOCK_CONTENTION_H
4 
5 #include <linux/list.h>
6 #include <linux/rbtree.h>
7 
8 struct lock_filter {
9 	int			nr_types;
10 	int			nr_addrs;
11 	int			nr_syms;
12 	int			nr_cgrps;
13 	unsigned int		*types;
14 	unsigned long		*addrs;
15 	char			**syms;
16 	u64			*cgrps;
17 };
18 
19 struct lock_stat {
20 	struct hlist_node	hash_entry;
21 	struct rb_node		rb;		/* used for sorting */
22 
23 	u64			addr;		/* address of lockdep_map, used as ID */
24 	char			*name;		/* for strcpy(), we cannot use const */
25 	u64			*callstack;
26 
27 	unsigned int		nr_acquire;
28 	unsigned int		nr_acquired;
29 	unsigned int		nr_contended;
30 	unsigned int		nr_release;
31 
32 	union {
33 		unsigned int	nr_readlock;
34 		unsigned int	flags;
35 	};
36 	unsigned int		nr_trylock;
37 
38 	/* these times are in nano sec. */
39 	u64                     avg_wait_time;
40 	u64			wait_time_total;
41 	u64			wait_time_min;
42 	u64			wait_time_max;
43 
44 	int			broken; /* flag of blacklist */
45 	int			combined;
46 };
47 
/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first event of acquire.
 * By the nature of lock events there is no guarantee that the first
 * event seen for a lock is acquire; it can be acquired, contended or
 * release.
 */
#define SEQ_STATE_UNINITIALIZED      0	       /* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48
69 
70 struct lock_stat *lock_stat_find(u64 addr);
71 struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags);
72 
73 bool match_callstack_filter(struct machine *machine, u64 *callstack);
74 
75 /*
76  * struct lock_seq_stat:
77  * Place to put on state of one lock sequence
78  * 1) acquire -> acquired -> release
79  * 2) acquire -> contended -> acquired -> release
80  * 3) acquire (with read or try) -> release
81  * 4) Are there other patterns?
82  */
83 struct lock_seq_stat {
84 	struct list_head        list;
85 	int			state;
86 	u64			prev_event_time;
87 	u64                     addr;
88 
89 	int                     read_count;
90 };
91 
92 struct thread_stat {
93 	struct rb_node		rb;
94 
95 	u32                     tid;
96 	struct list_head        seq_list;
97 };
98 
/*
 * CONTENTION_STACK_DEPTH
 * Number of stack trace entries to find callers
 */
#define CONTENTION_STACK_DEPTH  8

/*
 * CONTENTION_STACK_SKIP
 * Number of stack trace entries to skip when finding callers.
 * The first few entries belong to the locking implementation itself.
 */
#define CONTENTION_STACK_SKIP  4
111 
/*
 * flags for lock:contention_begin
 * Imported from include/trace/events/lock.h.
 */
#define LCB_F_SPIN	(1U << 0)
#define LCB_F_READ	(1U << 1)
#define LCB_F_WRITE	(1U << 2)
#define LCB_F_RT	(1U << 3)
#define LCB_F_PERCPU	(1U << 4)
#define LCB_F_MUTEX	(1U << 5)

struct evlist;
struct machine;
struct target;
126 
/*
 * Counters of records that could not be processed, split by the
 * resource that failed (exact semantics defined by the collector).
 */
struct lock_contention_fails {
	int task;
	int stack;
	int time;
	int data;
};
133 
134 struct lock_contention {
135 	struct evlist *evlist;
136 	struct target *target;
137 	struct machine *machine;
138 	struct hlist_head *result;
139 	struct lock_filter *filters;
140 	struct lock_contention_fails fails;
141 	struct rb_root cgroups;
142 	unsigned long map_nr_entries;
143 	int max_stack;
144 	int stack_skip;
145 	int aggr_mode;
146 	int owner;
147 	int nr_filtered;
148 	bool save_callstack;
149 };
150 
151 #ifdef HAVE_BPF_SKEL
152 
153 int lock_contention_prepare(struct lock_contention *con);
154 int lock_contention_start(void);
155 int lock_contention_stop(void);
156 int lock_contention_read(struct lock_contention *con);
157 int lock_contention_finish(struct lock_contention *con);
158 
159 #else  /* !HAVE_BPF_SKEL */
160 
/* Stub: without BPF skeleton support, preparation is a no-op. */
static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused)
{
	return 0;
}
165 
/* Stub: without BPF skeleton support, starting collection is a no-op. */
static inline int lock_contention_start(void) { return 0; }
/* Stub: without BPF skeleton support, stopping collection is a no-op. */
static inline int lock_contention_stop(void) { return 0; }
/* Stub: without BPF skeleton support, teardown is a no-op. */
static inline int lock_contention_finish(struct lock_contention *con __maybe_unused)
{
	return 0;
}
172 
lock_contention_read(struct lock_contention * con __maybe_unused)173 static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
174 {
175 	return 0;
176 }
177 
178 #endif  /* HAVE_BPF_SKEL */
179 
180 #endif  /* PERF_LOCK_CONTENTION_H */
181