/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TRACE_RECURSION_H
#define _LINUX_TRACE_RECURSION_H

#include <linux/interrupt.h>
#include <linux/sched.h>

#ifdef CONFIG_TRACING

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits,
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion check.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent these multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by a previous
 * caller, and we can skip the current check.
 */
enum {
	/* Function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion field:
 * We need a way to maintain state if we are tracing the function
 * graph in irq context, because we may want to trace a particular
 * function that was called in irq context while irq tracing is off.
 * Since this state can only be modified by current, we can reuse
 * trace_recursion for it.
 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at the start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_BIT.
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,

	/*
	 * When transitioning between contexts, the preempt_count() may
	 * not be correct. Allow for a single recursion to cover this case.
	 */
	TRACE_TRANSITION_BIT,

	/* Used to prevent recursion recording from recursing. */
	TRACE_RECORD_RECURSION_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)
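
/*
 * For example, the depth helpers above round-trip through the two
 * DEPTH bits, so any value passed in is masked to the 0..3 range
 * noted in the comment above TRACE_GRAPH_DEPTH_START_BIT:
 *
 *	trace_recursion_set_depth(2);
 *	trace_recursion_depth() now returns 2, and passing in a depth
 *	larger than 3 would store only (depth & 3).
 */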

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
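
/*
 * As a worked example of the "skip the current check" rule described
 * at the top of this file: with the bit layout above, the FTRACE bits
 * occupy bits 0-3 and the INTERNAL bits occupy bits 4-7, so
 *
 *	TRACE_FTRACE_MAX == 0x0f
 *	TRACE_LIST_MAX   == 0xff
 *
 * If the internal list function already set TRACE_INTERNAL_IRQ_BIT
 * (bit 6), then (val & TRACE_CONTEXT_MASK) == 0x40 > TRACE_FTRACE_MAX,
 * and trace_test_and_set_recursion() below skips the callback's own
 * check, since the outer check already protects it.
 */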

/*
 * Used for setting context
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 */
enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,
};

static __always_inline int trace_get_context_bit(void)
{
	unsigned long pc = preempt_count();

	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
		return TRACE_CTX_NORMAL;
	else
		return pc & NMI_MASK ? TRACE_CTX_NMI :
			pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
}
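
/*
 * For example, while a hardirq handler runs, preempt_count() has bits
 * set under HARDIRQ_MASK (but not NMI_MASK), so the function above
 * returns TRACE_CTX_IRQ; with none of the NMI/hardirq/softirq bits
 * set it returns TRACE_CTX_NORMAL. Note that SOFTIRQ_OFFSET is tested
 * rather than SOFTIRQ_MASK, so merely having softirqs disabled does
 * not count as softirq context; only actually serving a softirq does.
 */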

#ifdef CONFIG_FTRACE_RECORD_RECURSION
extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
# define do_ftrace_record_recursion(ip, pip)				\
	do {								\
		if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
			trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
			ftrace_record_recursion(ip, pip);		\
			trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
		}							\
	} while (0)
#else
# define do_ftrace_record_recursion(ip, pip)	do { } while (0)
#endif

static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
							int start, int max)
{
	unsigned int val = READ_ONCE(current->trace_recursion);
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit))) {
		/*
		 * It could be that preempt_count has not been updated during
		 * a switch between contexts. Allow for a single recursion.
		 */
		bit = TRACE_TRANSITION_BIT;
		if (val & (1 << bit)) {
			do_ftrace_record_recursion(ip, pip);
			return -1;
		}
	} else {
		/* Normal check passed, clear the transition to allow it again */
		val &= ~(1 << TRACE_TRANSITION_BIT);
	}

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit + 1;
}
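
/*
 * To summarize the return values of trace_test_and_set_recursion()
 * for the callers below:
 *
 *	-1	recursion was detected in the same context (even after
 *		allowing one transition); the caller must bail out.
 *	 0	a previous caller using higher bits already did the
 *		check; proceed, there is no bit to clear.
 *	>= 1	a recursion bit was set; pass this value to
 *		trace_clear_recursion() when done.
 */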

static __always_inline void trace_clear_recursion(int bit)
{
	if (!bit)
		return;

	barrier();
	bit--;
	trace_recursion_clear(bit);
}

/**
 * ftrace_test_recursion_trylock - tests for recursion in same context
 * @ip: The instruction pointer of the function being traced
 * @parent_ip: The instruction pointer of the caller of @ip
 *
 * Use this for ftrace callbacks. This will detect if the function
 * tracing recursed in the same context (normal vs interrupt).
 *
 * Returns: -1 if a recursion happened.
 *           >= 0 if no recursion.
 */
static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
							 unsigned long parent_ip)
{
	return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
}

/**
 * ftrace_test_recursion_unlock - called when function callback is complete
 * @bit: The return of a successful ftrace_test_recursion_trylock()
 *
 * This is used at the end of an ftrace callback.
 */
static __always_inline void ftrace_test_recursion_unlock(int bit)
{
	trace_clear_recursion(bit);
}
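
/*
 * A minimal sketch of the expected usage in an ftrace callback.
 * my_callback_func() and do_trace() are hypothetical, and the callback
 * signature is assumed to be the four-argument ftrace_ops form that
 * takes a struct ftrace_regs pointer:
 *
 *	static void my_callback_func(unsigned long ip, unsigned long parent_ip,
 *				     struct ftrace_ops *op,
 *				     struct ftrace_regs *fregs)
 *	{
 *		int bit;
 *
 *		bit = ftrace_test_recursion_trylock(ip, parent_ip);
 *		if (bit < 0)
 *			return;
 *
 *		do_trace(ip, parent_ip);
 *
 *		ftrace_test_recursion_unlock(bit);
 *	}
 *
 * do_trace() stands in for whatever work the callback performs; the
 * unlock is a no-op when trylock returned 0 (outer check already held).
 */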

#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_RECURSION_H */