// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2012 The Chromium OS Authors.
 */

#include <common.h>
#include <mapmem.h>
#include <trace.h>
#include <asm/io.h>
#include <asm/sections.h>

DECLARE_GLOBAL_DATA_PTR;

static char trace_enabled __attribute__((section(".data")));
static char trace_inited __attribute__((section(".data")));

/* The header block at the start of the trace memory area */
struct trace_hdr {
	int func_count;		/* Total number of function call sites */
	u64 call_count;		/* Total number of tracked function calls */
	u64 untracked_count;	/* Total number of untracked function calls */
	int funcs_used;		/* Total number of functions used */

	/*
	 * Call count for each function. This is indexed by the word offset
	 * of the function from gd->relocaddr
	 */
	uintptr_t *call_accum;

	/* Function trace list */
	struct trace_call *ftrace;	/* The function call records */
	ulong ftrace_size;	/* Num. of ftrace records we have space for */
	ulong ftrace_count;	/* Num. of ftrace records written */
	ulong ftrace_too_deep_count;	/* Functions that were too deep */

	int depth;		/* Current function call depth */
	int depth_limit;	/* Depth beyond which calls are not recorded */
	int max_depth;		/* Maximum call depth seen so far */
};

static struct trace_hdr *hdr;	/* Pointer to start of trace buffer */

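/**
 * Convert a function address into a function-site number
 *
 * The number is the offset of the function from the start of the U-Boot
 * image (_init for sandbox, else relocaddr or CONFIG_SYS_TEXT_BASE),
 * divided by FUNC_SITE_SIZE. It is used to index call_accum[] and to
 * store functions compactly in trace records.
 *
 * @param func_ptr	Address of the function
 * @return function-site number for the function
 */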
static inline uintptr_t __attribute__((no_instrument_function))
		func_ptr_to_num(void *func_ptr)
{
	uintptr_t offset = (uintptr_t)func_ptr;

#ifdef CONFIG_SANDBOX
	offset -= (uintptr_t)&_init;
#else
	if (gd->flags & GD_FLG_RELOC)
		offset -= gd->relocaddr;
	else
		offset -= CONFIG_SYS_TEXT_BASE;
#endif
	return offset / FUNC_SITE_SIZE;
}

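/**
 * Add a function entry/exit record to the function call trace
 *
 * Records are not stored once the call depth exceeds the depth limit
 * (they are counted in ftrace_too_deep_count instead). When the buffer
 * is full, only ftrace_count keeps advancing so the overflow can be
 * reported by trace_print_stats().
 *
 * @param func_ptr	Pointer to function being entered or exited
 * @param caller	Pointer to function which called this function
 * @param flags		FUNCF_ENTRY or FUNCF_EXIT; a timestamp is OR'd in
 */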
static void __attribute__((no_instrument_function)) add_ftrace(void *func_ptr,
				void *caller, ulong flags)
{
	if (hdr->depth > hdr->depth_limit) {
		hdr->ftrace_too_deep_count++;
		return;
	}
	if (hdr->ftrace_count < hdr->ftrace_size) {
		struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];

		rec->func = func_ptr_to_num(func_ptr);
		rec->caller = func_ptr_to_num(caller);
		rec->flags = flags | (timer_get_us() & FUNCF_TIMESTAMP_MASK);
	}
	hdr->ftrace_count++;
}

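/**
 * Add a marker record holding CONFIG_SYS_TEXT_BASE
 *
 * This gives trace-processing tools a reference point for converting the
 * recorded function-site numbers back into absolute addresses.
 */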
static void __attribute__((no_instrument_function)) add_textbase(void)
{
	if (hdr->ftrace_count < hdr->ftrace_size) {
		struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];

		rec->func = CONFIG_SYS_TEXT_BASE;
		rec->caller = 0;
		rec->flags = FUNCF_TEXTBASE;
	}
	hdr->ftrace_count++;
}

/**
 * This is called on every function entry
 *
 * Functions are instrumented by building with gcc's -finstrument-functions
 * option. We add to our tally for this function and add to the list of
 * called functions.
 *
 * @param func_ptr	Pointer to function being entered
 * @param caller	Pointer to function which called this function
 */
void __attribute__((no_instrument_function)) __cyg_profile_func_enter(
		void *func_ptr, void *caller)
{
	if (trace_enabled) {
		int func;

		add_ftrace(func_ptr, caller, FUNCF_ENTRY);
		func = func_ptr_to_num(func_ptr);
		if (func < hdr->func_count) {
			hdr->call_accum[func]++;
			hdr->call_count++;
		} else {
			hdr->untracked_count++;
		}
		hdr->depth++;
		if (hdr->depth > hdr->max_depth)
			hdr->max_depth = hdr->depth;
	}
}

/**
 * This is called on every function exit
 *
 * We add an exit record and reduce the call depth.
 *
 * @param func_ptr	Pointer to function being exited
 * @param caller	Pointer to function which called this function
 */
void __attribute__((no_instrument_function)) __cyg_profile_func_exit(
		void *func_ptr, void *caller)
{
	if (trace_enabled) {
		add_ftrace(func_ptr, caller, FUNCF_EXIT);
		hdr->depth--;
	}
}

/**
 * Produce a list of called functions
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of function records.
 *
 * @param buff		Buffer to place list into
 * @param buff_size	Size of buffer
 * @param needed	Returns size of buffer needed, which may be
 *			greater than buff_size if we ran out of space.
 * @return 0 if ok, -1 if space was exhausted
 */
int trace_list_functions(void *buff, int buff_size, unsigned int *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	int func;
	int upto;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each function */
	for (func = upto = 0; func < hdr->func_count; func++) {
		int calls = hdr->call_accum[func];

		if (!calls)
			continue;

		if (ptr + sizeof(struct trace_output_func) < end) {
			struct trace_output_func *stats = ptr;

			stats->offset = func * FUNC_SITE_SIZE;
			stats->call_count = calls;
			upto++;
		}
		ptr += sizeof(struct trace_output_func);
	}

	/* Update the header */
	if (output_hdr) {
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_FUNCS;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -1;
	return 0;
}

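/**
 * Produce a list of recorded function calls
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of call records.
 *
 * @param buff		Buffer to place list into
 * @param buff_size	Size of buffer
 * @param needed	Returns size of buffer needed, which may be
 *			greater than buff_size if we ran out of space.
 * @return 0 if ok, -1 if space was exhausted
 */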
int trace_list_calls(void *buff, int buff_size, unsigned *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	int rec, upto;
	int count;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each call */
	count = hdr->ftrace_count;
	if (count > hdr->ftrace_size)
		count = hdr->ftrace_size;
	for (rec = upto = 0; rec < count; rec++) {
		if (ptr + sizeof(struct trace_call) < end) {
			struct trace_call *call = &hdr->ftrace[rec];
			struct trace_call *out = ptr;

			out->func = call->func * FUNC_SITE_SIZE;
			out->caller = call->caller * FUNC_SITE_SIZE;
			out->flags = call->flags;
			upto++;
		}
		ptr += sizeof(struct trace_call);
	}

	/* Update the header */
	if (output_hdr) {
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_CALLS;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -1;
	return 0;
}

/* Print basic information about tracing */
void trace_print_stats(void)
{
	ulong count;

#ifndef FTRACE
	puts("Warning: make U-Boot with FTRACE to enable function instrumenting.\n");
	puts("You will likely get zeroed data here\n");
#endif
	if (!trace_inited) {
		printf("Trace is disabled\n");
		return;
	}
	print_grouped_ull(hdr->func_count, 10);
	puts(" function sites\n");
	print_grouped_ull(hdr->call_count, 10);
	puts(" function calls\n");
	print_grouped_ull(hdr->untracked_count, 10);
	puts(" untracked function calls\n");
	count = min(hdr->ftrace_count, hdr->ftrace_size);
	print_grouped_ull(count, 10);
	puts(" traced function calls");
	if (hdr->ftrace_count > hdr->ftrace_size) {
		printf(" (%lu dropped due to overflow)",
		       hdr->ftrace_count - hdr->ftrace_size);
	}
	puts("\n");
	printf("%15d maximum observed call depth\n", hdr->max_depth);
	printf("%15d call depth limit\n", hdr->depth_limit);
	print_grouped_ull(hdr->ftrace_too_deep_count, 10);
	puts(" calls not traced due to depth\n");
}

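/**
 * Enable or disable function tracing
 *
 * @param enabled	Non-zero to enable tracing, 0 to disable it
 */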
void __attribute__((no_instrument_function)) trace_set_enabled(int enabled)
{
	trace_enabled = enabled != 0;
}

/**
 * Init the tracing system ready for use, and enable it
 *
 * @param buff		Pointer to trace buffer
 * @param buff_size	Size of trace buffer
 * @return 0 if ok, -1 if the buffer is too small
 */
int __attribute__((no_instrument_function)) trace_init(void *buff,
		size_t buff_size)
{
	ulong func_count = gd->mon_len / FUNC_SITE_SIZE;
	size_t needed;
	int was_disabled = !trace_enabled;

	if (!was_disabled) {
#ifdef CONFIG_TRACE_EARLY
		char *end;
		ulong used;

		/*
		 * Copy over the early trace data if we have it. Disable
		 * tracing while we are doing this.
		 */
		trace_enabled = 0;
		hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR,
				 CONFIG_TRACE_EARLY_SIZE);
		end = (char *)&hdr->ftrace[min(hdr->ftrace_count,
					       hdr->ftrace_size)];
		used = end - (char *)hdr;
		printf("trace: copying %08lx bytes of early data from %x to %08lx\n",
		       used, CONFIG_TRACE_EARLY_ADDR,
		       (ulong)map_to_sysmem(buff));
		memcpy(buff, hdr, used);
#else
		puts("trace: already enabled\n");
		return -1;
#endif
	}
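	/*
	 * Layout of the trace buffer: the header, then the call_accum[]
	 * array (one counter per function site), then as many timed
	 * trace_call records as fit in the remaining space.
	 */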
	hdr = (struct trace_hdr *)buff;
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size %zd bytes: at least %zd needed\n",
		       buff_size, needed);
		return -1;
	}

	if (was_disabled)
		memset(hdr, '\0', needed);
	hdr->func_count = func_count;
	hdr->call_accum = (uintptr_t *)(hdr + 1);

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)(buff + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	add_textbase();

	puts("trace: enabled\n");
	hdr->depth_limit = 15;
	trace_enabled = 1;
	trace_inited = 1;
	return 0;
}

#ifdef CONFIG_TRACE_EARLY
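/**
 * Init the tracing system in its early state, using a fixed buffer
 *
 * This places the trace data at CONFIG_TRACE_EARLY_ADDR so that tracing can
 * begin before the final trace buffer is allocated; trace_init() copies the
 * early data across later.
 *
 * @return 0 if ok, -1 if the early buffer is too small
 */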
int __attribute__((no_instrument_function)) trace_early_init(void)
{
	ulong func_count = gd->mon_len / FUNC_SITE_SIZE;
	size_t buff_size = CONFIG_TRACE_EARLY_SIZE;
	size_t needed;

	/* We can ignore additional calls to this function */
	if (trace_enabled)
		return 0;

	hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR, CONFIG_TRACE_EARLY_SIZE);
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size is %zd bytes, at least %zd needed\n",
		       buff_size, needed);
		return -1;
	}

	memset(hdr, '\0', needed);
	hdr->call_accum = (uintptr_t *)(hdr + 1);
	hdr->func_count = func_count;

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)((char *)hdr + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	add_textbase();
	hdr->depth_limit = 200;
	printf("trace: early enable at %08x\n", CONFIG_TRACE_EARLY_ADDR);

	trace_enabled = 1;
	return 0;
}
#endif