xref: /linux/tools/perf/util/block-info.c (revision 9a6b55ac)
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <linux/zalloc.h>
#include "block-info.h"
#include "sort.h"
#include "annotate.h"
#include "symbol.h"
#include "dso.h"
#include "map.h"
#include "srcline.h"
#include "evlist.h"
#include "hist.h"
#include "ui/browsers/hists.h"

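/*
 * Column header text and width for each column of the basic-block report
 * (the perf report --total-cycles view).
 */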
static struct block_header_column {
	const char *name;
	int width;
} block_columns[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
	[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT] = {
		.name = "Sampled Cycles%",
		.width = 15,
	},
	[PERF_HPP_REPORT__BLOCK_LBR_CYCLES] = {
		.name = "Sampled Cycles",
		.width = 14,
	},
	[PERF_HPP_REPORT__BLOCK_CYCLES_PCT] = {
		.name = "Avg Cycles%",
		.width = 11,
	},
	[PERF_HPP_REPORT__BLOCK_AVG_CYCLES] = {
		.name = "Avg Cycles",
		.width = 10,
	},
	[PERF_HPP_REPORT__BLOCK_RANGE] = {
		.name = "[Program Block Range]",
		.width = 70,
	},
	[PERF_HPP_REPORT__BLOCK_DSO] = {
		.name = "Shared Object",
		.width = 20,
	}
};

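/* Reference counting helpers for struct block_info. */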
struct block_info *block_info__get(struct block_info *bi)
{
	if (bi)
		refcount_inc(&bi->refcnt);
	return bi;
}

void block_info__put(struct block_info *bi)
{
	if (bi && refcount_dec_and_test(&bi->refcnt))
		free(bi);
}

struct block_info *block_info__new(void)
{
	struct block_info *bi = zalloc(sizeof(*bi));

	if (bi)
		refcount_set(&bi->refcnt, 1);
	return bi;
}

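/*
 * Compare two hist entries by block: entries without a symbol sort first,
 * then by symbol, then by block start/end offsets within the symbol.
 */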
int64_t block_info__cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			struct hist_entry *left, struct hist_entry *right)
{
	struct block_info *bi_l = left->block_info;
	struct block_info *bi_r = right->block_info;
	int cmp;

	if (!bi_l->sym || !bi_r->sym) {
		if (!bi_l->sym && !bi_r->sym)
			return 0;
		else if (!bi_l->sym)
			return -1;
		else
			return 1;
	}

	if (bi_l->sym == bi_r->sym) {
		if (bi_l->start == bi_r->start) {
			if (bi_l->end == bi_r->end)
				return 0;
			else
				return (int64_t)(bi_r->end - bi_l->end);
		} else
			return (int64_t)(bi_r->start - bi_l->start);
	} else {
		cmp = strcmp(bi_l->sym->name, bi_r->sym->name);
		if (cmp)
			return cmp;
	}

	if (bi_l->sym->start != bi_r->sym->start)
		return (int64_t)(bi_r->sym->start - bi_l->sym->start);

	return (int64_t)(bi_r->sym->end - bi_l->sym->end);
}

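/*
 * Fill a block_info from one slot of the cycles histogram: the block spans
 * [ch->start, offset] within the symbol.
 */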
static void init_block_info(struct block_info *bi, struct symbol *sym,
			    struct cyc_hist *ch, int offset,
			    u64 total_cycles)
{
	bi->sym = sym;
	bi->start = ch->start;
	bi->end = offset;
	bi->cycles = ch->cycles;
	bi->cycles_aggr = ch->cycles_aggr;
	bi->num = ch->num;
	bi->num_aggr = ch->num_aggr;
	bi->total_cycles = total_cycles;

	memcpy(bi->cycles_spark, ch->cycles_spark,
	       NUM_SPARKS * sizeof(u64));
}

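/*
 * Walk the cycles histogram of a hist entry's symbol and add one block hist
 * entry per sampled block.  The per-block average cycles are accumulated
 * into *block_cycles_aggr when it is non-NULL.
 */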
int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
			    u64 *block_cycles_aggr, u64 total_cycles)
{
	struct annotation *notes;
	struct cyc_hist *ch;
	static struct addr_location al;
	u64 cycles = 0;

	if (!he->ms.map || !he->ms.sym)
		return 0;

	memset(&al, 0, sizeof(al));
	al.map = he->ms.map;
	al.sym = he->ms.sym;

	notes = symbol__annotation(he->ms.sym);
	if (!notes || !notes->src || !notes->src->cycles_hist)
		return 0;
	ch = notes->src->cycles_hist;
	for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
		if (ch[i].num_aggr) {
			struct block_info *bi;
			struct hist_entry *he_block;

			bi = block_info__new();
			if (!bi)
				return -1;

			init_block_info(bi, he->ms.sym, &ch[i], i,
					total_cycles);
			cycles += bi->cycles_aggr / bi->num_aggr;

			he_block = hists__add_entry_block(&bh->block_hists,
							  &al, bi);
			if (!he_block) {
				block_info__put(bi);
				return -1;
			}
		}
	}

	if (block_cycles_aggr)
		*block_cycles_aggr += cycles;

	return 0;
}

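/* perf_hpp_fmt callbacks shared by all block columns: header text and width. */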
static int block_column_header(struct perf_hpp_fmt *fmt,
			       struct perf_hpp *hpp,
			       struct hists *hists __maybe_unused,
			       int line __maybe_unused,
			       int *span __maybe_unused)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
			 block_fmt->header);
}

static int block_column_width(struct perf_hpp_fmt *fmt,
			      struct perf_hpp *hpp __maybe_unused,
			      struct hists *hists __maybe_unused)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);

	return block_fmt->width;
}

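/*
 * "Sampled Cycles%": cycles of this block as a percentage of all sampled
 * cycles, plus the sort callback that orders entries by that ratio.
 */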
static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
					struct perf_hpp *hpp,
					struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	double ratio = 0.0;
	char buf[16];

	if (block_fmt->total_cycles)
		ratio = (double)bi->cycles / (double)block_fmt->total_cycles;

	sprintf(buf, "%.2f%%", 100.0 * ratio);

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}

static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
					   struct hist_entry *left,
					   struct hist_entry *right)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi_l = left->block_info;
	struct block_info *bi_r = right->block_info;
	double l, r;

	if (block_fmt->total_cycles) {
		l = ((double)bi_l->cycles /
			(double)block_fmt->total_cycles) * 100000.0;
		r = ((double)bi_r->cycles /
			(double)block_fmt->total_cycles) * 100000.0;
		return (int64_t)l - (int64_t)r;
	}

	return 0;
}

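/* Format a cycle count with a K/M suffix so it fits in a narrow column. */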
static void cycles_string(u64 cycles, char *buf, int size)
{
	if (cycles >= 1000000)
		scnprintf(buf, size, "%.1fM", (double)cycles / 1000000.0);
	else if (cycles >= 1000)
		scnprintf(buf, size, "%.1fK", (double)cycles / 1000.0);
	else
		scnprintf(buf, size, "%1d", (int)cycles); /* < 1000, fits in int */
}

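/* "Sampled Cycles": aggregated cycles sampled for this block. */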
static int block_cycles_lbr_entry(struct perf_hpp_fmt *fmt,
				  struct perf_hpp *hpp, struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	char cycles_buf[16];

	cycles_string(bi->cycles_aggr, cycles_buf, sizeof(cycles_buf));

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
			 cycles_buf);
}

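/*
 * "Avg Cycles%": this block's average cycles relative to the summed
 * average cycles of all blocks in the report.
 */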
static int block_cycles_pct_entry(struct perf_hpp_fmt *fmt,
				  struct perf_hpp *hpp, struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	double ratio = 0.0;
	u64 avg;
	char buf[16];

	if (block_fmt->block_cycles && bi->num_aggr) {
		avg = bi->cycles_aggr / bi->num_aggr;
		ratio = (double)avg / (double)block_fmt->block_cycles;
	}

	sprintf(buf, "%.2f%%", 100.0 * ratio);

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}

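/* "Avg Cycles": average cycles per sample for this block. */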
static int block_avg_cycles_entry(struct perf_hpp_fmt *fmt,
				  struct perf_hpp *hpp,
				  struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	char cycles_buf[16];

	cycles_string(bi->cycles_aggr / bi->num_aggr, cycles_buf,
		      sizeof(cycles_buf));

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
			 cycles_buf);
}

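/*
 * "[Program Block Range]": the source lines covering the block, falling
 * back to raw start/end offsets when no source line is known.
 */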
static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct block_info *bi = he->block_info;
	char buf[128];
	char *start_line, *end_line;

	symbol_conf.disable_add2line_warn = true;

	start_line = map__srcline(he->ms.map, bi->sym->start + bi->start,
				  he->ms.sym);

	end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
				he->ms.sym);

	if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
		scnprintf(buf, sizeof(buf), "[%s -> %s]",
			  start_line, end_line);
	} else {
		scnprintf(buf, sizeof(buf), "[%7lx -> %7lx]",
			  bi->start, bi->end);
	}

	free_srcline(start_line);
	free_srcline(end_line);

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width, buf);
}

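/* "Shared Object": short name of the DSO the block belongs to. */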
static int block_dso_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			   struct hist_entry *he)
{
	struct block_fmt *block_fmt = container_of(fmt, struct block_fmt, fmt);
	struct map *map = he->ms.map;

	if (map && map->dso) {
		return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
				 map->dso->short_name);
	}

	return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
			 "[unknown]");
}

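/* Set header text, width and the header/width callbacks for one column. */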
static void init_block_header(struct block_fmt *block_fmt)
{
	struct perf_hpp_fmt *fmt = &block_fmt->fmt;

	BUG_ON(block_fmt->idx >= PERF_HPP_REPORT__BLOCK_MAX_INDEX);

	block_fmt->header = block_columns[block_fmt->idx].name;
	block_fmt->width = block_columns[block_fmt->idx].width;

	fmt->header = block_column_header;
	fmt->width = block_column_width;
}

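/*
 * Hook up the entry callback (and, for the leading column, the cmp/sort
 * callbacks) for one column and register it with the hpp list.
 */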
static void hpp_register(struct block_fmt *block_fmt, int idx,
			 struct perf_hpp_list *hpp_list)
{
	struct perf_hpp_fmt *fmt = &block_fmt->fmt;

	block_fmt->idx = idx;
	INIT_LIST_HEAD(&fmt->list);
	INIT_LIST_HEAD(&fmt->sort_list);

	switch (idx) {
	case PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT:
		fmt->entry = block_total_cycles_pct_entry;
		fmt->cmp = block_info__cmp;
		fmt->sort = block_total_cycles_pct_sort;
		break;
	case PERF_HPP_REPORT__BLOCK_LBR_CYCLES:
		fmt->entry = block_cycles_lbr_entry;
		break;
	case PERF_HPP_REPORT__BLOCK_CYCLES_PCT:
		fmt->entry = block_cycles_pct_entry;
		break;
	case PERF_HPP_REPORT__BLOCK_AVG_CYCLES:
		fmt->entry = block_avg_cycles_entry;
		break;
	case PERF_HPP_REPORT__BLOCK_RANGE:
		fmt->entry = block_range_entry;
		break;
	case PERF_HPP_REPORT__BLOCK_DSO:
		fmt->entry = block_dso_entry;
		break;
	default:
		return;
	}

	init_block_header(block_fmt);
	perf_hpp_list__column_register(hpp_list, fmt);
}

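/* Register every block column with the given hpp list. */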
static void register_block_columns(struct perf_hpp_list *hpp_list,
				   struct block_fmt *block_fmts)
{
	for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++)
		hpp_register(&block_fmts[i], i, hpp_list);
}

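/*
 * Initialize a block hists instance: one header line, all block columns
 * registered, output sorted by "Sampled Cycles%".
 */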
static void init_block_hist(struct block_hist *bh, struct block_fmt *block_fmts)
{
	__hists__init(&bh->block_hists, &bh->block_list);
	perf_hpp_list__init(&bh->block_list);
	bh->block_list.nr_header_lines = 1;

	register_block_columns(&bh->block_list, block_fmts);

	perf_hpp_list__register_sort_field(&bh->block_list,
		&block_fmts[PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT].fmt);
}

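/*
 * Build the block report for one hists tree: collect the blocks of every
 * hist entry, store the cycle totals in each column format, then resort
 * the block hists for output.
 */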
static void process_block_report(struct hists *hists,
				 struct block_report *block_report,
				 u64 total_cycles)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct block_hist *bh = &block_report->hist;
	struct hist_entry *he;

	init_block_hist(bh, block_report->fmts);

	while (next) {
		he = rb_entry(next, struct hist_entry, rb_node);
		block_info__process_sym(he, bh, &block_report->cycles,
					total_cycles);
		next = rb_next(&he->rb_node);
	}

	for (int i = 0; i < PERF_HPP_REPORT__BLOCK_MAX_INDEX; i++) {
		block_report->fmts[i].total_cycles = total_cycles;
		block_report->fmts[i].block_cycles = block_report->cycles;
	}

	hists__output_resort(&bh->block_hists, NULL);
}

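/*
 * Allocate and fill one block report per evsel in the evlist.  The caller
 * is expected to free the returned array.
 */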
struct block_report *block_info__create_report(struct evlist *evlist,
					       u64 total_cycles)
{
	struct block_report *block_reports;
	int nr_hists = evlist->core.nr_entries, i = 0;
	struct evsel *pos;

	block_reports = calloc(nr_hists, sizeof(struct block_report));
	if (!block_reports)
		return NULL;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		process_block_report(hists, &block_reports[i], total_cycles);
		i++;
	}

	return block_reports;
}

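/*
 * Display the block hists either with stdio (use_browser == 0) or with the
 * TUI browser (use_browser == 1); other browsers are not supported.
 */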
int report__browse_block_hists(struct block_hist *bh, float min_percent,
			       struct evsel *evsel, struct perf_env *env,
			       struct annotation_options *annotation_opts)
{
	int ret;

	switch (use_browser) {
	case 0:
		symbol_conf.report_individual_block = true;
		hists__fprintf(&bh->block_hists, true, 0, 0, min_percent,
			       stdout, true);
		hists__delete_entries(&bh->block_hists);
		return 0;
	case 1:
		symbol_conf.report_individual_block = true;
		ret = block_hists_tui_browse(bh, evsel, min_percent,
					     env, annotation_opts);
		hists__delete_entries(&bh->block_hists);
		return ret;
	default:
		return -1;
	}

	return 0;
}

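/* Percentage of total sampled cycles spent in this entry's block. */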
float block_info__total_cycles_percent(struct hist_entry *he)
{
	struct block_info *bi = he->block_info;

	if (bi->total_cycles)
		return bi->cycles * 100.0 / bi->total_cycles;

	return 0.0;
}