xref: /linux/tools/perf/util/sort.c (revision 871304a7)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <regex.h>
5 #include <stdlib.h>
6 #include <linux/mman.h>
7 #include <linux/time64.h>
8 #include "debug.h"
9 #include "dso.h"
10 #include "sort.h"
11 #include "hist.h"
12 #include "cacheline.h"
13 #include "comm.h"
14 #include "map.h"
15 #include "maps.h"
16 #include "symbol.h"
17 #include "map_symbol.h"
18 #include "branch.h"
19 #include "thread.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "srcline.h"
23 #include "strlist.h"
24 #include "strbuf.h"
25 #include "mem-events.h"
26 #include "annotate.h"
27 #include "annotate-data.h"
28 #include "event.h"
29 #include "time-utils.h"
30 #include "cgroup.h"
31 #include "machine.h"
32 #include "trace-event.h"
33 #include <linux/kernel.h>
34 #include <linux/string.h>
35 
36 #ifdef HAVE_LIBTRACEEVENT
37 #include <traceevent/event-parse.h>
38 #endif
39 
40 regex_t		parent_regex;
41 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
42 const char	*parent_pattern = default_parent_pattern;
43 const char	*default_sort_order = "comm,dso,symbol";
44 const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
45 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
46 const char	default_top_sort_order[] = "dso,symbol";
47 const char	default_diff_sort_order[] = "dso,symbol";
48 const char	default_tracepoint_sort_order[] = "trace";
49 const char	*sort_order;
50 const char	*field_order;
51 regex_t		ignore_callees_regex;
52 int		have_ignore_callees = 0;
53 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
54 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
55 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
56 
57 /*
58  * Some architectures have an Adjacent Cacheline Prefetch feature, which
59  * behaves as if the cacheline size were doubled. Enable this flag to
60  * check things at double-cacheline granularity.
61  */
62 bool chk_double_cl;
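
/*
 * For example: cl_address() from cacheline.h masks an address down to
 * cacheline granularity, or to twice the cacheline size when
 * chk_double_cl is set, so with a typical 64-byte line the data
 * cacheline sort key groups addresses into 128-byte buckets
 * (0x1000..0x107f all map to 0x1000).
 */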
63 
64 /*
65  * Replaces all occurrences of the character used with the:
66  *
67  * -t, --field-separator
68  *
69  * option, which uses a special separator character and doesn't pad with spaces,
70  * replacing all occurrences of this separator in symbol names (and other
71  * output) with a '.' character, so that it is the only invalid separator.
72  */
73 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
74 {
75 	int n;
76 	va_list ap;
77 
78 	va_start(ap, fmt);
79 	n = vsnprintf(bf, size, fmt, ap);
80 	if (symbol_conf.field_sep && n > 0) {
81 		char *sep = bf;
82 
83 		while (1) {
84 			sep = strchr(sep, *symbol_conf.field_sep);
85 			if (sep == NULL)
86 				break;
87 			*sep = '.';
88 		}
89 	}
90 	va_end(ap);
91 
92 	if (n >= (int)size)
93 		return size - 1;
94 	return n;
95 }
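
/*
 * For example, with "perf report -t ," the field separator is ',', so a
 * value formatted as "foo,bar" is emitted as "foo.bar" and the output
 * lines can be split on ',' unambiguously.
 */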
96 
97 static int64_t cmp_null(const void *l, const void *r)
98 {
99 	if (!l && !r)
100 		return 0;
101 	else if (!l)
102 		return -1;
103 	else
104 		return 1;
105 }
106 
107 /* --sort pid */
108 
109 static int64_t
110 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
111 {
112 	return thread__tid(right->thread) - thread__tid(left->thread);
113 }
114 
115 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
116 				       size_t size, unsigned int width)
117 {
118 	const char *comm = thread__comm_str(he->thread);
119 
120 	width = max(7U, width) - 8;
121 	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
122 			       width, width, comm ?: "");
123 }
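
/*
 * The "%7d:%-*.*s" format prints the tid right-aligned in 7 columns, a
 * ':', then the comm left-aligned in the remaining width (the 8 columns
 * consumed by "1234567:" are subtracted above), e.g. "   1234:firefox".
 */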
124 
125 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
126 {
127 	const struct thread *th = arg;
128 
129 	if (type != HIST_FILTER__THREAD)
130 		return -1;
131 
132 	return th && !RC_CHK_EQUAL(he->thread, th);
133 }
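
/*
 * Filter callback convention, shared by the se_filter implementations in
 * this file: return -1 when the filter type does not apply to this sort
 * key, non-zero to filter the entry out, and 0 to keep it.
 */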
134 
135 struct sort_entry sort_thread = {
136 	.se_header	= "    Pid:Command",
137 	.se_cmp		= sort__thread_cmp,
138 	.se_snprintf	= hist_entry__thread_snprintf,
139 	.se_filter	= hist_entry__thread_filter,
140 	.se_width_idx	= HISTC_THREAD,
141 };
142 
143 /* --sort simd */
144 
145 static int64_t
146 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
147 {
148 	if (left->simd_flags.arch != right->simd_flags.arch)
149 		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
150 
151 	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
152 }
153 
154 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
155 {
156 	u64 arch = simd_flags->arch;
157 
158 	if (arch & SIMD_OP_FLAGS_ARCH_SVE)
159 		return "SVE";
160 	else
161 		return "n/a";
162 }
163 
164 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
165 				     size_t size, unsigned int width __maybe_unused)
166 {
167 	const char *name;
168 
169 	if (!he->simd_flags.arch)
170 		return repsep_snprintf(bf, size, "");
171 
172 	name = hist_entry__get_simd_name(&he->simd_flags);
173 
174 	if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
175 		return repsep_snprintf(bf, size, "[e] %s", name);
176 	else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
177 		return repsep_snprintf(bf, size, "[p] %s", name);
178 
179 	return repsep_snprintf(bf, size, "[.] %s", name);
180 }
181 
182 struct sort_entry sort_simd = {
183 	.se_header	= "Simd   ",
184 	.se_cmp		= sort__simd_cmp,
185 	.se_snprintf	= hist_entry__simd_snprintf,
186 	.se_width_idx	= HISTC_SIMD,
187 };
188 
189 /* --sort comm */
190 
191 /*
192  * We can't use pointer comparison in functions below,
193  * because it gives different results based on pointer
194  * values, which could break some sorting assumptions.
195  */
196 static int64_t
197 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
198 {
199 	return strcmp(comm__str(right->comm), comm__str(left->comm));
200 }
201 
202 static int64_t
203 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
204 {
205 	return strcmp(comm__str(right->comm), comm__str(left->comm));
206 }
207 
208 static int64_t
209 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
210 {
211 	return strcmp(comm__str(right->comm), comm__str(left->comm));
212 }
213 
214 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
215 				     size_t size, unsigned int width)
216 {
217 	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
218 }
219 
220 struct sort_entry sort_comm = {
221 	.se_header	= "Command",
222 	.se_cmp		= sort__comm_cmp,
223 	.se_collapse	= sort__comm_collapse,
224 	.se_sort	= sort__comm_sort,
225 	.se_snprintf	= hist_entry__comm_snprintf,
226 	.se_filter	= hist_entry__thread_filter,
227 	.se_width_idx	= HISTC_COMM,
228 };
229 
230 /* --sort dso */
231 
232 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
233 {
234 	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
235 	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
236 	const char *dso_name_l, *dso_name_r;
237 
238 	if (!dso_l || !dso_r)
239 		return cmp_null(dso_r, dso_l);
240 
241 	if (verbose > 0) {
242 		dso_name_l = dso_l->long_name;
243 		dso_name_r = dso_r->long_name;
244 	} else {
245 		dso_name_l = dso_l->short_name;
246 		dso_name_r = dso_r->short_name;
247 	}
248 
249 	return strcmp(dso_name_l, dso_name_r);
250 }
251 
252 static int64_t
253 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
254 {
255 	return _sort__dso_cmp(right->ms.map, left->ms.map);
256 }
257 
258 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
259 				     size_t size, unsigned int width)
260 {
261 	const struct dso *dso = map ? map__dso(map) : NULL;
262 	const char *dso_name = "[unknown]";
263 
264 	if (dso)
265 		dso_name = verbose > 0 ? dso->long_name : dso->short_name;
266 
267 	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
268 }
269 
270 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
271 				    size_t size, unsigned int width)
272 {
273 	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
274 }
275 
276 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
277 {
278 	const struct dso *dso = arg;
279 
280 	if (type != HIST_FILTER__DSO)
281 		return -1;
282 
283 	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
284 }
285 
286 struct sort_entry sort_dso = {
287 	.se_header	= "Shared Object",
288 	.se_cmp		= sort__dso_cmp,
289 	.se_snprintf	= hist_entry__dso_snprintf,
290 	.se_filter	= hist_entry__dso_filter,
291 	.se_width_idx	= HISTC_DSO,
292 };
293 
294 /* --sort symbol */
295 
296 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
297 {
298 	return (int64_t)(right_ip - left_ip);
299 }
300 
301 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
302 {
303 	if (!sym_l || !sym_r)
304 		return cmp_null(sym_l, sym_r);
305 
306 	if (sym_l == sym_r)
307 		return 0;
308 
309 	if (sym_l->inlined || sym_r->inlined) {
310 		int ret = strcmp(sym_l->name, sym_r->name);
311 
312 		if (ret)
313 			return ret;
314 		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
315 			return 0;
316 	}
317 
318 	if (sym_l->start != sym_r->start)
319 		return (int64_t)(sym_r->start - sym_l->start);
320 
321 	return (int64_t)(sym_r->end - sym_l->end);
322 }
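
/*
 * Note: inlined symbols are compared by name first since the same inlined
 * function can appear as several symbol instances; two same-named symbols
 * with overlapping address ranges are treated as equal, otherwise the
 * comparison falls back to the start/end addresses.
 */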
323 
324 static int64_t
325 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
326 {
327 	int64_t ret;
328 
329 	if (!left->ms.sym && !right->ms.sym)
330 		return _sort__addr_cmp(left->ip, right->ip);
331 
332 	/*
333 	 * comparing symbol address alone is not enough since it's a
334 	 * relative address within a dso.
335 	 */
336 	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
337 		ret = sort__dso_cmp(left, right);
338 		if (ret != 0)
339 			return ret;
340 	}
341 
342 	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
343 }
344 
345 static int64_t
346 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
347 {
348 	if (!left->ms.sym || !right->ms.sym)
349 		return cmp_null(left->ms.sym, right->ms.sym);
350 
351 	return strcmp(right->ms.sym->name, left->ms.sym->name);
352 }
353 
354 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
355 				     u64 ip, char level, char *bf, size_t size,
356 				     unsigned int width)
357 {
358 	struct symbol *sym = ms->sym;
359 	struct map *map = ms->map;
360 	size_t ret = 0;
361 
362 	if (verbose > 0) {
363 		struct dso *dso = map ? map__dso(map) : NULL;
364 		char o = dso ? dso__symtab_origin(dso) : '!';
365 		u64 rip = ip;
366 
367 		if (dso && dso->kernel && dso->adjust_symbols)
368 			rip = map__unmap_ip(map, ip);
369 
370 		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
371 				       BITS_PER_LONG / 4 + 2, rip, o);
372 	}
373 
374 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
375 	if (sym && map) {
376 		if (sym->type == STT_OBJECT) {
377 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
378 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
379 					ip - map__unmap_ip(map, sym->start));
380 		} else {
381 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
382 					       width - ret,
383 					       sym->name);
384 			if (sym->inlined)
385 				ret += repsep_snprintf(bf + ret, size - ret,
386 						       " (inlined)");
387 		}
388 	} else {
389 		size_t len = BITS_PER_LONG / 4;
390 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
391 				       len, ip);
392 	}
393 
394 	return ret;
395 }
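
/*
 * Typical output: "[.] some_function" for a resolved user-space symbol or
 * "[k] 0xffffffff81234567" when only an address is known; STT_OBJECT (data)
 * symbols get a "+0x..." offset and inlined symbols an " (inlined)" suffix.
 * With -v the raw address and the symtab origin character are prepended.
 */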
396 
397 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
398 {
399 	return _hist_entry__sym_snprintf(&he->ms, he->ip,
400 					 he->level, bf, size, width);
401 }
402 
403 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
404 {
405 	const char *sym = arg;
406 
407 	if (type != HIST_FILTER__SYMBOL)
408 		return -1;
409 
410 	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
411 }
412 
413 struct sort_entry sort_sym = {
414 	.se_header	= "Symbol",
415 	.se_cmp		= sort__sym_cmp,
416 	.se_sort	= sort__sym_sort,
417 	.se_snprintf	= hist_entry__sym_snprintf,
418 	.se_filter	= hist_entry__sym_filter,
419 	.se_width_idx	= HISTC_SYMBOL,
420 };
421 
422 /* --sort srcline */
423 
424 char *hist_entry__srcline(struct hist_entry *he)
425 {
426 	return map__srcline(he->ms.map, he->ip, he->ms.sym);
427 }
428 
429 static int64_t
430 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
431 {
432 	int64_t ret;
433 
434 	ret = _sort__addr_cmp(left->ip, right->ip);
435 	if (ret)
436 		return ret;
437 
438 	return sort__dso_cmp(left, right);
439 }
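
/*
 * Resolving a srcline requires an addr2line lookup, which is relatively
 * expensive, so se_cmp above compares only ip and dso; the srcline string
 * itself is resolved lazily in the collapse/sort/init callbacks below.
 */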
440 
441 static int64_t
442 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
443 {
444 	if (!left->srcline)
445 		left->srcline = hist_entry__srcline(left);
446 	if (!right->srcline)
447 		right->srcline = hist_entry__srcline(right);
448 
449 	return strcmp(right->srcline, left->srcline);
450 }
451 
452 static int64_t
453 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
454 {
455 	return sort__srcline_collapse(left, right);
456 }
457 
458 static void
459 sort__srcline_init(struct hist_entry *he)
460 {
461 	if (!he->srcline)
462 		he->srcline = hist_entry__srcline(he);
463 }
464 
465 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
466 					size_t size, unsigned int width)
467 {
468 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
469 }
470 
471 struct sort_entry sort_srcline = {
472 	.se_header	= "Source:Line",
473 	.se_cmp		= sort__srcline_cmp,
474 	.se_collapse	= sort__srcline_collapse,
475 	.se_sort	= sort__srcline_sort,
476 	.se_init	= sort__srcline_init,
477 	.se_snprintf	= hist_entry__srcline_snprintf,
478 	.se_width_idx	= HISTC_SRCLINE,
479 };
480 
481 /* --sort srcline_from */
482 
483 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
484 {
485 	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
486 }
487 
488 static int64_t
489 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
490 {
491 	return left->branch_info->from.addr - right->branch_info->from.addr;
492 }
493 
494 static int64_t
495 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
496 {
497 	if (!left->branch_info->srcline_from)
498 		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
499 
500 	if (!right->branch_info->srcline_from)
501 		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
502 
503 	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
504 }
505 
506 static int64_t
507 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
508 {
509 	return sort__srcline_from_collapse(left, right);
510 }
511 
512 static void sort__srcline_from_init(struct hist_entry *he)
513 {
514 	if (!he->branch_info->srcline_from)
515 		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
516 }
517 
518 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
519 					size_t size, unsigned int width)
520 {
521 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
522 }
523 
524 struct sort_entry sort_srcline_from = {
525 	.se_header	= "From Source:Line",
526 	.se_cmp		= sort__srcline_from_cmp,
527 	.se_collapse	= sort__srcline_from_collapse,
528 	.se_sort	= sort__srcline_from_sort,
529 	.se_init	= sort__srcline_from_init,
530 	.se_snprintf	= hist_entry__srcline_from_snprintf,
531 	.se_width_idx	= HISTC_SRCLINE_FROM,
532 };
533 
534 /* --sort srcline_to */
535 
536 static int64_t
537 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
538 {
539 	return left->branch_info->to.addr - right->branch_info->to.addr;
540 }
541 
542 static int64_t
543 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
544 {
545 	if (!left->branch_info->srcline_to)
546 		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
547 
548 	if (!right->branch_info->srcline_to)
549 		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
550 
551 	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
552 }
553 
554 static int64_t
555 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
556 {
557 	return sort__srcline_to_collapse(left, right);
558 }
559 
560 static void sort__srcline_to_init(struct hist_entry *he)
561 {
562 	if (!he->branch_info->srcline_to)
563 		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
564 }
565 
566 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
567 					size_t size, unsigned int width)
568 {
569 	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
570 }
571 
572 struct sort_entry sort_srcline_to = {
573 	.se_header	= "To Source:Line",
574 	.se_cmp		= sort__srcline_to_cmp,
575 	.se_collapse	= sort__srcline_to_collapse,
576 	.se_sort	= sort__srcline_to_sort,
577 	.se_init	= sort__srcline_to_init,
578 	.se_snprintf	= hist_entry__srcline_to_snprintf,
579 	.se_width_idx	= HISTC_SRCLINE_TO,
580 };
581 
582 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
583 					size_t size, unsigned int width)
584 {
585 
586 	struct symbol *sym = he->ms.sym;
587 	struct annotated_branch *branch;
588 	double ipc = 0.0, coverage = 0.0;
589 	char tmp[64];
590 
591 	if (!sym)
592 		return repsep_snprintf(bf, size, "%-*s", width, "-");
593 
594 	branch = symbol__annotation(sym)->branch;
595 
596 	if (branch && branch->hit_cycles)
597 		ipc = branch->hit_insn / ((double)branch->hit_cycles);
598 
599 	if (branch && branch->total_insn) {
600 		coverage = branch->cover_insn * 100.0 /
601 			((double)branch->total_insn);
602 	}
603 
604 	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
605 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
606 }
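
/*
 * IPC here is hit_insn / hit_cycles and coverage is the percentage of the
 * symbol's instructions covered by sampled basic blocks, both taken from
 * the annotation branch stats, which are only populated when branch stack
 * cycle information was recorded.
 */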
607 
608 struct sort_entry sort_sym_ipc = {
609 	.se_header	= "IPC   [IPC Coverage]",
610 	.se_cmp		= sort__sym_cmp,
611 	.se_snprintf	= hist_entry__sym_ipc_snprintf,
612 	.se_width_idx	= HISTC_SYMBOL_IPC,
613 };
614 
615 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
616 					     __maybe_unused,
617 					     char *bf, size_t size,
618 					     unsigned int width)
619 {
620 	char tmp[64];
621 
622 	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
623 	return repsep_snprintf(bf, size, "%-*s", width, tmp);
624 }
625 
626 struct sort_entry sort_sym_ipc_null = {
627 	.se_header	= "IPC   [IPC Coverage]",
628 	.se_cmp		= sort__sym_cmp,
629 	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
630 	.se_width_idx	= HISTC_SYMBOL_IPC,
631 };
632 
633 /* --sort srcfile */
634 
635 static char no_srcfile[1];
636 
637 static char *hist_entry__get_srcfile(struct hist_entry *e)
638 {
639 	char *sf, *p;
640 	struct map *map = e->ms.map;
641 
642 	if (!map)
643 		return no_srcfile;
644 
645 	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
646 			 e->ms.sym, false, true, true, e->ip);
647 	if (sf == SRCLINE_UNKNOWN)
648 		return no_srcfile;
649 	p = strchr(sf, ':');
650 	if (p && *sf) {
651 		*p = 0;
652 		return sf;
653 	}
654 	free(sf);
655 	return no_srcfile;
656 }
657 
658 static int64_t
659 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
660 {
661 	return sort__srcline_cmp(left, right);
662 }
663 
664 static int64_t
665 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
666 {
667 	if (!left->srcfile)
668 		left->srcfile = hist_entry__get_srcfile(left);
669 	if (!right->srcfile)
670 		right->srcfile = hist_entry__get_srcfile(right);
671 
672 	return strcmp(right->srcfile, left->srcfile);
673 }
674 
675 static int64_t
676 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
677 {
678 	return sort__srcfile_collapse(left, right);
679 }
680 
681 static void sort__srcfile_init(struct hist_entry *he)
682 {
683 	if (!he->srcfile)
684 		he->srcfile = hist_entry__get_srcfile(he);
685 }
686 
687 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
688 					size_t size, unsigned int width)
689 {
690 	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
691 }
692 
693 struct sort_entry sort_srcfile = {
694 	.se_header	= "Source File",
695 	.se_cmp		= sort__srcfile_cmp,
696 	.se_collapse	= sort__srcfile_collapse,
697 	.se_sort	= sort__srcfile_sort,
698 	.se_init	= sort__srcfile_init,
699 	.se_snprintf	= hist_entry__srcfile_snprintf,
700 	.se_width_idx	= HISTC_SRCFILE,
701 };
702 
703 /* --sort parent */
704 
705 static int64_t
706 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
707 {
708 	struct symbol *sym_l = left->parent;
709 	struct symbol *sym_r = right->parent;
710 
711 	if (!sym_l || !sym_r)
712 		return cmp_null(sym_l, sym_r);
713 
714 	return strcmp(sym_r->name, sym_l->name);
715 }
716 
717 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
718 				       size_t size, unsigned int width)
719 {
720 	return repsep_snprintf(bf, size, "%-*.*s", width, width,
721 			      he->parent ? he->parent->name : "[other]");
722 }
723 
724 struct sort_entry sort_parent = {
725 	.se_header	= "Parent symbol",
726 	.se_cmp		= sort__parent_cmp,
727 	.se_snprintf	= hist_entry__parent_snprintf,
728 	.se_width_idx	= HISTC_PARENT,
729 };
730 
731 /* --sort cpu */
732 
733 static int64_t
734 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
735 {
736 	return right->cpu - left->cpu;
737 }
738 
739 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
740 				    size_t size, unsigned int width)
741 {
742 	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
743 }
744 
745 struct sort_entry sort_cpu = {
746 	.se_header      = "CPU",
747 	.se_cmp	        = sort__cpu_cmp,
748 	.se_snprintf    = hist_entry__cpu_snprintf,
749 	.se_width_idx	= HISTC_CPU,
750 };
751 
752 /* --sort cgroup_id */
753 
754 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
755 {
756 	return (int64_t)(right_dev - left_dev);
757 }
758 
759 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
760 {
761 	return (int64_t)(right_ino - left_ino);
762 }
763 
764 static int64_t
765 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
766 {
767 	int64_t ret;
768 
769 	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
770 	if (ret != 0)
771 		return ret;
772 
773 	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
774 				       left->cgroup_id.ino);
775 }
776 
777 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
778 					  char *bf, size_t size,
779 					  unsigned int width __maybe_unused)
780 {
781 	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
782 			       he->cgroup_id.ino);
783 }
784 
785 struct sort_entry sort_cgroup_id = {
786 	.se_header      = "cgroup id (dev/inode)",
787 	.se_cmp	        = sort__cgroup_id_cmp,
788 	.se_snprintf    = hist_entry__cgroup_id_snprintf,
789 	.se_width_idx	= HISTC_CGROUP_ID,
790 };
791 
792 /* --sort cgroup */
793 
794 static int64_t
795 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
796 {
797 	return right->cgroup - left->cgroup;
798 }
799 
800 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
801 				       char *bf, size_t size,
802 				       unsigned int width __maybe_unused)
803 {
804 	const char *cgrp_name = "N/A";
805 
806 	if (he->cgroup) {
807 		struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
808 						   he->cgroup);
809 		if (cgrp != NULL)
810 			cgrp_name = cgrp->name;
811 		else
812 			cgrp_name = "unknown";
813 	}
814 
815 	return repsep_snprintf(bf, size, "%s", cgrp_name);
816 }
817 
818 struct sort_entry sort_cgroup = {
819 	.se_header      = "Cgroup",
820 	.se_cmp	        = sort__cgroup_cmp,
821 	.se_snprintf    = hist_entry__cgroup_snprintf,
822 	.se_width_idx	= HISTC_CGROUP,
823 };
824 
825 /* --sort socket */
826 
827 static int64_t
828 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
829 {
830 	return right->socket - left->socket;
831 }
832 
833 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
834 				    size_t size, unsigned int width)
835 {
836 	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
837 }
838 
839 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
840 {
841 	int sk = *(const int *)arg;
842 
843 	if (type != HIST_FILTER__SOCKET)
844 		return -1;
845 
846 	return sk >= 0 && he->socket != sk;
847 }
848 
849 struct sort_entry sort_socket = {
850 	.se_header      = "Socket",
851 	.se_cmp	        = sort__socket_cmp,
852 	.se_snprintf    = hist_entry__socket_snprintf,
853 	.se_filter      = hist_entry__socket_filter,
854 	.se_width_idx	= HISTC_SOCKET,
855 };
856 
857 /* --sort time */
858 
859 static int64_t
860 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
861 {
862 	return right->time - left->time;
863 }
864 
865 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
866 				    size_t size, unsigned int width)
867 {
868 	char he_time[32];
869 
870 	if (symbol_conf.nanosecs)
871 		timestamp__scnprintf_nsec(he->time, he_time,
872 					  sizeof(he_time));
873 	else
874 		timestamp__scnprintf_usec(he->time, he_time,
875 					  sizeof(he_time));
876 
877 	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
878 }
879 
880 struct sort_entry sort_time = {
881 	.se_header      = "Time",
882 	.se_cmp	        = sort__time_cmp,
883 	.se_snprintf    = hist_entry__time_snprintf,
884 	.se_width_idx	= HISTC_TIME,
885 };
886 
887 /* --sort trace */
888 
889 #ifdef HAVE_LIBTRACEEVENT
890 static char *get_trace_output(struct hist_entry *he)
891 {
892 	struct trace_seq seq;
893 	struct evsel *evsel;
894 	struct tep_record rec = {
895 		.data = he->raw_data,
896 		.size = he->raw_size,
897 	};
898 
899 	evsel = hists_to_evsel(he->hists);
900 
901 	trace_seq_init(&seq);
902 	if (symbol_conf.raw_trace) {
903 		tep_print_fields(&seq, he->raw_data, he->raw_size,
904 				 evsel->tp_format);
905 	} else {
906 		tep_print_event(evsel->tp_format->tep,
907 				&seq, &rec, "%s", TEP_PRINT_INFO);
908 	}
909 	/*
910 	 * Trim the buffer; it starts at 4KB and we're not going to
911 	 * add anything more to this buffer.
912 	 */
913 	return realloc(seq.buffer, seq.len + 1);
914 }
915 
916 static int64_t
917 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
918 {
919 	struct evsel *evsel;
920 
921 	evsel = hists_to_evsel(left->hists);
922 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
923 		return 0;
924 
925 	if (left->trace_output == NULL)
926 		left->trace_output = get_trace_output(left);
927 	if (right->trace_output == NULL)
928 		right->trace_output = get_trace_output(right);
929 
930 	return strcmp(right->trace_output, left->trace_output);
931 }
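
/*
 * Non-tracepoint events have no trace output, so they always compare equal
 * here and are printed as "N/A" by the snprintf callback below.
 */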
932 
933 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
934 				    size_t size, unsigned int width)
935 {
936 	struct evsel *evsel;
937 
938 	evsel = hists_to_evsel(he->hists);
939 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
940 		return scnprintf(bf, size, "%-.*s", width, "N/A");
941 
942 	if (he->trace_output == NULL)
943 		he->trace_output = get_trace_output(he);
944 	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
945 }
946 
947 struct sort_entry sort_trace = {
948 	.se_header      = "Trace output",
949 	.se_cmp	        = sort__trace_cmp,
950 	.se_snprintf    = hist_entry__trace_snprintf,
951 	.se_width_idx	= HISTC_TRACE,
952 };
953 #endif /* HAVE_LIBTRACEEVENT */
954 
955 /* sort keys for branch stacks */
956 
957 static int64_t
958 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
959 {
960 	if (!left->branch_info || !right->branch_info)
961 		return cmp_null(left->branch_info, right->branch_info);
962 
963 	return _sort__dso_cmp(left->branch_info->from.ms.map,
964 			      right->branch_info->from.ms.map);
965 }
966 
967 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
968 				    size_t size, unsigned int width)
969 {
970 	if (he->branch_info)
971 		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
972 						 bf, size, width);
973 	else
974 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
975 }
976 
977 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
978 				       const void *arg)
979 {
980 	const struct dso *dso = arg;
981 
982 	if (type != HIST_FILTER__DSO)
983 		return -1;
984 
985 	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
986 		map__dso(he->branch_info->from.ms.map) != dso);
987 }
988 
989 static int64_t
990 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
991 {
992 	if (!left->branch_info || !right->branch_info)
993 		return cmp_null(left->branch_info, right->branch_info);
994 
995 	return _sort__dso_cmp(left->branch_info->to.ms.map,
996 			      right->branch_info->to.ms.map);
997 }
998 
999 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
1000 				       size_t size, unsigned int width)
1001 {
1002 	if (he->branch_info)
1003 		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1004 						 bf, size, width);
1005 	else
1006 		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1007 }
1008 
1009 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
1010 				     const void *arg)
1011 {
1012 	const struct dso *dso = arg;
1013 
1014 	if (type != HIST_FILTER__DSO)
1015 		return -1;
1016 
1017 	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
1018 		map__dso(he->branch_info->to.ms.map) != dso);
1019 }
1020 
1021 static int64_t
1022 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1023 {
1024 	struct addr_map_symbol *from_l, *from_r;
1025 
1026 	if (!left->branch_info || !right->branch_info)
1027 		return cmp_null(left->branch_info, right->branch_info);
1028 
1029 	from_l = &left->branch_info->from;
1030 	from_r = &right->branch_info->from;
1031 
1032 	if (!from_l->ms.sym && !from_r->ms.sym)
1033 		return _sort__addr_cmp(from_l->addr, from_r->addr);
1034 
1035 	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1036 }
1037 
1038 static int64_t
1039 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1040 {
1041 	struct addr_map_symbol *to_l, *to_r;
1042 
1043 	if (!left->branch_info || !right->branch_info)
1044 		return cmp_null(left->branch_info, right->branch_info);
1045 
1046 	to_l = &left->branch_info->to;
1047 	to_r = &right->branch_info->to;
1048 
1049 	if (!to_l->ms.sym && !to_r->ms.sym)
1050 		return _sort__addr_cmp(to_l->addr, to_r->addr);
1051 
1052 	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1053 }
1054 
1055 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1056 					 size_t size, unsigned int width)
1057 {
1058 	if (he->branch_info) {
1059 		struct addr_map_symbol *from = &he->branch_info->from;
1060 
1061 		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1062 						 from->al_level, bf, size, width);
1063 	}
1064 
1065 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1066 }
1067 
1068 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1069 				       size_t size, unsigned int width)
1070 {
1071 	if (he->branch_info) {
1072 		struct addr_map_symbol *to = &he->branch_info->to;
1073 
1074 		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1075 						 to->al_level, bf, size, width);
1076 	}
1077 
1078 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1079 }
1080 
1081 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1082 				       const void *arg)
1083 {
1084 	const char *sym = arg;
1085 
1086 	if (type != HIST_FILTER__SYMBOL)
1087 		return -1;
1088 
1089 	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1090 			strstr(he->branch_info->from.ms.sym->name, sym));
1091 }
1092 
1093 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1094 				       const void *arg)
1095 {
1096 	const char *sym = arg;
1097 
1098 	if (type != HIST_FILTER__SYMBOL)
1099 		return -1;
1100 
1101 	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1102 		        strstr(he->branch_info->to.ms.sym->name, sym));
1103 }
1104 
1105 struct sort_entry sort_dso_from = {
1106 	.se_header	= "Source Shared Object",
1107 	.se_cmp		= sort__dso_from_cmp,
1108 	.se_snprintf	= hist_entry__dso_from_snprintf,
1109 	.se_filter	= hist_entry__dso_from_filter,
1110 	.se_width_idx	= HISTC_DSO_FROM,
1111 };
1112 
1113 struct sort_entry sort_dso_to = {
1114 	.se_header	= "Target Shared Object",
1115 	.se_cmp		= sort__dso_to_cmp,
1116 	.se_snprintf	= hist_entry__dso_to_snprintf,
1117 	.se_filter	= hist_entry__dso_to_filter,
1118 	.se_width_idx	= HISTC_DSO_TO,
1119 };
1120 
1121 struct sort_entry sort_sym_from = {
1122 	.se_header	= "Source Symbol",
1123 	.se_cmp		= sort__sym_from_cmp,
1124 	.se_snprintf	= hist_entry__sym_from_snprintf,
1125 	.se_filter	= hist_entry__sym_from_filter,
1126 	.se_width_idx	= HISTC_SYMBOL_FROM,
1127 };
1128 
1129 struct sort_entry sort_sym_to = {
1130 	.se_header	= "Target Symbol",
1131 	.se_cmp		= sort__sym_to_cmp,
1132 	.se_snprintf	= hist_entry__sym_to_snprintf,
1133 	.se_filter	= hist_entry__sym_to_filter,
1134 	.se_width_idx	= HISTC_SYMBOL_TO,
1135 };
1136 
1137 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1138 				     u64 ip, char level, char *bf, size_t size,
1139 				     unsigned int width)
1140 {
1141 	struct symbol *sym = ms->sym;
1142 	struct map *map = ms->map;
1143 	size_t ret = 0, offs;
1144 
1145 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1146 	if (sym && map) {
1147 		if (sym->type == STT_OBJECT) {
1148 			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1149 			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1150 					ip - map__unmap_ip(map, sym->start));
1151 		} else {
1152 			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1153 					       width - ret,
1154 					       sym->name);
1155 			offs = ip - sym->start;
1156 			if (offs)
1157 				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1158 		}
1159 	} else {
1160 		size_t len = BITS_PER_LONG / 4;
1161 		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1162 				       len, ip);
1163 	}
1164 
1165 	return ret;
1166 }
1167 
1168 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1169 					 size_t size, unsigned int width)
1170 {
1171 	if (he->branch_info) {
1172 		struct addr_map_symbol *from = &he->branch_info->from;
1173 
1174 		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1175 						 he->level, bf, size, width);
1176 	}
1177 
1178 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1179 }
1180 
1181 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1182 				       size_t size, unsigned int width)
1183 {
1184 	if (he->branch_info) {
1185 		struct addr_map_symbol *to = &he->branch_info->to;
1186 
1187 		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1188 						 he->level, bf, size, width);
1189 	}
1190 
1191 	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1192 }
1193 
1194 static int64_t
1195 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1196 {
1197 	struct addr_map_symbol *from_l;
1198 	struct addr_map_symbol *from_r;
1199 	int64_t ret;
1200 
1201 	if (!left->branch_info || !right->branch_info)
1202 		return cmp_null(left->branch_info, right->branch_info);
1203 
1204 	from_l = &left->branch_info->from;
1205 	from_r = &right->branch_info->from;
1206 
1207 	/*
1208 	 * comparing symbol address alone is not enough since it's a
1209 	 * relative address within a dso.
1210 	 */
1211 	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1212 	if (ret != 0)
1213 		return ret;
1214 
1215 	return _sort__addr_cmp(from_l->addr, from_r->addr);
1216 }
1217 
1218 static int64_t
1219 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1220 {
1221 	struct addr_map_symbol *to_l;
1222 	struct addr_map_symbol *to_r;
1223 	int64_t ret;
1224 
1225 	if (!left->branch_info || !right->branch_info)
1226 		return cmp_null(left->branch_info, right->branch_info);
1227 
1228 	to_l = &left->branch_info->to;
1229 	to_r = &right->branch_info->to;
1230 
1231 	/*
1232 	 * comparing symbol address alone is not enough since it's a
1233 	 * relative address within a dso.
1234 	 */
1235 	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1236 	if (ret != 0)
1237 		return ret;
1238 
1239 	return _sort__addr_cmp(to_l->addr, to_r->addr);
1240 }
1241 
1242 struct sort_entry sort_addr_from = {
1243 	.se_header	= "Source Address",
1244 	.se_cmp		= sort__addr_from_cmp,
1245 	.se_snprintf	= hist_entry__addr_from_snprintf,
1246 	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
1247 	.se_width_idx	= HISTC_ADDR_FROM,
1248 };
1249 
1250 struct sort_entry sort_addr_to = {
1251 	.se_header	= "Target Address",
1252 	.se_cmp		= sort__addr_to_cmp,
1253 	.se_snprintf	= hist_entry__addr_to_snprintf,
1254 	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
1255 	.se_width_idx	= HISTC_ADDR_TO,
1256 };
1257 
1258 
1259 static int64_t
1260 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1261 {
1262 	unsigned char mp, p;
1263 
1264 	if (!left->branch_info || !right->branch_info)
1265 		return cmp_null(left->branch_info, right->branch_info);
1266 
1267 	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1268 	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1269 	return mp || p;
1270 }
1271 
1272 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1273 				    size_t size, unsigned int width){
1274 	static const char *out = "N/A";
1275 
1276 	if (he->branch_info) {
1277 		if (he->branch_info->flags.predicted)
1278 			out = "N";
1279 		else if (he->branch_info->flags.mispred)
1280 			out = "Y";
1281 	}
1282 
1283 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1284 }
1285 
1286 static int64_t
1287 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1288 {
1289 	if (!left->branch_info || !right->branch_info)
1290 		return cmp_null(left->branch_info, right->branch_info);
1291 
1292 	return left->branch_info->flags.cycles -
1293 		right->branch_info->flags.cycles;
1294 }
1295 
1296 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1297 				    size_t size, unsigned int width)
1298 {
1299 	if (!he->branch_info)
1300 		return scnprintf(bf, size, "%-.*s", width, "N/A");
1301 	if (he->branch_info->flags.cycles == 0)
1302 		return repsep_snprintf(bf, size, "%-*s", width, "-");
1303 	return repsep_snprintf(bf, size, "%-*hd", width,
1304 			       he->branch_info->flags.cycles);
1305 }
1306 
1307 struct sort_entry sort_cycles = {
1308 	.se_header	= "Basic Block Cycles",
1309 	.se_cmp		= sort__cycles_cmp,
1310 	.se_snprintf	= hist_entry__cycles_snprintf,
1311 	.se_width_idx	= HISTC_CYCLES,
1312 };
1313 
1314 /* --sort daddr_sym */
1315 int64_t
1316 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1317 {
1318 	uint64_t l = 0, r = 0;
1319 
1320 	if (left->mem_info)
1321 		l = left->mem_info->daddr.addr;
1322 	if (right->mem_info)
1323 		r = right->mem_info->daddr.addr;
1324 
1325 	return (int64_t)(r - l);
1326 }
1327 
1328 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1329 				    size_t size, unsigned int width)
1330 {
1331 	uint64_t addr = 0;
1332 	struct map_symbol *ms = NULL;
1333 
1334 	if (he->mem_info) {
1335 		addr = he->mem_info->daddr.addr;
1336 		ms = &he->mem_info->daddr.ms;
1337 	}
1338 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1339 }
1340 
1341 int64_t
1342 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1343 {
1344 	uint64_t l = 0, r = 0;
1345 
1346 	if (left->mem_info)
1347 		l = left->mem_info->iaddr.addr;
1348 	if (right->mem_info)
1349 		r = right->mem_info->iaddr.addr;
1350 
1351 	return (int64_t)(r - l);
1352 }
1353 
1354 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1355 				    size_t size, unsigned int width)
1356 {
1357 	uint64_t addr = 0;
1358 	struct map_symbol *ms = NULL;
1359 
1360 	if (he->mem_info) {
1361 		addr = he->mem_info->iaddr.addr;
1362 		ms   = &he->mem_info->iaddr.ms;
1363 	}
1364 	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1365 }
1366 
1367 static int64_t
1368 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1369 {
1370 	struct map *map_l = NULL;
1371 	struct map *map_r = NULL;
1372 
1373 	if (left->mem_info)
1374 		map_l = left->mem_info->daddr.ms.map;
1375 	if (right->mem_info)
1376 		map_r = right->mem_info->daddr.ms.map;
1377 
1378 	return _sort__dso_cmp(map_l, map_r);
1379 }
1380 
1381 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1382 				    size_t size, unsigned int width)
1383 {
1384 	struct map *map = NULL;
1385 
1386 	if (he->mem_info)
1387 		map = he->mem_info->daddr.ms.map;
1388 
1389 	return _hist_entry__dso_snprintf(map, bf, size, width);
1390 }
1391 
1392 static int64_t
1393 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1394 {
1395 	union perf_mem_data_src data_src_l;
1396 	union perf_mem_data_src data_src_r;
1397 
1398 	if (left->mem_info)
1399 		data_src_l = left->mem_info->data_src;
1400 	else
1401 		data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1402 
1403 	if (right->mem_info)
1404 		data_src_r = right->mem_info->data_src;
1405 	else
1406 		data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1407 
1408 	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1409 }
1410 
1411 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1412 				    size_t size, unsigned int width)
1413 {
1414 	char out[10];
1415 
1416 	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1417 	return repsep_snprintf(bf, size, "%.*s", width, out);
1418 }
1419 
1420 static int64_t
1421 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1422 {
1423 	union perf_mem_data_src data_src_l;
1424 	union perf_mem_data_src data_src_r;
1425 
1426 	if (left->mem_info)
1427 		data_src_l = left->mem_info->data_src;
1428 	else
1429 		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1430 
1431 	if (right->mem_info)
1432 		data_src_r = right->mem_info->data_src;
1433 	else
1434 		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1435 
1436 	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1437 }
1438 
1439 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1440 				    size_t size, unsigned int width)
1441 {
1442 	char out[64];
1443 
1444 	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1445 	return repsep_snprintf(bf, size, "%-*s", width, out);
1446 }
1447 
1448 static int64_t
1449 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1450 {
1451 	union perf_mem_data_src data_src_l;
1452 	union perf_mem_data_src data_src_r;
1453 
1454 	if (left->mem_info)
1455 		data_src_l = left->mem_info->data_src;
1456 	else
1457 		data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1458 
1459 	if (right->mem_info)
1460 		data_src_r = right->mem_info->data_src;
1461 	else
1462 		data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1463 
1464 	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1465 }
1466 
1467 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1468 				    size_t size, unsigned int width)
1469 {
1470 	char out[64];
1471 
1472 	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1473 	return repsep_snprintf(bf, size, "%-*s", width, out);
1474 }
1475 
1476 static int64_t
1477 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1478 {
1479 	union perf_mem_data_src data_src_l;
1480 	union perf_mem_data_src data_src_r;
1481 
1482 	if (left->mem_info)
1483 		data_src_l = left->mem_info->data_src;
1484 	else
1485 		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1486 
1487 	if (right->mem_info)
1488 		data_src_r = right->mem_info->data_src;
1489 	else
1490 		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1491 
1492 	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1493 }
1494 
1495 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1496 				    size_t size, unsigned int width)
1497 {
1498 	char out[64];
1499 
1500 	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1501 	return repsep_snprintf(bf, size, "%-*s", width, out);
1502 }
1503 
1504 int64_t
1505 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1506 {
1507 	u64 l, r;
1508 	struct map *l_map, *r_map;
1509 	struct dso *l_dso, *r_dso;
1510 	int rc;
1511 
1512 	if (!left->mem_info)  return -1;
1513 	if (!right->mem_info) return 1;
1514 
1515 	/* group event types together */
1516 	if (left->cpumode > right->cpumode) return -1;
1517 	if (left->cpumode < right->cpumode) return 1;
1518 
1519 	l_map = left->mem_info->daddr.ms.map;
1520 	r_map = right->mem_info->daddr.ms.map;
1521 
1522 	/* if both are NULL, jump to sort on al_addr instead */
1523 	if (!l_map && !r_map)
1524 		goto addr;
1525 
1526 	if (!l_map) return -1;
1527 	if (!r_map) return 1;
1528 
1529 	l_dso = map__dso(l_map);
1530 	r_dso = map__dso(r_map);
1531 	rc = dso__cmp_id(l_dso, r_dso);
1532 	if (rc)
1533 		return rc;
1534 	/*
1535 	 * Addresses with no major/minor numbers are assumed to be
1536 	 * anonymous in userspace.  Sort those on pid then address.
1537 	 *
1538 	 * The kernel and non-zero major/minor mapped areas are
1539 	 * assumed to be unity mapped.  Sort those on address.
1540 	 */
1541 
1542 	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1543 	    (!(map__flags(l_map) & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min &&
1544 	    !l_dso->id.ino && !l_dso->id.ino_generation) {
1545 		/* userspace anonymous */
1546 
1547 		if (thread__pid(left->thread) > thread__pid(right->thread))
1548 			return -1;
1549 		if (thread__pid(left->thread) < thread__pid(right->thread))
1550 			return 1;
1551 	}
1552 
1553 addr:
1554 	/* al_addr does all the right addr - start + offset calculations */
1555 	l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl);
1556 	r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl);
1557 
1558 	if (l > r) return -1;
1559 	if (l < r) return 1;
1560 
1561 	return 0;
1562 }
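
/*
 * Summary of the ordering above: entries are grouped by cpumode, then dso
 * identity, then (for anonymous user-space mappings) by pid, and finally by
 * the data cacheline from cl_address(), which masks al_addr to cacheline
 * (or double-cacheline) granularity.
 */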
1563 
1564 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1565 					  size_t size, unsigned int width)
1566 {
1567 
1568 	uint64_t addr = 0;
1569 	struct map_symbol *ms = NULL;
1570 	char level = he->level;
1571 
1572 	if (he->mem_info) {
1573 		struct map *map = he->mem_info->daddr.ms.map;
1574 		struct dso *dso = map ? map__dso(map) : NULL;
1575 
1576 		addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl);
1577 		ms = &he->mem_info->daddr.ms;
1578 
1579 		/* print [s] for shared data mmaps */
1580 		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1581 		     map && !(map__prot(map) & PROT_EXEC) &&
1582 		     (map__flags(map) & MAP_SHARED) &&
1583 		    (dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation))
1584 			level = 's';
1585 		else if (!map)
1586 			level = 'X';
1587 	}
1588 	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1589 }
1590 
1591 struct sort_entry sort_mispredict = {
1592 	.se_header	= "Branch Mispredicted",
1593 	.se_cmp		= sort__mispredict_cmp,
1594 	.se_snprintf	= hist_entry__mispredict_snprintf,
1595 	.se_width_idx	= HISTC_MISPREDICT,
1596 };
1597 
1598 static int64_t
1599 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1600 {
1601 	return left->weight - right->weight;
1602 }
1603 
1604 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1605 				    size_t size, unsigned int width)
1606 {
1607 	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1608 }
1609 
1610 struct sort_entry sort_local_weight = {
1611 	.se_header	= "Local Weight",
1612 	.se_cmp		= sort__weight_cmp,
1613 	.se_snprintf	= hist_entry__local_weight_snprintf,
1614 	.se_width_idx	= HISTC_LOCAL_WEIGHT,
1615 };
1616 
1617 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1618 					      size_t size, unsigned int width)
1619 {
1620 	return repsep_snprintf(bf, size, "%-*llu", width,
1621 			       he->weight * he->stat.nr_events);
1622 }
1623 
1624 struct sort_entry sort_global_weight = {
1625 	.se_header	= "Weight",
1626 	.se_cmp		= sort__weight_cmp,
1627 	.se_snprintf	= hist_entry__global_weight_snprintf,
1628 	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
1629 };
1630 
1631 static int64_t
1632 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1633 {
1634 	return left->ins_lat - right->ins_lat;
1635 }
1636 
1637 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1638 					      size_t size, unsigned int width)
1639 {
1640 	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1641 }
1642 
1643 struct sort_entry sort_local_ins_lat = {
1644 	.se_header	= "Local INSTR Latency",
1645 	.se_cmp		= sort__ins_lat_cmp,
1646 	.se_snprintf	= hist_entry__local_ins_lat_snprintf,
1647 	.se_width_idx	= HISTC_LOCAL_INS_LAT,
1648 };
1649 
1650 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1651 					       size_t size, unsigned int width)
1652 {
1653 	return repsep_snprintf(bf, size, "%-*u", width,
1654 			       he->ins_lat * he->stat.nr_events);
1655 }
1656 
1657 struct sort_entry sort_global_ins_lat = {
1658 	.se_header	= "INSTR Latency",
1659 	.se_cmp		= sort__ins_lat_cmp,
1660 	.se_snprintf	= hist_entry__global_ins_lat_snprintf,
1661 	.se_width_idx	= HISTC_GLOBAL_INS_LAT,
1662 };
1663 
1664 static int64_t
1665 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1666 {
1667 	return left->p_stage_cyc - right->p_stage_cyc;
1668 }
1669 
1670 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1671 					size_t size, unsigned int width)
1672 {
1673 	return repsep_snprintf(bf, size, "%-*u", width,
1674 			he->p_stage_cyc * he->stat.nr_events);
1675 }
1676 
1677 
1678 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1679 					size_t size, unsigned int width)
1680 {
1681 	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1682 }
1683 
1684 struct sort_entry sort_local_p_stage_cyc = {
1685 	.se_header      = "Local Pipeline Stage Cycle",
1686 	.se_cmp         = sort__p_stage_cyc_cmp,
1687 	.se_snprintf	= hist_entry__p_stage_cyc_snprintf,
1688 	.se_width_idx	= HISTC_LOCAL_P_STAGE_CYC,
1689 };
1690 
1691 struct sort_entry sort_global_p_stage_cyc = {
1692 	.se_header      = "Pipeline Stage Cycle",
1693 	.se_cmp         = sort__p_stage_cyc_cmp,
1694 	.se_snprintf    = hist_entry__global_p_stage_cyc_snprintf,
1695 	.se_width_idx   = HISTC_GLOBAL_P_STAGE_CYC,
1696 };
1697 
1698 struct sort_entry sort_mem_daddr_sym = {
1699 	.se_header	= "Data Symbol",
1700 	.se_cmp		= sort__daddr_cmp,
1701 	.se_snprintf	= hist_entry__daddr_snprintf,
1702 	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
1703 };
1704 
1705 struct sort_entry sort_mem_iaddr_sym = {
1706 	.se_header	= "Code Symbol",
1707 	.se_cmp		= sort__iaddr_cmp,
1708 	.se_snprintf	= hist_entry__iaddr_snprintf,
1709 	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
1710 };
1711 
1712 struct sort_entry sort_mem_daddr_dso = {
1713 	.se_header	= "Data Object",
1714 	.se_cmp		= sort__dso_daddr_cmp,
1715 	.se_snprintf	= hist_entry__dso_daddr_snprintf,
1716 	.se_width_idx	= HISTC_MEM_DADDR_DSO,
1717 };
1718 
1719 struct sort_entry sort_mem_locked = {
1720 	.se_header	= "Locked",
1721 	.se_cmp		= sort__locked_cmp,
1722 	.se_snprintf	= hist_entry__locked_snprintf,
1723 	.se_width_idx	= HISTC_MEM_LOCKED,
1724 };
1725 
1726 struct sort_entry sort_mem_tlb = {
1727 	.se_header	= "TLB access",
1728 	.se_cmp		= sort__tlb_cmp,
1729 	.se_snprintf	= hist_entry__tlb_snprintf,
1730 	.se_width_idx	= HISTC_MEM_TLB,
1731 };
1732 
1733 struct sort_entry sort_mem_lvl = {
1734 	.se_header	= "Memory access",
1735 	.se_cmp		= sort__lvl_cmp,
1736 	.se_snprintf	= hist_entry__lvl_snprintf,
1737 	.se_width_idx	= HISTC_MEM_LVL,
1738 };
1739 
1740 struct sort_entry sort_mem_snoop = {
1741 	.se_header	= "Snoop",
1742 	.se_cmp		= sort__snoop_cmp,
1743 	.se_snprintf	= hist_entry__snoop_snprintf,
1744 	.se_width_idx	= HISTC_MEM_SNOOP,
1745 };
1746 
1747 struct sort_entry sort_mem_dcacheline = {
1748 	.se_header	= "Data Cacheline",
1749 	.se_cmp		= sort__dcacheline_cmp,
1750 	.se_snprintf	= hist_entry__dcacheline_snprintf,
1751 	.se_width_idx	= HISTC_MEM_DCACHELINE,
1752 };
1753 
1754 static int64_t
1755 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1756 {
1757 	union perf_mem_data_src data_src_l;
1758 	union perf_mem_data_src data_src_r;
1759 
1760 	if (left->mem_info)
1761 		data_src_l = left->mem_info->data_src;
1762 	else
1763 		data_src_l.mem_blk = PERF_MEM_BLK_NA;
1764 
1765 	if (right->mem_info)
1766 		data_src_r = right->mem_info->data_src;
1767 	else
1768 		data_src_r.mem_blk = PERF_MEM_BLK_NA;
1769 
1770 	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1771 }
1772 
1773 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1774 					size_t size, unsigned int width)
1775 {
1776 	char out[16];
1777 
1778 	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1779 	return repsep_snprintf(bf, size, "%.*s", width, out);
1780 }
1781 
1782 struct sort_entry sort_mem_blocked = {
1783 	.se_header	= "Blocked",
1784 	.se_cmp		= sort__blocked_cmp,
1785 	.se_snprintf	= hist_entry__blocked_snprintf,
1786 	.se_width_idx	= HISTC_MEM_BLOCKED,
1787 };
1788 
1789 static int64_t
1790 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1791 {
1792 	uint64_t l = 0, r = 0;
1793 
1794 	if (left->mem_info)
1795 		l = left->mem_info->daddr.phys_addr;
1796 	if (right->mem_info)
1797 		r = right->mem_info->daddr.phys_addr;
1798 
1799 	return (int64_t)(r - l);
1800 }
1801 
1802 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1803 					   size_t size, unsigned int width)
1804 {
1805 	uint64_t addr = 0;
1806 	size_t ret = 0;
1807 	size_t len = BITS_PER_LONG / 4;
1808 
1809 	addr = he->mem_info->daddr.phys_addr;
1810 
1811 	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1812 
1813 	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1814 
1815 	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1816 
1817 	if (ret > width)
1818 		bf[width] = '\0';
1819 
1820 	return width;
1821 }
1822 
1823 struct sort_entry sort_mem_phys_daddr = {
1824 	.se_header	= "Data Physical Address",
1825 	.se_cmp		= sort__phys_daddr_cmp,
1826 	.se_snprintf	= hist_entry__phys_daddr_snprintf,
1827 	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
1828 };
1829 
1830 static int64_t
1831 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1832 {
1833 	uint64_t l = 0, r = 0;
1834 
1835 	if (left->mem_info)
1836 		l = left->mem_info->daddr.data_page_size;
1837 	if (right->mem_info)
1838 		r = right->mem_info->daddr.data_page_size;
1839 
1840 	return (int64_t)(r - l);
1841 }
1842 
1843 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1844 					  size_t size, unsigned int width)
1845 {
1846 	char str[PAGE_SIZE_NAME_LEN];
1847 
1848 	return repsep_snprintf(bf, size, "%-*s", width,
1849 			       get_page_size_name(he->mem_info->daddr.data_page_size, str));
1850 }
1851 
1852 struct sort_entry sort_mem_data_page_size = {
1853 	.se_header	= "Data Page Size",
1854 	.se_cmp		= sort__data_page_size_cmp,
1855 	.se_snprintf	= hist_entry__data_page_size_snprintf,
1856 	.se_width_idx	= HISTC_MEM_DATA_PAGE_SIZE,
1857 };
1858 
1859 static int64_t
1860 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1861 {
1862 	uint64_t l = left->code_page_size;
1863 	uint64_t r = right->code_page_size;
1864 
1865 	return (int64_t)(r - l);
1866 }
1867 
1868 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1869 					  size_t size, unsigned int width)
1870 {
1871 	char str[PAGE_SIZE_NAME_LEN];
1872 
1873 	return repsep_snprintf(bf, size, "%-*s", width,
1874 			       get_page_size_name(he->code_page_size, str));
1875 }
1876 
1877 struct sort_entry sort_code_page_size = {
1878 	.se_header	= "Code Page Size",
1879 	.se_cmp		= sort__code_page_size_cmp,
1880 	.se_snprintf	= hist_entry__code_page_size_snprintf,
1881 	.se_width_idx	= HISTC_CODE_PAGE_SIZE,
1882 };
1883 
1884 static int64_t
1885 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1886 {
1887 	if (!left->branch_info || !right->branch_info)
1888 		return cmp_null(left->branch_info, right->branch_info);
1889 
1890 	return left->branch_info->flags.abort !=
1891 		right->branch_info->flags.abort;
1892 }
1893 
1894 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1895 				    size_t size, unsigned int width)
1896 {
1897 	const char *out = "N/A";
1898 
1899 	if (he->branch_info) {
1900 		if (he->branch_info->flags.abort)
1901 			out = "A";
1902 		else
1903 			out = ".";
1904 	}
1905 
1906 	return repsep_snprintf(bf, size, "%-*s", width, out);
1907 }
1908 
1909 struct sort_entry sort_abort = {
1910 	.se_header	= "Transaction abort",
1911 	.se_cmp		= sort__abort_cmp,
1912 	.se_snprintf	= hist_entry__abort_snprintf,
1913 	.se_width_idx	= HISTC_ABORT,
1914 };
1915 
1916 static int64_t
1917 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1918 {
1919 	if (!left->branch_info || !right->branch_info)
1920 		return cmp_null(left->branch_info, right->branch_info);
1921 
1922 	return left->branch_info->flags.in_tx !=
1923 		right->branch_info->flags.in_tx;
1924 }
1925 
1926 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1927 				    size_t size, unsigned int width)
1928 {
1929 	const char *out = "N/A";
1930 
1931 	if (he->branch_info) {
1932 		if (he->branch_info->flags.in_tx)
1933 			out = "T";
1934 		else
1935 			out = ".";
1936 	}
1937 
1938 	return repsep_snprintf(bf, size, "%-*s", width, out);
1939 }
1940 
1941 struct sort_entry sort_in_tx = {
1942 	.se_header	= "Branch in transaction",
1943 	.se_cmp		= sort__in_tx_cmp,
1944 	.se_snprintf	= hist_entry__in_tx_snprintf,
1945 	.se_width_idx	= HISTC_IN_TX,
1946 };
1947 
1948 static int64_t
1949 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1950 {
1951 	return left->transaction - right->transaction;
1952 }
1953 
1954 static inline char *add_str(char *p, const char *str)
1955 {
1956 	strcpy(p, str);
1957 	return p + strlen(str);
1958 }
1959 
1960 static struct txbit {
1961 	unsigned flag;
1962 	const char *name;
1963 	int skip_for_len;
1964 } txbits[] = {
1965 	{ PERF_TXN_ELISION,        "EL ",        0 },
1966 	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
1967 	{ PERF_TXN_SYNC,           "SYNC ",      1 },
1968 	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
1969 	{ PERF_TXN_RETRY,          "RETRY ",     0 },
1970 	{ PERF_TXN_CONFLICT,       "CON ",       0 },
1971 	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1972 	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
1973 	{ 0, NULL, 0 }
1974 };
1975 
1976 int hist_entry__transaction_len(void)
1977 {
1978 	int i;
1979 	int len = 0;
1980 
1981 	for (i = 0; txbits[i].name; i++) {
1982 		if (!txbits[i].skip_for_len)
1983 			len += strlen(txbits[i].name);
1984 	}
1985 	len += 4; /* :XX<space> */
1986 	return len;
1987 }
1988 
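/*
 * Render the transaction flag bits as a list of names, followed by the
 * abort code when one is present.  For example, a sample with
 * PERF_TXN_TRANSACTION | PERF_TXN_SYNC | PERF_TXN_CONFLICT and an abort
 * code of 5 is shown as "TX SYNC CON :5"; a non-empty sample with
 * neither SYNC nor ASYNC set gets an extra "NEITHER" marker.
 */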
1989 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1990 					    size_t size, unsigned int width)
1991 {
1992 	u64 t = he->transaction;
1993 	char buf[128];
1994 	char *p = buf;
1995 	int i;
1996 
1997 	buf[0] = 0;
1998 	for (i = 0; txbits[i].name; i++)
1999 		if (txbits[i].flag & t)
2000 			p = add_str(p, txbits[i].name);
2001 	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2002 		p = add_str(p, "NEITHER ");
2003 	if (t & PERF_TXN_ABORT_MASK) {
2004 		sprintf(p, ":%" PRIx64,
2005 			(t & PERF_TXN_ABORT_MASK) >>
2006 			PERF_TXN_ABORT_SHIFT);
2007 		p += strlen(p);
2008 	}
2009 
2010 	return repsep_snprintf(bf, size, "%-*s", width, buf);
2011 }
2012 
2013 struct sort_entry sort_transaction = {
2014 	.se_header	= "Transaction                ",
2015 	.se_cmp		= sort__transaction_cmp,
2016 	.se_snprintf	= hist_entry__transaction_snprintf,
2017 	.se_width_idx	= HISTC_TRANSACTION,
2018 };
2019 
2020 /* --sort symbol_size */
2021 
2022 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2023 {
2024 	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2025 	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2026 
2027 	return size_l < size_r ? -1 :
2028 		size_l == size_r ? 0 : 1;
2029 }
2030 
2031 static int64_t
2032 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2033 {
2034 	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2035 }
2036 
2037 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2038 					  size_t bf_size, unsigned int width)
2039 {
2040 	if (sym)
2041 		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2042 
2043 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2044 }
2045 
2046 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2047 					 size_t size, unsigned int width)
2048 {
2049 	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2050 }
2051 
2052 struct sort_entry sort_sym_size = {
2053 	.se_header	= "Symbol size",
2054 	.se_cmp		= sort__sym_size_cmp,
2055 	.se_snprintf	= hist_entry__sym_size_snprintf,
2056 	.se_width_idx	= HISTC_SYM_SIZE,
2057 };
2058 
2059 /* --sort dso_size */
2060 
2061 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2062 {
2063 	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2064 	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2065 
2066 	return size_l < size_r ? -1 :
2067 		size_l == size_r ? 0 : 1;
2068 }
2069 
2070 static int64_t
2071 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2072 {
2073 	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2074 }
2075 
2076 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2077 					  size_t bf_size, unsigned int width)
2078 {
2079 	if (map && map__dso(map))
2080 		return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2081 
2082 	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2083 }
2084 
2085 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2086 					 size_t size, unsigned int width)
2087 {
2088 	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2089 }
2090 
2091 struct sort_entry sort_dso_size = {
2092 	.se_header	= "DSO size",
2093 	.se_cmp		= sort__dso_size_cmp,
2094 	.se_snprintf	= hist_entry__dso_size_snprintf,
2095 	.se_width_idx	= HISTC_DSO_SIZE,
2096 };
2097 
2098 /* --sort addr */
2099 
2100 static int64_t
2101 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2102 {
2103 	u64 left_ip = left->ip;
2104 	u64 right_ip = right->ip;
2105 	struct map *left_map = left->ms.map;
2106 	struct map *right_map = right->ms.map;
2107 
2108 	if (left_map)
2109 		left_ip = map__unmap_ip(left_map, left_ip);
2110 	if (right_map)
2111 		right_ip = map__unmap_ip(right_map, right_ip);
2112 
2113 	return _sort__addr_cmp(left_ip, right_ip);
2114 }
2115 
2116 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2117 				     size_t size, unsigned int width)
2118 {
2119 	u64 ip = he->ip;
2120 	struct map *map = he->ms.map;
2121 
2122 	if (map)
2123 		ip = map__unmap_ip(map, ip);
2124 
2125 	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2126 }
2127 
2128 struct sort_entry sort_addr = {
2129 	.se_header	= "Address",
2130 	.se_cmp		= sort__addr_cmp,
2131 	.se_snprintf	= hist_entry__addr_snprintf,
2132 	.se_width_idx	= HISTC_ADDR,
2133 };
2134 
2135 /* --sort type */
2136 
2137 struct annotated_data_type unknown_type = {
2138 	.self = {
2139 		.type_name = (char *)"(unknown)",
2140 		.children = LIST_HEAD_INIT(unknown_type.self.children),
2141 	},
2142 };
2143 
2144 static int64_t
2145 sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
2146 {
2147 	return sort__addr_cmp(left, right);
2148 }
2149 
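/*
 * Resolve the annotated data type for this entry lazily, on first use.
 * When no type information can be recovered (e.g. missing debug info),
 * fall back to unknown_type at offset 0 so later compares and printing
 * still have something to work with.
 */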
2150 static void sort__type_init(struct hist_entry *he)
2151 {
2152 	if (he->mem_type)
2153 		return;
2154 
2155 	he->mem_type = hist_entry__get_data_type(he);
2156 	if (he->mem_type == NULL) {
2157 		he->mem_type = &unknown_type;
2158 		he->mem_type_off = 0;
2159 	}
2160 }
2161 
2162 static int64_t
2163 sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2164 {
2165 	struct annotated_data_type *left_type = left->mem_type;
2166 	struct annotated_data_type *right_type = right->mem_type;
2167 
2168 	if (!left_type) {
2169 		sort__type_init(left);
2170 		left_type = left->mem_type;
2171 	}
2172 
2173 	if (!right_type) {
2174 		sort__type_init(right);
2175 		right_type = right->mem_type;
2176 	}
2177 
2178 	return strcmp(left_type->self.type_name, right_type->self.type_name);
2179 }
2180 
2181 static int64_t
2182 sort__type_sort(struct hist_entry *left, struct hist_entry *right)
2183 {
2184 	return sort__type_collapse(left, right);
2185 }
2186 
2187 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2188 				     size_t size, unsigned int width)
2189 {
2190 	return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2191 }
2192 
2193 struct sort_entry sort_type = {
2194 	.se_header	= "Data Type",
2195 	.se_cmp		= sort__type_cmp,
2196 	.se_collapse	= sort__type_collapse,
2197 	.se_sort	= sort__type_sort,
2198 	.se_init	= sort__type_init,
2199 	.se_snprintf	= hist_entry__type_snprintf,
2200 	.se_width_idx	= HISTC_TYPE,
2201 };
2202 
2203 /* --sort typeoff */
2204 
2205 static int64_t
2206 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2207 {
2208 	struct annotated_data_type *left_type = left->mem_type;
2209 	struct annotated_data_type *right_type = right->mem_type;
2210 	int64_t ret;
2211 
2212 	if (!left_type) {
2213 		sort__type_init(left);
2214 		left_type = left->mem_type;
2215 	}
2216 
2217 	if (!right_type) {
2218 		sort__type_init(right);
2219 		right_type = right->mem_type;
2220 	}
2221 
2222 	ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2223 	if (ret)
2224 		return ret;
2225 	return left->mem_type_off - right->mem_type_off;
2226 }
2227 
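/*
 * Walk the (possibly nested) struct/union members and build a dotted
 * path of the member names that contain 'offset', e.g. "u.field.sub".
 * Anonymous struct/union members contribute no name component of their
 * own, only their children do.
 */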
2228 static void fill_member_name(char *buf, size_t sz, struct annotated_member *m,
2229 			     int offset, bool first)
2230 {
2231 	struct annotated_member *child;
2232 
2233 	if (list_empty(&m->children))
2234 		return;
2235 
2236 	list_for_each_entry(child, &m->children, node) {
2237 		if (child->offset <= offset && offset < child->offset + child->size) {
2238 			int len = 0;
2239 
2240 			/* It can have anonymous struct/union members */
2241 			if (child->var_name) {
2242 				len = scnprintf(buf, sz, "%s%s",
2243 						first ? "" : ".", child->var_name);
2244 				first = false;
2245 			}
2246 
2247 			fill_member_name(buf + len, sz - len, child, offset, first);
2248 			return;
2249 		}
2250 	}
2251 }
2252 
2253 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2254 				     size_t size, unsigned int width __maybe_unused)
2255 {
2256 	struct annotated_data_type *he_type = he->mem_type;
2257 	char buf[4096];
2258 
2259 	buf[0] = '\0';
2260 	if (list_empty(&he_type->self.children))
2261 		snprintf(buf, sizeof(buf), "no field");
2262 	else
2263 		fill_member_name(buf, sizeof(buf), &he_type->self,
2264 				 he->mem_type_off, true);
2265 	buf[4095] = '\0';
2266 
2267 	return repsep_snprintf(bf, size, "%s %+d (%s)", he_type->self.type_name,
2268 			       he->mem_type_off, buf);
2269 }
2270 
2271 struct sort_entry sort_type_offset = {
2272 	.se_header	= "Data Type Offset",
2273 	.se_cmp		= sort__type_cmp,
2274 	.se_collapse	= sort__typeoff_sort,
2275 	.se_sort	= sort__typeoff_sort,
2276 	.se_init	= sort__type_init,
2277 	.se_snprintf	= hist_entry__typeoff_snprintf,
2278 	.se_width_idx	= HISTC_TYPE_OFFSET,
2279 };
2280 
2281 
2282 struct sort_dimension {
2283 	const char		*name;
2284 	struct sort_entry	*entry;
2285 	int			taken;
2286 };
2287 
2288 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2289 {
2290 	return 0;
2291 }
2292 
2293 const char * __weak arch_perf_header_entry(const char *se_header)
2294 {
2295 	return se_header;
2296 }
2297 
2298 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2299 {
2300 	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2301 }
2302 
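/*
 * Tables mapping --sort/--fields key names to sort entries.  The 'taken'
 * flag marks a dimension that was already added to the hpp list, so the
 * same key is not registered twice.
 */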
2303 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2304 
2305 static struct sort_dimension common_sort_dimensions[] = {
2306 	DIM(SORT_PID, "pid", sort_thread),
2307 	DIM(SORT_COMM, "comm", sort_comm),
2308 	DIM(SORT_DSO, "dso", sort_dso),
2309 	DIM(SORT_SYM, "symbol", sort_sym),
2310 	DIM(SORT_PARENT, "parent", sort_parent),
2311 	DIM(SORT_CPU, "cpu", sort_cpu),
2312 	DIM(SORT_SOCKET, "socket", sort_socket),
2313 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
2314 	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2315 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2316 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2317 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2318 #ifdef HAVE_LIBTRACEEVENT
2319 	DIM(SORT_TRACE, "trace", sort_trace),
2320 #endif
2321 	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2322 	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2323 	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2324 	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2325 	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2326 	DIM(SORT_TIME, "time", sort_time),
2327 	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2328 	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2329 	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2330 	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2331 	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2332 	DIM(SORT_ADDR, "addr", sort_addr),
2333 	DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2334 	DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2335 	DIM(SORT_SIMD, "simd", sort_simd),
2336 	DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
2337 	DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
2338 };
2339 
2340 #undef DIM
2341 
2342 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2343 
2344 static struct sort_dimension bstack_sort_dimensions[] = {
2345 	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2346 	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2347 	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2348 	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2349 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2350 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2351 	DIM(SORT_ABORT, "abort", sort_abort),
2352 	DIM(SORT_CYCLES, "cycles", sort_cycles),
2353 	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2354 	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2355 	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2356 	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2357 	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2358 };
2359 
2360 #undef DIM
2361 
2362 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2363 
2364 static struct sort_dimension memory_sort_dimensions[] = {
2365 	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2366 	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2367 	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2368 	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2369 	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2370 	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2371 	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2372 	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2373 	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2374 	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2375 	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2376 };
2377 
2378 #undef DIM
2379 
2380 struct hpp_dimension {
2381 	const char		*name;
2382 	struct perf_hpp_fmt	*fmt;
2383 	int			taken;
2384 };
2385 
2386 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2387 
2388 static struct hpp_dimension hpp_sort_dimensions[] = {
2389 	DIM(PERF_HPP__OVERHEAD, "overhead"),
2390 	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2391 	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2392 	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2393 	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2394 	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2395 	DIM(PERF_HPP__SAMPLES, "sample"),
2396 	DIM(PERF_HPP__PERIOD, "period"),
2397 };
2398 
2399 #undef DIM
2400 
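/*
 * A sort_entry wrapped as a perf_hpp_fmt, so classic --sort keys can sit
 * in the same format list as the hpp output fields (overhead, period,
 * ...).  The __sort__hpp_* callbacks below simply forward to the
 * embedded sort_entry.
 */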
2401 struct hpp_sort_entry {
2402 	struct perf_hpp_fmt hpp;
2403 	struct sort_entry *se;
2404 };
2405 
2406 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2407 {
2408 	struct hpp_sort_entry *hse;
2409 
2410 	if (!perf_hpp__is_sort_entry(fmt))
2411 		return;
2412 
2413 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2414 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2415 }
2416 
2417 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2418 			      struct hists *hists, int line __maybe_unused,
2419 			      int *span __maybe_unused)
2420 {
2421 	struct hpp_sort_entry *hse;
2422 	size_t len = fmt->user_len;
2423 
2424 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2425 
2426 	if (!len)
2427 		len = hists__col_len(hists, hse->se->se_width_idx);
2428 
2429 	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
2430 }
2431 
2432 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2433 			     struct perf_hpp *hpp __maybe_unused,
2434 			     struct hists *hists)
2435 {
2436 	struct hpp_sort_entry *hse;
2437 	size_t len = fmt->user_len;
2438 
2439 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2440 
2441 	if (!len)
2442 		len = hists__col_len(hists, hse->se->se_width_idx);
2443 
2444 	return len;
2445 }
2446 
2447 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2448 			     struct hist_entry *he)
2449 {
2450 	struct hpp_sort_entry *hse;
2451 	size_t len = fmt->user_len;
2452 
2453 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2454 
2455 	if (!len)
2456 		len = hists__col_len(he->hists, hse->se->se_width_idx);
2457 
2458 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2459 }
2460 
2461 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2462 			       struct hist_entry *a, struct hist_entry *b)
2463 {
2464 	struct hpp_sort_entry *hse;
2465 
2466 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2467 	return hse->se->se_cmp(a, b);
2468 }
2469 
2470 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2471 				    struct hist_entry *a, struct hist_entry *b)
2472 {
2473 	struct hpp_sort_entry *hse;
2474 	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2475 
2476 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2477 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2478 	return collapse_fn(a, b);
2479 }
2480 
2481 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2482 				struct hist_entry *a, struct hist_entry *b)
2483 {
2484 	struct hpp_sort_entry *hse;
2485 	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2486 
2487 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2488 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2489 	return sort_fn(a, b);
2490 }
2491 
2492 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2493 {
2494 	return format->header == __sort__hpp_header;
2495 }
2496 
2497 #define MK_SORT_ENTRY_CHK(key)					\
2498 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
2499 {								\
2500 	struct hpp_sort_entry *hse;				\
2501 								\
2502 	if (!perf_hpp__is_sort_entry(fmt))			\
2503 		return false;					\
2504 								\
2505 	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
2506 	return hse->se == &sort_ ## key ;			\
2507 }
2508 
2509 #ifdef HAVE_LIBTRACEEVENT
2510 MK_SORT_ENTRY_CHK(trace)
2511 #else
2512 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2513 {
2514 	return false;
2515 }
2516 #endif
2517 MK_SORT_ENTRY_CHK(srcline)
2518 MK_SORT_ENTRY_CHK(srcfile)
2519 MK_SORT_ENTRY_CHK(thread)
2520 MK_SORT_ENTRY_CHK(comm)
2521 MK_SORT_ENTRY_CHK(dso)
2522 MK_SORT_ENTRY_CHK(sym)
2523 
2524 
2525 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2526 {
2527 	struct hpp_sort_entry *hse_a;
2528 	struct hpp_sort_entry *hse_b;
2529 
2530 	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2531 		return false;
2532 
2533 	hse_a = container_of(a, struct hpp_sort_entry, hpp);
2534 	hse_b = container_of(b, struct hpp_sort_entry, hpp);
2535 
2536 	return hse_a->se == hse_b->se;
2537 }
2538 
2539 static void hse_free(struct perf_hpp_fmt *fmt)
2540 {
2541 	struct hpp_sort_entry *hse;
2542 
2543 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2544 	free(hse);
2545 }
2546 
2547 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2548 {
2549 	struct hpp_sort_entry *hse;
2550 
2551 	if (!perf_hpp__is_sort_entry(fmt))
2552 		return;
2553 
2554 	hse = container_of(fmt, struct hpp_sort_entry, hpp);
2555 
2556 	if (hse->se->se_init)
2557 		hse->se->se_init(he);
2558 }
2559 
2560 static struct hpp_sort_entry *
2561 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2562 {
2563 	struct hpp_sort_entry *hse;
2564 
2565 	hse = malloc(sizeof(*hse));
2566 	if (hse == NULL) {
2567 		pr_err("Memory allocation failed\n");
2568 		return NULL;
2569 	}
2570 
2571 	hse->se = sd->entry;
2572 	hse->hpp.name = sd->entry->se_header;
2573 	hse->hpp.header = __sort__hpp_header;
2574 	hse->hpp.width = __sort__hpp_width;
2575 	hse->hpp.entry = __sort__hpp_entry;
2576 	hse->hpp.color = NULL;
2577 
2578 	hse->hpp.cmp = __sort__hpp_cmp;
2579 	hse->hpp.collapse = __sort__hpp_collapse;
2580 	hse->hpp.sort = __sort__hpp_sort;
2581 	hse->hpp.equal = __sort__hpp_equal;
2582 	hse->hpp.free = hse_free;
2583 	hse->hpp.init = hse_init;
2584 
2585 	INIT_LIST_HEAD(&hse->hpp.list);
2586 	INIT_LIST_HEAD(&hse->hpp.sort_list);
2587 	hse->hpp.elide = false;
2588 	hse->hpp.len = 0;
2589 	hse->hpp.user_len = 0;
2590 	hse->hpp.level = level;
2591 
2592 	return hse;
2593 }
2594 
2595 static void hpp_free(struct perf_hpp_fmt *fmt)
2596 {
2597 	free(fmt);
2598 }
2599 
2600 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2601 						       int level)
2602 {
2603 	struct perf_hpp_fmt *fmt;
2604 
2605 	fmt = memdup(hd->fmt, sizeof(*fmt));
2606 	if (fmt) {
2607 		INIT_LIST_HEAD(&fmt->list);
2608 		INIT_LIST_HEAD(&fmt->sort_list);
2609 		fmt->free = hpp_free;
2610 		fmt->level = level;
2611 	}
2612 
2613 	return fmt;
2614 }
2615 
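/*
 * Returns -1 when no sort key in the hpp list applied a filter of this
 * type, otherwise the OR of the individual se_filter() results.
 */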
2616 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2617 {
2618 	struct perf_hpp_fmt *fmt;
2619 	struct hpp_sort_entry *hse;
2620 	int ret = -1;
2621 	int r;
2622 
2623 	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2624 		if (!perf_hpp__is_sort_entry(fmt))
2625 			continue;
2626 
2627 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2628 		if (hse->se->se_filter == NULL)
2629 			continue;
2630 
2631 		/*
2632 		 * The hist entry is filtered if any of the sort keys in the hpp
2633 		 * list applies.  But non-matching filter types should be skipped.
2634 		 */
2635 		r = hse->se->se_filter(he, type, arg);
2636 		if (r >= 0) {
2637 			if (ret < 0)
2638 				ret = 0;
2639 			ret |= r;
2640 		}
2641 	}
2642 
2643 	return ret;
2644 }
2645 
2646 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2647 					  struct perf_hpp_list *list,
2648 					  int level)
2649 {
2650 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2651 
2652 	if (hse == NULL)
2653 		return -1;
2654 
2655 	perf_hpp_list__register_sort_field(list, &hse->hpp);
2656 	return 0;
2657 }
2658 
2659 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2660 					    struct perf_hpp_list *list)
2661 {
2662 	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2663 
2664 	if (hse == NULL)
2665 		return -1;
2666 
2667 	perf_hpp_list__column_register(list, &hse->hpp);
2668 	return 0;
2669 }
2670 
2671 #ifndef HAVE_LIBTRACEEVENT
2672 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2673 {
2674 	return false;
2675 }
2676 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2677 				     struct hists *hists __maybe_unused)
2678 {
2679 	return false;
2680 }
2681 #else
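/*
 * Dynamic entries are built at runtime from tracepoint format fields
 * (one per "event.field" sort key) instead of coming from the static
 * sort_entry tables above.
 */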
2682 struct hpp_dynamic_entry {
2683 	struct perf_hpp_fmt hpp;
2684 	struct evsel *evsel;
2685 	struct tep_format_field *field;
2686 	unsigned dynamic_len;
2687 	bool raw_trace;
2688 };
2689 
2690 static int hde_width(struct hpp_dynamic_entry *hde)
2691 {
2692 	if (!hde->hpp.len) {
2693 		int len = hde->dynamic_len;
2694 		int namelen = strlen(hde->field->name);
2695 		int fieldlen = hde->field->size;
2696 
2697 		if (namelen > len)
2698 			len = namelen;
2699 
2700 		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2701 			/* length for printing hex numbers */
2702 			fieldlen = hde->field->size * 2 + 2;
2703 		}
2704 		if (fieldlen > len)
2705 			len = fieldlen;
2706 
2707 		hde->hpp.len = len;
2708 	}
2709 	return hde->hpp.len;
2710 }
2711 
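/*
 * Scan the pretty-printed trace output for this entry (assumed to be
 * space-separated "name=value" pairs) and grow hde->dynamic_len to the
 * widest value seen for this field, so the column fits every row.
 */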
2712 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2713 			       struct hist_entry *he)
2714 {
2715 	char *str, *pos;
2716 	struct tep_format_field *field = hde->field;
2717 	size_t namelen;
2718 	bool last = false;
2719 
2720 	if (hde->raw_trace)
2721 		return;
2722 
2723 	/* parse pretty print result and update max length */
2724 	if (!he->trace_output)
2725 		he->trace_output = get_trace_output(he);
2726 
2727 	namelen = strlen(field->name);
2728 	str = he->trace_output;
2729 
2730 	while (str) {
2731 		pos = strchr(str, ' ');
2732 		if (pos == NULL) {
2733 			last = true;
2734 			pos = str + strlen(str);
2735 		}
2736 
2737 		if (!strncmp(str, field->name, namelen)) {
2738 			size_t len;
2739 
2740 			str += namelen + 1;
2741 			len = pos - str;
2742 
2743 			if (len > hde->dynamic_len)
2744 				hde->dynamic_len = len;
2745 			break;
2746 		}
2747 
2748 		if (last)
2749 			str = NULL;
2750 		else
2751 			str = pos + 1;
2752 	}
2753 }
2754 
2755 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2756 			      struct hists *hists __maybe_unused,
2757 			      int line __maybe_unused,
2758 			      int *span __maybe_unused)
2759 {
2760 	struct hpp_dynamic_entry *hde;
2761 	size_t len = fmt->user_len;
2762 
2763 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2764 
2765 	if (!len)
2766 		len = hde_width(hde);
2767 
2768 	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2769 }
2770 
2771 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2772 			     struct perf_hpp *hpp __maybe_unused,
2773 			     struct hists *hists __maybe_unused)
2774 {
2775 	struct hpp_dynamic_entry *hde;
2776 	size_t len = fmt->user_len;
2777 
2778 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2779 
2780 	if (!len)
2781 		len = hde_width(hde);
2782 
2783 	return len;
2784 }
2785 
2786 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2787 {
2788 	struct hpp_dynamic_entry *hde;
2789 
2790 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2791 
2792 	return hists_to_evsel(hists) == hde->evsel;
2793 }
2794 
2795 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2796 			     struct hist_entry *he)
2797 {
2798 	struct hpp_dynamic_entry *hde;
2799 	size_t len = fmt->user_len;
2800 	char *str, *pos;
2801 	struct tep_format_field *field;
2802 	size_t namelen;
2803 	bool last = false;
2804 	int ret;
2805 
2806 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2807 
2808 	if (!len)
2809 		len = hde_width(hde);
2810 
2811 	if (hde->raw_trace)
2812 		goto raw_field;
2813 
2814 	if (!he->trace_output)
2815 		he->trace_output = get_trace_output(he);
2816 
2817 	field = hde->field;
2818 	namelen = strlen(field->name);
2819 	str = he->trace_output;
2820 
2821 	while (str) {
2822 		pos = strchr(str, ' ');
2823 		if (pos == NULL) {
2824 			last = true;
2825 			pos = str + strlen(str);
2826 		}
2827 
2828 		if (!strncmp(str, field->name, namelen)) {
2829 			str += namelen + 1;
2830 			str = strndup(str, pos - str);
2831 
2832 			if (str == NULL)
2833 				return scnprintf(hpp->buf, hpp->size,
2834 						 "%*.*s", len, len, "ERROR");
2835 			break;
2836 		}
2837 
2838 		if (last)
2839 			str = NULL;
2840 		else
2841 			str = pos + 1;
2842 	}
2843 
2844 	if (str == NULL) {
2845 		struct trace_seq seq;
2846 raw_field:
2847 		trace_seq_init(&seq);
2848 		tep_print_field(&seq, he->raw_data, hde->field);
2849 		str = seq.buffer;
2850 	}
2851 
2852 	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2853 	free(str);
2854 	return ret;
2855 }
2856 
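/*
 * Compare the raw bytes of the tracepoint field.  For dynamic fields the
 * 16-bit offset/size pair stored in the field slot is decoded first
 * (and rebased past the field itself when the offset is relative).
 */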
2857 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2858 			       struct hist_entry *a, struct hist_entry *b)
2859 {
2860 	struct hpp_dynamic_entry *hde;
2861 	struct tep_format_field *field;
2862 	unsigned offset, size;
2863 
2864 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2865 
2866 	field = hde->field;
2867 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2868 		unsigned long long dyn;
2869 
2870 		tep_read_number_field(field, a->raw_data, &dyn);
2871 		offset = dyn & 0xffff;
2872 		size = (dyn >> 16) & 0xffff;
2873 		if (tep_field_is_relative(field->flags))
2874 			offset += field->offset + field->size;
2875 		/* record max width for output */
2876 		if (size > hde->dynamic_len)
2877 			hde->dynamic_len = size;
2878 	} else {
2879 		offset = field->offset;
2880 		size = field->size;
2881 	}
2882 
2883 	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2884 }
2885 
2886 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2887 {
2888 	return fmt->cmp == __sort__hde_cmp;
2889 }
2890 
2891 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2892 {
2893 	struct hpp_dynamic_entry *hde_a;
2894 	struct hpp_dynamic_entry *hde_b;
2895 
2896 	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2897 		return false;
2898 
2899 	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2900 	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2901 
2902 	return hde_a->field == hde_b->field;
2903 }
2904 
2905 static void hde_free(struct perf_hpp_fmt *fmt)
2906 {
2907 	struct hpp_dynamic_entry *hde;
2908 
2909 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2910 	free(hde);
2911 }
2912 
2913 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2914 {
2915 	struct hpp_dynamic_entry *hde;
2916 
2917 	if (!perf_hpp__is_dynamic_entry(fmt))
2918 		return;
2919 
2920 	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2921 	update_dynamic_len(hde, he);
2922 }
2923 
2924 static struct hpp_dynamic_entry *
2925 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2926 		      int level)
2927 {
2928 	struct hpp_dynamic_entry *hde;
2929 
2930 	hde = malloc(sizeof(*hde));
2931 	if (hde == NULL) {
2932 		pr_debug("Memory allocation failed\n");
2933 		return NULL;
2934 	}
2935 
2936 	hde->evsel = evsel;
2937 	hde->field = field;
2938 	hde->dynamic_len = 0;
2939 
2940 	hde->hpp.name = field->name;
2941 	hde->hpp.header = __sort__hde_header;
2942 	hde->hpp.width  = __sort__hde_width;
2943 	hde->hpp.entry  = __sort__hde_entry;
2944 	hde->hpp.color  = NULL;
2945 
2946 	hde->hpp.init = __sort__hde_init;
2947 	hde->hpp.cmp = __sort__hde_cmp;
2948 	hde->hpp.collapse = __sort__hde_cmp;
2949 	hde->hpp.sort = __sort__hde_cmp;
2950 	hde->hpp.equal = __sort__hde_equal;
2951 	hde->hpp.free = hde_free;
2952 
2953 	INIT_LIST_HEAD(&hde->hpp.list);
2954 	INIT_LIST_HEAD(&hde->hpp.sort_list);
2955 	hde->hpp.elide = false;
2956 	hde->hpp.len = 0;
2957 	hde->hpp.user_len = 0;
2958 	hde->hpp.level = level;
2959 
2960 	return hde;
2961 }
2962 #endif /* HAVE_LIBTRACEEVENT */
2963 
2964 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2965 {
2966 	struct perf_hpp_fmt *new_fmt = NULL;
2967 
2968 	if (perf_hpp__is_sort_entry(fmt)) {
2969 		struct hpp_sort_entry *hse, *new_hse;
2970 
2971 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
2972 		new_hse = memdup(hse, sizeof(*hse));
2973 		if (new_hse)
2974 			new_fmt = &new_hse->hpp;
2975 #ifdef HAVE_LIBTRACEEVENT
2976 	} else if (perf_hpp__is_dynamic_entry(fmt)) {
2977 		struct hpp_dynamic_entry *hde, *new_hde;
2978 
2979 		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2980 		new_hde = memdup(hde, sizeof(*hde));
2981 		if (new_hde)
2982 			new_fmt = &new_hde->hpp;
2983 #endif
2984 	} else {
2985 		new_fmt = memdup(fmt, sizeof(*fmt));
2986 	}
2987 
2988 	INIT_LIST_HEAD(&new_fmt->list);
2989 	INIT_LIST_HEAD(&new_fmt->sort_list);
2990 
2991 	return new_fmt;
2992 }
2993 
2994 static int parse_field_name(char *str, char **event, char **field, char **opt)
2995 {
2996 	char *event_name, *field_name, *opt_name;
2997 
2998 	event_name = str;
2999 	field_name = strchr(str, '.');
3000 
3001 	if (field_name) {
3002 		*field_name++ = '\0';
3003 	} else {
3004 		event_name = NULL;
3005 		field_name = str;
3006 	}
3007 
3008 	opt_name = strchr(field_name, '/');
3009 	if (opt_name)
3010 		*opt_name++ = '\0';
3011 
3012 	*event = event_name;
3013 	*field = field_name;
3014 	*opt   = opt_name;
3015 
3016 	return 0;
3017 }
3018 
3019 /* find the matching evsel using a given event name.  The event name can be:
3020  *   1. '%' + event index (e.g. '%1' for first event)
3021  *   2. full event name (e.g. sched:sched_switch)
3022  *   3. partial event name (should not contain ':')
3023  */
3024 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
3025 {
3026 	struct evsel *evsel = NULL;
3027 	struct evsel *pos;
3028 	bool full_name;
3029 
3030 	/* case 1 */
3031 	if (event_name[0] == '%') {
3032 		int nr = strtol(event_name+1, NULL, 0);
3033 
3034 		if (nr > evlist->core.nr_entries)
3035 			return NULL;
3036 
3037 		evsel = evlist__first(evlist);
3038 		while (--nr > 0)
3039 			evsel = evsel__next(evsel);
3040 
3041 		return evsel;
3042 	}
3043 
3044 	full_name = !!strchr(event_name, ':');
3045 	evlist__for_each_entry(evlist, pos) {
3046 		/* case 2 */
3047 		if (full_name && evsel__name_is(pos, event_name))
3048 			return pos;
3049 		/* case 3 */
3050 		if (!full_name && strstr(pos->name, event_name)) {
3051 			if (evsel) {
3052 				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
3053 					 event_name, evsel->name, pos->name);
3054 				return NULL;
3055 			}
3056 			evsel = pos;
3057 		}
3058 	}
3059 
3060 	return evsel;
3061 }
3062 
3063 #ifdef HAVE_LIBTRACEEVENT
3064 static int __dynamic_dimension__add(struct evsel *evsel,
3065 				    struct tep_format_field *field,
3066 				    bool raw_trace, int level)
3067 {
3068 	struct hpp_dynamic_entry *hde;
3069 
3070 	hde = __alloc_dynamic_entry(evsel, field, level);
3071 	if (hde == NULL)
3072 		return -ENOMEM;
3073 
3074 	hde->raw_trace = raw_trace;
3075 
3076 	perf_hpp__register_sort_field(&hde->hpp);
3077 	return 0;
3078 }
3079 
3080 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3081 {
3082 	int ret;
3083 	struct tep_format_field *field;
3084 
3085 	field = evsel->tp_format->format.fields;
3086 	while (field) {
3087 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3088 		if (ret < 0)
3089 			return ret;
3090 
3091 		field = field->next;
3092 	}
3093 	return 0;
3094 }
3095 
3096 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3097 				  int level)
3098 {
3099 	int ret;
3100 	struct evsel *evsel;
3101 
3102 	evlist__for_each_entry(evlist, evsel) {
3103 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3104 			continue;
3105 
3106 		ret = add_evsel_fields(evsel, raw_trace, level);
3107 		if (ret < 0)
3108 			return ret;
3109 	}
3110 	return 0;
3111 }
3112 
3113 static int add_all_matching_fields(struct evlist *evlist,
3114 				   char *field_name, bool raw_trace, int level)
3115 {
3116 	int ret = -ESRCH;
3117 	struct evsel *evsel;
3118 	struct tep_format_field *field;
3119 
3120 	evlist__for_each_entry(evlist, evsel) {
3121 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3122 			continue;
3123 
3124 		field = tep_find_any_field(evsel->tp_format, field_name);
3125 		if (field == NULL)
3126 			continue;
3127 
3128 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3129 		if (ret < 0)
3130 			break;
3131 	}
3132 	return ret;
3133 }
3134 #endif /* HAVE_LIBTRACEEVENT */
3135 
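/*
 * Add sort keys for tracepoint fields.  The token may be "trace_fields"
 * (every field of every tracepoint event), "<field>" (that field in any
 * event that has it) or "<event>.<field>", where <field> can be "*" for
 * all fields of one event and an optional "/raw" suffix prints the raw
 * field value, e.g. "sched:sched_switch.prev_comm" or "%1.next_pid/raw".
 */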
3136 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3137 			     int level)
3138 {
3139 	char *str, *event_name, *field_name, *opt_name;
3140 	struct evsel *evsel;
3141 	bool raw_trace = symbol_conf.raw_trace;
3142 	int ret = 0;
3143 
3144 	if (evlist == NULL)
3145 		return -ENOENT;
3146 
3147 	str = strdup(tok);
3148 	if (str == NULL)
3149 		return -ENOMEM;
3150 
3151 	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3152 		ret = -EINVAL;
3153 		goto out;
3154 	}
3155 
3156 	if (opt_name) {
3157 		if (strcmp(opt_name, "raw")) {
3158 			pr_debug("unsupported field option %s\n", opt_name);
3159 			ret = -EINVAL;
3160 			goto out;
3161 		}
3162 		raw_trace = true;
3163 	}
3164 
3165 #ifdef HAVE_LIBTRACEEVENT
3166 	if (!strcmp(field_name, "trace_fields")) {
3167 		ret = add_all_dynamic_fields(evlist, raw_trace, level);
3168 		goto out;
3169 	}
3170 
3171 	if (event_name == NULL) {
3172 		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3173 		goto out;
3174 	}
3175 #else
3176 	evlist__for_each_entry(evlist, evsel) {
3177 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3178 			pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3179 			ret = -ENOTSUP;
3180 		}
3181 	}
3182 
3183 	if (ret) {
3184 		pr_err("\n");
3185 		goto out;
3186 	}
3187 #endif
3188 
3189 	evsel = find_evsel(evlist, event_name);
3190 	if (evsel == NULL) {
3191 		pr_debug("Cannot find event: %s\n", event_name);
3192 		ret = -ENOENT;
3193 		goto out;
3194 	}
3195 
3196 	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3197 		pr_debug("%s is not a tracepoint event\n", event_name);
3198 		ret = -EINVAL;
3199 		goto out;
3200 	}
3201 
3202 #ifdef HAVE_LIBTRACEEVENT
3203 	if (!strcmp(field_name, "*")) {
3204 		ret = add_evsel_fields(evsel, raw_trace, level);
3205 	} else {
3206 		struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
3207 
3208 		if (field == NULL) {
3209 			pr_debug("Cannot find event field for %s.%s\n",
3210 				 event_name, field_name);
3211 			ret = -ENOENT;
			goto out;
3212 		}
3213 
3214 		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3215 	}
3216 #else
3217 	(void)level;
3218 	(void)raw_trace;
3219 #endif /* HAVE_LIBTRACEEVENT */
3220 
3221 out:
3222 	free(str);
3223 	return ret;
3224 }
3225 
3226 static int __sort_dimension__add(struct sort_dimension *sd,
3227 				 struct perf_hpp_list *list,
3228 				 int level)
3229 {
3230 	if (sd->taken)
3231 		return 0;
3232 
3233 	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3234 		return -1;
3235 
3236 	if (sd->entry->se_collapse)
3237 		list->need_collapse = 1;
3238 
3239 	sd->taken = 1;
3240 
3241 	return 0;
3242 }
3243 
3244 static int __hpp_dimension__add(struct hpp_dimension *hd,
3245 				struct perf_hpp_list *list,
3246 				int level)
3247 {
3248 	struct perf_hpp_fmt *fmt;
3249 
3250 	if (hd->taken)
3251 		return 0;
3252 
3253 	fmt = __hpp_dimension__alloc_hpp(hd, level);
3254 	if (!fmt)
3255 		return -1;
3256 
3257 	hd->taken = 1;
3258 	perf_hpp_list__register_sort_field(list, fmt);
3259 	return 0;
3260 }
3261 
3262 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3263 					struct sort_dimension *sd)
3264 {
3265 	if (sd->taken)
3266 		return 0;
3267 
3268 	if (__sort_dimension__add_hpp_output(sd, list) < 0)
3269 		return -1;
3270 
3271 	sd->taken = 1;
3272 	return 0;
3273 }
3274 
3275 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3276 				       struct hpp_dimension *hd)
3277 {
3278 	struct perf_hpp_fmt *fmt;
3279 
3280 	if (hd->taken)
3281 		return 0;
3282 
3283 	fmt = __hpp_dimension__alloc_hpp(hd, 0);
3284 	if (!fmt)
3285 		return -1;
3286 
3287 	hd->taken = 1;
3288 	perf_hpp_list__column_register(list, fmt);
3289 	return 0;
3290 }
3291 
3292 int hpp_dimension__add_output(unsigned col)
3293 {
3294 	BUG_ON(col >= PERF_HPP__MAX_INDEX);
3295 	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
3296 }
3297 
3298 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3299 			struct evlist *evlist,
3300 			int level)
3301 {
3302 	unsigned int i, j;
3303 
3304 	/*
3305 	 * Check to see if there are any arch-specific
3306 	 * sort dimensions not applicable for the current
3307 	 * architecture. If so, skip that sort key since
3308 	 * we don't want to display it in the output fields.
3309 	 */
3310 	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3311 		if (!strcmp(arch_specific_sort_keys[j], tok) &&
3312 				!arch_support_sort_key(tok)) {
3313 			return 0;
3314 		}
3315 	}
3316 
3317 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3318 		struct sort_dimension *sd = &common_sort_dimensions[i];
3319 
3320 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3321 			continue;
3322 
3323 		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3324 			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3325 				sort_dimension_add_dynamic_header(sd);
3326 		}
3327 
3328 		if (sd->entry == &sort_parent) {
3329 			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3330 			if (ret) {
3331 				char err[BUFSIZ];
3332 
3333 				regerror(ret, &parent_regex, err, sizeof(err));
3334 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3335 				return -EINVAL;
3336 			}
3337 			list->parent = 1;
3338 		} else if (sd->entry == &sort_sym) {
3339 			list->sym = 1;
3340 			/*
3341 			 * perf diff displays the performance difference amongst
3342 			 * two or more perf.data files. Those files could come
3343 			 * from different binaries. So we should not compare
3344 			 * their IPs, but their symbol names.
3345 			 */
3346 			if (sort__mode == SORT_MODE__DIFF)
3347 				sd->entry->se_collapse = sort__sym_sort;
3348 
3349 		} else if (sd->entry == &sort_dso) {
3350 			list->dso = 1;
3351 		} else if (sd->entry == &sort_socket) {
3352 			list->socket = 1;
3353 		} else if (sd->entry == &sort_thread) {
3354 			list->thread = 1;
3355 		} else if (sd->entry == &sort_comm) {
3356 			list->comm = 1;
3357 		}
3358 
3359 		return __sort_dimension__add(sd, list, level);
3360 	}
3361 
3362 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3363 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3364 
3365 		if (strncasecmp(tok, hd->name, strlen(tok)))
3366 			continue;
3367 
3368 		return __hpp_dimension__add(hd, list, level);
3369 	}
3370 
3371 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3372 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3373 
3374 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3375 			continue;
3376 
3377 		if (sort__mode != SORT_MODE__BRANCH)
3378 			return -EINVAL;
3379 
3380 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3381 			list->sym = 1;
3382 
3383 		__sort_dimension__add(sd, list, level);
3384 		return 0;
3385 	}
3386 
3387 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3388 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3389 
3390 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3391 			continue;
3392 
3393 		if (sort__mode != SORT_MODE__MEMORY)
3394 			return -EINVAL;
3395 
3396 		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3397 			return -EINVAL;
3398 
3399 		if (sd->entry == &sort_mem_daddr_sym)
3400 			list->sym = 1;
3401 
3402 		__sort_dimension__add(sd, list, level);
3403 		return 0;
3404 	}
3405 
3406 	if (!add_dynamic_entry(evlist, tok, level))
3407 		return 0;
3408 
3409 	return -ESRCH;
3410 }
3411 
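/*
 * Parse the sort key string.  Each ','/' '-separated key normally gets a
 * new hierarchy level, while keys grouped in braces share one level,
 * e.g. "comm,{dso,symbol}" keeps dso and symbol on the same level.
 */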
3412 static int setup_sort_list(struct perf_hpp_list *list, char *str,
3413 			   struct evlist *evlist)
3414 {
3415 	char *tmp, *tok;
3416 	int ret = 0;
3417 	int level = 0;
3418 	int next_level = 1;
3419 	bool in_group = false;
3420 
3421 	do {
3422 		tok = str;
3423 		tmp = strpbrk(str, "{}, ");
3424 		if (tmp) {
3425 			if (in_group)
3426 				next_level = level;
3427 			else
3428 				next_level = level + 1;
3429 
3430 			if (*tmp == '{')
3431 				in_group = true;
3432 			else if (*tmp == '}')
3433 				in_group = false;
3434 
3435 			*tmp = '\0';
3436 			str = tmp + 1;
3437 		}
3438 
3439 		if (*tok) {
3440 			ret = sort_dimension__add(list, tok, evlist, level);
3441 			if (ret == -EINVAL) {
3442 				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3443 					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3444 				else
3445 					ui__error("Invalid --sort key: `%s'", tok);
3446 				break;
3447 			} else if (ret == -ESRCH) {
3448 				ui__error("Unknown --sort key: `%s'", tok);
3449 				break;
3450 			}
3451 		}
3452 
3453 		level = next_level;
3454 	} while (tmp);
3455 
3456 	return ret;
3457 }
3458 
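/*
 * Pick the default sort order for the current sort mode.  When every
 * event in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT
 * so the "trace" key (or the raw trace fields) is used by default.
 */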
3459 static const char *get_default_sort_order(struct evlist *evlist)
3460 {
3461 	const char *default_sort_orders[] = {
3462 		default_sort_order,
3463 		default_branch_sort_order,
3464 		default_mem_sort_order,
3465 		default_top_sort_order,
3466 		default_diff_sort_order,
3467 		default_tracepoint_sort_order,
3468 	};
3469 	bool use_trace = true;
3470 	struct evsel *evsel;
3471 
3472 	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3473 
3474 	if (evlist == NULL || evlist__empty(evlist))
3475 		goto out_no_evlist;
3476 
3477 	evlist__for_each_entry(evlist, evsel) {
3478 		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3479 			use_trace = false;
3480 			break;
3481 		}
3482 	}
3483 
3484 	if (use_trace) {
3485 		sort__mode = SORT_MODE__TRACEPOINT;
3486 		if (symbol_conf.raw_trace)
3487 			return "trace_fields";
3488 	}
3489 out_no_evlist:
3490 	return default_sort_orders[sort__mode];
3491 }
3492 
3493 static int setup_sort_order(struct evlist *evlist)
3494 {
3495 	char *new_sort_order;
3496 
3497 	/*
3498 	 * Append '+'-prefixed sort order to the default sort
3499 	 * order string.
3500 	 */
3501 	if (!sort_order || is_strict_order(sort_order))
3502 		return 0;
3503 
3504 	if (sort_order[1] == '\0') {
3505 		ui__error("Invalid --sort key: `+'");
3506 		return -EINVAL;
3507 	}
3508 
3509 	/*
3510 	 * We allocate a new sort_order string, but we never free it,
3511 	 * because it is referenced throughout the rest of the code.
3512 	 */
3513 	if (asprintf(&new_sort_order, "%s,%s",
3514 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
3515 		pr_err("Not enough memory to set up --sort");
3516 		return -ENOMEM;
3517 	}
3518 
3519 	sort_order = new_sort_order;
3520 	return 0;
3521 }
3522 
3523 /*
3524  * Adds a 'pre,' prefix to 'str' if 'pre' is
3525  * not already part of 'str'.
3526  */
3527 static char *prefix_if_not_in(const char *pre, char *str)
3528 {
3529 	char *n;
3530 
3531 	if (!str || strstr(str, pre))
3532 		return str;
3533 
3534 	if (asprintf(&n, "%s,%s", pre, str) < 0)
3535 		n = NULL;
3536 
3537 	free(str);
3538 	return n;
3539 }
3540 
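/*
 * Make sure "overhead" (and "overhead_children" when cumulative
 * callchains are enabled) leads the sort keys, e.g. "comm,dso" becomes
 * "overhead,comm,dso".  perf diff manages its own columns and is left
 * alone.
 */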
3541 static char *setup_overhead(char *keys)
3542 {
3543 	if (sort__mode == SORT_MODE__DIFF)
3544 		return keys;
3545 
3546 	keys = prefix_if_not_in("overhead", keys);
3547 
3548 	if (symbol_conf.cumulate_callchain)
3549 		keys = prefix_if_not_in("overhead_children", keys);
3550 
3551 	return keys;
3552 }
3553 
3554 static int __setup_sorting(struct evlist *evlist)
3555 {
3556 	char *str;
3557 	const char *sort_keys;
3558 	int ret = 0;
3559 
3560 	ret = setup_sort_order(evlist);
3561 	if (ret)
3562 		return ret;
3563 
3564 	sort_keys = sort_order;
3565 	if (sort_keys == NULL) {
3566 		if (is_strict_order(field_order)) {
3567 			/*
3568 			 * If user specified field order but no sort order,
3569 			 * we'll honor it and not add default sort orders.
3570 			 */
3571 			return 0;
3572 		}
3573 
3574 		sort_keys = get_default_sort_order(evlist);
3575 	}
3576 
3577 	str = strdup(sort_keys);
3578 	if (str == NULL) {
3579 		pr_err("Not enough memory to setup sort keys");
3580 		return -ENOMEM;
3581 	}
3582 
3583 	/*
3584 	 * Prepend overhead fields for backward compatibility.
3585 	 */
3586 	if (!is_strict_order(field_order)) {
3587 		str = setup_overhead(str);
3588 		if (str == NULL) {
3589 			pr_err("Not enough memory to setup overhead keys");
3590 			return -ENOMEM;
3591 		}
3592 	}
3593 
3594 	ret = setup_sort_list(&perf_hpp_list, str, evlist);
3595 
3596 	free(str);
3597 	return ret;
3598 }
3599 
3600 void perf_hpp__set_elide(int idx, bool elide)
3601 {
3602 	struct perf_hpp_fmt *fmt;
3603 	struct hpp_sort_entry *hse;
3604 
3605 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3606 		if (!perf_hpp__is_sort_entry(fmt))
3607 			continue;
3608 
3609 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3610 		if (hse->se->se_width_idx == idx) {
3611 			fmt->elide = elide;
3612 			break;
3613 		}
3614 	}
3615 }
3616 
3617 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3618 {
3619 	if (list && strlist__nr_entries(list) == 1) {
3620 		if (fp != NULL)
3621 			fprintf(fp, "# %s: %s\n", list_name,
3622 				strlist__entry(list, 0)->s);
3623 		return true;
3624 	}
3625 	return false;
3626 }
3627 
3628 static bool get_elide(int idx, FILE *output)
3629 {
3630 	switch (idx) {
3631 	case HISTC_SYMBOL:
3632 		return __get_elide(symbol_conf.sym_list, "symbol", output);
3633 	case HISTC_DSO:
3634 		return __get_elide(symbol_conf.dso_list, "dso", output);
3635 	case HISTC_COMM:
3636 		return __get_elide(symbol_conf.comm_list, "comm", output);
3637 	default:
3638 		break;
3639 	}
3640 
3641 	if (sort__mode != SORT_MODE__BRANCH)
3642 		return false;
3643 
3644 	switch (idx) {
3645 	case HISTC_SYMBOL_FROM:
3646 		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3647 	case HISTC_SYMBOL_TO:
3648 		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3649 	case HISTC_DSO_FROM:
3650 		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3651 	case HISTC_DSO_TO:
3652 		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3653 	case HISTC_ADDR_FROM:
3654 		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3655 	case HISTC_ADDR_TO:
3656 		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
3657 	default:
3658 		break;
3659 	}
3660 
3661 	return false;
3662 }
3663 
3664 void sort__setup_elide(FILE *output)
3665 {
3666 	struct perf_hpp_fmt *fmt;
3667 	struct hpp_sort_entry *hse;
3668 
3669 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3670 		if (!perf_hpp__is_sort_entry(fmt))
3671 			continue;
3672 
3673 		hse = container_of(fmt, struct hpp_sort_entry, hpp);
3674 		fmt->elide = get_elide(hse->se->se_width_idx, output);
3675 	}
3676 
3677 	/*
3678 	 * It makes no sense to elide all of the sort entries.
3679 	 * Just revert them so they show up again.
3680 	 */
3681 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3682 		if (!perf_hpp__is_sort_entry(fmt))
3683 			continue;
3684 
3685 		if (!fmt->elide)
3686 			return;
3687 	}
3688 
3689 	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3690 		if (!perf_hpp__is_sort_entry(fmt))
3691 			continue;
3692 
3693 		fmt->elide = false;
3694 	}
3695 }
3696 
3697 int output_field_add(struct perf_hpp_list *list, char *tok)
3698 {
3699 	unsigned int i;
3700 
3701 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3702 		struct sort_dimension *sd = &common_sort_dimensions[i];
3703 
3704 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3705 			continue;
3706 
3707 		return __sort_dimension__add_output(list, sd);
3708 	}
3709 
3710 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3711 		struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3712 
3713 		if (strncasecmp(tok, hd->name, strlen(tok)))
3714 			continue;
3715 
3716 		return __hpp_dimension__add_output(list, hd);
3717 	}
3718 
3719 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3720 		struct sort_dimension *sd = &bstack_sort_dimensions[i];
3721 
3722 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3723 			continue;
3724 
3725 		if (sort__mode != SORT_MODE__BRANCH)
3726 			return -EINVAL;
3727 
3728 		return __sort_dimension__add_output(list, sd);
3729 	}
3730 
3731 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3732 		struct sort_dimension *sd = &memory_sort_dimensions[i];
3733 
3734 		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3735 			continue;
3736 
3737 		if (sort__mode != SORT_MODE__MEMORY)
3738 			return -EINVAL;
3739 
3740 		return __sort_dimension__add_output(list, sd);
3741 	}
3742 
3743 	return -ESRCH;
3744 }
3745 
3746 static int setup_output_list(struct perf_hpp_list *list, char *str)
3747 {
3748 	char *tmp, *tok;
3749 	int ret = 0;
3750 
3751 	for (tok = strtok_r(str, ", ", &tmp);
3752 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3753 		ret = output_field_add(list, tok);
3754 		if (ret == -EINVAL) {
3755 			ui__error("Invalid --fields key: `%s'", tok);
3756 			break;
3757 		} else if (ret == -ESRCH) {
3758 			ui__error("Unknown --fields key: `%s'", tok);
3759 			break;
3760 		}
3761 	}
3762 
3763 	return ret;
3764 }
3765 
3766 void reset_dimensions(void)
3767 {
3768 	unsigned int i;
3769 
3770 	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3771 		common_sort_dimensions[i].taken = 0;
3772 
3773 	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3774 		hpp_sort_dimensions[i].taken = 0;
3775 
3776 	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3777 		bstack_sort_dimensions[i].taken = 0;
3778 
3779 	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3780 		memory_sort_dimensions[i].taken = 0;
3781 }
3782 
3783 bool is_strict_order(const char *order)
3784 {
3785 	return order && (*order != '+');
3786 }
3787 
3788 static int __setup_output_field(void)
3789 {
3790 	char *str, *strp;
3791 	int ret = -EINVAL;
3792 
3793 	if (field_order == NULL)
3794 		return 0;
3795 
3796 	strp = str = strdup(field_order);
3797 	if (str == NULL) {
3798 		pr_err("Not enough memory to setup output fields");
3799 		return -ENOMEM;
3800 	}
3801 
3802 	if (!is_strict_order(field_order))
3803 		strp++;
3804 
3805 	if (!strlen(strp)) {
3806 		ui__error("Invalid --fields key: `+'");
3807 		goto out;
3808 	}
3809 
3810 	ret = setup_output_list(&perf_hpp_list, strp);
3811 
3812 out:
3813 	free(str);
3814 	return ret;
3815 }
3816 
3817 int setup_sorting(struct evlist *evlist)
3818 {
3819 	int err;
3820 
3821 	err = __setup_sorting(evlist);
3822 	if (err < 0)
3823 		return err;
3824 
3825 	if (parent_pattern != default_parent_pattern) {
3826 		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3827 		if (err < 0)
3828 			return err;
3829 	}
3830 
3831 	reset_dimensions();
3832 
3833 	/*
3834 	 * perf diff doesn't use default hpp output fields.
3835 	 */
3836 	if (sort__mode != SORT_MODE__DIFF)
3837 		perf_hpp__init();
3838 
3839 	err = __setup_output_field();
3840 	if (err < 0)
3841 		return err;
3842 
3843 	/* copy sort keys to output fields */
3844 	perf_hpp__setup_output_field(&perf_hpp_list);
3845 	/* and then copy output fields to sort keys */
3846 	perf_hpp__append_sort_keys(&perf_hpp_list);
3847 
3848 	/* setup hists-specific output fields */
3849 	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3850 		return -1;
3851 
3852 	return 0;
3853 }
3854 
3855 void reset_output_field(void)
3856 {
3857 	perf_hpp_list.need_collapse = 0;
3858 	perf_hpp_list.parent = 0;
3859 	perf_hpp_list.sym = 0;
3860 	perf_hpp_list.dso = 0;
3861 
3862 	field_order = NULL;
3863 	sort_order = NULL;
3864 
3865 	reset_dimensions();
3866 	perf_hpp__reset_output_field(&perf_hpp_list);
3867 }
3868 
3869 #define INDENT (3*8 + 1)
3870 
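/*
 * Append one key name to the help string, wrapping onto a new indented
 * line once the current line reaches ~75 columns.
 */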
3871 static void add_key(struct strbuf *sb, const char *str, int *llen)
3872 {
3873 	if (!str)
3874 		return;
3875 
3876 	if (*llen >= 75) {
3877 		strbuf_addstr(sb, "\n\t\t\t ");
3878 		*llen = INDENT;
3879 	}
3880 	strbuf_addf(sb, " %s", str);
3881 	*llen += strlen(str) + 1;
3882 }
3883 
3884 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3885 			    int *llen)
3886 {
3887 	int i;
3888 
3889 	for (i = 0; i < n; i++)
3890 		add_key(sb, s[i].name, llen);
3891 }
3892 
3893 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3894 				int *llen)
3895 {
3896 	int i;
3897 
3898 	for (i = 0; i < n; i++)
3899 		add_key(sb, s[i].name, llen);
3900 }
3901 
3902 char *sort_help(const char *prefix)
3903 {
3904 	struct strbuf sb;
3905 	char *s;
3906 	int len = strlen(prefix) + INDENT;
3907 
3908 	strbuf_init(&sb, 300);
3909 	strbuf_addstr(&sb, prefix);
3910 	add_hpp_sort_string(&sb, hpp_sort_dimensions,
3911 			    ARRAY_SIZE(hpp_sort_dimensions), &len);
3912 	add_sort_string(&sb, common_sort_dimensions,
3913 			    ARRAY_SIZE(common_sort_dimensions), &len);
3914 	add_sort_string(&sb, bstack_sort_dimensions,
3915 			    ARRAY_SIZE(bstack_sort_dimensions), &len);
3916 	add_sort_string(&sb, memory_sort_dimensions,
3917 			    ARRAY_SIZE(memory_sort_dimensions), &len);
3918 	s = strbuf_detach(&sb, NULL);
3919 	strbuf_release(&sb);
3920 	return s;
3921 }
3922