// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <linux/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include <linux/kernel.h>

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the character used with the:
 *
 *  -t, --field-separator
 *
 * option, which uses a special separator character and doesn't pad with
 * spaces, replacing all occurrences of this separator in symbol names (and
 * other output) with a '.' character, so that it is the only invalid
 * separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

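/* Three-way compare used when one or both keys are NULL. */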
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
			map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

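/* Compare raw sample addresses, used when no symbol is resolved for either entry. */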
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined)
		return strcmp(sym_l->name, sym_r->name);

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->map, ams->al_addr, ams->sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};

/* --sort srcfile */

static char no_srcfile[1];

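/* Resolve the source file (without the :line suffix) for this entry, or no_srcfile. */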
static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort trace */

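/*
 * Format the tracepoint payload for this entry: a raw field dump when
 * symbol_conf.raw_trace is set, the pretty-printed event otherwise.
 * Returns a freshly allocated string.
 */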
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_event_info(&seq, evsel->tp_format, &rec);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info) return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map->prot & PROT_EXEC) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

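/* Average sample weight for the entry, guarding against division by zero. */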
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.phys_addr;
	if (right->mem_info)
		r = right->mem_info->daddr.phys_addr;

	return (int64_t)(r - l);
}

static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header = "Data Physical Address",
	.se_cmp = sort__phys_daddr_cmp,
	.se_snprintf = hist_entry__phys_daddr_snprintf,
	.se_width_idx = HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

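/* Copy str to p and return the position just past the copied string. */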
static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

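/* Column width needed for the transaction string, skipping bits marked skip_for_len. */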
hist_entry__transaction_len(void)1428 int hist_entry__transaction_len(void)
1429 {
1430 int i;
1431 int len = 0;
1432
1433 for (i = 0; txbits[i].name; i++) {
1434 if (!txbits[i].skip_for_len)
1435 len += strlen(txbits[i].name);
1436 }
1437 len += 4; /* :XX<space> */
1438 return len;
1439 }
1440
hist_entry__transaction_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1441 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1442 size_t size, unsigned int width)
1443 {
1444 u64 t = he->transaction;
1445 char buf[128];
1446 char *p = buf;
1447 int i;
1448
1449 buf[0] = 0;
1450 for (i = 0; txbits[i].name; i++)
1451 if (txbits[i].flag & t)
1452 p = add_str(p, txbits[i].name);
1453 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1454 p = add_str(p, "NEITHER ");
1455 if (t & PERF_TXN_ABORT_MASK) {
1456 sprintf(p, ":%" PRIx64,
1457 (t & PERF_TXN_ABORT_MASK) >>
1458 PERF_TXN_ABORT_SHIFT);
1459 p += strlen(p);
1460 }
1461
1462 return repsep_snprintf(bf, size, "%-*s", width, buf);
1463 }
1464
1465 struct sort_entry sort_transaction = {
1466 .se_header = "Transaction ",
1467 .se_cmp = sort__transaction_cmp,
1468 .se_snprintf = hist_entry__transaction_snprintf,
1469 .se_width_idx = HISTC_TRANSACTION,
1470 };
1471
1472 /* --sort symbol_size */
1473
_sort__sym_size_cmp(struct symbol * sym_l,struct symbol * sym_r)1474 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1475 {
1476 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1477 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1478
1479 return size_l < size_r ? -1 :
1480 size_l == size_r ? 0 : 1;
1481 }
1482
1483 static int64_t
sort__sym_size_cmp(struct hist_entry * left,struct hist_entry * right)1484 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1485 {
1486 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1487 }
1488
_hist_entry__sym_size_snprintf(struct symbol * sym,char * bf,size_t bf_size,unsigned int width)1489 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1490 size_t bf_size, unsigned int width)
1491 {
1492 if (sym)
1493 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1494
1495 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1496 }
1497
hist_entry__sym_size_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1498 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1499 size_t size, unsigned int width)
1500 {
1501 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
1502 }
1503
1504 struct sort_entry sort_sym_size = {
1505 .se_header = "Symbol size",
1506 .se_cmp = sort__sym_size_cmp,
1507 .se_snprintf = hist_entry__sym_size_snprintf,
1508 .se_width_idx = HISTC_SYM_SIZE,
1509 };
1510
1511 /* --sort dso_size */
1512
_sort__dso_size_cmp(struct map * map_l,struct map * map_r)1513 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
1514 {
1515 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
1516 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
1517
1518 return size_l < size_r ? -1 :
1519 size_l == size_r ? 0 : 1;
1520 }
1521
1522 static int64_t
sort__dso_size_cmp(struct hist_entry * left,struct hist_entry * right)1523 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
1524 {
1525 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
1526 }
1527
_hist_entry__dso_size_snprintf(struct map * map,char * bf,size_t bf_size,unsigned int width)1528 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
1529 size_t bf_size, unsigned int width)
1530 {
1531 if (map && map->dso)
1532 return repsep_snprintf(bf, bf_size, "%*d", width,
1533 map__size(map));
1534
1535 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1536 }
1537
hist_entry__dso_size_snprintf(struct hist_entry * he,char * bf,size_t size,unsigned int width)1538 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
1539 size_t size, unsigned int width)
1540 {
1541 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
1542 }
1543
1544 struct sort_entry sort_dso_size = {
1545 .se_header = "DSO size",
1546 .se_cmp = sort__dso_size_cmp,
1547 .se_snprintf = hist_entry__dso_size_snprintf,
1548 .se_width_idx = HISTC_DSO_SIZE,
1549 };
1550
1551
1552 struct sort_dimension {
1553 const char *name;
1554 struct sort_entry *entry;
1555 int taken;
1556 };
1557
1558 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1559
1560 static struct sort_dimension common_sort_dimensions[] = {
1561 DIM(SORT_PID, "pid", sort_thread),
1562 DIM(SORT_COMM, "comm", sort_comm),
1563 DIM(SORT_DSO, "dso", sort_dso),
1564 DIM(SORT_SYM, "symbol", sort_sym),
1565 DIM(SORT_PARENT, "parent", sort_parent),
1566 DIM(SORT_CPU, "cpu", sort_cpu),
1567 DIM(SORT_SOCKET, "socket", sort_socket),
1568 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1569 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1570 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1571 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1572 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1573 DIM(SORT_TRACE, "trace", sort_trace),
1574 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1575 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1576 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1577 };
1578
1579 #undef DIM
1580
1581 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1582
1583 static struct sort_dimension bstack_sort_dimensions[] = {
1584 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1585 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1586 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1587 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1588 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1589 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1590 DIM(SORT_ABORT, "abort", sort_abort),
1591 DIM(SORT_CYCLES, "cycles", sort_cycles),
1592 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1593 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1594 };
1595
1596 #undef DIM
1597
1598 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1599
1600 static struct sort_dimension memory_sort_dimensions[] = {
1601 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1602 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1603 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1604 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1605 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1606 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1607 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1608 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1609 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
1610 };
1611
1612 #undef DIM
1613
1614 struct hpp_dimension {
1615 const char *name;
1616 struct perf_hpp_fmt *fmt;
1617 int taken;
1618 };
1619
1620 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1621
1622 static struct hpp_dimension hpp_sort_dimensions[] = {
1623 DIM(PERF_HPP__OVERHEAD, "overhead"),
1624 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1625 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1626 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1627 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1628 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1629 DIM(PERF_HPP__SAMPLES, "sample"),
1630 DIM(PERF_HPP__PERIOD, "period"),
1631 };
1632
1633 #undef DIM
1634
1635 struct hpp_sort_entry {
1636 struct perf_hpp_fmt hpp;
1637 struct sort_entry *se;
1638 };
1639
perf_hpp__reset_sort_width(struct perf_hpp_fmt * fmt,struct hists * hists)1640 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1641 {
1642 struct hpp_sort_entry *hse;
1643
1644 if (!perf_hpp__is_sort_entry(fmt))
1645 return;
1646
1647 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1648 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1649 }
1650
__sort__hpp_header(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp,struct hists * hists,int line __maybe_unused,int * span __maybe_unused)1651 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1652 struct hists *hists, int line __maybe_unused,
1653 int *span __maybe_unused)
1654 {
1655 struct hpp_sort_entry *hse;
1656 size_t len = fmt->user_len;
1657
1658 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1659
1660 if (!len)
1661 len = hists__col_len(hists, hse->se->se_width_idx);
1662
1663 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1664 }
1665
__sort__hpp_width(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp __maybe_unused,struct hists * hists)1666 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1667 struct perf_hpp *hpp __maybe_unused,
1668 struct hists *hists)
1669 {
1670 struct hpp_sort_entry *hse;
1671 size_t len = fmt->user_len;
1672
1673 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1674
1675 if (!len)
1676 len = hists__col_len(hists, hse->se->se_width_idx);
1677
1678 return len;
1679 }
1680
__sort__hpp_entry(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp,struct hist_entry * he)1681 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1682 struct hist_entry *he)
1683 {
1684 struct hpp_sort_entry *hse;
1685 size_t len = fmt->user_len;
1686
1687 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1688
1689 if (!len)
1690 len = hists__col_len(he->hists, hse->se->se_width_idx);
1691
1692 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1693 }
1694
1695 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1696 struct hist_entry *a, struct hist_entry *b)
1697 {
1698 struct hpp_sort_entry *hse;
1699
1700 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1701 return hse->se->se_cmp(a, b);
1702 }
1703
1704 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1705 struct hist_entry *a, struct hist_entry *b)
1706 {
1707 struct hpp_sort_entry *hse;
1708 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1709
1710 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1711 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1712 return collapse_fn(a, b);
1713 }
1714
1715 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1716 struct hist_entry *a, struct hist_entry *b)
1717 {
1718 struct hpp_sort_entry *hse;
1719 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1720
1721 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1722 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1723 return sort_fn(a, b);
1724 }
1725
1726 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1727 {
1728 return format->header == __sort__hpp_header;
1729 }
1730
1731 #define MK_SORT_ENTRY_CHK(key) \
1732 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
1733 { \
1734 struct hpp_sort_entry *hse; \
1735 \
1736 if (!perf_hpp__is_sort_entry(fmt)) \
1737 return false; \
1738 \
1739 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
1740 return hse->se == &sort_ ## key ; \
1741 }
1742
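/*
 * For instance, MK_SORT_ENTRY_CHK(dso) expands to perf_hpp__is_dso_entry(),
 * which reports whether a format is the hpp wrapper around sort_dso.
 */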
1743 MK_SORT_ENTRY_CHK(trace)
1744 MK_SORT_ENTRY_CHK(srcline)
1745 MK_SORT_ENTRY_CHK(srcfile)
1746 MK_SORT_ENTRY_CHK(thread)
1747 MK_SORT_ENTRY_CHK(comm)
1748 MK_SORT_ENTRY_CHK(dso)
1749 MK_SORT_ENTRY_CHK(sym)
1750
1751
1752 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1753 {
1754 struct hpp_sort_entry *hse_a;
1755 struct hpp_sort_entry *hse_b;
1756
1757 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1758 return false;
1759
1760 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1761 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1762
1763 return hse_a->se == hse_b->se;
1764 }
1765
1766 static void hse_free(struct perf_hpp_fmt *fmt)
1767 {
1768 struct hpp_sort_entry *hse;
1769
1770 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1771 free(hse);
1772 }
1773
1774 static struct hpp_sort_entry *
1775 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
1776 {
1777 struct hpp_sort_entry *hse;
1778
1779 hse = malloc(sizeof(*hse));
1780 if (hse == NULL) {
1781 pr_err("Memory allocation failed\n");
1782 return NULL;
1783 }
1784
1785 hse->se = sd->entry;
1786 hse->hpp.name = sd->entry->se_header;
1787 hse->hpp.header = __sort__hpp_header;
1788 hse->hpp.width = __sort__hpp_width;
1789 hse->hpp.entry = __sort__hpp_entry;
1790 hse->hpp.color = NULL;
1791
1792 hse->hpp.cmp = __sort__hpp_cmp;
1793 hse->hpp.collapse = __sort__hpp_collapse;
1794 hse->hpp.sort = __sort__hpp_sort;
1795 hse->hpp.equal = __sort__hpp_equal;
1796 hse->hpp.free = hse_free;
1797
1798 INIT_LIST_HEAD(&hse->hpp.list);
1799 INIT_LIST_HEAD(&hse->hpp.sort_list);
1800 hse->hpp.elide = false;
1801 hse->hpp.len = 0;
1802 hse->hpp.user_len = 0;
1803 hse->hpp.level = level;
1804
1805 return hse;
1806 }
1807
1808 static void hpp_free(struct perf_hpp_fmt *fmt)
1809 {
1810 free(fmt);
1811 }
1812
1813 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
1814 int level)
1815 {
1816 struct perf_hpp_fmt *fmt;
1817
1818 fmt = memdup(hd->fmt, sizeof(*fmt));
1819 if (fmt) {
1820 INIT_LIST_HEAD(&fmt->list);
1821 INIT_LIST_HEAD(&fmt->sort_list);
1822 fmt->free = hpp_free;
1823 fmt->level = level;
1824 }
1825
1826 return fmt;
1827 }
1828
1829 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
1830 {
1831 struct perf_hpp_fmt *fmt;
1832 struct hpp_sort_entry *hse;
1833 int ret = -1;
1834 int r;
1835
1836 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1837 if (!perf_hpp__is_sort_entry(fmt))
1838 continue;
1839
1840 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1841 if (hse->se->se_filter == NULL)
1842 continue;
1843
1844 /*
1845 * A hist entry is filtered out if any of the sort keys in the hpp list
1846 * applies its filter; filters of a non-matching type are simply skipped.
1847 */
1848 r = hse->se->se_filter(he, type, arg);
1849 if (r >= 0) {
1850 if (ret < 0)
1851 ret = 0;
1852 ret |= r;
1853 }
1854 }
1855
1856 return ret;
1857 }
1858
1859 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
1860 struct perf_hpp_list *list,
1861 int level)
1862 {
1863 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
1864
1865 if (hse == NULL)
1866 return -1;
1867
1868 perf_hpp_list__register_sort_field(list, &hse->hpp);
1869 return 0;
1870 }
1871
1872 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
1873 struct perf_hpp_list *list)
1874 {
1875 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
1876
1877 if (hse == NULL)
1878 return -1;
1879
1880 perf_hpp_list__column_register(list, &hse->hpp);
1881 return 0;
1882 }
1883
1884 struct hpp_dynamic_entry {
1885 struct perf_hpp_fmt hpp;
1886 struct perf_evsel *evsel;
1887 struct format_field *field;
1888 unsigned dynamic_len;
1889 bool raw_trace;
1890 };
1891
1892 static int hde_width(struct hpp_dynamic_entry *hde)
1893 {
1894 if (!hde->hpp.len) {
1895 int len = hde->dynamic_len;
1896 int namelen = strlen(hde->field->name);
1897 int fieldlen = hde->field->size;
1898
1899 if (namelen > len)
1900 len = namelen;
1901
1902 if (!(hde->field->flags & FIELD_IS_STRING)) {
1903 /* length needed to print the value as a hex number */
1904 fieldlen = hde->field->size * 2 + 2;
1905 }
1906 if (fieldlen > len)
1907 len = fieldlen;
1908
1909 hde->hpp.len = len;
1910 }
1911 return hde->hpp.len;
1912 }
1913
1914 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1915 struct hist_entry *he)
1916 {
1917 char *str, *pos;
1918 struct format_field *field = hde->field;
1919 size_t namelen;
1920 bool last = false;
1921
1922 if (hde->raw_trace)
1923 return;
1924
1925 /* parse the pretty-print result and update the max field length */
1926 if (!he->trace_output)
1927 he->trace_output = get_trace_output(he);
1928
1929 namelen = strlen(field->name);
1930 str = he->trace_output;
1931
1932 while (str) {
1933 pos = strchr(str, ' ');
1934 if (pos == NULL) {
1935 last = true;
1936 pos = str + strlen(str);
1937 }
1938
1939 if (!strncmp(str, field->name, namelen)) {
1940 size_t len;
1941
1942 str += namelen + 1;
1943 len = pos - str;
1944
1945 if (len > hde->dynamic_len)
1946 hde->dynamic_len = len;
1947 break;
1948 }
1949
1950 if (last)
1951 str = NULL;
1952 else
1953 str = pos + 1;
1954 }
1955 }
1956
1957 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1958 struct hists *hists __maybe_unused,
1959 int line __maybe_unused,
1960 int *span __maybe_unused)
1961 {
1962 struct hpp_dynamic_entry *hde;
1963 size_t len = fmt->user_len;
1964
1965 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1966
1967 if (!len)
1968 len = hde_width(hde);
1969
1970 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1971 }
1972
1973 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1974 struct perf_hpp *hpp __maybe_unused,
1975 struct hists *hists __maybe_unused)
1976 {
1977 struct hpp_dynamic_entry *hde;
1978 size_t len = fmt->user_len;
1979
1980 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1981
1982 if (!len)
1983 len = hde_width(hde);
1984
1985 return len;
1986 }
1987
1988 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1989 {
1990 struct hpp_dynamic_entry *hde;
1991
1992 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1993
1994 return hists_to_evsel(hists) == hde->evsel;
1995 }
1996
1997 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1998 struct hist_entry *he)
1999 {
2000 struct hpp_dynamic_entry *hde;
2001 size_t len = fmt->user_len;
2002 char *str, *pos;
2003 struct format_field *field;
2004 size_t namelen;
2005 bool last = false;
2006 int ret;
2007
2008 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2009
2010 if (!len)
2011 len = hde_width(hde);
2012
2013 if (hde->raw_trace)
2014 goto raw_field;
2015
2016 if (!he->trace_output)
2017 he->trace_output = get_trace_output(he);
2018
2019 field = hde->field;
2020 namelen = strlen(field->name);
2021 str = he->trace_output;
2022
2023 while (str) {
2024 pos = strchr(str, ' ');
2025 if (pos == NULL) {
2026 last = true;
2027 pos = str + strlen(str);
2028 }
2029
2030 if (!strncmp(str, field->name, namelen)) {
2031 str += namelen + 1;
2032 str = strndup(str, pos - str);
2033
2034 if (str == NULL)
2035 return scnprintf(hpp->buf, hpp->size,
2036 "%*.*s", len, len, "ERROR");
2037 break;
2038 }
2039
2040 if (last)
2041 str = NULL;
2042 else
2043 str = pos + 1;
2044 }
2045
2046 if (str == NULL) {
2047 struct trace_seq seq;
2048 raw_field:
2049 trace_seq_init(&seq);
2050 tep_print_field(&seq, he->raw_data, hde->field);
2051 str = seq.buffer;
2052 }
2053
2054 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2055 free(str);
2056 return ret;
2057 }
2058
2059 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2060 struct hist_entry *a, struct hist_entry *b)
2061 {
2062 struct hpp_dynamic_entry *hde;
2063 struct format_field *field;
2064 unsigned offset, size;
2065
2066 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2067
2068 if (b == NULL) {
2069 update_dynamic_len(hde, a);
2070 return 0;
2071 }
2072
2073 field = hde->field;
2074 if (field->flags & FIELD_IS_DYNAMIC) {
2075 unsigned long long dyn;
2076
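/*
 * A dynamic array field stores a 32-bit descriptor in the sample:
 * the low 16 bits hold the offset of the data, the high 16 bits its size.
 */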
2077 tep_read_number_field(field, a->raw_data, &dyn);
2078 offset = dyn & 0xffff;
2079 size = (dyn >> 16) & 0xffff;
2080
2081 /* record max width for output */
2082 if (size > hde->dynamic_len)
2083 hde->dynamic_len = size;
2084 } else {
2085 offset = field->offset;
2086 size = field->size;
2087 }
2088
2089 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2090 }
2091
2092 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2093 {
2094 return fmt->cmp == __sort__hde_cmp;
2095 }
2096
2097 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2098 {
2099 struct hpp_dynamic_entry *hde_a;
2100 struct hpp_dynamic_entry *hde_b;
2101
2102 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2103 return false;
2104
2105 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2106 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2107
2108 return hde_a->field == hde_b->field;
2109 }
2110
2111 static void hde_free(struct perf_hpp_fmt *fmt)
2112 {
2113 struct hpp_dynamic_entry *hde;
2114
2115 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2116 free(hde);
2117 }
2118
2119 static struct hpp_dynamic_entry *
2120 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
2121 int level)
2122 {
2123 struct hpp_dynamic_entry *hde;
2124
2125 hde = malloc(sizeof(*hde));
2126 if (hde == NULL) {
2127 pr_debug("Memory allocation failed\n");
2128 return NULL;
2129 }
2130
2131 hde->evsel = evsel;
2132 hde->field = field;
2133 hde->dynamic_len = 0;
2134
2135 hde->hpp.name = field->name;
2136 hde->hpp.header = __sort__hde_header;
2137 hde->hpp.width = __sort__hde_width;
2138 hde->hpp.entry = __sort__hde_entry;
2139 hde->hpp.color = NULL;
2140
2141 hde->hpp.cmp = __sort__hde_cmp;
2142 hde->hpp.collapse = __sort__hde_cmp;
2143 hde->hpp.sort = __sort__hde_cmp;
2144 hde->hpp.equal = __sort__hde_equal;
2145 hde->hpp.free = hde_free;
2146
2147 INIT_LIST_HEAD(&hde->hpp.list);
2148 INIT_LIST_HEAD(&hde->hpp.sort_list);
2149 hde->hpp.elide = false;
2150 hde->hpp.len = 0;
2151 hde->hpp.user_len = 0;
2152 hde->hpp.level = level;
2153
2154 return hde;
2155 }
2156
2157 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2158 {
2159 struct perf_hpp_fmt *new_fmt = NULL;
2160
2161 if (perf_hpp__is_sort_entry(fmt)) {
2162 struct hpp_sort_entry *hse, *new_hse;
2163
2164 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2165 new_hse = memdup(hse, sizeof(*hse));
2166 if (new_hse)
2167 new_fmt = &new_hse->hpp;
2168 } else if (perf_hpp__is_dynamic_entry(fmt)) {
2169 struct hpp_dynamic_entry *hde, *new_hde;
2170
2171 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2172 new_hde = memdup(hde, sizeof(*hde));
2173 if (new_hde)
2174 new_fmt = &new_hde->hpp;
2175 } else {
2176 new_fmt = memdup(fmt, sizeof(*fmt));
2177 }
2178
2179 INIT_LIST_HEAD(&new_fmt->list);
2180 INIT_LIST_HEAD(&new_fmt->sort_list);
2181
2182 return new_fmt;
2183 }
2184
2185 static int parse_field_name(char *str, char **event, char **field, char **opt)
2186 {
2187 char *event_name, *field_name, *opt_name;
2188
2189 event_name = str;
2190 field_name = strchr(str, '.');
2191
2192 if (field_name) {
2193 *field_name++ = '\0';
2194 } else {
2195 event_name = NULL;
2196 field_name = str;
2197 }
2198
2199 opt_name = strchr(field_name, '/');
2200 if (opt_name)
2201 *opt_name++ = '\0';
2202
2203 *event = event_name;
2204 *field = field_name;
2205 *opt = opt_name;
2206
2207 return 0;
2208 }
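/*
 * Illustrative example: a token such as "sched:sched_switch.prev_comm/raw"
 * is split by parse_field_name() into event "sched:sched_switch", field
 * "prev_comm" and option "raw"; a token without a '.' is treated as a bare
 * field name with no event part.
 */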
2209
2210 /* find the matching evsel for a given event name. The event name can be:
2211 * 1. '%' + event index (e.g. '%1' for first event)
2212 * 2. full event name (e.g. sched:sched_switch)
2213 * 3. partial event name (should not contain ':')
2214 */
2215 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
2216 {
2217 struct perf_evsel *evsel = NULL;
2218 struct perf_evsel *pos;
2219 bool full_name;
2220
2221 /* case 1 */
2222 if (event_name[0] == '%') {
2223 int nr = strtol(event_name+1, NULL, 0);
2224
2225 if (nr > evlist->nr_entries)
2226 return NULL;
2227
2228 evsel = perf_evlist__first(evlist);
2229 while (--nr > 0)
2230 evsel = perf_evsel__next(evsel);
2231
2232 return evsel;
2233 }
2234
2235 full_name = !!strchr(event_name, ':');
2236 evlist__for_each_entry(evlist, pos) {
2237 /* case 2 */
2238 if (full_name && !strcmp(pos->name, event_name))
2239 return pos;
2240 /* case 3 */
2241 if (!full_name && strstr(pos->name, event_name)) {
2242 if (evsel) {
2243 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2244 event_name, evsel->name, pos->name);
2245 return NULL;
2246 }
2247 evsel = pos;
2248 }
2249 }
2250
2251 return evsel;
2252 }
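/*
 * Matching example: a full name like "sched:sched_switch" must compare equal
 * to an event name, while a partial name such as "switch" matches by
 * substring and the lookup fails if it is ambiguous across events.
 */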
2253
2254 static int __dynamic_dimension__add(struct perf_evsel *evsel,
2255 struct format_field *field,
2256 bool raw_trace, int level)
2257 {
2258 struct hpp_dynamic_entry *hde;
2259
2260 hde = __alloc_dynamic_entry(evsel, field, level);
2261 if (hde == NULL)
2262 return -ENOMEM;
2263
2264 hde->raw_trace = raw_trace;
2265
2266 perf_hpp__register_sort_field(&hde->hpp);
2267 return 0;
2268 }
2269
2270 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
2271 {
2272 int ret;
2273 struct format_field *field;
2274
2275 field = evsel->tp_format->format.fields;
2276 while (field) {
2277 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2278 if (ret < 0)
2279 return ret;
2280
2281 field = field->next;
2282 }
2283 return 0;
2284 }
2285
2286 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
2287 int level)
2288 {
2289 int ret;
2290 struct perf_evsel *evsel;
2291
2292 evlist__for_each_entry(evlist, evsel) {
2293 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2294 continue;
2295
2296 ret = add_evsel_fields(evsel, raw_trace, level);
2297 if (ret < 0)
2298 return ret;
2299 }
2300 return 0;
2301 }
2302
2303 static int add_all_matching_fields(struct perf_evlist *evlist,
2304 char *field_name, bool raw_trace, int level)
2305 {
2306 int ret = -ESRCH;
2307 struct perf_evsel *evsel;
2308 struct format_field *field;
2309
2310 evlist__for_each_entry(evlist, evsel) {
2311 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2312 continue;
2313
2314 field = tep_find_any_field(evsel->tp_format, field_name);
2315 if (field == NULL)
2316 continue;
2317
2318 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2319 if (ret < 0)
2320 break;
2321 }
2322 return ret;
2323 }
2324
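/*
 * Accepted token forms handled below:
 *   "trace_fields"        - all fields of every tracepoint event
 *   "<field>"             - the named field in any tracepoint event
 *   "<event>.<field>"     - one field of one event
 *   "<event>.*"           - all fields of one event
 * An optional "/raw" suffix disables pretty-printing for the field.
 */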
2325 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
2326 int level)
2327 {
2328 char *str, *event_name, *field_name, *opt_name;
2329 struct perf_evsel *evsel;
2330 struct format_field *field;
2331 bool raw_trace = symbol_conf.raw_trace;
2332 int ret = 0;
2333
2334 if (evlist == NULL)
2335 return -ENOENT;
2336
2337 str = strdup(tok);
2338 if (str == NULL)
2339 return -ENOMEM;
2340
2341 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2342 ret = -EINVAL;
2343 goto out;
2344 }
2345
2346 if (opt_name) {
2347 if (strcmp(opt_name, "raw")) {
2348 pr_debug("unsupported field option %s\n", opt_name);
2349 ret = -EINVAL;
2350 goto out;
2351 }
2352 raw_trace = true;
2353 }
2354
2355 if (!strcmp(field_name, "trace_fields")) {
2356 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2357 goto out;
2358 }
2359
2360 if (event_name == NULL) {
2361 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2362 goto out;
2363 }
2364
2365 evsel = find_evsel(evlist, event_name);
2366 if (evsel == NULL) {
2367 pr_debug("Cannot find event: %s\n", event_name);
2368 ret = -ENOENT;
2369 goto out;
2370 }
2371
2372 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2373 pr_debug("%s is not a tracepoint event\n", event_name);
2374 ret = -EINVAL;
2375 goto out;
2376 }
2377
2378 if (!strcmp(field_name, "*")) {
2379 ret = add_evsel_fields(evsel, raw_trace, level);
2380 } else {
2381 field = tep_find_any_field(evsel->tp_format, field_name);
2382 if (field == NULL) {
2383 pr_debug("Cannot find event field for %s.%s\n",
2384 event_name, field_name);
2385 ret = -ENOENT;
goto out;
2386 }
2387
2388 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2389 }
2390
2391 out:
2392 free(str);
2393 return ret;
2394 }
2395
2396 static int __sort_dimension__add(struct sort_dimension *sd,
2397 struct perf_hpp_list *list,
2398 int level)
2399 {
2400 if (sd->taken)
2401 return 0;
2402
2403 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2404 return -1;
2405
2406 if (sd->entry->se_collapse)
2407 list->need_collapse = 1;
2408
2409 sd->taken = 1;
2410
2411 return 0;
2412 }
2413
2414 static int __hpp_dimension__add(struct hpp_dimension *hd,
2415 struct perf_hpp_list *list,
2416 int level)
2417 {
2418 struct perf_hpp_fmt *fmt;
2419
2420 if (hd->taken)
2421 return 0;
2422
2423 fmt = __hpp_dimension__alloc_hpp(hd, level);
2424 if (!fmt)
2425 return -1;
2426
2427 hd->taken = 1;
2428 perf_hpp_list__register_sort_field(list, fmt);
2429 return 0;
2430 }
2431
2432 static int __sort_dimension__add_output(struct perf_hpp_list *list,
2433 struct sort_dimension *sd)
2434 {
2435 if (sd->taken)
2436 return 0;
2437
2438 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2439 return -1;
2440
2441 sd->taken = 1;
2442 return 0;
2443 }
2444
2445 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2446 struct hpp_dimension *hd)
2447 {
2448 struct perf_hpp_fmt *fmt;
2449
2450 if (hd->taken)
2451 return 0;
2452
2453 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2454 if (!fmt)
2455 return -1;
2456
2457 hd->taken = 1;
2458 perf_hpp_list__column_register(list, fmt);
2459 return 0;
2460 }
2461
2462 int hpp_dimension__add_output(unsigned col)
2463 {
2464 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2465 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2466 }
2467
2468 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2469 struct perf_evlist *evlist,
2470 int level)
2471 {
2472 unsigned int i;
2473
2474 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2475 struct sort_dimension *sd = &common_sort_dimensions[i];
2476
2477 if (strncasecmp(tok, sd->name, strlen(tok)))
2478 continue;
2479
2480 if (sd->entry == &sort_parent) {
2481 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2482 if (ret) {
2483 char err[BUFSIZ];
2484
2485 regerror(ret, &parent_regex, err, sizeof(err));
2486 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2487 return -EINVAL;
2488 }
2489 list->parent = 1;
2490 } else if (sd->entry == &sort_sym) {
2491 list->sym = 1;
2492 /*
2493 * perf diff displays the performance difference among
2494 * two or more perf.data files. Those files could come
2495 * from different binaries, so we should not compare
2496 * their IPs, but the symbol names.
2497 */
2498 if (sort__mode == SORT_MODE__DIFF)
2499 sd->entry->se_collapse = sort__sym_sort;
2500
2501 } else if (sd->entry == &sort_dso) {
2502 list->dso = 1;
2503 } else if (sd->entry == &sort_socket) {
2504 list->socket = 1;
2505 } else if (sd->entry == &sort_thread) {
2506 list->thread = 1;
2507 } else if (sd->entry == &sort_comm) {
2508 list->comm = 1;
2509 }
2510
2511 return __sort_dimension__add(sd, list, level);
2512 }
2513
2514 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2515 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2516
2517 if (strncasecmp(tok, hd->name, strlen(tok)))
2518 continue;
2519
2520 return __hpp_dimension__add(hd, list, level);
2521 }
2522
2523 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2524 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2525
2526 if (strncasecmp(tok, sd->name, strlen(tok)))
2527 continue;
2528
2529 if (sort__mode != SORT_MODE__BRANCH)
2530 return -EINVAL;
2531
2532 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2533 list->sym = 1;
2534
2535 __sort_dimension__add(sd, list, level);
2536 return 0;
2537 }
2538
2539 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2540 struct sort_dimension *sd = &memory_sort_dimensions[i];
2541
2542 if (strncasecmp(tok, sd->name, strlen(tok)))
2543 continue;
2544
2545 if (sort__mode != SORT_MODE__MEMORY)
2546 return -EINVAL;
2547
2548 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
2549 return -EINVAL;
2550
2551 if (sd->entry == &sort_mem_daddr_sym)
2552 list->sym = 1;
2553
2554 __sort_dimension__add(sd, list, level);
2555 return 0;
2556 }
2557
2558 if (!add_dynamic_entry(evlist, tok, level))
2559 return 0;
2560
2561 return -ESRCH;
2562 }
2563
2564 static int setup_sort_list(struct perf_hpp_list *list, char *str,
2565 struct perf_evlist *evlist)
2566 {
2567 char *tmp, *tok;
2568 int ret = 0;
2569 int level = 0;
2570 int next_level = 1;
2571 bool in_group = false;
2572
2573 do {
2574 tok = str;
2575 tmp = strpbrk(str, "{}, ");
2576 if (tmp) {
2577 if (in_group)
2578 next_level = level;
2579 else
2580 next_level = level + 1;
2581
2582 if (*tmp == '{')
2583 in_group = true;
2584 else if (*tmp == '}')
2585 in_group = false;
2586
2587 *tmp = '\0';
2588 str = tmp + 1;
2589 }
2590
2591 if (*tok) {
2592 ret = sort_dimension__add(list, tok, evlist, level);
2593 if (ret == -EINVAL) {
2594 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
2595 pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2596 else
2597 pr_err("Invalid --sort key: `%s'", tok);
2598 break;
2599 } else if (ret == -ESRCH) {
2600 pr_err("Unknown --sort key: `%s'", tok);
2601 break;
2602 }
2603 }
2604
2605 level = next_level;
2606 } while (tmp);
2607
2608 return ret;
2609 }
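/*
 * Grouping example: keys wrapped in braces share a hierarchy level, so
 * --sort '{comm,dso},sym' places comm and dso on one level and sym on the
 * next, whereas a plain comma-separated list advances the level per key.
 */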
2610
2611 static const char *get_default_sort_order(struct perf_evlist *evlist)
2612 {
2613 const char *default_sort_orders[] = {
2614 default_sort_order,
2615 default_branch_sort_order,
2616 default_mem_sort_order,
2617 default_top_sort_order,
2618 default_diff_sort_order,
2619 default_tracepoint_sort_order,
2620 };
2621 bool use_trace = true;
2622 struct perf_evsel *evsel;
2623
2624 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2625
2626 if (evlist == NULL || perf_evlist__empty(evlist))
2627 goto out_no_evlist;
2628
2629 evlist__for_each_entry(evlist, evsel) {
2630 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2631 use_trace = false;
2632 break;
2633 }
2634 }
2635
2636 if (use_trace) {
2637 sort__mode = SORT_MODE__TRACEPOINT;
2638 if (symbol_conf.raw_trace)
2639 return "trace_fields";
2640 }
2641 out_no_evlist:
2642 return default_sort_orders[sort__mode];
2643 }
2644
2645 static int setup_sort_order(struct perf_evlist *evlist)
2646 {
2647 char *new_sort_order;
2648
2649 /*
2650 * Append '+'-prefixed sort order to the default sort
2651 * order string.
2652 */
2653 if (!sort_order || is_strict_order(sort_order))
2654 return 0;
2655
2656 if (sort_order[1] == '\0') {
2657 pr_err("Invalid --sort key: `+'");
2658 return -EINVAL;
2659 }
2660
2661 /*
2662 * We allocate a new sort_order string, but we never free it,
2663 * because it is referenced throughout the rest of the code.
2664 */
2665 if (asprintf(&new_sort_order, "%s,%s",
2666 get_default_sort_order(evlist), sort_order + 1) < 0) {
2667 pr_err("Not enough memory to set up --sort");
2668 return -ENOMEM;
2669 }
2670
2671 sort_order = new_sort_order;
2672 return 0;
2673 }
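/*
 * For example, with the default order "comm,dso,symbol", a user-supplied
 * --sort '+cpu' results in the effective order "comm,dso,symbol,cpu".
 */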
2674
2675 /*
2676 * Prepends 'pre,' to 'str' if 'pre' is
2677 * not already part of 'str'.
2678 */
2679 static char *prefix_if_not_in(const char *pre, char *str)
2680 {
2681 char *n;
2682
2683 if (!str || strstr(str, pre))
2684 return str;
2685
2686 if (asprintf(&n, "%s,%s", pre, str) < 0)
2687 return NULL;
2688
2689 free(str);
2690 return n;
2691 }
2692
2693 static char *setup_overhead(char *keys)
2694 {
2695 if (sort__mode == SORT_MODE__DIFF)
2696 return keys;
2697
2698 keys = prefix_if_not_in("overhead", keys);
2699
2700 if (symbol_conf.cumulate_callchain)
2701 keys = prefix_if_not_in("overhead_children", keys);
2702
2703 return keys;
2704 }
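/*
 * For example, the key string "dso" becomes "overhead,dso", or
 * "overhead_children,overhead,dso" when callchain cumulation (--children)
 * is enabled; in diff mode the keys are returned unchanged.
 */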
2705
2706 static int __setup_sorting(struct perf_evlist *evlist)
2707 {
2708 char *str;
2709 const char *sort_keys;
2710 int ret = 0;
2711
2712 ret = setup_sort_order(evlist);
2713 if (ret)
2714 return ret;
2715
2716 sort_keys = sort_order;
2717 if (sort_keys == NULL) {
2718 if (is_strict_order(field_order)) {
2719 /*
2720 * If user specified field order but no sort order,
2721 * we'll honor it and not add default sort orders.
2722 */
2723 return 0;
2724 }
2725
2726 sort_keys = get_default_sort_order(evlist);
2727 }
2728
2729 str = strdup(sort_keys);
2730 if (str == NULL) {
2731 pr_err("Not enough memory to setup sort keys");
2732 return -ENOMEM;
2733 }
2734
2735 /*
2736 * Prepend overhead fields for backward compatibility.
2737 */
2738 if (!is_strict_order(field_order)) {
2739 str = setup_overhead(str);
2740 if (str == NULL) {
2741 pr_err("Not enough memory to setup overhead keys");
2742 return -ENOMEM;
2743 }
2744 }
2745
2746 ret = setup_sort_list(&perf_hpp_list, str, evlist);
2747
2748 free(str);
2749 return ret;
2750 }
2751
2752 void perf_hpp__set_elide(int idx, bool elide)
2753 {
2754 struct perf_hpp_fmt *fmt;
2755 struct hpp_sort_entry *hse;
2756
2757 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2758 if (!perf_hpp__is_sort_entry(fmt))
2759 continue;
2760
2761 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2762 if (hse->se->se_width_idx == idx) {
2763 fmt->elide = elide;
2764 break;
2765 }
2766 }
2767 }
2768
2769 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2770 {
2771 if (list && strlist__nr_entries(list) == 1) {
2772 if (fp != NULL)
2773 fprintf(fp, "# %s: %s\n", list_name,
2774 strlist__entry(list, 0)->s);
2775 return true;
2776 }
2777 return false;
2778 }
2779
2780 static bool get_elide(int idx, FILE *output)
2781 {
2782 switch (idx) {
2783 case HISTC_SYMBOL:
2784 return __get_elide(symbol_conf.sym_list, "symbol", output);
2785 case HISTC_DSO:
2786 return __get_elide(symbol_conf.dso_list, "dso", output);
2787 case HISTC_COMM:
2788 return __get_elide(symbol_conf.comm_list, "comm", output);
2789 default:
2790 break;
2791 }
2792
2793 if (sort__mode != SORT_MODE__BRANCH)
2794 return false;
2795
2796 switch (idx) {
2797 case HISTC_SYMBOL_FROM:
2798 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2799 case HISTC_SYMBOL_TO:
2800 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2801 case HISTC_DSO_FROM:
2802 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2803 case HISTC_DSO_TO:
2804 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2805 default:
2806 break;
2807 }
2808
2809 return false;
2810 }
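/*
 * A column is elided only when its filter list holds exactly one entry,
 * e.g. restricting a report to a single DSO makes the DSO column redundant,
 * so it is hidden and announced via a "# dso: ..." header line instead.
 */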
2811
2812 void sort__setup_elide(FILE *output)
2813 {
2814 struct perf_hpp_fmt *fmt;
2815 struct hpp_sort_entry *hse;
2816
2817 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2818 if (!perf_hpp__is_sort_entry(fmt))
2819 continue;
2820
2821 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2822 fmt->elide = get_elide(hse->se->se_width_idx, output);
2823 }
2824
2825 /*
2826 * It makes no sense to elide all of the sort entries.
2827 * Just revert them so they show up again.
2828 */
2829 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2830 if (!perf_hpp__is_sort_entry(fmt))
2831 continue;
2832
2833 if (!fmt->elide)
2834 return;
2835 }
2836
2837 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
2838 if (!perf_hpp__is_sort_entry(fmt))
2839 continue;
2840
2841 fmt->elide = false;
2842 }
2843 }
2844
2845 int output_field_add(struct perf_hpp_list *list, char *tok)
2846 {
2847 unsigned int i;
2848
2849 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2850 struct sort_dimension *sd = &common_sort_dimensions[i];
2851
2852 if (strncasecmp(tok, sd->name, strlen(tok)))
2853 continue;
2854
2855 return __sort_dimension__add_output(list, sd);
2856 }
2857
2858 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2859 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2860
2861 if (strncasecmp(tok, hd->name, strlen(tok)))
2862 continue;
2863
2864 return __hpp_dimension__add_output(list, hd);
2865 }
2866
2867 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2868 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2869
2870 if (strncasecmp(tok, sd->name, strlen(tok)))
2871 continue;
2872
2873 return __sort_dimension__add_output(list, sd);
2874 }
2875
2876 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2877 struct sort_dimension *sd = &memory_sort_dimensions[i];
2878
2879 if (strncasecmp(tok, sd->name, strlen(tok)))
2880 continue;
2881
2882 return __sort_dimension__add_output(list, sd);
2883 }
2884
2885 return -ESRCH;
2886 }
2887
2888 static int setup_output_list(struct perf_hpp_list *list, char *str)
2889 {
2890 char *tmp, *tok;
2891 int ret = 0;
2892
2893 for (tok = strtok_r(str, ", ", &tmp);
2894 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2895 ret = output_field_add(list, tok);
2896 if (ret == -EINVAL) {
2897 ui__error("Invalid --fields key: `%s'", tok);
2898 break;
2899 } else if (ret == -ESRCH) {
2900 ui__error("Unknown --fields key: `%s'", tok);
2901 break;
2902 }
2903 }
2904
2905 return ret;
2906 }
2907
2908 void reset_dimensions(void)
2909 {
2910 unsigned int i;
2911
2912 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2913 common_sort_dimensions[i].taken = 0;
2914
2915 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2916 hpp_sort_dimensions[i].taken = 0;
2917
2918 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2919 bstack_sort_dimensions[i].taken = 0;
2920
2921 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2922 memory_sort_dimensions[i].taken = 0;
2923 }
2924
2925 bool is_strict_order(const char *order)
2926 {
2927 return order && (*order != '+');
2928 }
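/*
 * For example, "comm,dso" is a strict order that replaces the defaults,
 * while "+cpu" is not: setup_sort_order() appends it to the default keys.
 */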
2929
2930 static int __setup_output_field(void)
2931 {
2932 char *str, *strp;
2933 int ret = -EINVAL;
2934
2935 if (field_order == NULL)
2936 return 0;
2937
2938 strp = str = strdup(field_order);
2939 if (str == NULL) {
2940 pr_err("Not enough memory to setup output fields");
2941 return -ENOMEM;
2942 }
2943
2944 if (!is_strict_order(field_order))
2945 strp++;
2946
2947 if (!strlen(strp)) {
2948 pr_err("Invalid --fields key: `+'");
2949 goto out;
2950 }
2951
2952 ret = setup_output_list(&perf_hpp_list, strp);
2953
2954 out:
2955 free(str);
2956 return ret;
2957 }
2958
2959 int setup_sorting(struct perf_evlist *evlist)
2960 {
2961 int err;
2962
2963 err = __setup_sorting(evlist);
2964 if (err < 0)
2965 return err;
2966
2967 if (parent_pattern != default_parent_pattern) {
2968 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
2969 if (err < 0)
2970 return err;
2971 }
2972
2973 reset_dimensions();
2974
2975 /*
2976 * perf diff doesn't use default hpp output fields.
2977 */
2978 if (sort__mode != SORT_MODE__DIFF)
2979 perf_hpp__init();
2980
2981 err = __setup_output_field();
2982 if (err < 0)
2983 return err;
2984
2985 /* copy sort keys to output fields */
2986 perf_hpp__setup_output_field(&perf_hpp_list);
2987 /* and then copy output fields to sort keys */
2988 perf_hpp__append_sort_keys(&perf_hpp_list);
2989
2990 /* setup hists-specific output fields */
2991 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
2992 return -1;
2993
2994 return 0;
2995 }
2996
2997 void reset_output_field(void)
2998 {
2999 perf_hpp_list.need_collapse = 0;
3000 perf_hpp_list.parent = 0;
3001 perf_hpp_list.sym = 0;
3002 perf_hpp_list.dso = 0;
3003
3004 field_order = NULL;
3005 sort_order = NULL;
3006
3007 reset_dimensions();
3008 perf_hpp__reset_output_field(&perf_hpp_list);
3009 }
3010