git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blob - tools/perf/util/sort.c
perf tools: Add 'trace_fields' dynamic sort key
1 #include <sys/mman.h>
2 #include "sort.h"
3 #include "hist.h"
4 #include "comm.h"
5 #include "symbol.h"
6 #include "evsel.h"
7 #include "evlist.h"
8 #include <traceevent/event-parse.h>
9
10 regex_t parent_regex;
11 const char default_parent_pattern[] = "^sys_|^do_page_fault";
12 const char *parent_pattern = default_parent_pattern;
13 const char default_sort_order[] = "comm,dso,symbol";
14 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
15 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
16 const char default_top_sort_order[] = "dso,symbol";
17 const char default_diff_sort_order[] = "dso,symbol";
18 const char *sort_order;
19 const char *field_order;
20 regex_t ignore_callees_regex;
21 int have_ignore_callees = 0;
22 int sort__need_collapse = 0;
23 int sort__has_parent = 0;
24 int sort__has_sym = 0;
25 int sort__has_dso = 0;
26 int sort__has_socket = 0;
27 enum sort_mode sort__mode = SORT_MODE__NORMAL;
28
29
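/*
 * Like vsnprintf(), but when a field separator is in effect any
 * occurrence of the separator character in the formatted output is
 * replaced with '.' so the columns stay machine-parseable
 * (e.g. with a ',' separator a comm of "a,b" is emitted as "a.b").
 */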
30 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
31 {
32 int n;
33 va_list ap;
34
35 va_start(ap, fmt);
36 n = vsnprintf(bf, size, fmt, ap);
37 if (symbol_conf.field_sep && n > 0) {
38 char *sep = bf;
39
40 while (1) {
41 sep = strchr(sep, *symbol_conf.field_sep);
42 if (sep == NULL)
43 break;
44 *sep = '.';
45 }
46 }
47 va_end(ap);
48
49 if (n >= (int)size)
50 return size - 1;
51 return n;
52 }
53
54 static int64_t cmp_null(const void *l, const void *r)
55 {
56 if (!l && !r)
57 return 0;
58 else if (!l)
59 return -1;
60 else
61 return 1;
62 }
63
64 /* --sort pid */
65
66 static int64_t
67 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
68 {
69 return right->thread->tid - left->thread->tid;
70 }
71
72 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
73 size_t size, unsigned int width)
74 {
75 const char *comm = thread__comm_str(he->thread);
76
77 width = max(7U, width) - 6;
78 return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
79 width, width, comm ?: "");
80 }
81
82 struct sort_entry sort_thread = {
83 .se_header = " Pid:Command",
84 .se_cmp = sort__thread_cmp,
85 .se_snprintf = hist_entry__thread_snprintf,
86 .se_width_idx = HISTC_THREAD,
87 };
88
89 /* --sort comm */
90
91 static int64_t
92 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
93 {
94 /* Compare the comm strings of the two entries */
95 return strcmp(comm__str(right->comm), comm__str(left->comm));
96 }
97
98 static int64_t
99 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
100 {
101 /* Compare the comm strings of the two entries */
102 return strcmp(comm__str(right->comm), comm__str(left->comm));
103 }
104
105 static int64_t
106 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
107 {
108 return strcmp(comm__str(right->comm), comm__str(left->comm));
109 }
110
111 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
112 size_t size, unsigned int width)
113 {
114 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
115 }
116
117 struct sort_entry sort_comm = {
118 .se_header = "Command",
119 .se_cmp = sort__comm_cmp,
120 .se_collapse = sort__comm_collapse,
121 .se_sort = sort__comm_sort,
122 .se_snprintf = hist_entry__comm_snprintf,
123 .se_width_idx = HISTC_COMM,
124 };
125
126 /* --sort dso */
127
128 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
129 {
130 struct dso *dso_l = map_l ? map_l->dso : NULL;
131 struct dso *dso_r = map_r ? map_r->dso : NULL;
132 const char *dso_name_l, *dso_name_r;
133
134 if (!dso_l || !dso_r)
135 return cmp_null(dso_r, dso_l);
136
137 if (verbose) {
138 dso_name_l = dso_l->long_name;
139 dso_name_r = dso_r->long_name;
140 } else {
141 dso_name_l = dso_l->short_name;
142 dso_name_r = dso_r->short_name;
143 }
144
145 return strcmp(dso_name_l, dso_name_r);
146 }
147
148 static int64_t
149 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
150 {
151 return _sort__dso_cmp(right->ms.map, left->ms.map);
152 }
153
154 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
155 size_t size, unsigned int width)
156 {
157 if (map && map->dso) {
158 const char *dso_name = !verbose ? map->dso->short_name :
159 map->dso->long_name;
160 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
161 }
162
163 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
164 }
165
166 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
167 size_t size, unsigned int width)
168 {
169 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
170 }
171
172 struct sort_entry sort_dso = {
173 .se_header = "Shared Object",
174 .se_cmp = sort__dso_cmp,
175 .se_snprintf = hist_entry__dso_snprintf,
176 .se_width_idx = HISTC_DSO,
177 };
178
179 /* --sort symbol */
180
181 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
182 {
183 return (int64_t)(right_ip - left_ip);
184 }
185
186 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
187 {
188 if (!sym_l || !sym_r)
189 return cmp_null(sym_l, sym_r);
190
191 if (sym_l == sym_r)
192 return 0;
193
194 if (sym_l->start != sym_r->start)
195 return (int64_t)(sym_r->start - sym_l->start);
196
197 return (int64_t)(sym_r->end - sym_l->end);
198 }
199
200 static int64_t
201 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
202 {
203 int64_t ret;
204
205 if (!left->ms.sym && !right->ms.sym)
206 return _sort__addr_cmp(left->ip, right->ip);
207
208 /*
209 * comparing symbol address alone is not enough since it's a
210 * relative address within a dso.
211 */
212 if (!sort__has_dso) {
213 ret = sort__dso_cmp(left, right);
214 if (ret != 0)
215 return ret;
216 }
217
218 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
219 }
220
221 static int64_t
222 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
223 {
224 if (!left->ms.sym || !right->ms.sym)
225 return cmp_null(left->ms.sym, right->ms.sym);
226
227 return strcmp(right->ms.sym->name, left->ms.sym->name);
228 }
229
230 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
231 u64 ip, char level, char *bf, size_t size,
232 unsigned int width)
233 {
234 size_t ret = 0;
235
236 if (verbose) {
237 char o = map ? dso__symtab_origin(map->dso) : '!';
238 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
239 BITS_PER_LONG / 4 + 2, ip, o);
240 }
241
242 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
243 if (sym && map) {
244 if (map->type == MAP__VARIABLE) {
245 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
246 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
247 ip - map->unmap_ip(map, sym->start));
248 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
249 width - ret, "");
250 } else {
251 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
252 width - ret,
253 sym->name);
254 }
255 } else {
256 size_t len = BITS_PER_LONG / 4;
257 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
258 len, ip);
259 ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
260 width - ret, "");
261 }
262
263 if (ret > width)
264 bf[width] = '\0';
265
266 return width;
267 }
268
269 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
270 size_t size, unsigned int width)
271 {
272 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
273 he->level, bf, size, width);
274 }
275
276 struct sort_entry sort_sym = {
277 .se_header = "Symbol",
278 .se_cmp = sort__sym_cmp,
279 .se_sort = sort__sym_sort,
280 .se_snprintf = hist_entry__sym_snprintf,
281 .se_width_idx = HISTC_SYMBOL,
282 };
283
284 /* --sort srcline */
285
286 static int64_t
287 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
288 {
289 if (!left->srcline) {
290 if (!left->ms.map)
291 left->srcline = SRCLINE_UNKNOWN;
292 else {
293 struct map *map = left->ms.map;
294 left->srcline = get_srcline(map->dso,
295 map__rip_2objdump(map, left->ip),
296 left->ms.sym, true);
297 }
298 }
299 if (!right->srcline) {
300 if (!right->ms.map)
301 right->srcline = SRCLINE_UNKNOWN;
302 else {
303 struct map *map = right->ms.map;
304 right->srcline = get_srcline(map->dso,
305 map__rip_2objdump(map, right->ip),
306 right->ms.sym, true);
307 }
308 }
309 return strcmp(right->srcline, left->srcline);
310 }
311
312 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
313 size_t size, unsigned int width)
314 {
315 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
316 }
317
318 struct sort_entry sort_srcline = {
319 .se_header = "Source:Line",
320 .se_cmp = sort__srcline_cmp,
321 .se_snprintf = hist_entry__srcline_snprintf,
322 .se_width_idx = HISTC_SRCLINE,
323 };
324
325 /* --sort srcfile */
326
327 static char no_srcfile[1];
328
329 static char *get_srcfile(struct hist_entry *e)
330 {
331 char *sf, *p;
332 struct map *map = e->ms.map;
333
334 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
335 e->ms.sym, false, true);
336 if (!strcmp(sf, SRCLINE_UNKNOWN))
337 return no_srcfile;
338 p = strchr(sf, ':');
339 if (p && *sf) {
340 *p = 0;
341 return sf;
342 }
343 free(sf);
344 return no_srcfile;
345 }
346
347 static int64_t
348 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
349 {
350 if (!left->srcfile) {
351 if (!left->ms.map)
352 left->srcfile = no_srcfile;
353 else
354 left->srcfile = get_srcfile(left);
355 }
356 if (!right->srcfile) {
357 if (!right->ms.map)
358 right->srcfile = no_srcfile;
359 else
360 right->srcfile = get_srcfile(right);
361 }
362 return strcmp(right->srcfile, left->srcfile);
363 }
364
365 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
366 size_t size, unsigned int width)
367 {
368 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
369 }
370
371 struct sort_entry sort_srcfile = {
372 .se_header = "Source File",
373 .se_cmp = sort__srcfile_cmp,
374 .se_snprintf = hist_entry__srcfile_snprintf,
375 .se_width_idx = HISTC_SRCFILE,
376 };
377
378 /* --sort parent */
379
380 static int64_t
381 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
382 {
383 struct symbol *sym_l = left->parent;
384 struct symbol *sym_r = right->parent;
385
386 if (!sym_l || !sym_r)
387 return cmp_null(sym_l, sym_r);
388
389 return strcmp(sym_r->name, sym_l->name);
390 }
391
392 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
393 size_t size, unsigned int width)
394 {
395 return repsep_snprintf(bf, size, "%-*.*s", width, width,
396 he->parent ? he->parent->name : "[other]");
397 }
398
399 struct sort_entry sort_parent = {
400 .se_header = "Parent symbol",
401 .se_cmp = sort__parent_cmp,
402 .se_snprintf = hist_entry__parent_snprintf,
403 .se_width_idx = HISTC_PARENT,
404 };
405
406 /* --sort cpu */
407
408 static int64_t
409 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
410 {
411 return right->cpu - left->cpu;
412 }
413
414 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
415 size_t size, unsigned int width)
416 {
417 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
418 }
419
420 struct sort_entry sort_cpu = {
421 .se_header = "CPU",
422 .se_cmp = sort__cpu_cmp,
423 .se_snprintf = hist_entry__cpu_snprintf,
424 .se_width_idx = HISTC_CPU,
425 };
426
427 /* --sort socket */
428
429 static int64_t
430 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
431 {
432 return right->socket - left->socket;
433 }
434
435 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
436 size_t size, unsigned int width)
437 {
438 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
439 }
440
441 struct sort_entry sort_socket = {
442 .se_header = "Socket",
443 .se_cmp = sort__socket_cmp,
444 .se_snprintf = hist_entry__socket_snprintf,
445 .se_width_idx = HISTC_SOCKET,
446 };
447
448 /* --sort trace */
449
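/*
 * Render the tracepoint payload of @he: either a raw dump of all
 * fields (symbol_conf.raw_trace) or the event's pretty-printed
 * output. Returns a heap-allocated buffer owned by the caller.
 */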
450 static char *get_trace_output(struct hist_entry *he)
451 {
452 struct trace_seq seq;
453 struct perf_evsel *evsel;
454 struct pevent_record rec = {
455 .data = he->raw_data,
456 .size = he->raw_size,
457 };
458
459 evsel = hists_to_evsel(he->hists);
460
461 trace_seq_init(&seq);
462 if (symbol_conf.raw_trace) {
463 pevent_print_fields(&seq, he->raw_data, he->raw_size,
464 evsel->tp_format);
465 } else {
466 pevent_event_info(&seq, evsel->tp_format, &rec);
467 }
468 return seq.buffer;
469 }
470
471 static int64_t
472 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
473 {
474 struct perf_evsel *evsel;
475
476 evsel = hists_to_evsel(left->hists);
477 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
478 return 0;
479
480 if (left->trace_output == NULL)
481 left->trace_output = get_trace_output(left);
482 if (right->trace_output == NULL)
483 right->trace_output = get_trace_output(right);
484
485 hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
486 hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));
487
488 return strcmp(right->trace_output, left->trace_output);
489 }
490
491 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
492 size_t size, unsigned int width)
493 {
494 struct perf_evsel *evsel;
495
496 evsel = hists_to_evsel(he->hists);
497 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
498 return scnprintf(bf, size, "%-*.*s", width, width, "N/A");
499
500 if (he->trace_output == NULL)
501 he->trace_output = get_trace_output(he);
502 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
503 }
504
505 struct sort_entry sort_trace = {
506 .se_header = "Trace output",
507 .se_cmp = sort__trace_cmp,
508 .se_snprintf = hist_entry__trace_snprintf,
509 .se_width_idx = HISTC_TRACE,
510 };
511
512 /* sort keys for branch stacks */
513
514 static int64_t
515 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
516 {
517 if (!left->branch_info || !right->branch_info)
518 return cmp_null(left->branch_info, right->branch_info);
519
520 return _sort__dso_cmp(left->branch_info->from.map,
521 right->branch_info->from.map);
522 }
523
524 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
525 size_t size, unsigned int width)
526 {
527 if (he->branch_info)
528 return _hist_entry__dso_snprintf(he->branch_info->from.map,
529 bf, size, width);
530 else
531 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
532 }
533
534 static int64_t
535 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
536 {
537 if (!left->branch_info || !right->branch_info)
538 return cmp_null(left->branch_info, right->branch_info);
539
540 return _sort__dso_cmp(left->branch_info->to.map,
541 right->branch_info->to.map);
542 }
543
544 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
545 size_t size, unsigned int width)
546 {
547 if (he->branch_info)
548 return _hist_entry__dso_snprintf(he->branch_info->to.map,
549 bf, size, width);
550 else
551 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
552 }
553
554 static int64_t
555 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
556 {
557 struct addr_map_symbol *from_l;
558 struct addr_map_symbol *from_r;
559
560 if (!left->branch_info || !right->branch_info)
561 return cmp_null(left->branch_info, right->branch_info);
562
563 from_l = &left->branch_info->from;
564 from_r = &right->branch_info->from;
565
566 if (!from_l->sym && !from_r->sym)
567 return _sort__addr_cmp(from_l->addr, from_r->addr);
568
569 return _sort__sym_cmp(from_l->sym, from_r->sym);
570 }
571
572 static int64_t
573 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
574 {
575 struct addr_map_symbol *to_l, *to_r;
576
577 if (!left->branch_info || !right->branch_info)
578 return cmp_null(left->branch_info, right->branch_info);
579
580 to_l = &left->branch_info->to;
581 to_r = &right->branch_info->to;
582
583 if (!to_l->sym && !to_r->sym)
584 return _sort__addr_cmp(to_l->addr, to_r->addr);
585
586 return _sort__sym_cmp(to_l->sym, to_r->sym);
587 }
588
589 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
590 size_t size, unsigned int width)
591 {
592 if (he->branch_info) {
593 struct addr_map_symbol *from = &he->branch_info->from;
594
595 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
596 he->level, bf, size, width);
597 }
598
599 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
600 }
601
602 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
603 size_t size, unsigned int width)
604 {
605 if (he->branch_info) {
606 struct addr_map_symbol *to = &he->branch_info->to;
607
608 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
609 he->level, bf, size, width);
610 }
611
612 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
613 }
614
615 struct sort_entry sort_dso_from = {
616 .se_header = "Source Shared Object",
617 .se_cmp = sort__dso_from_cmp,
618 .se_snprintf = hist_entry__dso_from_snprintf,
619 .se_width_idx = HISTC_DSO_FROM,
620 };
621
622 struct sort_entry sort_dso_to = {
623 .se_header = "Target Shared Object",
624 .se_cmp = sort__dso_to_cmp,
625 .se_snprintf = hist_entry__dso_to_snprintf,
626 .se_width_idx = HISTC_DSO_TO,
627 };
628
629 struct sort_entry sort_sym_from = {
630 .se_header = "Source Symbol",
631 .se_cmp = sort__sym_from_cmp,
632 .se_snprintf = hist_entry__sym_from_snprintf,
633 .se_width_idx = HISTC_SYMBOL_FROM,
634 };
635
636 struct sort_entry sort_sym_to = {
637 .se_header = "Target Symbol",
638 .se_cmp = sort__sym_to_cmp,
639 .se_snprintf = hist_entry__sym_to_snprintf,
640 .se_width_idx = HISTC_SYMBOL_TO,
641 };
642
643 static int64_t
644 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
645 {
646 unsigned char mp, p;
647
648 if (!left->branch_info || !right->branch_info)
649 return cmp_null(left->branch_info, right->branch_info);
650
651 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
652 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
653 return mp || p;
654 }
655
656 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
657 size_t size, unsigned int width)
{
658 static const char *out = "N/A";
659
660 if (he->branch_info) {
661 if (he->branch_info->flags.predicted)
662 out = "N";
663 else if (he->branch_info->flags.mispred)
664 out = "Y";
665 }
666
667 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
668 }
669
670 static int64_t
671 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
672 {
673 return left->branch_info->flags.cycles -
674 right->branch_info->flags.cycles;
675 }
676
677 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
678 size_t size, unsigned int width)
679 {
680 if (he->branch_info->flags.cycles == 0)
681 return repsep_snprintf(bf, size, "%-*s", width, "-");
682 return repsep_snprintf(bf, size, "%-*hd", width,
683 he->branch_info->flags.cycles);
684 }
685
686 struct sort_entry sort_cycles = {
687 .se_header = "Basic Block Cycles",
688 .se_cmp = sort__cycles_cmp,
689 .se_snprintf = hist_entry__cycles_snprintf,
690 .se_width_idx = HISTC_CYCLES,
691 };
692
693 /* --sort daddr_sym */
694 static int64_t
695 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
696 {
697 uint64_t l = 0, r = 0;
698
699 if (left->mem_info)
700 l = left->mem_info->daddr.addr;
701 if (right->mem_info)
702 r = right->mem_info->daddr.addr;
703
704 return (int64_t)(r - l);
705 }
706
707 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
708 size_t size, unsigned int width)
709 {
710 uint64_t addr = 0;
711 struct map *map = NULL;
712 struct symbol *sym = NULL;
713
714 if (he->mem_info) {
715 addr = he->mem_info->daddr.addr;
716 map = he->mem_info->daddr.map;
717 sym = he->mem_info->daddr.sym;
718 }
719 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
720 width);
721 }
722
723 static int64_t
724 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
725 {
726 uint64_t l = 0, r = 0;
727
728 if (left->mem_info)
729 l = left->mem_info->iaddr.addr;
730 if (right->mem_info)
731 r = right->mem_info->iaddr.addr;
732
733 return (int64_t)(r - l);
734 }
735
736 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
737 size_t size, unsigned int width)
738 {
739 uint64_t addr = 0;
740 struct map *map = NULL;
741 struct symbol *sym = NULL;
742
743 if (he->mem_info) {
744 addr = he->mem_info->iaddr.addr;
745 map = he->mem_info->iaddr.map;
746 sym = he->mem_info->iaddr.sym;
747 }
748 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
749 width);
750 }
751
752 static int64_t
753 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
754 {
755 struct map *map_l = NULL;
756 struct map *map_r = NULL;
757
758 if (left->mem_info)
759 map_l = left->mem_info->daddr.map;
760 if (right->mem_info)
761 map_r = right->mem_info->daddr.map;
762
763 return _sort__dso_cmp(map_l, map_r);
764 }
765
766 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
767 size_t size, unsigned int width)
768 {
769 struct map *map = NULL;
770
771 if (he->mem_info)
772 map = he->mem_info->daddr.map;
773
774 return _hist_entry__dso_snprintf(map, bf, size, width);
775 }
776
777 static int64_t
778 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
779 {
780 union perf_mem_data_src data_src_l;
781 union perf_mem_data_src data_src_r;
782
783 if (left->mem_info)
784 data_src_l = left->mem_info->data_src;
785 else
786 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
787
788 if (right->mem_info)
789 data_src_r = right->mem_info->data_src;
790 else
791 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
792
793 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
794 }
795
796 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
797 size_t size, unsigned int width)
798 {
799 const char *out;
800 u64 mask = PERF_MEM_LOCK_NA;
801
802 if (he->mem_info)
803 mask = he->mem_info->data_src.mem_lock;
804
805 if (mask & PERF_MEM_LOCK_NA)
806 out = "N/A";
807 else if (mask & PERF_MEM_LOCK_LOCKED)
808 out = "Yes";
809 else
810 out = "No";
811
812 return repsep_snprintf(bf, size, "%-*s", width, out);
813 }
814
815 static int64_t
816 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
817 {
818 union perf_mem_data_src data_src_l;
819 union perf_mem_data_src data_src_r;
820
821 if (left->mem_info)
822 data_src_l = left->mem_info->data_src;
823 else
824 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
825
826 if (right->mem_info)
827 data_src_r = right->mem_info->data_src;
828 else
829 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
830
831 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
832 }
833
834 static const char * const tlb_access[] = {
835 "N/A",
836 "HIT",
837 "MISS",
838 "L1",
839 "L2",
840 "Walker",
841 "Fault",
842 };
843 #define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))
844
845 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
846 size_t size, unsigned int width)
847 {
848 char out[64];
849 size_t sz = sizeof(out) - 1; /* -1 for null termination */
850 size_t l = 0, i;
851 u64 m = PERF_MEM_TLB_NA;
852 u64 hit, miss;
853
854 out[0] = '\0';
855
856 if (he->mem_info)
857 m = he->mem_info->data_src.mem_dtlb;
858
859 hit = m & PERF_MEM_TLB_HIT;
860 miss = m & PERF_MEM_TLB_MISS;
861
862 /* already taken care of */
863 m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
864
865 for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
866 if (!(m & 0x1))
867 continue;
868 if (l) {
869 strcat(out, " or ");
870 l += 4;
871 }
872 strncat(out, tlb_access[i], sz - l);
873 l += strlen(tlb_access[i]);
874 }
875 if (*out == '\0')
876 strcpy(out, "N/A");
877 if (hit)
878 strncat(out, " hit", sz - l);
879 if (miss)
880 strncat(out, " miss", sz - l);
881
882 return repsep_snprintf(bf, size, "%-*s", width, out);
883 }
884
885 static int64_t
886 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
887 {
888 union perf_mem_data_src data_src_l;
889 union perf_mem_data_src data_src_r;
890
891 if (left->mem_info)
892 data_src_l = left->mem_info->data_src;
893 else
894 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
895
896 if (right->mem_info)
897 data_src_r = right->mem_info->data_src;
898 else
899 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
900
901 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
902 }
903
904 static const char * const mem_lvl[] = {
905 "N/A",
906 "HIT",
907 "MISS",
908 "L1",
909 "LFB",
910 "L2",
911 "L3",
912 "Local RAM",
913 "Remote RAM (1 hop)",
914 "Remote RAM (2 hops)",
915 "Remote Cache (1 hop)",
916 "Remote Cache (2 hops)",
917 "I/O",
918 "Uncached",
919 };
920 #define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))
921
922 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
923 size_t size, unsigned int width)
924 {
925 char out[64];
926 size_t sz = sizeof(out) - 1; /* -1 for null termination */
927 size_t i, l = 0;
928 u64 m = PERF_MEM_LVL_NA;
929 u64 hit, miss;
930
931 if (he->mem_info)
932 m = he->mem_info->data_src.mem_lvl;
933
934 out[0] = '\0';
935
936 hit = m & PERF_MEM_LVL_HIT;
937 miss = m & PERF_MEM_LVL_MISS;
938
939 /* already taken care of */
940 m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
941
942 for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
943 if (!(m & 0x1))
944 continue;
945 if (l) {
946 strcat(out, " or ");
947 l += 4;
948 }
949 strncat(out, mem_lvl[i], sz - l);
950 l += strlen(mem_lvl[i]);
951 }
952 if (*out == '\0')
953 strcpy(out, "N/A");
954 if (hit)
955 strncat(out, " hit", sz - l);
956 if (miss)
957 strncat(out, " miss", sz - l);
958
959 return repsep_snprintf(bf, size, "%-*s", width, out);
960 }
961
962 static int64_t
963 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
964 {
965 union perf_mem_data_src data_src_l;
966 union perf_mem_data_src data_src_r;
967
968 if (left->mem_info)
969 data_src_l = left->mem_info->data_src;
970 else
971 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
972
973 if (right->mem_info)
974 data_src_r = right->mem_info->data_src;
975 else
976 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
977
978 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
979 }
980
981 static const char * const snoop_access[] = {
982 "N/A",
983 "None",
984 "Miss",
985 "Hit",
986 "HitM",
987 };
988 #define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))
989
990 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
991 size_t size, unsigned int width)
992 {
993 char out[64];
994 size_t sz = sizeof(out) - 1; /* -1 for null termination */
995 size_t i, l = 0;
996 u64 m = PERF_MEM_SNOOP_NA;
997
998 out[0] = '\0';
999
1000 if (he->mem_info)
1001 m = he->mem_info->data_src.mem_snoop;
1002
1003 for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
1004 if (!(m & 0x1))
1005 continue;
1006 if (l) {
1007 strcat(out, " or ");
1008 l += 4;
1009 }
1010 strncat(out, snoop_access[i], sz - l);
1011 l += strlen(snoop_access[i]);
1012 }
1013
1014 if (*out == '\0')
1015 strcpy(out, "N/A");
1016
1017 return repsep_snprintf(bf, size, "%-*s", width, out);
1018 }
1019
1020 static inline u64 cl_address(u64 address)
1021 {
1022 /* return the cacheline of the address */
1023 return (address & ~(cacheline_size - 1));
1024 }
1025
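/*
 * Group samples that touch the same data cache line: order by cpumode,
 * then by the identity of the backing mapping (maj/min/ino/generation,
 * plus pid for anonymous userspace maps), then by the cache-line-aligned
 * address.
 */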
1026 static int64_t
1027 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1028 {
1029 u64 l, r;
1030 struct map *l_map, *r_map;
1031
1032 if (!left->mem_info) return -1;
1033 if (!right->mem_info) return 1;
1034
1035 /* group event types together */
1036 if (left->cpumode > right->cpumode) return -1;
1037 if (left->cpumode < right->cpumode) return 1;
1038
1039 l_map = left->mem_info->daddr.map;
1040 r_map = right->mem_info->daddr.map;
1041
1042 /* if both are NULL, jump to sort on al_addr instead */
1043 if (!l_map && !r_map)
1044 goto addr;
1045
1046 if (!l_map) return -1;
1047 if (!r_map) return 1;
1048
1049 if (l_map->maj > r_map->maj) return -1;
1050 if (l_map->maj < r_map->maj) return 1;
1051
1052 if (l_map->min > r_map->min) return -1;
1053 if (l_map->min < r_map->min) return 1;
1054
1055 if (l_map->ino > r_map->ino) return -1;
1056 if (l_map->ino < r_map->ino) return 1;
1057
1058 if (l_map->ino_generation > r_map->ino_generation) return -1;
1059 if (l_map->ino_generation < r_map->ino_generation) return 1;
1060
1061 /*
1062 * Addresses with no major/minor numbers are assumed to be
1063 * anonymous in userspace. Sort those on pid then address.
1064 *
1065 * The kernel and non-zero major/minor mapped areas are
1066 * assumed to be unity mapped. Sort those on address.
1067 */
1068
1069 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1070 (!(l_map->flags & MAP_SHARED)) &&
1071 !l_map->maj && !l_map->min && !l_map->ino &&
1072 !l_map->ino_generation) {
1073 /* userspace anonymous */
1074
1075 if (left->thread->pid_ > right->thread->pid_) return -1;
1076 if (left->thread->pid_ < right->thread->pid_) return 1;
1077 }
1078
1079 addr:
1080 /* al_addr does all the right addr - start + offset calculations */
1081 l = cl_address(left->mem_info->daddr.al_addr);
1082 r = cl_address(right->mem_info->daddr.al_addr);
1083
1084 if (l > r) return -1;
1085 if (l < r) return 1;
1086
1087 return 0;
1088 }
1089
1090 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1091 size_t size, unsigned int width)
1092 {
1093
1094 uint64_t addr = 0;
1095 struct map *map = NULL;
1096 struct symbol *sym = NULL;
1097 char level = he->level;
1098
1099 if (he->mem_info) {
1100 addr = cl_address(he->mem_info->daddr.al_addr);
1101 map = he->mem_info->daddr.map;
1102 sym = he->mem_info->daddr.sym;
1103
1104 /* print [s] for shared data mmaps */
1105 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1106 map && (map->type == MAP__VARIABLE) &&
1107 (map->flags & MAP_SHARED) &&
1108 (map->maj || map->min || map->ino ||
1109 map->ino_generation))
1110 level = 's';
1111 else if (!map)
1112 level = 'X';
1113 }
1114 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
1115 width);
1116 }
1117
1118 struct sort_entry sort_mispredict = {
1119 .se_header = "Branch Mispredicted",
1120 .se_cmp = sort__mispredict_cmp,
1121 .se_snprintf = hist_entry__mispredict_snprintf,
1122 .se_width_idx = HISTC_MISPREDICT,
1123 };
1124
1125 static u64 he_weight(struct hist_entry *he)
1126 {
1127 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
1128 }
1129
1130 static int64_t
1131 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1132 {
1133 return he_weight(left) - he_weight(right);
1134 }
1135
1136 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1137 size_t size, unsigned int width)
1138 {
1139 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
1140 }
1141
1142 struct sort_entry sort_local_weight = {
1143 .se_header = "Local Weight",
1144 .se_cmp = sort__local_weight_cmp,
1145 .se_snprintf = hist_entry__local_weight_snprintf,
1146 .se_width_idx = HISTC_LOCAL_WEIGHT,
1147 };
1148
1149 static int64_t
1150 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
1151 {
1152 return left->stat.weight - right->stat.weight;
1153 }
1154
1155 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1156 size_t size, unsigned int width)
1157 {
1158 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
1159 }
1160
1161 struct sort_entry sort_global_weight = {
1162 .se_header = "Weight",
1163 .se_cmp = sort__global_weight_cmp,
1164 .se_snprintf = hist_entry__global_weight_snprintf,
1165 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1166 };
1167
1168 struct sort_entry sort_mem_daddr_sym = {
1169 .se_header = "Data Symbol",
1170 .se_cmp = sort__daddr_cmp,
1171 .se_snprintf = hist_entry__daddr_snprintf,
1172 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1173 };
1174
1175 struct sort_entry sort_mem_iaddr_sym = {
1176 .se_header = "Code Symbol",
1177 .se_cmp = sort__iaddr_cmp,
1178 .se_snprintf = hist_entry__iaddr_snprintf,
1179 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1180 };
1181
1182 struct sort_entry sort_mem_daddr_dso = {
1183 .se_header = "Data Object",
1184 .se_cmp = sort__dso_daddr_cmp,
1185 .se_snprintf = hist_entry__dso_daddr_snprintf,
1186 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1187 };
1188
1189 struct sort_entry sort_mem_locked = {
1190 .se_header = "Locked",
1191 .se_cmp = sort__locked_cmp,
1192 .se_snprintf = hist_entry__locked_snprintf,
1193 .se_width_idx = HISTC_MEM_LOCKED,
1194 };
1195
1196 struct sort_entry sort_mem_tlb = {
1197 .se_header = "TLB access",
1198 .se_cmp = sort__tlb_cmp,
1199 .se_snprintf = hist_entry__tlb_snprintf,
1200 .se_width_idx = HISTC_MEM_TLB,
1201 };
1202
1203 struct sort_entry sort_mem_lvl = {
1204 .se_header = "Memory access",
1205 .se_cmp = sort__lvl_cmp,
1206 .se_snprintf = hist_entry__lvl_snprintf,
1207 .se_width_idx = HISTC_MEM_LVL,
1208 };
1209
1210 struct sort_entry sort_mem_snoop = {
1211 .se_header = "Snoop",
1212 .se_cmp = sort__snoop_cmp,
1213 .se_snprintf = hist_entry__snoop_snprintf,
1214 .se_width_idx = HISTC_MEM_SNOOP,
1215 };
1216
1217 struct sort_entry sort_mem_dcacheline = {
1218 .se_header = "Data Cacheline",
1219 .se_cmp = sort__dcacheline_cmp,
1220 .se_snprintf = hist_entry__dcacheline_snprintf,
1221 .se_width_idx = HISTC_MEM_DCACHELINE,
1222 };
1223
1224 static int64_t
1225 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1226 {
1227 if (!left->branch_info || !right->branch_info)
1228 return cmp_null(left->branch_info, right->branch_info);
1229
1230 return left->branch_info->flags.abort !=
1231 right->branch_info->flags.abort;
1232 }
1233
1234 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1235 size_t size, unsigned int width)
1236 {
1237 static const char *out = "N/A";
1238
1239 if (he->branch_info) {
1240 if (he->branch_info->flags.abort)
1241 out = "A";
1242 else
1243 out = ".";
1244 }
1245
1246 return repsep_snprintf(bf, size, "%-*s", width, out);
1247 }
1248
1249 struct sort_entry sort_abort = {
1250 .se_header = "Transaction abort",
1251 .se_cmp = sort__abort_cmp,
1252 .se_snprintf = hist_entry__abort_snprintf,
1253 .se_width_idx = HISTC_ABORT,
1254 };
1255
1256 static int64_t
1257 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1258 {
1259 if (!left->branch_info || !right->branch_info)
1260 return cmp_null(left->branch_info, right->branch_info);
1261
1262 return left->branch_info->flags.in_tx !=
1263 right->branch_info->flags.in_tx;
1264 }
1265
1266 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1267 size_t size, unsigned int width)
1268 {
1269 static const char *out = "N/A";
1270
1271 if (he->branch_info) {
1272 if (he->branch_info->flags.in_tx)
1273 out = "T";
1274 else
1275 out = ".";
1276 }
1277
1278 return repsep_snprintf(bf, size, "%-*s", width, out);
1279 }
1280
1281 struct sort_entry sort_in_tx = {
1282 .se_header = "Branch in transaction",
1283 .se_cmp = sort__in_tx_cmp,
1284 .se_snprintf = hist_entry__in_tx_snprintf,
1285 .se_width_idx = HISTC_IN_TX,
1286 };
1287
1288 static int64_t
1289 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1290 {
1291 return left->transaction - right->transaction;
1292 }
1293
1294 static inline char *add_str(char *p, const char *str)
1295 {
1296 strcpy(p, str);
1297 return p + strlen(str);
1298 }
1299
1300 static struct txbit {
1301 unsigned flag;
1302 const char *name;
1303 int skip_for_len;
1304 } txbits[] = {
1305 { PERF_TXN_ELISION, "EL ", 0 },
1306 { PERF_TXN_TRANSACTION, "TX ", 1 },
1307 { PERF_TXN_SYNC, "SYNC ", 1 },
1308 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1309 { PERF_TXN_RETRY, "RETRY ", 0 },
1310 { PERF_TXN_CONFLICT, "CON ", 0 },
1311 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1312 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1313 { 0, NULL, 0 }
1314 };
1315
1316 int hist_entry__transaction_len(void)
1317 {
1318 int i;
1319 int len = 0;
1320
1321 for (i = 0; txbits[i].name; i++) {
1322 if (!txbits[i].skip_for_len)
1323 len += strlen(txbits[i].name);
1324 }
1325 len += 4; /* :XX<space> */
1326 return len;
1327 }
1328
1329 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1330 size_t size, unsigned int width)
1331 {
1332 u64 t = he->transaction;
1333 char buf[128];
1334 char *p = buf;
1335 int i;
1336
1337 buf[0] = 0;
1338 for (i = 0; txbits[i].name; i++)
1339 if (txbits[i].flag & t)
1340 p = add_str(p, txbits[i].name);
1341 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1342 p = add_str(p, "NEITHER ");
1343 if (t & PERF_TXN_ABORT_MASK) {
1344 sprintf(p, ":%" PRIx64,
1345 (t & PERF_TXN_ABORT_MASK) >>
1346 PERF_TXN_ABORT_SHIFT);
1347 p += strlen(p);
1348 }
1349
1350 return repsep_snprintf(bf, size, "%-*s", width, buf);
1351 }
1352
1353 struct sort_entry sort_transaction = {
1354 .se_header = "Transaction ",
1355 .se_cmp = sort__transaction_cmp,
1356 .se_snprintf = hist_entry__transaction_snprintf,
1357 .se_width_idx = HISTC_TRANSACTION,
1358 };
1359
1360 struct sort_dimension {
1361 const char *name;
1362 struct sort_entry *entry;
1363 int taken;
1364 };
1365
1366 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1367
1368 static struct sort_dimension common_sort_dimensions[] = {
1369 DIM(SORT_PID, "pid", sort_thread),
1370 DIM(SORT_COMM, "comm", sort_comm),
1371 DIM(SORT_DSO, "dso", sort_dso),
1372 DIM(SORT_SYM, "symbol", sort_sym),
1373 DIM(SORT_PARENT, "parent", sort_parent),
1374 DIM(SORT_CPU, "cpu", sort_cpu),
1375 DIM(SORT_SOCKET, "socket", sort_socket),
1376 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1377 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1378 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1379 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1380 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1381 DIM(SORT_TRACE, "trace", sort_trace),
1382 };
1383
1384 #undef DIM
1385
1386 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1387
1388 static struct sort_dimension bstack_sort_dimensions[] = {
1389 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1390 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1391 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1392 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1393 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1394 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1395 DIM(SORT_ABORT, "abort", sort_abort),
1396 DIM(SORT_CYCLES, "cycles", sort_cycles),
1397 };
1398
1399 #undef DIM
1400
1401 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1402
1403 static struct sort_dimension memory_sort_dimensions[] = {
1404 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1405 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1406 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1407 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1408 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1409 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1410 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1411 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1412 };
1413
1414 #undef DIM
1415
1416 struct hpp_dimension {
1417 const char *name;
1418 struct perf_hpp_fmt *fmt;
1419 int taken;
1420 };
1421
1422 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1423
1424 static struct hpp_dimension hpp_sort_dimensions[] = {
1425 DIM(PERF_HPP__OVERHEAD, "overhead"),
1426 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1427 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1428 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1429 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1430 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1431 DIM(PERF_HPP__SAMPLES, "sample"),
1432 DIM(PERF_HPP__PERIOD, "period"),
1433 };
1434
1435 #undef DIM
1436
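/*
 * Adapter that wraps a classic struct sort_entry in a perf_hpp_fmt so
 * the old sort keys can be registered as columns in the unified hpp
 * output/sort lists.
 */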
1437 struct hpp_sort_entry {
1438 struct perf_hpp_fmt hpp;
1439 struct sort_entry *se;
1440 };
1441
1442 bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1443 {
1444 struct hpp_sort_entry *hse_a;
1445 struct hpp_sort_entry *hse_b;
1446
1447 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1448 return false;
1449
1450 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1451 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1452
1453 return hse_a->se == hse_b->se;
1454 }
1455
1456 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1457 {
1458 struct hpp_sort_entry *hse;
1459
1460 if (!perf_hpp__is_sort_entry(fmt))
1461 return;
1462
1463 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1464 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1465 }
1466
1467 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1468 struct perf_evsel *evsel)
1469 {
1470 struct hpp_sort_entry *hse;
1471 size_t len = fmt->user_len;
1472
1473 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1474
1475 if (!len)
1476 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1477
1478 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1479 }
1480
1481 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1482 struct perf_hpp *hpp __maybe_unused,
1483 struct perf_evsel *evsel)
1484 {
1485 struct hpp_sort_entry *hse;
1486 size_t len = fmt->user_len;
1487
1488 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1489
1490 if (!len)
1491 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
1492
1493 return len;
1494 }
1495
1496 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1497 struct hist_entry *he)
1498 {
1499 struct hpp_sort_entry *hse;
1500 size_t len = fmt->user_len;
1501
1502 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1503
1504 if (!len)
1505 len = hists__col_len(he->hists, hse->se->se_width_idx);
1506
1507 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1508 }
1509
1510 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1511 struct hist_entry *a, struct hist_entry *b)
1512 {
1513 struct hpp_sort_entry *hse;
1514
1515 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1516 return hse->se->se_cmp(a, b);
1517 }
1518
1519 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1520 struct hist_entry *a, struct hist_entry *b)
1521 {
1522 struct hpp_sort_entry *hse;
1523 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1524
1525 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1526 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1527 return collapse_fn(a, b);
1528 }
1529
1530 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1531 struct hist_entry *a, struct hist_entry *b)
1532 {
1533 struct hpp_sort_entry *hse;
1534 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1535
1536 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1537 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1538 return sort_fn(a, b);
1539 }
1540
1541 static struct hpp_sort_entry *
1542 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
1543 {
1544 struct hpp_sort_entry *hse;
1545
1546 hse = malloc(sizeof(*hse));
1547 if (hse == NULL) {
1548 pr_err("Memory allocation failed\n");
1549 return NULL;
1550 }
1551
1552 hse->se = sd->entry;
1553 hse->hpp.name = sd->entry->se_header;
1554 hse->hpp.header = __sort__hpp_header;
1555 hse->hpp.width = __sort__hpp_width;
1556 hse->hpp.entry = __sort__hpp_entry;
1557 hse->hpp.color = NULL;
1558
1559 hse->hpp.cmp = __sort__hpp_cmp;
1560 hse->hpp.collapse = __sort__hpp_collapse;
1561 hse->hpp.sort = __sort__hpp_sort;
1562
1563 INIT_LIST_HEAD(&hse->hpp.list);
1564 INIT_LIST_HEAD(&hse->hpp.sort_list);
1565 hse->hpp.elide = false;
1566 hse->hpp.len = 0;
1567 hse->hpp.user_len = 0;
1568
1569 return hse;
1570 }
1571
1572 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1573 {
1574 return format->header == __sort__hpp_header;
1575 }
1576
1577 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1578 {
1579 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1580
1581 if (hse == NULL)
1582 return -1;
1583
1584 perf_hpp__register_sort_field(&hse->hpp);
1585 return 0;
1586 }
1587
1588 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
1589 {
1590 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1591
1592 if (hse == NULL)
1593 return -1;
1594
1595 perf_hpp__column_register(&hse->hpp);
1596 return 0;
1597 }
1598
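/*
 * A dynamic sort/output column backed by a single tracepoint format
 * field. Created from a --sort key such as "<event>.<field>" (or for
 * every field via "trace_fields"); it compares entries by the raw field
 * bytes and prints either the pretty-printed or the raw field value.
 */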
1599 struct hpp_dynamic_entry {
1600 struct perf_hpp_fmt hpp;
1601 struct perf_evsel *evsel;
1602 struct format_field *field;
1603 unsigned dynamic_len;
1604 bool raw_trace;
1605 };
1606
1607 static int hde_width(struct hpp_dynamic_entry *hde)
1608 {
1609 if (!hde->hpp.len) {
1610 int len = hde->dynamic_len;
1611 int namelen = strlen(hde->field->name);
1612 int fieldlen = hde->field->size;
1613
1614 if (namelen > len)
1615 len = namelen;
1616
1617 if (!(hde->field->flags & FIELD_IS_STRING)) {
618 /* length needed to print the value as a hex number */
1619 fieldlen = hde->field->size * 2 + 2;
1620 }
1621 if (fieldlen > len)
1622 len = fieldlen;
1623
1624 hde->hpp.len = len;
1625 }
1626 return hde->hpp.len;
1627 }
1628
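/*
 * Scan the pretty-printed trace output for a "<field-name>=<value>"
 * token and remember the widest value seen, so the column width can
 * grow to fit the data.
 */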
1629 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1630 struct hist_entry *he)
1631 {
1632 char *str, *pos;
1633 struct format_field *field = hde->field;
1634 size_t namelen;
1635 bool last = false;
1636
1637 if (hde->raw_trace)
1638 return;
1639
1640 /* parse pretty print result and update max length */
1641 if (!he->trace_output)
1642 he->trace_output = get_trace_output(he);
1643
1644 namelen = strlen(field->name);
1645 str = he->trace_output;
1646
1647 while (str) {
1648 pos = strchr(str, ' ');
1649 if (pos == NULL) {
1650 last = true;
1651 pos = str + strlen(str);
1652 }
1653
1654 if (!strncmp(str, field->name, namelen)) {
1655 size_t len;
1656
1657 str += namelen + 1;
1658 len = pos - str;
1659
1660 if (len > hde->dynamic_len)
1661 hde->dynamic_len = len;
1662 break;
1663 }
1664
1665 if (last)
1666 str = NULL;
1667 else
1668 str = pos + 1;
1669 }
1670 }
1671
1672 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1673 struct perf_evsel *evsel __maybe_unused)
1674 {
1675 struct hpp_dynamic_entry *hde;
1676 size_t len = fmt->user_len;
1677
1678 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1679
1680 if (!len)
1681 len = hde_width(hde);
1682
1683 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1684 }
1685
1686 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1687 struct perf_hpp *hpp __maybe_unused,
1688 struct perf_evsel *evsel __maybe_unused)
1689 {
1690 struct hpp_dynamic_entry *hde;
1691 size_t len = fmt->user_len;
1692
1693 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1694
1695 if (!len)
1696 len = hde_width(hde);
1697
1698 return len;
1699 }
1700
1701 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1702 {
1703 struct hpp_dynamic_entry *hde;
1704
1705 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1706
1707 return hists_to_evsel(hists) == hde->evsel;
1708 }
1709
1710 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1711 struct hist_entry *he)
1712 {
1713 struct hpp_dynamic_entry *hde;
1714 size_t len = fmt->user_len;
1715 char *str, *pos;
1716 struct format_field *field;
1717 size_t namelen;
1718 bool last = false;
1719 int ret;
1720
1721 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1722
1723 if (!len)
1724 len = hde_width(hde);
1725
1726 if (hde->raw_trace)
1727 goto raw_field;
1728
1729 field = hde->field;
1730 namelen = strlen(field->name);
1731 str = he->trace_output;
1732
1733 while (str) {
1734 pos = strchr(str, ' ');
1735 if (pos == NULL) {
1736 last = true;
1737 pos = str + strlen(str);
1738 }
1739
1740 if (!strncmp(str, field->name, namelen)) {
1741 str += namelen + 1;
1742 str = strndup(str, pos - str);
1743
1744 if (str == NULL)
1745 return scnprintf(hpp->buf, hpp->size,
1746 "%*.*s", len, len, "ERROR");
1747 break;
1748 }
1749
1750 if (last)
1751 str = NULL;
1752 else
1753 str = pos + 1;
1754 }
1755
1756 if (str == NULL) {
1757 struct trace_seq seq;
1758 raw_field:
1759 trace_seq_init(&seq);
1760 pevent_print_field(&seq, he->raw_data, hde->field);
1761 str = seq.buffer;
1762 }
1763
1764 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1765 free(str);
1766 return ret;
1767 }
1768
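/*
 * Compare two entries by the raw bytes of the tracepoint field. For
 * variable-size (FIELD_IS_DYNAMIC) fields the stored slot is a
 * descriptor: low 16 bits = offset of the payload, next 16 bits = its
 * length.
 */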
1769 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1770 struct hist_entry *a, struct hist_entry *b)
1771 {
1772 struct hpp_dynamic_entry *hde;
1773 struct format_field *field;
1774 unsigned offset, size;
1775
1776 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1777
1778 field = hde->field;
1779 if (field->flags & FIELD_IS_DYNAMIC) {
1780 unsigned long long dyn;
1781
1782 pevent_read_number_field(field, a->raw_data, &dyn);
1783 offset = dyn & 0xffff;
1784 size = (dyn >> 16) & 0xffff;
1785
1786 /* record max width for output */
1787 if (size > hde->dynamic_len)
1788 hde->dynamic_len = size;
1789 } else {
1790 offset = field->offset;
1791 size = field->size;
1792
1793 update_dynamic_len(hde, a);
1794 update_dynamic_len(hde, b);
1795 }
1796
1797 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1798 }
1799
1800 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1801 {
1802 return fmt->cmp == __sort__hde_cmp;
1803 }
1804
1805 static struct hpp_dynamic_entry *
1806 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1807 {
1808 struct hpp_dynamic_entry *hde;
1809
1810 hde = malloc(sizeof(*hde));
1811 if (hde == NULL) {
1812 pr_debug("Memory allocation failed\n");
1813 return NULL;
1814 }
1815
1816 hde->evsel = evsel;
1817 hde->field = field;
1818 hde->dynamic_len = 0;
1819
1820 hde->hpp.name = field->name;
1821 hde->hpp.header = __sort__hde_header;
1822 hde->hpp.width = __sort__hde_width;
1823 hde->hpp.entry = __sort__hde_entry;
1824 hde->hpp.color = NULL;
1825
1826 hde->hpp.cmp = __sort__hde_cmp;
1827 hde->hpp.collapse = __sort__hde_cmp;
1828 hde->hpp.sort = __sort__hde_cmp;
1829
1830 INIT_LIST_HEAD(&hde->hpp.list);
1831 INIT_LIST_HEAD(&hde->hpp.sort_list);
1832 hde->hpp.elide = false;
1833 hde->hpp.len = 0;
1834 hde->hpp.user_len = 0;
1835
1836 return hde;
1837 }
1838
1839 static int parse_field_name(char *str, char **event, char **field, char **opt)
1840 {
1841 char *event_name, *field_name, *opt_name;
1842
1843 event_name = str;
1844 field_name = strchr(str, '.');
1845
1846 if (field_name) {
1847 *field_name++ = '\0';
1848 } else {
1849 event_name = NULL;
1850 field_name = str;
1851 }
1852
1853 opt_name = strchr(field_name, '/');
1854 if (opt_name)
1855 *opt_name++ = '\0';
1856
1857 *event = event_name;
1858 *field = field_name;
1859 *opt = opt_name;
1860
1861 return 0;
1862 }
1863
864 /* Find the matching evsel for a given event name. The event name can be:
1865 * 1. NULL - only valid for single event session
1866 * 2. '%' + event index (e.g. '%1' for first event)
1867 * 3. full event name (e.g. sched:sched_switch)
1868 * 4. partial event name (should not contain ':')
1869 */
1870 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1871 {
1872 struct perf_evsel *evsel = NULL;
1873 struct perf_evsel *pos;
1874 bool full_name;
1875
1876 /* case 1 */
1877 if (event_name == NULL) {
1878 if (evlist->nr_entries != 1) {
1879 pr_debug("event name should be given\n");
1880 return NULL;
1881 }
1882
1883 return perf_evlist__first(evlist);
1884 }
1885
1886 /* case 2 */
1887 if (event_name[0] == '%') {
1888 int nr = strtol(event_name+1, NULL, 0);
1889
1890 if (nr > evlist->nr_entries)
1891 return NULL;
1892
1893 evsel = perf_evlist__first(evlist);
1894 while (--nr > 0)
1895 evsel = perf_evsel__next(evsel);
1896
1897 return evsel;
1898 }
1899
1900 full_name = !!strchr(event_name, ':');
1901 evlist__for_each(evlist, pos) {
1902 /* case 3 */
1903 if (full_name && !strcmp(pos->name, event_name))
1904 return pos;
1905 /* case 4 */
1906 if (!full_name && strstr(pos->name, event_name)) {
1907 if (evsel) {
1908 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1909 event_name, evsel->name, pos->name);
1910 return NULL;
1911 }
1912 evsel = pos;
1913 }
1914 }
1915
1916 return evsel;
1917 }
1918
1919 static int __dynamic_dimension__add(struct perf_evsel *evsel,
1920 struct format_field *field,
1921 bool raw_trace)
1922 {
1923 struct hpp_dynamic_entry *hde;
1924
1925 hde = __alloc_dynamic_entry(evsel, field);
1926 if (hde == NULL)
1927 return -ENOMEM;
1928
1929 hde->raw_trace = raw_trace;
1930
1931 perf_hpp__register_sort_field(&hde->hpp);
1932 return 0;
1933 }
1934
1935 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1936 {
1937 int ret;
1938 struct format_field *field;
1939
1940 field = evsel->tp_format->format.fields;
1941 while (field) {
1942 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1943 if (ret < 0)
1944 return ret;
1945
1946 field = field->next;
1947 }
1948 return 0;
1949 }
1950
1951 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1952 {
1953 int ret;
1954 struct perf_evsel *evsel;
1955
1956 evlist__for_each(evlist, evsel) {
1957 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1958 continue;
1959
1960 ret = add_evsel_fields(evsel, raw_trace);
1961 if (ret < 0)
1962 return ret;
1963 }
1964 return 0;
1965 }
1966
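/*
 * Add dynamic column(s) for the --sort token @tok. Accepted forms
 * (illustrative): "trace_fields" (every field of every tracepoint
 * event), "<event>.*" (every field of one event), "<event>.<field>",
 * or just "<field>" for a single-event session, optionally suffixed
 * with "/raw" to skip pretty-printing.
 */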
1967 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
1968 {
1969 char *str, *event_name, *field_name, *opt_name;
1970 struct perf_evsel *evsel;
1971 struct format_field *field;
1972 bool raw_trace = symbol_conf.raw_trace;
1973 int ret = 0;
1974
1975 if (evlist == NULL)
1976 return -ENOENT;
1977
1978 str = strdup(tok);
1979 if (str == NULL)
1980 return -ENOMEM;
1981
1982 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
1983 ret = -EINVAL;
1984 goto out;
1985 }
1986
1987 if (opt_name) {
1988 if (strcmp(opt_name, "raw")) {
1989 pr_debug("unsupported field option %s\n", opt_name);
1990 ret = -EINVAL;
1991 goto out;
1992 }
1993 raw_trace = true;
1994 }
1995
1996 if (!strcmp(field_name, "trace_fields")) {
1997 ret = add_all_dynamic_fields(evlist, raw_trace);
1998 goto out;
1999 }
2000
2001 evsel = find_evsel(evlist, event_name);
2002 if (evsel == NULL) {
2003 pr_debug("Cannot find event: %s\n", event_name);
2004 ret = -ENOENT;
2005 goto out;
2006 }
2007
2008 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2009 pr_debug("%s is not a tracepoint event\n", event_name);
2010 ret = -EINVAL;
2011 goto out;
2012 }
2013
2014 if (!strcmp(field_name, "*")) {
2015 ret = add_evsel_fields(evsel, raw_trace);
2016 } else {
2017 field = pevent_find_any_field(evsel->tp_format, field_name);
2018 if (field == NULL) {
2019 pr_debug("Cannot find event field for %s.%s\n",
2020 event_name, field_name);
2021 ret = -ENOENT;
goto out;
2022 }
2023
2024 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2025 }
2026
2027 out:
2028 free(str);
2029 return ret;
2030 }
2031
2032 static int __sort_dimension__add(struct sort_dimension *sd)
2033 {
2034 if (sd->taken)
2035 return 0;
2036
2037 if (__sort_dimension__add_hpp_sort(sd) < 0)
2038 return -1;
2039
2040 if (sd->entry->se_collapse)
2041 sort__need_collapse = 1;
2042
2043 sd->taken = 1;
2044
2045 return 0;
2046 }
2047
2048 static int __hpp_dimension__add(struct hpp_dimension *hd)
2049 {
2050 if (!hd->taken) {
2051 hd->taken = 1;
2052
2053 perf_hpp__register_sort_field(hd->fmt);
2054 }
2055 return 0;
2056 }
2057
2058 static int __sort_dimension__add_output(struct sort_dimension *sd)
2059 {
2060 if (sd->taken)
2061 return 0;
2062
2063 if (__sort_dimension__add_hpp_output(sd) < 0)
2064 return -1;
2065
2066 sd->taken = 1;
2067 return 0;
2068 }
2069
2070 static int __hpp_dimension__add_output(struct hpp_dimension *hd)
2071 {
2072 if (!hd->taken) {
2073 hd->taken = 1;
2074
2075 perf_hpp__column_register(hd->fmt);
2076 }
2077 return 0;
2078 }
2079
2080 int hpp_dimension__add_output(unsigned col)
2081 {
2082 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2083 return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
2084 }
2085
2086 static int sort_dimension__add(const char *tok,
2087 struct perf_evlist *evlist)
2088 {
2089 unsigned int i;
2090
2091 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2092 struct sort_dimension *sd = &common_sort_dimensions[i];
2093
2094 if (strncasecmp(tok, sd->name, strlen(tok)))
2095 continue;
2096
2097 if (sd->entry == &sort_parent) {
2098 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2099 if (ret) {
2100 char err[BUFSIZ];
2101
2102 regerror(ret, &parent_regex, err, sizeof(err));
2103 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2104 return -EINVAL;
2105 }
2106 sort__has_parent = 1;
2107 } else if (sd->entry == &sort_sym) {
2108 sort__has_sym = 1;
2109 /*
2110 			 * perf diff displays the performance difference between
2111 			 * two or more perf.data files. Those files could come
2112 			 * from different binaries, so we should not compare
2113 			 * their IPs but the symbol names.
2114 */
2115 if (sort__mode == SORT_MODE__DIFF)
2116 sd->entry->se_collapse = sort__sym_sort;
2117
2118 } else if (sd->entry == &sort_dso) {
2119 sort__has_dso = 1;
2120 } else if (sd->entry == &sort_socket) {
2121 sort__has_socket = 1;
2122 }
2123
2124 return __sort_dimension__add(sd);
2125 }
2126
2127 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2128 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2129
2130 if (strncasecmp(tok, hd->name, strlen(tok)))
2131 continue;
2132
2133 return __hpp_dimension__add(hd);
2134 }
2135
2136 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2137 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2138
2139 if (strncasecmp(tok, sd->name, strlen(tok)))
2140 continue;
2141
2142 if (sort__mode != SORT_MODE__BRANCH)
2143 return -EINVAL;
2144
2145 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2146 sort__has_sym = 1;
2147
2148 __sort_dimension__add(sd);
2149 return 0;
2150 }
2151
2152 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2153 struct sort_dimension *sd = &memory_sort_dimensions[i];
2154
2155 if (strncasecmp(tok, sd->name, strlen(tok)))
2156 continue;
2157
2158 if (sort__mode != SORT_MODE__MEMORY)
2159 return -EINVAL;
2160
2161 if (sd->entry == &sort_mem_daddr_sym)
2162 sort__has_sym = 1;
2163
2164 __sort_dimension__add(sd);
2165 return 0;
2166 }
2167
2168 if (!add_dynamic_entry(evlist, tok))
2169 return 0;
2170
2171 return -ESRCH;
2172 }
2173
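/*
 * default_sort_orders[] is indexed by sort__mode, so its entries must
 * stay in the same order as enum sort_mode.
 */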
2174 static const char *get_default_sort_order(void)
2175 {
2176 const char *default_sort_orders[] = {
2177 default_sort_order,
2178 default_branch_sort_order,
2179 default_mem_sort_order,
2180 default_top_sort_order,
2181 default_diff_sort_order,
2182 };
2183
2184 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2185
2186 return default_sort_orders[sort__mode];
2187 }
2188
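/*
 * A '+'-prefixed --sort string appends the given keys to the per-mode
 * default order instead of replacing it; e.g. "--sort +trace_fields"
 * keeps the default keys and appends the dynamic tracepoint fields.
 */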
2189 static int setup_sort_order(void)
2190 {
2191 char *new_sort_order;
2192
2193 /*
2194 * Append '+'-prefixed sort order to the default sort
2195 * order string.
2196 */
2197 if (!sort_order || is_strict_order(sort_order))
2198 return 0;
2199
2200 if (sort_order[1] == '\0') {
2201 error("Invalid --sort key: `+'");
2202 return -EINVAL;
2203 }
2204
2205 /*
2206 	 * We allocate a new sort_order string, but we never free it,
2207 	 * because it is referenced throughout the rest of the code.
2208 */
2209 if (asprintf(&new_sort_order, "%s,%s",
2210 get_default_sort_order(), sort_order + 1) < 0) {
2211 error("Not enough memory to set up --sort");
2212 return -ENOMEM;
2213 }
2214
2215 sort_order = new_sort_order;
2216 return 0;
2217 }
2218
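/*
 * Split the sort order (user supplied, or the per-mode default) on
 * ',' and ' ' and register each token as a sort key.
 */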
2219 static int __setup_sorting(struct perf_evlist *evlist)
2220 {
2221 char *tmp, *tok, *str;
2222 const char *sort_keys;
2223 int ret = 0;
2224
2225 ret = setup_sort_order();
2226 if (ret)
2227 return ret;
2228
2229 sort_keys = sort_order;
2230 if (sort_keys == NULL) {
2231 if (is_strict_order(field_order)) {
2232 /*
2233 			 * If the user specified a field order but no sort order,
2234 * we'll honor it and not add default sort orders.
2235 */
2236 return 0;
2237 }
2238
2239 sort_keys = get_default_sort_order();
2240 }
2241
2242 str = strdup(sort_keys);
2243 if (str == NULL) {
2244 error("Not enough memory to setup sort keys");
2245 return -ENOMEM;
2246 }
2247
2248 for (tok = strtok_r(str, ", ", &tmp);
2249 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2250 ret = sort_dimension__add(tok, evlist);
2251 if (ret == -EINVAL) {
2252 error("Invalid --sort key: `%s'", tok);
2253 break;
2254 } else if (ret == -ESRCH) {
2255 error("Unknown --sort key: `%s'", tok);
2256 break;
2257 }
2258 }
2259
2260 free(str);
2261 return ret;
2262 }
2263
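/*
 * Force the elide state of the output column whose sort entry uses the
 * given width index.
 */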
2264 void perf_hpp__set_elide(int idx, bool elide)
2265 {
2266 struct perf_hpp_fmt *fmt;
2267 struct hpp_sort_entry *hse;
2268
2269 perf_hpp__for_each_format(fmt) {
2270 if (!perf_hpp__is_sort_entry(fmt))
2271 continue;
2272
2273 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2274 if (hse->se->se_width_idx == idx) {
2275 fmt->elide = elide;
2276 break;
2277 }
2278 }
2279 }
2280
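/*
 * A column can be elided when the corresponding filter list has exactly
 * one entry, i.e. the column value is already implied by the filter;
 * the single value is then noted in the report header via @fp instead.
 */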
2281 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
2282 {
2283 if (list && strlist__nr_entries(list) == 1) {
2284 if (fp != NULL)
2285 fprintf(fp, "# %s: %s\n", list_name,
2286 strlist__entry(list, 0)->s);
2287 return true;
2288 }
2289 return false;
2290 }
2291
2292 static bool get_elide(int idx, FILE *output)
2293 {
2294 switch (idx) {
2295 case HISTC_SYMBOL:
2296 return __get_elide(symbol_conf.sym_list, "symbol", output);
2297 case HISTC_DSO:
2298 return __get_elide(symbol_conf.dso_list, "dso", output);
2299 case HISTC_COMM:
2300 return __get_elide(symbol_conf.comm_list, "comm", output);
2301 default:
2302 break;
2303 }
2304
2305 if (sort__mode != SORT_MODE__BRANCH)
2306 return false;
2307
2308 switch (idx) {
2309 case HISTC_SYMBOL_FROM:
2310 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2311 case HISTC_SYMBOL_TO:
2312 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2313 case HISTC_DSO_FROM:
2314 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2315 case HISTC_DSO_TO:
2316 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2317 default:
2318 break;
2319 }
2320
2321 return false;
2322 }
2323
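/*
 * Compute the elide state of every sort column from the active filter
 * lists, unless that would hide all of them (see below).
 */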
2324 void sort__setup_elide(FILE *output)
2325 {
2326 struct perf_hpp_fmt *fmt;
2327 struct hpp_sort_entry *hse;
2328
2329 perf_hpp__for_each_format(fmt) {
2330 if (!perf_hpp__is_sort_entry(fmt))
2331 continue;
2332
2333 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2334 fmt->elide = get_elide(hse->se->se_width_idx, output);
2335 }
2336
2337 /*
2338 	 * It makes no sense to elide all of the sort entries.
2339 	 * Just revert them so they show up again.
2340 */
2341 perf_hpp__for_each_format(fmt) {
2342 if (!perf_hpp__is_sort_entry(fmt))
2343 continue;
2344
2345 if (!fmt->elide)
2346 return;
2347 }
2348
2349 perf_hpp__for_each_format(fmt) {
2350 if (!perf_hpp__is_sort_entry(fmt))
2351 continue;
2352
2353 fmt->elide = false;
2354 }
2355 }
2356
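/*
 * Resolve one --fields token against the same dimension tables as
 * sort_dimension__add(), but register it as an output column only.
 * Unlike --sort, no sort-mode restriction is applied here.
 */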
2357 static int output_field_add(char *tok)
2358 {
2359 unsigned int i;
2360
2361 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2362 struct sort_dimension *sd = &common_sort_dimensions[i];
2363
2364 if (strncasecmp(tok, sd->name, strlen(tok)))
2365 continue;
2366
2367 return __sort_dimension__add_output(sd);
2368 }
2369
2370 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2371 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2372
2373 if (strncasecmp(tok, hd->name, strlen(tok)))
2374 continue;
2375
2376 return __hpp_dimension__add_output(hd);
2377 }
2378
2379 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2380 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2381
2382 if (strncasecmp(tok, sd->name, strlen(tok)))
2383 continue;
2384
2385 return __sort_dimension__add_output(sd);
2386 }
2387
2388 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2389 struct sort_dimension *sd = &memory_sort_dimensions[i];
2390
2391 if (strncasecmp(tok, sd->name, strlen(tok)))
2392 continue;
2393
2394 return __sort_dimension__add_output(sd);
2395 }
2396
2397 return -ESRCH;
2398 }
2399
2400 static void reset_dimensions(void)
2401 {
2402 unsigned int i;
2403
2404 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2405 common_sort_dimensions[i].taken = 0;
2406
2407 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2408 hpp_sort_dimensions[i].taken = 0;
2409
2410 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2411 bstack_sort_dimensions[i].taken = 0;
2412
2413 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2414 memory_sort_dimensions[i].taken = 0;
2415 }
2416
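/*
 * A "strict" order replaces the default keys entirely; an order starting
 * with '+' merely appends to them.
 */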
2417 bool is_strict_order(const char *order)
2418 {
2419 return order && (*order != '+');
2420 }
2421
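/*
 * Split the --fields string on ',' and ' ' (skipping a leading '+',
 * see is_strict_order()) and register each token as an output field.
 */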
2422 static int __setup_output_field(void)
2423 {
2424 char *tmp, *tok, *str, *strp;
2425 int ret = -EINVAL;
2426
2427 if (field_order == NULL)
2428 return 0;
2429
2430 strp = str = strdup(field_order);
2431 if (str == NULL) {
2432 error("Not enough memory to setup output fields");
2433 return -ENOMEM;
2434 }
2435
2436 if (!is_strict_order(field_order))
2437 strp++;
2438
2439 if (!strlen(strp)) {
2440 error("Invalid --fields key: `+'");
2441 goto out;
2442 }
2443
2444 for (tok = strtok_r(strp, ", ", &tmp);
2445 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2446 ret = output_field_add(tok);
2447 if (ret == -EINVAL) {
2448 error("Invalid --fields key: `%s'", tok);
2449 break;
2450 } else if (ret == -ESRCH) {
2451 error("Unknown --fields key: `%s'", tok);
2452 break;
2453 }
2454 }
2455
2456 out:
2457 free(str);
2458 return ret;
2459 }
2460
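/*
 * Entry point used by the perf builtins once sort_order / field_order
 * have been filled in from the command line.  A rough sketch of a
 * caller (not the exact builtin code):
 *
 *	sort_order = "comm,dso";
 *	if (setup_sorting(evlist) < 0)
 *		return -1;
 *
 * It sets up the sort keys, adds the implicit "parent" key when a
 * non-default parent pattern was given, then sets up the output fields
 * and links sort keys and output columns together.
 */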
2461 int setup_sorting(struct perf_evlist *evlist)
2462 {
2463 int err;
2464
2465 err = __setup_sorting(evlist);
2466 if (err < 0)
2467 return err;
2468
2469 if (parent_pattern != default_parent_pattern) {
2470 err = sort_dimension__add("parent", evlist);
2471 if (err < 0)
2472 return err;
2473 }
2474
2475 reset_dimensions();
2476
2477 /*
2478 * perf diff doesn't use default hpp output fields.
2479 */
2480 if (sort__mode != SORT_MODE__DIFF)
2481 perf_hpp__init();
2482
2483 err = __setup_output_field();
2484 if (err < 0)
2485 return err;
2486
2487 /* copy sort keys to output fields */
2488 perf_hpp__setup_output_field();
2489 /* and then copy output fields to sort keys */
2490 perf_hpp__append_sort_keys();
2491
2492 return 0;
2493 }
2494
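/*
 * Drop every registered sort key and output field so that sorting can
 * be set up again from scratch.
 */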
2495 void reset_output_field(void)
2496 {
2497 sort__need_collapse = 0;
2498 sort__has_parent = 0;
2499 sort__has_sym = 0;
2500 sort__has_dso = 0;
2501
2502 field_order = NULL;
2503 sort_order = NULL;
2504
2505 reset_dimensions();
2506 perf_hpp__reset_output_field();
2507 }