]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - tools/perf/util/machine.c
perf tools: Fix annotation with kcore
[mirror_ubuntu-artful-kernel.git] / tools / perf / util / machine.c
CommitLineData
3f067dca 1#include "callchain.h"
b0a7d1a0
ACM
2#include "debug.h"
3#include "event.h"
3f067dca
ACM
4#include "evsel.h"
5#include "hist.h"
9d2f8e22
ACM
6#include "machine.h"
7#include "map.h"
3f067dca 8#include "sort.h"
69d2591a 9#include "strlist.h"
9d2f8e22 10#include "thread.h"
d027b640 11#include "vdso.h"
9d2f8e22 12#include <stdbool.h>
c506c96b 13#include <symbol/kallsyms.h>
3f067dca 14#include "unwind.h"
9d2f8e22 15
e167f995
ACM
16static void dsos__init(struct dsos *dsos)
17{
18 INIT_LIST_HEAD(&dsos->head);
19 dsos->root = RB_ROOT;
20}
21
69d2591a
ACM
22int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
23{
11246c70 24 map_groups__init(&machine->kmaps, machine);
69d2591a 25 RB_CLEAR_NODE(&machine->rb_node);
e167f995
ACM
26 dsos__init(&machine->user_dsos);
27 dsos__init(&machine->kernel_dsos);
69d2591a
ACM
28
29 machine->threads = RB_ROOT;
30 INIT_LIST_HEAD(&machine->dead_threads);
31 machine->last_match = NULL;
32
d027b640
AH
33 machine->vdso_info = NULL;
34
69d2591a
ACM
35 machine->pid = pid;
36
611a5ce8 37 machine->symbol_filter = NULL;
14bd6d20 38 machine->id_hdr_size = 0;
cfe1c414 39 machine->comm_exec = false;
fbe2af45 40 machine->kernel_start = 0;
611a5ce8 41
69d2591a
ACM
42 machine->root_dir = strdup(root_dir);
43 if (machine->root_dir == NULL)
44 return -ENOMEM;
45
46 if (pid != HOST_KERNEL_ID) {
1fcb8768 47 struct thread *thread = machine__findnew_thread(machine, -1,
314add6b 48 pid);
69d2591a
ACM
49 char comm[64];
50
51 if (thread == NULL)
52 return -ENOMEM;
53
54 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
162f0bef 55 thread__set_comm(thread, comm, 0);
69d2591a
ACM
56 }
57
b9d266ba
AH
58 machine->current_tid = NULL;
59
69d2591a
ACM
60 return 0;
61}
62
8fb598e5
DA
63struct machine *machine__new_host(void)
64{
65 struct machine *machine = malloc(sizeof(*machine));
66
67 if (machine != NULL) {
68 machine__init(machine, "", HOST_KERNEL_ID);
69
70 if (machine__create_kernel_maps(machine) < 0)
71 goto out_delete;
72 }
73
74 return machine;
75out_delete:
76 free(machine);
77 return NULL;
78}
79
8fa7d87f 80static void dsos__delete(struct dsos *dsos)
69d2591a
ACM
81{
82 struct dso *pos, *n;
83
8fa7d87f 84 list_for_each_entry_safe(pos, n, &dsos->head, node) {
4598a0a6 85 RB_CLEAR_NODE(&pos->rb_node);
69d2591a
ACM
86 list_del(&pos->node);
87 dso__delete(pos);
88 }
89}
90
3f067dca
ACM
91void machine__delete_dead_threads(struct machine *machine)
92{
93 struct thread *n, *t;
94
95 list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
96 list_del(&t->node);
97 thread__delete(t);
98 }
99}
100
101void machine__delete_threads(struct machine *machine)
102{
103 struct rb_node *nd = rb_first(&machine->threads);
104
105 while (nd) {
106 struct thread *t = rb_entry(nd, struct thread, rb_node);
107
108 rb_erase(&t->rb_node, &machine->threads);
109 nd = rb_next(nd);
110 thread__delete(t);
111 }
112}
113
69d2591a
ACM
114void machine__exit(struct machine *machine)
115{
116 map_groups__exit(&machine->kmaps);
117 dsos__delete(&machine->user_dsos);
118 dsos__delete(&machine->kernel_dsos);
d027b640 119 vdso__exit(machine);
04662523 120 zfree(&machine->root_dir);
b9d266ba 121 zfree(&machine->current_tid);
69d2591a
ACM
122}
123
/* Fully destroy a heap-allocated machine. */
void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}
129
876650e6
ACM
130void machines__init(struct machines *machines)
131{
132 machine__init(&machines->host, "", HOST_KERNEL_ID);
133 machines->guests = RB_ROOT;
611a5ce8 134 machines->symbol_filter = NULL;
876650e6
ACM
135}
136
137void machines__exit(struct machines *machines)
138{
139 machine__exit(&machines->host);
140 /* XXX exit guest */
141}
142
143struct machine *machines__add(struct machines *machines, pid_t pid,
69d2591a
ACM
144 const char *root_dir)
145{
876650e6 146 struct rb_node **p = &machines->guests.rb_node;
69d2591a
ACM
147 struct rb_node *parent = NULL;
148 struct machine *pos, *machine = malloc(sizeof(*machine));
149
150 if (machine == NULL)
151 return NULL;
152
153 if (machine__init(machine, root_dir, pid) != 0) {
154 free(machine);
155 return NULL;
156 }
157
611a5ce8
AH
158 machine->symbol_filter = machines->symbol_filter;
159
69d2591a
ACM
160 while (*p != NULL) {
161 parent = *p;
162 pos = rb_entry(parent, struct machine, rb_node);
163 if (pid < pos->pid)
164 p = &(*p)->rb_left;
165 else
166 p = &(*p)->rb_right;
167 }
168
169 rb_link_node(&machine->rb_node, parent, p);
876650e6 170 rb_insert_color(&machine->rb_node, &machines->guests);
69d2591a
ACM
171
172 return machine;
173}
174
611a5ce8
AH
175void machines__set_symbol_filter(struct machines *machines,
176 symbol_filter_t symbol_filter)
177{
178 struct rb_node *nd;
179
180 machines->symbol_filter = symbol_filter;
181 machines->host.symbol_filter = symbol_filter;
182
183 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
184 struct machine *machine = rb_entry(nd, struct machine, rb_node);
185
186 machine->symbol_filter = symbol_filter;
187 }
188}
189
cfe1c414
AH
190void machines__set_comm_exec(struct machines *machines, bool comm_exec)
191{
192 struct rb_node *nd;
193
194 machines->host.comm_exec = comm_exec;
195
196 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
197 struct machine *machine = rb_entry(nd, struct machine, rb_node);
198
199 machine->comm_exec = comm_exec;
200 }
201}
202
876650e6 203struct machine *machines__find(struct machines *machines, pid_t pid)
69d2591a 204{
876650e6 205 struct rb_node **p = &machines->guests.rb_node;
69d2591a
ACM
206 struct rb_node *parent = NULL;
207 struct machine *machine;
208 struct machine *default_machine = NULL;
209
876650e6
ACM
210 if (pid == HOST_KERNEL_ID)
211 return &machines->host;
212
69d2591a
ACM
213 while (*p != NULL) {
214 parent = *p;
215 machine = rb_entry(parent, struct machine, rb_node);
216 if (pid < machine->pid)
217 p = &(*p)->rb_left;
218 else if (pid > machine->pid)
219 p = &(*p)->rb_right;
220 else
221 return machine;
222 if (!machine->pid)
223 default_machine = machine;
224 }
225
226 return default_machine;
227}
228
876650e6 229struct machine *machines__findnew(struct machines *machines, pid_t pid)
69d2591a
ACM
230{
231 char path[PATH_MAX];
232 const char *root_dir = "";
233 struct machine *machine = machines__find(machines, pid);
234
235 if (machine && (machine->pid == pid))
236 goto out;
237
238 if ((pid != HOST_KERNEL_ID) &&
239 (pid != DEFAULT_GUEST_KERNEL_ID) &&
240 (symbol_conf.guestmount)) {
241 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
242 if (access(path, R_OK)) {
243 static struct strlist *seen;
244
245 if (!seen)
246 seen = strlist__new(true, NULL);
247
248 if (!strlist__has_entry(seen, path)) {
249 pr_err("Can't access file %s\n", path);
250 strlist__add(seen, path);
251 }
252 machine = NULL;
253 goto out;
254 }
255 root_dir = path;
256 }
257
258 machine = machines__add(machines, pid, root_dir);
259out:
260 return machine;
261}
262
876650e6
ACM
263void machines__process_guests(struct machines *machines,
264 machine__process_t process, void *data)
69d2591a
ACM
265{
266 struct rb_node *nd;
267
876650e6 268 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
69d2591a
ACM
269 struct machine *pos = rb_entry(nd, struct machine, rb_node);
270 process(pos, data);
271 }
272}
273
274char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
275{
276 if (machine__is_host(machine))
277 snprintf(bf, size, "[%s]", "kernel.kallsyms");
278 else if (machine__is_default_guest(machine))
279 snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
280 else {
281 snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
282 machine->pid);
283 }
284
285 return bf;
286}
287
876650e6 288void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
69d2591a
ACM
289{
290 struct rb_node *node;
291 struct machine *machine;
292
876650e6
ACM
293 machines->host.id_hdr_size = id_hdr_size;
294
295 for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
69d2591a
ACM
296 machine = rb_entry(node, struct machine, rb_node);
297 machine->id_hdr_size = id_hdr_size;
298 }
299
300 return;
301}
302
29ce3612
AH
303static void machine__update_thread_pid(struct machine *machine,
304 struct thread *th, pid_t pid)
305{
306 struct thread *leader;
307
308 if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
309 return;
310
311 th->pid_ = pid;
312
313 if (th->pid_ == th->tid)
314 return;
315
316 leader = machine__findnew_thread(machine, th->pid_, th->pid_);
317 if (!leader)
318 goto out_err;
319
320 if (!leader->mg)
11246c70 321 leader->mg = map_groups__new(machine);
29ce3612
AH
322
323 if (!leader->mg)
324 goto out_err;
325
326 if (th->mg == leader->mg)
327 return;
328
329 if (th->mg) {
330 /*
331 * Maps are created from MMAP events which provide the pid and
332 * tid. Consequently there never should be any maps on a thread
333 * with an unknown pid. Just print an error if there are.
334 */
335 if (!map_groups__empty(th->mg))
336 pr_err("Discarding thread maps for %d:%d\n",
337 th->pid_, th->tid);
338 map_groups__delete(th->mg);
339 }
340
341 th->mg = map_groups__get(leader->mg);
342
343 return;
344
345out_err:
346 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
347}
348
99d725fc
AH
349static struct thread *__machine__findnew_thread(struct machine *machine,
350 pid_t pid, pid_t tid,
9d2f8e22
ACM
351 bool create)
352{
353 struct rb_node **p = &machine->threads.rb_node;
354 struct rb_node *parent = NULL;
355 struct thread *th;
356
357 /*
38051234 358 * Front-end cache - TID lookups come in blocks,
9d2f8e22
ACM
359 * so most of the time we dont have to look up
360 * the full rbtree:
361 */
29ce3612
AH
362 th = machine->last_match;
363 if (th && th->tid == tid) {
364 machine__update_thread_pid(machine, th, pid);
365 return th;
99d725fc 366 }
9d2f8e22
ACM
367
368 while (*p != NULL) {
369 parent = *p;
370 th = rb_entry(parent, struct thread, rb_node);
371
38051234 372 if (th->tid == tid) {
9d2f8e22 373 machine->last_match = th;
29ce3612 374 machine__update_thread_pid(machine, th, pid);
9d2f8e22
ACM
375 return th;
376 }
377
38051234 378 if (tid < th->tid)
9d2f8e22
ACM
379 p = &(*p)->rb_left;
380 else
381 p = &(*p)->rb_right;
382 }
383
384 if (!create)
385 return NULL;
386
99d725fc 387 th = thread__new(pid, tid);
9d2f8e22
ACM
388 if (th != NULL) {
389 rb_link_node(&th->rb_node, parent, p);
390 rb_insert_color(&th->rb_node, &machine->threads);
391 machine->last_match = th;
cddcef60
JO
392
393 /*
394 * We have to initialize map_groups separately
395 * after rb tree is updated.
396 *
397 * The reason is that we call machine__findnew_thread
398 * within thread__init_map_groups to find the thread
399 * leader and that would screwed the rb tree.
400 */
418029b7
AH
401 if (thread__init_map_groups(th, machine)) {
402 thread__delete(th);
cddcef60 403 return NULL;
418029b7 404 }
9d2f8e22
ACM
405 }
406
407 return th;
408}
409
314add6b
AH
410struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
411 pid_t tid)
9d2f8e22 412{
314add6b 413 return __machine__findnew_thread(machine, pid, tid, true);
9d2f8e22
ACM
414}
415
d75e6097
JO
416struct thread *machine__find_thread(struct machine *machine, pid_t pid,
417 pid_t tid)
9d2f8e22 418{
d75e6097 419 return __machine__findnew_thread(machine, pid, tid, false);
9d2f8e22 420}
b0a7d1a0 421
cfe1c414
AH
422struct comm *machine__thread_exec_comm(struct machine *machine,
423 struct thread *thread)
424{
425 if (machine->comm_exec)
426 return thread__exec_comm(thread);
427 else
428 return thread__comm(thread);
429}
430
162f0bef
FW
431int machine__process_comm_event(struct machine *machine, union perf_event *event,
432 struct perf_sample *sample)
b0a7d1a0 433{
314add6b
AH
434 struct thread *thread = machine__findnew_thread(machine,
435 event->comm.pid,
436 event->comm.tid);
65de51f9 437 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
b0a7d1a0 438
cfe1c414
AH
439 if (exec)
440 machine->comm_exec = true;
441
b0a7d1a0
ACM
442 if (dump_trace)
443 perf_event__fprintf_comm(event, stdout);
444
65de51f9
AH
445 if (thread == NULL ||
446 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
b0a7d1a0
ACM
447 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
448 return -1;
449 }
450
451 return 0;
452}
453
454int machine__process_lost_event(struct machine *machine __maybe_unused,
162f0bef 455 union perf_event *event, struct perf_sample *sample __maybe_unused)
b0a7d1a0
ACM
456{
457 dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
458 event->lost.id, event->lost.lost);
459 return 0;
460}
461
3f067dca
ACM
462struct map *machine__new_module(struct machine *machine, u64 start,
463 const char *filename)
464{
465 struct map *map;
466 struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
c00c48fc 467 bool compressed;
3f067dca
ACM
468
469 if (dso == NULL)
470 return NULL;
471
472 map = map__new2(start, dso, MAP__FUNCTION);
473 if (map == NULL)
474 return NULL;
475
476 if (machine__is_host(machine))
477 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
478 else
479 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
c00c48fc
NK
480
481 /* _KMODULE_COMP should be next to _KMODULE */
482 if (is_kernel_module(filename, &compressed) && compressed)
483 dso->symtab_type++;
484
3f067dca
ACM
485 map_groups__insert(&machine->kmaps, map);
486 return map;
487}
488
876650e6 489size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
3f067dca
ACM
490{
491 struct rb_node *nd;
8fa7d87f
WL
492 size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
493 __dsos__fprintf(&machines->host.user_dsos.head, fp);
3f067dca 494
876650e6 495 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
3f067dca 496 struct machine *pos = rb_entry(nd, struct machine, rb_node);
8fa7d87f
WL
497 ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
498 ret += __dsos__fprintf(&pos->user_dsos.head, fp);
3f067dca
ACM
499 }
500
501 return ret;
502}
503
8fa7d87f 504size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
3f067dca
ACM
505 bool (skip)(struct dso *dso, int parm), int parm)
506{
8fa7d87f
WL
507 return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
508 __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
3f067dca
ACM
509}
510
876650e6 511size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
3f067dca
ACM
512 bool (skip)(struct dso *dso, int parm), int parm)
513{
514 struct rb_node *nd;
876650e6 515 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
3f067dca 516
876650e6 517 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
3f067dca
ACM
518 struct machine *pos = rb_entry(nd, struct machine, rb_node);
519 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
520 }
521 return ret;
522}
523
524size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
525{
526 int i;
527 size_t printed = 0;
528 struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
529
530 if (kdso->has_build_id) {
531 char filename[PATH_MAX];
532 if (dso__build_id_filename(kdso, filename, sizeof(filename)))
533 printed += fprintf(fp, "[0] %s\n", filename);
534 }
535
536 for (i = 0; i < vmlinux_path__nr_entries; ++i)
537 printed += fprintf(fp, "[%d] %s\n",
538 i + kdso->has_build_id, vmlinux_path[i]);
539
540 return printed;
541}
542
543size_t machine__fprintf(struct machine *machine, FILE *fp)
544{
545 size_t ret = 0;
546 struct rb_node *nd;
547
548 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
549 struct thread *pos = rb_entry(nd, struct thread, rb_node);
550
551 ret += thread__fprintf(pos, fp);
552 }
553
554 return ret;
555}
556
557static struct dso *machine__get_kernel(struct machine *machine)
558{
559 const char *vmlinux_name = NULL;
560 struct dso *kernel;
561
562 if (machine__is_host(machine)) {
563 vmlinux_name = symbol_conf.vmlinux_name;
564 if (!vmlinux_name)
565 vmlinux_name = "[kernel.kallsyms]";
566
567 kernel = dso__kernel_findnew(machine, vmlinux_name,
568 "[kernel]",
569 DSO_TYPE_KERNEL);
570 } else {
571 char bf[PATH_MAX];
572
573 if (machine__is_default_guest(machine))
574 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
575 if (!vmlinux_name)
576 vmlinux_name = machine__mmap_name(machine, bf,
577 sizeof(bf));
578
579 kernel = dso__kernel_findnew(machine, vmlinux_name,
580 "[guest.kernel]",
581 DSO_TYPE_GUEST_KERNEL);
582 }
583
584 if (kernel != NULL && (!kernel->has_build_id))
585 dso__read_running_kernel_build_id(kernel, machine);
586
587 return kernel;
588}
589
590struct process_args {
591 u64 start;
592};
593
15a0a870
AH
594static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
595 size_t bufsz)
596{
597 if (machine__is_default_guest(machine))
598 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
599 else
600 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
601}
602
a93f0e55
SQ
603const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
604
605/* Figure out the start address of kernel map from /proc/kallsyms.
606 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
607 * symbol_name if it's not that important.
608 */
4b99375b
AH
609static u64 machine__get_running_kernel_start(struct machine *machine,
610 const char **symbol_name)
3f067dca 611{
15a0a870 612 char filename[PATH_MAX];
a93f0e55
SQ
613 int i;
614 const char *name;
615 u64 addr = 0;
3f067dca 616
15a0a870 617 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
3f067dca
ACM
618
619 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
620 return 0;
621
a93f0e55
SQ
622 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
623 addr = kallsyms__get_function_start(filename, name);
624 if (addr)
625 break;
626 }
627
628 if (symbol_name)
629 *symbol_name = name;
3f067dca 630
a93f0e55 631 return addr;
3f067dca
ACM
632}
633
634int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
635{
636 enum map_type type;
4b99375b 637 u64 start = machine__get_running_kernel_start(machine, NULL);
3f067dca
ACM
638
639 for (type = 0; type < MAP__NR_TYPES; ++type) {
640 struct kmap *kmap;
641
642 machine->vmlinux_maps[type] = map__new2(start, kernel, type);
643 if (machine->vmlinux_maps[type] == NULL)
644 return -1;
645
646 machine->vmlinux_maps[type]->map_ip =
647 machine->vmlinux_maps[type]->unmap_ip =
648 identity__map_ip;
649 kmap = map__kmap(machine->vmlinux_maps[type]);
650 kmap->kmaps = &machine->kmaps;
651 map_groups__insert(&machine->kmaps,
652 machine->vmlinux_maps[type]);
653 }
654
655 return 0;
656}
657
658void machine__destroy_kernel_maps(struct machine *machine)
659{
660 enum map_type type;
661
662 for (type = 0; type < MAP__NR_TYPES; ++type) {
663 struct kmap *kmap;
664
665 if (machine->vmlinux_maps[type] == NULL)
666 continue;
667
668 kmap = map__kmap(machine->vmlinux_maps[type]);
669 map_groups__remove(&machine->kmaps,
670 machine->vmlinux_maps[type]);
671 if (kmap->ref_reloc_sym) {
672 /*
673 * ref_reloc_sym is shared among all maps, so free just
674 * on one of them.
675 */
676 if (type == MAP__FUNCTION) {
04662523
ACM
677 zfree((char **)&kmap->ref_reloc_sym->name);
678 zfree(&kmap->ref_reloc_sym);
679 } else
680 kmap->ref_reloc_sym = NULL;
3f067dca
ACM
681 }
682
683 map__delete(machine->vmlinux_maps[type]);
684 machine->vmlinux_maps[type] = NULL;
685 }
686}
687
876650e6 688int machines__create_guest_kernel_maps(struct machines *machines)
3f067dca
ACM
689{
690 int ret = 0;
691 struct dirent **namelist = NULL;
692 int i, items = 0;
693 char path[PATH_MAX];
694 pid_t pid;
695 char *endp;
696
697 if (symbol_conf.default_guest_vmlinux_name ||
698 symbol_conf.default_guest_modules ||
699 symbol_conf.default_guest_kallsyms) {
700 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
701 }
702
703 if (symbol_conf.guestmount) {
704 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
705 if (items <= 0)
706 return -ENOENT;
707 for (i = 0; i < items; i++) {
708 if (!isdigit(namelist[i]->d_name[0])) {
709 /* Filter out . and .. */
710 continue;
711 }
712 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
713 if ((*endp != '\0') ||
714 (endp == namelist[i]->d_name) ||
715 (errno == ERANGE)) {
716 pr_debug("invalid directory (%s). Skipping.\n",
717 namelist[i]->d_name);
718 continue;
719 }
720 sprintf(path, "%s/%s/proc/kallsyms",
721 symbol_conf.guestmount,
722 namelist[i]->d_name);
723 ret = access(path, R_OK);
724 if (ret) {
725 pr_debug("Can't access file %s\n", path);
726 goto failure;
727 }
728 machines__create_kernel_maps(machines, pid);
729 }
730failure:
731 free(namelist);
732 }
733
734 return ret;
735}
736
876650e6 737void machines__destroy_kernel_maps(struct machines *machines)
3f067dca 738{
876650e6
ACM
739 struct rb_node *next = rb_first(&machines->guests);
740
741 machine__destroy_kernel_maps(&machines->host);
3f067dca
ACM
742
743 while (next) {
744 struct machine *pos = rb_entry(next, struct machine, rb_node);
745
746 next = rb_next(&pos->rb_node);
876650e6 747 rb_erase(&pos->rb_node, &machines->guests);
3f067dca
ACM
748 machine__delete(pos);
749 }
750}
751
876650e6 752int machines__create_kernel_maps(struct machines *machines, pid_t pid)
3f067dca
ACM
753{
754 struct machine *machine = machines__findnew(machines, pid);
755
756 if (machine == NULL)
757 return -1;
758
759 return machine__create_kernel_maps(machine);
760}
761
762int machine__load_kallsyms(struct machine *machine, const char *filename,
763 enum map_type type, symbol_filter_t filter)
764{
765 struct map *map = machine->vmlinux_maps[type];
766 int ret = dso__load_kallsyms(map->dso, filename, map, filter);
767
768 if (ret > 0) {
769 dso__set_loaded(map->dso, type);
770 /*
771 * Since /proc/kallsyms will have multiple sessions for the
772 * kernel, with modules between them, fixup the end of all
773 * sections.
774 */
775 __map_groups__fixup_end(&machine->kmaps, type);
776 }
777
778 return ret;
779}
780
781int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
782 symbol_filter_t filter)
783{
784 struct map *map = machine->vmlinux_maps[type];
785 int ret = dso__load_vmlinux_path(map->dso, map, filter);
786
39b12f78 787 if (ret > 0)
3f067dca 788 dso__set_loaded(map->dso, type);
3f067dca
ACM
789
790 return ret;
791}
792
793static void map_groups__fixup_end(struct map_groups *mg)
794{
795 int i;
796 for (i = 0; i < MAP__NR_TYPES; ++i)
797 __map_groups__fixup_end(mg, i);
798}
799
800static char *get_kernel_version(const char *root_dir)
801{
802 char version[PATH_MAX];
803 FILE *file;
804 char *name, *tmp;
805 const char *prefix = "Linux version ";
806
807 sprintf(version, "%s/proc/version", root_dir);
808 file = fopen(version, "r");
809 if (!file)
810 return NULL;
811
812 version[0] = '\0';
813 tmp = fgets(version, sizeof(version), file);
814 fclose(file);
815
816 name = strstr(version, prefix);
817 if (!name)
818 return NULL;
819 name += strlen(prefix);
820 tmp = strchr(name, ' ');
821 if (tmp)
822 *tmp = '\0';
823
824 return strdup(name);
825}
826
827static int map_groups__set_modules_path_dir(struct map_groups *mg,
61d4290c 828 const char *dir_name, int depth)
3f067dca
ACM
829{
830 struct dirent *dent;
831 DIR *dir = opendir(dir_name);
832 int ret = 0;
833
834 if (!dir) {
835 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
836 return -1;
837 }
838
839 while ((dent = readdir(dir)) != NULL) {
840 char path[PATH_MAX];
841 struct stat st;
842
843 /*sshfs might return bad dent->d_type, so we have to stat*/
844 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
845 if (stat(path, &st))
846 continue;
847
848 if (S_ISDIR(st.st_mode)) {
849 if (!strcmp(dent->d_name, ".") ||
850 !strcmp(dent->d_name, ".."))
851 continue;
852
61d4290c
RY
853 /* Do not follow top-level source and build symlinks */
854 if (depth == 0) {
855 if (!strcmp(dent->d_name, "source") ||
856 !strcmp(dent->d_name, "build"))
857 continue;
858 }
859
860 ret = map_groups__set_modules_path_dir(mg, path,
861 depth + 1);
3f067dca
ACM
862 if (ret < 0)
863 goto out;
864 } else {
865 char *dot = strrchr(dent->d_name, '.'),
866 dso_name[PATH_MAX];
867 struct map *map;
868 char *long_name;
869
c00c48fc 870 if (dot == NULL)
3f067dca 871 continue;
c00c48fc
NK
872
873 /* On some system, modules are compressed like .ko.gz */
874 if (is_supported_compression(dot + 1) &&
875 is_kmodule_extension(dot - 2))
876 dot -= 3;
877
3f067dca
ACM
878 snprintf(dso_name, sizeof(dso_name), "[%.*s]",
879 (int)(dot - dent->d_name), dent->d_name);
880
881 strxfrchar(dso_name, '-', '_');
882 map = map_groups__find_by_name(mg, MAP__FUNCTION,
883 dso_name);
884 if (map == NULL)
885 continue;
886
887 long_name = strdup(path);
888 if (long_name == NULL) {
889 ret = -1;
890 goto out;
891 }
7e155d4d 892 dso__set_long_name(map->dso, long_name, true);
3f067dca
ACM
893 dso__kernel_module_get_build_id(map->dso, "");
894 }
895 }
896
897out:
898 closedir(dir);
899 return ret;
900}
901
902static int machine__set_modules_path(struct machine *machine)
903{
904 char *version;
905 char modules_path[PATH_MAX];
906
907 version = get_kernel_version(machine->root_dir);
908 if (!version)
909 return -1;
910
61d4290c 911 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
3f067dca
ACM
912 machine->root_dir, version);
913 free(version);
914
61d4290c 915 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
3f067dca
ACM
916}
917
316d70d6 918static int machine__create_module(void *arg, const char *name, u64 start)
3f067dca 919{
316d70d6 920 struct machine *machine = arg;
3f067dca 921 struct map *map;
316d70d6
AH
922
923 map = machine__new_module(machine, start, name);
924 if (map == NULL)
925 return -1;
926
927 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
928
929 return 0;
930}
931
932static int machine__create_modules(struct machine *machine)
933{
3f067dca
ACM
934 const char *modules;
935 char path[PATH_MAX];
936
f4be904d 937 if (machine__is_default_guest(machine)) {
3f067dca 938 modules = symbol_conf.default_guest_modules;
f4be904d
AH
939 } else {
940 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
3f067dca
ACM
941 modules = path;
942 }
943
aa7fe3b0 944 if (symbol__restricted_filename(modules, "/proc/modules"))
3f067dca
ACM
945 return -1;
946
316d70d6 947 if (modules__parse(modules, machine, machine__create_module))
3f067dca
ACM
948 return -1;
949
316d70d6
AH
950 if (!machine__set_modules_path(machine))
951 return 0;
3f067dca 952
316d70d6 953 pr_debug("Problems setting modules path maps, continuing anyway...\n");
3f067dca 954
8f76fcd9 955 return 0;
3f067dca
ACM
956}
957
958int machine__create_kernel_maps(struct machine *machine)
959{
960 struct dso *kernel = machine__get_kernel(machine);
5512cf24 961 const char *name;
4b99375b 962 u64 addr = machine__get_running_kernel_start(machine, &name);
5512cf24
AH
963 if (!addr)
964 return -1;
3f067dca
ACM
965
966 if (kernel == NULL ||
967 __machine__create_kernel_maps(machine, kernel) < 0)
968 return -1;
969
970 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
971 if (machine__is_host(machine))
972 pr_debug("Problems creating module maps, "
973 "continuing anyway...\n");
974 else
975 pr_debug("Problems creating module maps for guest %d, "
976 "continuing anyway...\n", machine->pid);
977 }
978
979 /*
980 * Now that we have all the maps created, just set the ->end of them:
981 */
982 map_groups__fixup_end(&machine->kmaps);
5512cf24
AH
983
984 if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
985 addr)) {
986 machine__destroy_kernel_maps(machine);
987 return -1;
988 }
989
3f067dca
ACM
990 return 0;
991}
992
b0a7d1a0
ACM
993static void machine__set_kernel_mmap_len(struct machine *machine,
994 union perf_event *event)
995{
4552cf0f
NK
996 int i;
997
998 for (i = 0; i < MAP__NR_TYPES; i++) {
999 machine->vmlinux_maps[i]->start = event->mmap.start;
1000 machine->vmlinux_maps[i]->end = (event->mmap.start +
1001 event->mmap.len);
1002 /*
1003 * Be a bit paranoid here, some perf.data file came with
1004 * a zero sized synthesized MMAP event for the kernel.
1005 */
1006 if (machine->vmlinux_maps[i]->end == 0)
1007 machine->vmlinux_maps[i]->end = ~0ULL;
1008 }
b0a7d1a0
ACM
1009}
1010
8e0cf965
AH
1011static bool machine__uses_kcore(struct machine *machine)
1012{
1013 struct dso *dso;
1014
8fa7d87f 1015 list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
8e0cf965
AH
1016 if (dso__is_kcore(dso))
1017 return true;
1018 }
1019
1020 return false;
1021}
1022
b0a7d1a0
ACM
1023static int machine__process_kernel_mmap_event(struct machine *machine,
1024 union perf_event *event)
1025{
1026 struct map *map;
1027 char kmmap_prefix[PATH_MAX];
1028 enum dso_kernel_type kernel_type;
1029 bool is_kernel_mmap;
1030
8e0cf965
AH
1031 /* If we have maps from kcore then we do not need or want any others */
1032 if (machine__uses_kcore(machine))
1033 return 0;
1034
b0a7d1a0
ACM
1035 machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
1036 if (machine__is_host(machine))
1037 kernel_type = DSO_TYPE_KERNEL;
1038 else
1039 kernel_type = DSO_TYPE_GUEST_KERNEL;
1040
1041 is_kernel_mmap = memcmp(event->mmap.filename,
1042 kmmap_prefix,
1043 strlen(kmmap_prefix) - 1) == 0;
1044 if (event->mmap.filename[0] == '/' ||
1045 (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
1046
1047 char short_module_name[1024];
1048 char *name, *dot;
1049
1050 if (event->mmap.filename[0] == '/') {
1051 name = strrchr(event->mmap.filename, '/');
1052 if (name == NULL)
1053 goto out_problem;
1054
1055 ++name; /* skip / */
1056 dot = strrchr(name, '.');
1057 if (dot == NULL)
1058 goto out_problem;
c00c48fc
NK
1059 /* On some system, modules are compressed like .ko.gz */
1060 if (is_supported_compression(dot + 1))
1061 dot -= 3;
1062 if (!is_kmodule_extension(dot + 1))
1063 goto out_problem;
b0a7d1a0
ACM
1064 snprintf(short_module_name, sizeof(short_module_name),
1065 "[%.*s]", (int)(dot - name), name);
1066 strxfrchar(short_module_name, '-', '_');
1067 } else
1068 strcpy(short_module_name, event->mmap.filename);
1069
1070 map = machine__new_module(machine, event->mmap.start,
1071 event->mmap.filename);
1072 if (map == NULL)
1073 goto out_problem;
1074
1075 name = strdup(short_module_name);
1076 if (name == NULL)
1077 goto out_problem;
1078
58a98c9c 1079 dso__set_short_name(map->dso, name, true);
b0a7d1a0
ACM
1080 map->end = map->start + event->mmap.len;
1081 } else if (is_kernel_mmap) {
1082 const char *symbol_name = (event->mmap.filename +
1083 strlen(kmmap_prefix));
1084 /*
1085 * Should be there already, from the build-id table in
1086 * the header.
1087 */
b837a8bd
NK
1088 struct dso *kernel = NULL;
1089 struct dso *dso;
1090
1091 list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
1092 if (is_kernel_module(dso->long_name, NULL))
1093 continue;
1094
1095 kernel = dso;
1096 break;
1097 }
1098
1099 if (kernel == NULL)
1100 kernel = __dsos__findnew(&machine->kernel_dsos,
1101 kmmap_prefix);
b0a7d1a0
ACM
1102 if (kernel == NULL)
1103 goto out_problem;
1104
1105 kernel->kernel = kernel_type;
1106 if (__machine__create_kernel_maps(machine, kernel) < 0)
1107 goto out_problem;
1108
96d78059
NK
1109 if (strstr(dso->long_name, "vmlinux"))
1110 dso__set_short_name(dso, "[kernel.vmlinux]", false);
1111
b0a7d1a0
ACM
1112 machine__set_kernel_mmap_len(machine, event);
1113
1114 /*
1115 * Avoid using a zero address (kptr_restrict) for the ref reloc
1116 * symbol. Effectively having zero here means that at record
1117 * time /proc/sys/kernel/kptr_restrict was non zero.
1118 */
1119 if (event->mmap.pgoff != 0) {
1120 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
1121 symbol_name,
1122 event->mmap.pgoff);
1123 }
1124
1125 if (machine__is_default_guest(machine)) {
1126 /*
1127 * preload dso of guest kernel and modules
1128 */
1129 dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
1130 NULL);
1131 }
1132 }
1133 return 0;
1134out_problem:
1135 return -1;
1136}
1137
5c5e854b 1138int machine__process_mmap2_event(struct machine *machine,
162f0bef
FW
1139 union perf_event *event,
1140 struct perf_sample *sample __maybe_unused)
5c5e854b
SE
1141{
1142 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1143 struct thread *thread;
1144 struct map *map;
1145 enum map_type type;
1146 int ret = 0;
1147
1148 if (dump_trace)
1149 perf_event__fprintf_mmap2(event, stdout);
1150
1151 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1152 cpumode == PERF_RECORD_MISC_KERNEL) {
1153 ret = machine__process_kernel_mmap_event(machine, event);
1154 if (ret < 0)
1155 goto out_problem;
1156 return 0;
1157 }
1158
1159 thread = machine__findnew_thread(machine, event->mmap2.pid,
11c9abf2 1160 event->mmap2.tid);
5c5e854b
SE
1161 if (thread == NULL)
1162 goto out_problem;
1163
1164 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
1165 type = MAP__VARIABLE;
1166 else
1167 type = MAP__FUNCTION;
1168
2a03068c 1169 map = map__new(machine, event->mmap2.start,
5c5e854b
SE
1170 event->mmap2.len, event->mmap2.pgoff,
1171 event->mmap2.pid, event->mmap2.maj,
1172 event->mmap2.min, event->mmap2.ino,
1173 event->mmap2.ino_generation,
7ef80703
DZ
1174 event->mmap2.prot,
1175 event->mmap2.flags,
5835edda 1176 event->mmap2.filename, type, thread);
5c5e854b
SE
1177
1178 if (map == NULL)
1179 goto out_problem;
1180
1181 thread__insert_map(thread, map);
1182 return 0;
1183
1184out_problem:
1185 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1186 return 0;
1187}
1188
162f0bef
FW
1189int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1190 struct perf_sample *sample __maybe_unused)
b0a7d1a0
ACM
1191{
1192 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1193 struct thread *thread;
1194 struct map *map;
bad40917 1195 enum map_type type;
b0a7d1a0
ACM
1196 int ret = 0;
1197
1198 if (dump_trace)
1199 perf_event__fprintf_mmap(event, stdout);
1200
1201 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1202 cpumode == PERF_RECORD_MISC_KERNEL) {
1203 ret = machine__process_kernel_mmap_event(machine, event);
1204 if (ret < 0)
1205 goto out_problem;
1206 return 0;
1207 }
1208
314add6b 1209 thread = machine__findnew_thread(machine, event->mmap.pid,
11c9abf2 1210 event->mmap.tid);
b0a7d1a0
ACM
1211 if (thread == NULL)
1212 goto out_problem;
bad40917
SE
1213
1214 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
1215 type = MAP__VARIABLE;
1216 else
1217 type = MAP__FUNCTION;
1218
2a03068c 1219 map = map__new(machine, event->mmap.start,
b0a7d1a0 1220 event->mmap.len, event->mmap.pgoff,
7ef80703 1221 event->mmap.pid, 0, 0, 0, 0, 0, 0,
5c5e854b 1222 event->mmap.filename,
5835edda 1223 type, thread);
bad40917 1224
b0a7d1a0
ACM
1225 if (map == NULL)
1226 goto out_problem;
1227
1228 thread__insert_map(thread, map);
1229 return 0;
1230
1231out_problem:
1232 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1233 return 0;
1234}
1235
236a3bbd
DA
1236static void machine__remove_thread(struct machine *machine, struct thread *th)
1237{
1238 machine->last_match = NULL;
1239 rb_erase(&th->rb_node, &machine->threads);
1240 /*
1241 * We may have references to this thread, for instance in some hist_entry
1242 * instances, so just move them to a separate list.
1243 */
1244 list_add_tail(&th->node, &machine->dead_threads);
1245}
1246
162f0bef
FW
1247int machine__process_fork_event(struct machine *machine, union perf_event *event,
1248 struct perf_sample *sample)
b0a7d1a0 1249{
d75e6097
JO
1250 struct thread *thread = machine__find_thread(machine,
1251 event->fork.pid,
1252 event->fork.tid);
314add6b
AH
1253 struct thread *parent = machine__findnew_thread(machine,
1254 event->fork.ppid,
1255 event->fork.ptid);
b0a7d1a0 1256
236a3bbd
DA
1257 /* if a thread currently exists for the thread id remove it */
1258 if (thread != NULL)
1259 machine__remove_thread(machine, thread);
1260
314add6b
AH
1261 thread = machine__findnew_thread(machine, event->fork.pid,
1262 event->fork.tid);
b0a7d1a0
ACM
1263 if (dump_trace)
1264 perf_event__fprintf_task(event, stdout);
1265
1266 if (thread == NULL || parent == NULL ||
162f0bef 1267 thread__fork(thread, parent, sample->time) < 0) {
b0a7d1a0
ACM
1268 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1269 return -1;
1270 }
1271
1272 return 0;
1273}
1274
162f0bef
FW
1275int machine__process_exit_event(struct machine *machine, union perf_event *event,
1276 struct perf_sample *sample __maybe_unused)
b0a7d1a0 1277{
d75e6097
JO
1278 struct thread *thread = machine__find_thread(machine,
1279 event->fork.pid,
1280 event->fork.tid);
b0a7d1a0
ACM
1281
1282 if (dump_trace)
1283 perf_event__fprintf_task(event, stdout);
1284
1285 if (thread != NULL)
236a3bbd 1286 thread__exited(thread);
b0a7d1a0
ACM
1287
1288 return 0;
1289}
1290
162f0bef
FW
1291int machine__process_event(struct machine *machine, union perf_event *event,
1292 struct perf_sample *sample)
b0a7d1a0
ACM
1293{
1294 int ret;
1295
1296 switch (event->header.type) {
1297 case PERF_RECORD_COMM:
162f0bef 1298 ret = machine__process_comm_event(machine, event, sample); break;
b0a7d1a0 1299 case PERF_RECORD_MMAP:
162f0bef 1300 ret = machine__process_mmap_event(machine, event, sample); break;
5c5e854b 1301 case PERF_RECORD_MMAP2:
162f0bef 1302 ret = machine__process_mmap2_event(machine, event, sample); break;
b0a7d1a0 1303 case PERF_RECORD_FORK:
162f0bef 1304 ret = machine__process_fork_event(machine, event, sample); break;
b0a7d1a0 1305 case PERF_RECORD_EXIT:
162f0bef 1306 ret = machine__process_exit_event(machine, event, sample); break;
b0a7d1a0 1307 case PERF_RECORD_LOST:
162f0bef 1308 ret = machine__process_lost_event(machine, event, sample); break;
b0a7d1a0
ACM
1309 default:
1310 ret = -1;
1311 break;
1312 }
1313
1314 return ret;
1315}
3f067dca 1316
b21484f1 1317static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
3f067dca 1318{
b21484f1 1319 if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
3f067dca 1320 return 1;
3f067dca
ACM
1321 return 0;
1322}
1323
bb871a9c 1324static void ip__resolve_ams(struct thread *thread,
3f067dca
ACM
1325 struct addr_map_symbol *ams,
1326 u64 ip)
1327{
1328 struct addr_location al;
3f067dca
ACM
1329
1330 memset(&al, 0, sizeof(al));
52a3cb8c
ACM
1331 /*
1332 * We cannot use the header.misc hint to determine whether a
1333 * branch stack address is user, kernel, guest, hypervisor.
1334 * Branches may straddle the kernel/user/hypervisor boundaries.
1335 * Thus, we have to try consecutively until we find a match
1336 * or else, the symbol is unknown
1337 */
bb871a9c 1338 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);
3f067dca 1339
3f067dca
ACM
1340 ams->addr = ip;
1341 ams->al_addr = al.addr;
1342 ams->sym = al.sym;
1343 ams->map = al.map;
1344}
1345
bb871a9c 1346static void ip__resolve_data(struct thread *thread,
98a3b32c
SE
1347 u8 m, struct addr_map_symbol *ams, u64 addr)
1348{
1349 struct addr_location al;
1350
1351 memset(&al, 0, sizeof(al));
1352
bb871a9c 1353 thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
06b2afc0
DZ
1354 if (al.map == NULL) {
1355 /*
1356 * some shared data regions have execute bit set which puts
1357 * their mapping in the MAP__FUNCTION type array.
1358 * Check there as a fallback option before dropping the sample.
1359 */
bb871a9c 1360 thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
06b2afc0
DZ
1361 }
1362
98a3b32c
SE
1363 ams->addr = addr;
1364 ams->al_addr = al.addr;
1365 ams->sym = al.sym;
1366 ams->map = al.map;
1367}
1368
e80faac0
ACM
1369struct mem_info *sample__resolve_mem(struct perf_sample *sample,
1370 struct addr_location *al)
98a3b32c
SE
1371{
1372 struct mem_info *mi = zalloc(sizeof(*mi));
1373
1374 if (!mi)
1375 return NULL;
1376
bb871a9c
ACM
1377 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
1378 ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
98a3b32c
SE
1379 mi->data_src.val = sample->data_src;
1380
1381 return mi;
1382}
1383
644f2df2
ACM
1384struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
1385 struct addr_location *al)
3f067dca 1386{
3f067dca 1387 unsigned int i;
644f2df2
ACM
1388 const struct branch_stack *bs = sample->branch_stack;
1389 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
3f067dca 1390
3f067dca
ACM
1391 if (!bi)
1392 return NULL;
1393
1394 for (i = 0; i < bs->nr; i++) {
bb871a9c
ACM
1395 ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
1396 ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
3f067dca
ACM
1397 bi[i].flags = bs->entries[i].flags;
1398 }
1399 return bi;
1400}
1401
bb871a9c 1402static int thread__resolve_callchain_sample(struct thread *thread,
3f067dca 1403 struct ip_callchain *chain,
b21484f1 1404 struct symbol **parent,
91e95617
WL
1405 struct addr_location *root_al,
1406 int max_stack)
3f067dca
ACM
1407{
1408 u8 cpumode = PERF_RECORD_MISC_USER;
91e95617
WL
1409 int chain_nr = min(max_stack, (int)chain->nr);
1410 int i;
a60335ba 1411 int j;
3f067dca 1412 int err;
a60335ba 1413 int skip_idx __maybe_unused;
3f067dca
ACM
1414
1415 callchain_cursor_reset(&callchain_cursor);
1416
1417 if (chain->nr > PERF_MAX_STACK_DEPTH) {
1418 pr_warning("corrupted callchain. skipping...\n");
1419 return 0;
1420 }
1421
a60335ba
SB
1422 /*
1423 * Based on DWARF debug information, some architectures skip
1424 * a callchain entry saved by the kernel.
1425 */
bb871a9c 1426 skip_idx = arch_skip_callchain_idx(thread, chain);
a60335ba 1427
91e95617 1428 for (i = 0; i < chain_nr; i++) {
3f067dca
ACM
1429 u64 ip;
1430 struct addr_location al;
1431
1432 if (callchain_param.order == ORDER_CALLEE)
a60335ba 1433 j = i;
3f067dca 1434 else
a60335ba
SB
1435 j = chain->nr - i - 1;
1436
1437#ifdef HAVE_SKIP_CALLCHAIN_IDX
1438 if (j == skip_idx)
1439 continue;
1440#endif
1441 ip = chain->ips[j];
3f067dca
ACM
1442
1443 if (ip >= PERF_CONTEXT_MAX) {
1444 switch (ip) {
1445 case PERF_CONTEXT_HV:
1446 cpumode = PERF_RECORD_MISC_HYPERVISOR;
1447 break;
1448 case PERF_CONTEXT_KERNEL:
1449 cpumode = PERF_RECORD_MISC_KERNEL;
1450 break;
1451 case PERF_CONTEXT_USER:
1452 cpumode = PERF_RECORD_MISC_USER;
1453 break;
1454 default:
1455 pr_debug("invalid callchain context: "
1456 "%"PRId64"\n", (s64) ip);
1457 /*
1458 * It seems the callchain is corrupted.
1459 * Discard all.
1460 */
1461 callchain_cursor_reset(&callchain_cursor);
1462 return 0;
1463 }
1464 continue;
1465 }
1466
b3cef7f6 1467 al.filtered = 0;
bb871a9c 1468 thread__find_addr_location(thread, cpumode,
61710bde 1469 MAP__FUNCTION, ip, &al);
3f067dca
ACM
1470 if (al.sym != NULL) {
1471 if (sort__has_parent && !*parent &&
b21484f1 1472 symbol__match_regex(al.sym, &parent_regex))
3f067dca 1473 *parent = al.sym;
b21484f1
GP
1474 else if (have_ignore_callees && root_al &&
1475 symbol__match_regex(al.sym, &ignore_callees_regex)) {
1476 /* Treat this symbol as the root,
1477 forgetting its callees. */
1478 *root_al = al;
1479 callchain_cursor_reset(&callchain_cursor);
1480 }
3f067dca
ACM
1481 }
1482
1483 err = callchain_cursor_append(&callchain_cursor,
1484 ip, al.map, al.sym);
1485 if (err)
1486 return err;
1487 }
1488
1489 return 0;
1490}
1491
1492static int unwind_entry(struct unwind_entry *entry, void *arg)
1493{
1494 struct callchain_cursor *cursor = arg;
1495 return callchain_cursor_append(cursor, entry->ip,
1496 entry->map, entry->sym);
1497}
1498
cc8b7c2b
ACM
1499int thread__resolve_callchain(struct thread *thread,
1500 struct perf_evsel *evsel,
1501 struct perf_sample *sample,
1502 struct symbol **parent,
1503 struct addr_location *root_al,
1504 int max_stack)
3f067dca 1505{
bb871a9c
ACM
1506 int ret = thread__resolve_callchain_sample(thread, sample->callchain,
1507 parent, root_al, max_stack);
3f067dca
ACM
1508 if (ret)
1509 return ret;
1510
1511 /* Can we do dwarf post unwind? */
1512 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
1513 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
1514 return 0;
1515
1516 /* Bail out if nothing was captured. */
1517 if ((!sample->user_regs.regs) ||
1518 (!sample->user_stack.size))
1519 return 0;
1520
dd8c17a5 1521 return unwind__get_entries(unwind_entry, &callchain_cursor,
352ea45a 1522 thread, sample, max_stack);
3f067dca
ACM
1523
1524}
35feee19
DA
1525
1526int machine__for_each_thread(struct machine *machine,
1527 int (*fn)(struct thread *thread, void *p),
1528 void *priv)
1529{
1530 struct rb_node *nd;
1531 struct thread *thread;
1532 int rc = 0;
1533
1534 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
1535 thread = rb_entry(nd, struct thread, rb_node);
1536 rc = fn(thread, priv);
1537 if (rc != 0)
1538 return rc;
1539 }
1540
1541 list_for_each_entry(thread, &machine->dead_threads, node) {
1542 rc = fn(thread, priv);
1543 if (rc != 0)
1544 return rc;
1545 }
1546 return rc;
1547}
58d925dc 1548
a33fbd56 1549int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
602ad878 1550 struct target *target, struct thread_map *threads,
a33fbd56 1551 perf_event__handler_t process, bool data_mmap)
58d925dc 1552{
602ad878 1553 if (target__has_task(target))
58d925dc 1554 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
602ad878 1555 else if (target__has_cpu(target))
58d925dc
ACM
1556 return perf_event__synthesize_threads(tool, process, machine, data_mmap);
1557 /* command specified */
1558 return 0;
1559}
b9d266ba
AH
1560
1561pid_t machine__get_current_tid(struct machine *machine, int cpu)
1562{
1563 if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
1564 return -1;
1565
1566 return machine->current_tid[cpu];
1567}
1568
1569int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
1570 pid_t tid)
1571{
1572 struct thread *thread;
1573
1574 if (cpu < 0)
1575 return -EINVAL;
1576
1577 if (!machine->current_tid) {
1578 int i;
1579
1580 machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
1581 if (!machine->current_tid)
1582 return -ENOMEM;
1583 for (i = 0; i < MAX_NR_CPUS; i++)
1584 machine->current_tid[i] = -1;
1585 }
1586
1587 if (cpu >= MAX_NR_CPUS) {
1588 pr_err("Requested CPU %d too large. ", cpu);
1589 pr_err("Consider raising MAX_NR_CPUS\n");
1590 return -EINVAL;
1591 }
1592
1593 machine->current_tid[cpu] = tid;
1594
1595 thread = machine__findnew_thread(machine, pid, tid);
1596 if (!thread)
1597 return -ENOMEM;
1598
1599 thread->cpu = cpu;
1600
1601 return 0;
1602}
fbe2af45
AH
1603
1604int machine__get_kernel_start(struct machine *machine)
1605{
1606 struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
1607 int err = 0;
1608
1609 /*
1610 * The only addresses above 2^63 are kernel addresses of a 64-bit
1611 * kernel. Note that addresses are unsigned so that on a 32-bit system
1612 * all addresses including kernel addresses are less than 2^32. In
1613 * that case (32-bit system), if the kernel mapping is unknown, all
1614 * addresses will be assumed to be in user space - see
1615 * machine__kernel_ip().
1616 */
1617 machine->kernel_start = 1ULL << 63;
1618 if (map) {
1619 err = map__load(map, machine->symbol_filter);
1620 if (map->start)
1621 machine->kernel_start = map->start;
1622 }
1623 return err;
1624}