]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - tools/perf/util/machine.c
perf tools: Remove include dirent.h from util.h
[mirror_ubuntu-artful-kernel.git] / tools / perf / util / machine.c
CommitLineData
76b31a29 1#include <dirent.h>
a43783ae 2#include <errno.h>
fd20e811 3#include <inttypes.h>
3f067dca 4#include "callchain.h"
b0a7d1a0
ACM
5#include "debug.h"
6#include "event.h"
3f067dca
ACM
7#include "evsel.h"
8#include "hist.h"
9d2f8e22
ACM
9#include "machine.h"
10#include "map.h"
3f067dca 11#include "sort.h"
69d2591a 12#include "strlist.h"
9d2f8e22 13#include "thread.h"
d027b640 14#include "vdso.h"
9d2f8e22 15#include <stdbool.h>
3f067dca 16#include "unwind.h"
8b7bad58 17#include "linux/hash.h"
f3b3614a 18#include "asm/bug.h"
9d2f8e22 19
3d689ed6
ACM
20#include "sane_ctype.h"
21#include <symbol/kallsyms.h>
22
/*
 * Remove @th from @machine's thread tree; @lock says whether this function
 * should take machine->threads_lock itself (callers that already hold it,
 * e.g. machine__delete_threads(), pass false).
 */
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
e167f995
ACM
/* Initialize an empty dsos collection: list + rbtree + its rwlock. */
static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	pthread_rwlock_init(&dsos->lock, NULL);
}
31
69d2591a
ACM
/*
 * Initialize a machine. For guest machines (pid != HOST_KERNEL_ID) a
 * synthetic "[guest/<pid>]" thread is created as well.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine->threads = RB_ROOT;
	pthread_rwlock_init(&machine->threads_lock, NULL);
	machine->nr_threads = 0;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;
		/*
		 * NOTE(review): on this failure path machine->root_dir is not
		 * freed; callers appear to treat any error as fatal — confirm.
		 */

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);	/* findnew returned a reference */
	}

	machine->current_tid = NULL;

	return 0;
}
78
8fb598e5
DA
79struct machine *machine__new_host(void)
80{
81 struct machine *machine = malloc(sizeof(*machine));
82
83 if (machine != NULL) {
84 machine__init(machine, "", HOST_KERNEL_ID);
85
86 if (machine__create_kernel_maps(machine) < 0)
87 goto out_delete;
88 }
89
90 return machine;
91out_delete:
92 free(machine);
93 return NULL;
94}
95
/*
 * Like machine__new_host(), but additionally load kernel symbols from
 * /proc/kallsyms. Returns NULL if the machine can't be set up or no
 * symbols could be loaded.
 */
struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) MAP__FUNCTION will go away when we stop loading separate maps for
	 *    functions and data objects.
	 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitely
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}
114
/*
 * Drop every dso from the collection under the write lock, releasing the
 * list's reference on each. Entries are detached from the rbtree bookkeeping
 * (rb_node / root) before the put so a racing lookup can't resurrect them.
 */
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	pthread_rwlock_wrlock(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	pthread_rwlock_unlock(&dsos->lock);
}
e8807844 130
/* Tear down a dsos collection: purge all entries, then destroy the lock. */
static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	pthread_rwlock_destroy(&dsos->lock);
}
136
/*
 * Remove all threads from the machine's rbtree. The lock is taken here, so
 * __machine__remove_thread() is told (lock=false) not to take it again.
 * rb_next() is fetched before removal since removal invalidates the node.
 */
void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}
151
/*
 * Release all resources owned by @machine (but not @machine itself — see
 * machine__delete()). NOTE(review): threads are not deleted here; callers
 * seem expected to call machine__delete_threads() first — confirm.
 */
void machine__exit(struct machine *machine)
{
	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}
162
/* Tear down and free a heap-allocated machine; NULL is a no-op. */
void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}
170
/* Initialize a machines set: the embedded host machine plus an empty guest tree. */
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}
176
/* Tear down the host machine. Guests are intentionally not freed yet. */
void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}
182
/*
 * Allocate, initialize and insert a new guest machine for @pid into the
 * pid-sorted guests rbtree. Returns the new machine, or NULL on failure.
 */
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	/* Find the insertion point, ordered by pid. */
	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}
212
cfe1c414
AH
213void machines__set_comm_exec(struct machines *machines, bool comm_exec)
214{
215 struct rb_node *nd;
216
217 machines->host.comm_exec = comm_exec;
218
219 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
220 struct machine *machine = rb_entry(nd, struct machine, rb_node);
221
222 machine->comm_exec = comm_exec;
223 }
224}
225
/*
 * Look up the machine for @pid. HOST_KERNEL_ID maps to the embedded host
 * machine; otherwise search the guests rbtree. If no exact match exists,
 * fall back to a machine with pid == 0 seen along the search path (the
 * "default guest"), or NULL.
 */
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
251
876650e6 252struct machine *machines__findnew(struct machines *machines, pid_t pid)
69d2591a
ACM
253{
254 char path[PATH_MAX];
255 const char *root_dir = "";
256 struct machine *machine = machines__find(machines, pid);
257
258 if (machine && (machine->pid == pid))
259 goto out;
260
261 if ((pid != HOST_KERNEL_ID) &&
262 (pid != DEFAULT_GUEST_KERNEL_ID) &&
263 (symbol_conf.guestmount)) {
264 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
265 if (access(path, R_OK)) {
266 static struct strlist *seen;
267
268 if (!seen)
4a77e218 269 seen = strlist__new(NULL, NULL);
69d2591a
ACM
270
271 if (!strlist__has_entry(seen, path)) {
272 pr_err("Can't access file %s\n", path);
273 strlist__add(seen, path);
274 }
275 machine = NULL;
276 goto out;
277 }
278 root_dir = path;
279 }
280
281 machine = machines__add(machines, pid, root_dir);
282out:
283 return machine;
284}
285
/* Invoke @process(@machine, @data) for every guest machine (not the host). */
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}
296
/*
 * Format the synthetic mmap name used for this machine's kernel into @bf
 * ("[kernel.kallsyms]", "[guest.kernel.kallsyms]" or the pid-suffixed guest
 * variant) and return @bf.
 */
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}
310
876650e6 311void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
69d2591a
ACM
312{
313 struct rb_node *node;
314 struct machine *machine;
315
876650e6
ACM
316 machines->host.id_hdr_size = id_hdr_size;
317
318 for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
69d2591a
ACM
319 machine = rb_entry(node, struct machine, rb_node);
320 machine->id_hdr_size = id_hdr_size;
321 }
322
323 return;
324}
325
29ce3612
AH
326static void machine__update_thread_pid(struct machine *machine,
327 struct thread *th, pid_t pid)
328{
329 struct thread *leader;
330
331 if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
332 return;
333
334 th->pid_ = pid;
335
336 if (th->pid_ == th->tid)
337 return;
338
b91fc39f 339 leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
29ce3612
AH
340 if (!leader)
341 goto out_err;
342
343 if (!leader->mg)
11246c70 344 leader->mg = map_groups__new(machine);
29ce3612
AH
345
346 if (!leader->mg)
347 goto out_err;
348
349 if (th->mg == leader->mg)
350 return;
351
352 if (th->mg) {
353 /*
354 * Maps are created from MMAP events which provide the pid and
355 * tid. Consequently there never should be any maps on a thread
356 * with an unknown pid. Just print an error if there are.
357 */
358 if (!map_groups__empty(th->mg))
359 pr_err("Discarding thread maps for %d:%d\n",
360 th->pid_, th->tid);
8e160b2e 361 map_groups__put(th->mg);
29ce3612
AH
362 }
363
364 th->mg = map_groups__get(leader->mg);
abd82868
ACM
365out_put:
366 thread__put(leader);
29ce3612 367 return;
29ce3612
AH
368out_err:
369 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
abd82868 370 goto out_put;
29ce3612
AH
371}
372
/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 *
 * Core thread lookup: consults the single-entry last_match cache, then the
 * tid-sorted rbtree, and (when @create) inserts a new thread. Caller must
 * hold machine->threads_lock (write-locked when @create is true).
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we dont have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screwed the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			/* Roll back the insertion and drop the new thread. */
			rb_erase_init(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
		++machine->nr_threads;
	}

	return th;
}
448
/* Lock-free variant: caller must hold machine->threads_lock for writing. */
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}
453
/*
 * Find or create the thread for pid/tid, taking the write lock. The
 * returned thread carries a reference the caller must thread__put().
 */
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = __machine__findnew_thread(machine, pid, tid);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}
464
/*
 * Look up an existing thread (no creation) under the read lock. Returns a
 * referenced thread or NULL.
 */
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct thread *th;
	pthread_rwlock_rdlock(&machine->threads_lock);
	th = ____machine__findnew_thread(machine, pid, tid, false);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}
b0a7d1a0 474
cfe1c414
AH
475struct comm *machine__thread_exec_comm(struct machine *machine,
476 struct thread *thread)
477{
478 if (machine->comm_exec)
479 return thread__exec_comm(thread);
480 else
481 return thread__comm(thread);
482}
483
/*
 * Handle a PERF_RECORD_COMM event: update (or create) the thread's comm,
 * marking it as an exec-comm when the misc bits say so. Returns 0 on
 * success, -1 if the thread couldn't be found/updated.
 */
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);	/* drop the findnew reference (NULL-safe) */

	return err;
}
509
f3b3614a
HB
510int machine__process_namespaces_event(struct machine *machine __maybe_unused,
511 union perf_event *event,
512 struct perf_sample *sample __maybe_unused)
513{
514 struct thread *thread = machine__findnew_thread(machine,
515 event->namespaces.pid,
516 event->namespaces.tid);
517 int err = 0;
518
519 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
520 "\nWARNING: kernel seems to support more namespaces than perf"
521 " tool.\nTry updating the perf tool..\n\n");
522
523 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
524 "\nWARNING: perf tool seems to support more namespaces than"
525 " the kernel.\nTry updating the kernel..\n\n");
526
527 if (dump_trace)
528 perf_event__fprintf_namespaces(event, stdout);
529
530 if (thread == NULL ||
531 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
532 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
533 err = -1;
534 }
535
536 thread__put(thread);
537
538 return err;
539}
540
/* Handle PERF_RECORD_LOST: just log the lost-event count. Always succeeds. */
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
548
/* Handle PERF_RECORD_LOST_SAMPLES: just log the count. Always succeeds. */
int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}
556
/*
 * Find or create the dso for a kernel module under the machine's dsos write
 * lock. New dsos get a host/guest kmodule symtab type (bumped by one for
 * compressed modules — _KMODULE_COMP must follow _KMODULE in the enum).
 * Returns a referenced dso the caller must dso__put(), or NULL.
 */
static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);	/* reference handed to the caller */
out_unlock:
	pthread_rwlock_unlock(&machine->dsos.lock);
	return dso;
}
589
/* Handle PERF_RECORD_AUX: dump-only, no state change. Always succeeds. */
int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}
597
/* Handle PERF_RECORD_ITRACE_START: dump-only. Always succeeds. */
int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}
605
/* Handle PERF_RECORD_SWITCH{,_CPU_WIDE}: dump-only. Always succeeds. */
int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}
613
/*
 * If a module dso still has a placeholder long_name ("[module]") and we now
 * have a real path (contains '/'), replace the long_name with a copy of the
 * path so dso__load() can find the file. Silently does nothing on any
 * precondition failure or OOM — best effort by design.
 */
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);	/* dso takes ownership */
}
631
/*
 * Find or create the function map for a kernel module loaded at @start.
 * The returned map is owned by machine->kmaps (this function drops its own
 * transient map and dso references before returning).
 */
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}
672
/* Print the dso lists of the host and all guests to @fp; returns bytes written. */
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}
685
/* Print build-ids of @m's dsos, skipping those for which @skip(dso, parm) is true. */
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}
691
/* Print build-ids for the host's and every guest's dsos; returns bytes written. */
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}
704
/*
 * Print the candidate vmlinux paths for this machine: the build-id cache
 * entry first (as "[0]") when the kernel dso has a build-id, then the
 * vmlinux_path search list. Returns bytes written.
 */
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	/* Indices shift by one when the build-id entry was printed. */
	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}
723
/* Print the thread count and every thread of @machine under the read lock. */
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}
743
/*
 * Find or create the kernel dso for this machine. Host kernels use the
 * configured vmlinux name (default DSO__NAME_KALLSYMS); guests use the
 * default guest vmlinux or the synthetic mmap name. The running kernel's
 * build-id is filled in when missing.
 */
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = DSO__NAME_KALLSYMS;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}
775
/* Start address carrier for kallsyms-walk callbacks (users not visible in this chunk). */
struct process_args {
	u64 start;
};
779
/*
 * Compose the kallsyms path for this machine: the configured default-guest
 * kallsyms file, or <root_dir>/proc/kallsyms otherwise.
 */
static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
788
/* Candidate kernel start symbols, tried in order; NULL-terminated. */
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 * Returns 0 when kallsyms is restricted or no candidate symbol resolves.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	/* Try "_text" then "_stext"; first hit wins. */
	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}
819
/*
 * Create a vmlinux map per map type for @kernel, starting at the running
 * kernel's start address, and insert each into machine->kmaps. Returns 0 on
 * success, -1 on failure (already-created maps stay inserted on failure).
 */
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	/* In case of renewal the kernel map, destroy previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		/* Kernel addresses are identity-mapped: ip == map address. */
		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}
850
/*
 * Remove and release every per-type vmlinux map. ref_reloc_sym is shared by
 * all map types, so it is freed only once (on the MAP__FUNCTION pass) and
 * merely cleared on the others.
 */
void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}
880
876650e6 881int machines__create_guest_kernel_maps(struct machines *machines)
3f067dca
ACM
882{
883 int ret = 0;
884 struct dirent **namelist = NULL;
885 int i, items = 0;
886 char path[PATH_MAX];
887 pid_t pid;
888 char *endp;
889
890 if (symbol_conf.default_guest_vmlinux_name ||
891 symbol_conf.default_guest_modules ||
892 symbol_conf.default_guest_kallsyms) {
893 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
894 }
895
896 if (symbol_conf.guestmount) {
897 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
898 if (items <= 0)
899 return -ENOENT;
900 for (i = 0; i < items; i++) {
901 if (!isdigit(namelist[i]->d_name[0])) {
902 /* Filter out . and .. */
903 continue;
904 }
905 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
906 if ((*endp != '\0') ||
907 (endp == namelist[i]->d_name) ||
908 (errno == ERANGE)) {
909 pr_debug("invalid directory (%s). Skipping.\n",
910 namelist[i]->d_name);
911 continue;
912 }
913 sprintf(path, "%s/%s/proc/kallsyms",
914 symbol_conf.guestmount,
915 namelist[i]->d_name);
916 ret = access(path, R_OK);
917 if (ret) {
918 pr_debug("Can't access file %s\n", path);
919 goto failure;
920 }
921 machines__create_kernel_maps(machines, pid);
922 }
923failure:
924 free(namelist);
925 }
926
927 return ret;
928}
929
/*
 * Destroy the host's kernel maps and delete every guest machine entirely
 * (erasing each from the guests rbtree before machine__delete()).
 */
void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);	/* advance before erase */
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}
944
/* Find or create the machine for @pid and build its kernel maps. */
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}
954
/*
 * Load kernel symbols of the given map type from @filename (a kallsyms-style
 * file); @no_kcore skips the kcore-based loading path. On success the section
 * ends are fixed up, since kallsyms interleaves modules with kernel symbols.
 * Returns the (positive) number of symbols loaded, or <= 0 on failure.
 */
int __machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type, bool no_kcore)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}
973
/* Convenience wrapper: load kallsyms allowing the kcore path (no_kcore = false). */
int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type)
{
	return __machine__load_kallsyms(machine, filename, type, false);
}
979
/*
 * Load kernel symbols by searching the vmlinux path list; marks the kernel
 * dso loaded on success. Returns dso__load_vmlinux_path()'s result.
 */
int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}
990
/* Fix up map end addresses for every map type in @mg. */
static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}
997
998static char *get_kernel_version(const char *root_dir)
999{
1000 char version[PATH_MAX];
1001 FILE *file;
1002 char *name, *tmp;
1003 const char *prefix = "Linux version ";
1004
1005 sprintf(version, "%s/proc/version", root_dir);
1006 file = fopen(version, "r");
1007 if (!file)
1008 return NULL;
1009
1010 version[0] = '\0';
1011 tmp = fgets(version, sizeof(version), file);
1012 fclose(file);
1013
1014 name = strstr(version, prefix);
1015 if (!name)
1016 return NULL;
1017 name += strlen(prefix);
1018 tmp = strchr(name, ' ');
1019 if (tmp)
1020 *tmp = '\0';
1021
1022 return strdup(name);
1023}
1024
/* True when @dso is a kernel module (host or guest symtab type). */
static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}
1030
/*
 * If a map named after module @m exists in @mg, point its dso's long_name
 * at @path and refresh its build-id. A missing map is not an error (0).
 * Returns -ENOMEM if the path copy fails.
 */
static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);	/* dso owns long_name */
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * Full name could reveal us kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}
1057
/*
 * Recursively walk a /lib/modules/<version> tree, wiring each .ko file's
 * path into the matching module map via map_groups__set_module_path().
 * @depth 0 skips the top-level "source"/"build" symlinks to avoid cycles.
 * Returns 0 on success or a negative error.
 */
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/*sshfs might return bad dent->d_type, so we have to stat*/
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}
1116
1117static int machine__set_modules_path(struct machine *machine)
1118{
1119 char *version;
1120 char modules_path[PATH_MAX];
1121
1122 version = get_kernel_version(machine->root_dir);
1123 if (!version)
1124 return -1;
1125
61d4290c 1126 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
3f067dca
ACM
1127 machine->root_dir, version);
1128 free(version);
1129
61d4290c 1130 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
3f067dca 1131}
/*
 * Arch hook allowing an architecture to adjust a module's text start
 * address before the module map is created (overridden via the weak
 * linkage).  The default leaves @start untouched and reports success.
 */
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}
3f067dca 1137
/*
 * modules__parse() callback: create (or find) the map for one module
 * listed in /proc/modules and fetch its build-id.
 *
 * @arg: the struct machine being populated.  Returns 0 on success,
 * -1 on failure (caller aborts the parse).
 */
static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	/* Give the architecture a chance to fix up the text address. */
	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}
1154
/*
 * Create maps for all loaded kernel modules, reading the module list from
 * /proc/modules (or the configured guest equivalent), then try to resolve
 * each module to its .ko path.  A failure to set module paths is only
 * logged; it is not fatal.
 */
static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	/* kptr_restrict et al. may make the file useless to us. */
	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
1180
1181int machine__create_kernel_maps(struct machine *machine)
1182{
1183 struct dso *kernel = machine__get_kernel(machine);
5512cf24 1184 const char *name;
45e90056 1185 u64 addr;
1154c957
MH
1186 int ret;
1187
45e90056 1188 if (kernel == NULL)
5512cf24 1189 return -1;
3f067dca 1190
1154c957
MH
1191 ret = __machine__create_kernel_maps(machine, kernel);
1192 dso__put(kernel);
1193 if (ret < 0)
3f067dca
ACM
1194 return -1;
1195
1196 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1197 if (machine__is_host(machine))
1198 pr_debug("Problems creating module maps, "
1199 "continuing anyway...\n");
1200 else
1201 pr_debug("Problems creating module maps for guest %d, "
1202 "continuing anyway...\n", machine->pid);
1203 }
1204
1205 /*
1206 * Now that we have all the maps created, just set the ->end of them:
1207 */
1208 map_groups__fixup_end(&machine->kmaps);
5512cf24 1209
45e90056
ACM
1210 addr = machine__get_running_kernel_start(machine, &name);
1211 if (!addr) {
1212 } else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
5512cf24
AH
1213 machine__destroy_kernel_maps(machine);
1214 return -1;
1215 }
1216
3f067dca
ACM
1217 return 0;
1218}
1219
b0a7d1a0
ACM
1220static void machine__set_kernel_mmap_len(struct machine *machine,
1221 union perf_event *event)
1222{
4552cf0f
NK
1223 int i;
1224
1225 for (i = 0; i < MAP__NR_TYPES; i++) {
1226 machine->vmlinux_maps[i]->start = event->mmap.start;
1227 machine->vmlinux_maps[i]->end = (event->mmap.start +
1228 event->mmap.len);
1229 /*
1230 * Be a bit paranoid here, some perf.data file came with
1231 * a zero sized synthesized MMAP event for the kernel.
1232 */
1233 if (machine->vmlinux_maps[i]->end == 0)
1234 machine->vmlinux_maps[i]->end = ~0ULL;
1235 }
b0a7d1a0
ACM
1236}
1237
8e0cf965
AH
1238static bool machine__uses_kcore(struct machine *machine)
1239{
1240 struct dso *dso;
1241
3d39ac53 1242 list_for_each_entry(dso, &machine->dsos.head, node) {
8e0cf965
AH
1243 if (dso__is_kcore(dso))
1244 return true;
1245 }
1246
1247 return false;
1248}
1249
/*
 * Handle an MMAP event carrying a kernel-space mapping: either a module
 * (absolute path or "[...]" name) or the kernel proper ("[kernel.kallsyms]"
 * style prefix).  For the kernel, reuse a kernel dso already known from the
 * header's build-id table if possible.  Returns 0 on success, -1 on failure.
 */
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	/* Compare without the trailing NUL/last char of the prefix. */
	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		/* A module mapping. */
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		pthread_rwlock_rdlock(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * correct cpumode to is_kernel_module, we should
			 * record the cpumode when we adding this dso to the
			 * linked list.
			 *
			 * However we don't really need passing correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;


			kernel = dso;
			break;
		}

		pthread_rwlock_unlock(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	}
	return 0;
out_problem:
	return -1;
}
1359
/*
 * Handle PERF_RECORD_MMAP2: create a map for the mapping and insert it
 * into the owning thread's map groups.  Kernel-space events are diverted
 * to machine__process_kernel_mmap_event().
 *
 * Always returns 0 for user-space problems (the event is just skipped),
 * mirroring the original behavior; only dump_printf reports the skip.
 */
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	/* Data mappings go into the MAP__VARIABLE array. */
	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
			event->mmap2.len, event->mmap2.pgoff,
			event->mmap2.pid, event->mmap2.maj,
			event->mmap2.min, event->mmap2.ino,
			event->mmap2.ino_generation,
			event->mmap2.prot,
			event->mmap2.flags,
			event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	/* Drop the references taken by findnew/map__new. */
	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
1418
/*
 * Handle PERF_RECORD_MMAP (the legacy variant without device/inode info;
 * those fields are passed to map__new() as zeros).  Kernel-space events
 * are diverted to machine__process_kernel_mmap_event().  User-space
 * problems only skip the event and still return 0.
 */
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	/* Data mappings go into the MAP__VARIABLE array. */
	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			event->mmap.pid, 0, 0, 0, 0, 0, 0,
			event->mmap.filename,
			type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	/* Drop the references taken by findnew/map__new. */
	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
1473
/*
 * Unlink @th from @machine's live-threads rbtree and park it on the
 * dead_threads list, dropping one reference.
 *
 * @lock: take threads_lock around the rbtree manipulation; callers that
 *        already hold the write lock pass false.
 */
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	/* Invalidate the lookup cache if it points at this thread. */
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase_init(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	--machine->nr_threads;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}
1495
b91fc39f
ACM
1496void machine__remove_thread(struct machine *machine, struct thread *th)
1497{
1498 return __machine__remove_thread(machine, th, true);
1499}
1500
162f0bef
FW
1501int machine__process_fork_event(struct machine *machine, union perf_event *event,
1502 struct perf_sample *sample)
b0a7d1a0 1503{
d75e6097
JO
1504 struct thread *thread = machine__find_thread(machine,
1505 event->fork.pid,
1506 event->fork.tid);
314add6b
AH
1507 struct thread *parent = machine__findnew_thread(machine,
1508 event->fork.ppid,
1509 event->fork.ptid);
b91fc39f 1510 int err = 0;
b0a7d1a0 1511
5cb73340
AH
1512 if (dump_trace)
1513 perf_event__fprintf_task(event, stdout);
1514
1515 /*
1516 * There may be an existing thread that is not actually the parent,
1517 * either because we are processing events out of order, or because the
1518 * (fork) event that would have removed the thread was lost. Assume the
1519 * latter case and continue on as best we can.
1520 */
1521 if (parent->pid_ != (pid_t)event->fork.ppid) {
1522 dump_printf("removing erroneous parent thread %d/%d\n",
1523 parent->pid_, parent->tid);
1524 machine__remove_thread(machine, parent);
1525 thread__put(parent);
1526 parent = machine__findnew_thread(machine, event->fork.ppid,
1527 event->fork.ptid);
1528 }
1529
236a3bbd 1530 /* if a thread currently exists for the thread id remove it */
b91fc39f 1531 if (thread != NULL) {
236a3bbd 1532 machine__remove_thread(machine, thread);
b91fc39f
ACM
1533 thread__put(thread);
1534 }
236a3bbd 1535
314add6b
AH
1536 thread = machine__findnew_thread(machine, event->fork.pid,
1537 event->fork.tid);
b0a7d1a0
ACM
1538
1539 if (thread == NULL || parent == NULL ||
162f0bef 1540 thread__fork(thread, parent, sample->time) < 0) {
b0a7d1a0 1541 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
b91fc39f 1542 err = -1;
b0a7d1a0 1543 }
b91fc39f
ACM
1544 thread__put(thread);
1545 thread__put(parent);
b0a7d1a0 1546
b91fc39f 1547 return err;
b0a7d1a0
ACM
1548}
1549
162f0bef
FW
1550int machine__process_exit_event(struct machine *machine, union perf_event *event,
1551 struct perf_sample *sample __maybe_unused)
b0a7d1a0 1552{
d75e6097
JO
1553 struct thread *thread = machine__find_thread(machine,
1554 event->fork.pid,
1555 event->fork.tid);
b0a7d1a0
ACM
1556
1557 if (dump_trace)
1558 perf_event__fprintf_task(event, stdout);
1559
b91fc39f 1560 if (thread != NULL) {
236a3bbd 1561 thread__exited(thread);
b91fc39f
ACM
1562 thread__put(thread);
1563 }
b0a7d1a0
ACM
1564
1565 return 0;
1566}
1567
/*
 * Dispatch one perf event to the per-type machine handler.
 * Returns the handler's result, or -1 for event types not handled here.
 */
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
3f067dca 1604
b21484f1 1605static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
3f067dca 1606{
a7c3899c 1607 if (!regexec(regex, sym->name, 0, NULL, 0))
3f067dca 1608 return 1;
3f067dca
ACM
1609 return 0;
1610}
1611
/*
 * Resolve instruction address @ip to a map/symbol in @thread, trying all
 * cpumodes, and fill in @ams.  On failure ams->map/sym stay NULL while
 * ams->addr still records the raw ip.
 */
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
1633
/*
 * Resolve data address @addr (cpumode @m) to a map/symbol for @thread and
 * fill in @ams.  Looks in the variable maps first, falling back to the
 * function maps for executable data regions.
 */
static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
1656
/*
 * Build a mem_info for a memory sample: resolve the sampled instruction
 * address and the data address, and copy the data-source descriptor.
 *
 * Returns a zalloc'ed mem_info owned by the caller (free it), or NULL
 * on allocation failure.
 */
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}
1671
/*
 * Resolve one callchain entry and append it to @cursor.
 *
 * @cpumode: NULL means "try every cpumode"; otherwise it is in/out state —
 *           PERF_CONTEXT_* marker ips update *cpumode instead of being
 *           appended.
 * Returns 0 on success (including skipped markers/hidden entries), 1 on a
 * corrupted context marker (cursor reset, caller should stop), or the
 * negative error from callchain_cursor_append().
 */
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    int nr_loop_iter,
			    int samples)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		/* Context markers live above PERF_CONTEXT_MAX. */
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		/* Honor --parent / --ignore-callees filtering. */
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;
	return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
				       branch, flags, nr_loop_iter, samples);
}
1736
644f2df2
ACM
1737struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
1738 struct addr_location *al)
3f067dca 1739{
3f067dca 1740 unsigned int i;
644f2df2
ACM
1741 const struct branch_stack *bs = sample->branch_stack;
1742 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
3f067dca 1743
3f067dca
ACM
1744 if (!bi)
1745 return NULL;
1746
1747 for (i = 0; i < bs->nr; i++) {
bb871a9c
ACM
1748 ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
1749 ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
3f067dca
ACM
1750 bi[i].flags = bs->entries[i].flags;
1751 }
1752 return bi;
1753}
1754
/* Hash table size / bits for branch "from" addresses, and the empty slot. */
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/*
 * Remove loops: collapse consecutive repetitions of the same branch
 * sequence in @l (detected via a small hash of "from" addresses) by
 * memmove'ing the tail over the repeated span, in place.
 * Returns the new entry count (<= nr).
 */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	/* Indices must fit in an unsigned char (NO_ENTRY is the sentinel). */
	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}
1795
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success get LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip;

	/* Find where the kernel part of the chain ends. */
	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;
			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				/* Kernel ips first, then LBR entries. */
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
				}
			} else {
				/* Caller-first: LBR entries first, reversed. */
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				}
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, 0, 0);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}
1883
/*
 * Resolve the callchain recorded in @sample into @cursor, optionally
 * interleaving LBR branch-stack entries for extra context.  Tries the
 * LBR callstack first, then the regular fp/kernel chain.
 * Returns 0 on success or a negative error.
 */
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = chain->nr;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;
	int nr_loop_iter;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		/*
		 * NOTE(review): VLA bounded by max_stack; a very large
		 * max_stack could use a lot of stack here — confirm the
		 * practical bound at call sites.
		 */
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
				    be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr_loop_iter = nr;
		nr = remove_loops(be, nr);

		/*
		 * Get the number of iterations.
		 * It's only approximation, but good enough in practice.
		 */
		if (nr_loop_iter > nr)
			nr_loop_iter = nr_loop_iter - nr + 1;
		else
			nr_loop_iter = 0;

		for (i = 0; i < nr; i++) {
			/* Only the last entry carries the loop-iter count. */
			if (i == nr - 1)
				err = add_callchain_ip(thread, cursor, parent,
						       root_al,
						       NULL, be[i].to,
						       true, &be[i].flags,
						       nr_loop_iter, 1);
			else
				err = add_callchain_ip(thread, cursor, parent,
						       root_al,
						       NULL, be[i].to,
						       true, &be[i].flags,
						       0, 0);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       0, 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		/* Context markers don't count against max_stack. */
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, 0, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
2023
2024static int unwind_entry(struct unwind_entry *entry, void *arg)
2025{
2026 struct callchain_cursor *cursor = arg;
b49a8fe5
NK
2027
2028 if (symbol_conf.hide_unresolved && entry->sym == NULL)
2029 return 0;
3f067dca 2030 return callchain_cursor_append(cursor, entry->ip,
410024db
JY
2031 entry->map, entry->sym,
2032 false, NULL, 0, 0);
3f067dca
ACM
2033}
2034
/*
 * DWARF post-unwind: reconstruct the user callchain from the sampled
 * user registers and stack copy, appending frames via unwind_entry().
 * Returns 0 (nothing to do) when the sample lacks regs/stack data.
 */
static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}
3f067dca 2054
/*
 * Fill @cursor with the full resolved callchain for @sample, combining the
 * kernel-provided chain and DWARF post-unwind in the order dictated by
 * callchain_param.order.
 *
 * NOTE(review): this resets the global callchain_cursor while appending to
 * the @cursor argument — presumably callers always pass &callchain_cursor;
 * confirm before passing any other cursor.
 */
int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}
35feee19
DA
2091
2092int machine__for_each_thread(struct machine *machine,
2093 int (*fn)(struct thread *thread, void *p),
2094 void *priv)
2095{
2096 struct rb_node *nd;
2097 struct thread *thread;
2098 int rc = 0;
2099
2100 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
2101 thread = rb_entry(nd, struct thread, rb_node);
2102 rc = fn(thread, priv);
2103 if (rc != 0)
2104 return rc;
2105 }
2106
2107 list_for_each_entry(thread, &machine->dead_threads, node) {
2108 rc = fn(thread, priv);
2109 if (rc != 0)
2110 return rc;
2111 }
2112 return rc;
2113}
58d925dc 2114
a5499b37
AH
2115int machines__for_each_thread(struct machines *machines,
2116 int (*fn)(struct thread *thread, void *p),
2117 void *priv)
2118{
2119 struct rb_node *nd;
2120 int rc = 0;
2121
2122 rc = machine__for_each_thread(&machines->host, fn, priv);
2123 if (rc != 0)
2124 return rc;
2125
2126 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
2127 struct machine *machine = rb_entry(nd, struct machine, rb_node);
2128
2129 rc = machine__for_each_thread(machine, fn, priv);
2130 if (rc != 0)
2131 return rc;
2132 }
2133 return rc;
2134}
2135
a33fbd56 2136int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
602ad878 2137 struct target *target, struct thread_map *threads,
9d9cad76
KL
2138 perf_event__handler_t process, bool data_mmap,
2139 unsigned int proc_map_timeout)
58d925dc 2140{
602ad878 2141 if (target__has_task(target))
9d9cad76 2142 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
602ad878 2143 else if (target__has_cpu(target))
9d9cad76 2144 return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout);
58d925dc
ACM
2145 /* command specified */
2146 return 0;
2147}
b9d266ba
AH
2148
2149pid_t machine__get_current_tid(struct machine *machine, int cpu)
2150{
2151 if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
2152 return -1;
2153
2154 return machine->current_tid[cpu];
2155}
2156
2157int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2158 pid_t tid)
2159{
2160 struct thread *thread;
2161
2162 if (cpu < 0)
2163 return -EINVAL;
2164
2165 if (!machine->current_tid) {
2166 int i;
2167
2168 machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
2169 if (!machine->current_tid)
2170 return -ENOMEM;
2171 for (i = 0; i < MAX_NR_CPUS; i++)
2172 machine->current_tid[i] = -1;
2173 }
2174
2175 if (cpu >= MAX_NR_CPUS) {
2176 pr_err("Requested CPU %d too large. ", cpu);
2177 pr_err("Consider raising MAX_NR_CPUS\n");
2178 return -EINVAL;
2179 }
2180
2181 machine->current_tid[cpu] = tid;
2182
2183 thread = machine__findnew_thread(machine, pid, tid);
2184 if (!thread)
2185 return -ENOMEM;
2186
2187 thread->cpu = cpu;
b91fc39f 2188 thread__put(thread);
b9d266ba
AH
2189
2190 return 0;
2191}
fbe2af45
AH
2192
2193int machine__get_kernel_start(struct machine *machine)
2194{
a5e813c6 2195 struct map *map = machine__kernel_map(machine);
fbe2af45
AH
2196 int err = 0;
2197
2198 /*
2199 * The only addresses above 2^63 are kernel addresses of a 64-bit
2200 * kernel. Note that addresses are unsigned so that on a 32-bit system
2201 * all addresses including kernel addresses are less than 2^32. In
2202 * that case (32-bit system), if the kernel mapping is unknown, all
2203 * addresses will be assumed to be in user space - see
2204 * machine__kernel_ip().
2205 */
2206 machine->kernel_start = 1ULL << 63;
2207 if (map) {
be39db9f 2208 err = map__load(map);
fbe2af45
AH
2209 if (map->start)
2210 machine->kernel_start = map->start;
2211 }
2212 return err;
2213}
aa7cc2ae
ACM
2214
2215struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
2216{
e8807844 2217 return dsos__findnew(&machine->dsos, filename);
aa7cc2ae 2218}
c3168b0d
ACM
2219
2220char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
2221{
2222 struct machine *machine = vmachine;
2223 struct map *map;
be39db9f 2224 struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map);
c3168b0d
ACM
2225
2226 if (sym == NULL)
2227 return NULL;
2228
2229 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
2230 *addrp = map->unmap_ip(map, sym->start);
2231 return sym->name;
2232}