#include <inttypes.h>
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"

#include "sane_ctype.h"
#include <symbol/kallsyms.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	pthread_rwlock_init(&dsos->lock, NULL);
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine->threads = RB_ROOT;
	pthread_rwlock_init(&machine->threads_lock, NULL);
	machine->nr_threads = 0;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	return 0;
}

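/*
 * A minimal usage sketch for the init/exit pair (heap users would go
 * through machine__new_host()/machine__delete() below instead):
 *
 *	struct machine host;
 *
 *	if (machine__init(&host, "", HOST_KERNEL_ID) == 0) {
 *		... use &host ...
 *		machine__exit(&host);
 *	}
 */
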
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) MAP__FUNCTION will go away when we stop loading separate maps for
	 *    functions and data objects.
	 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	pthread_rwlock_wrlock(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	pthread_rwlock_unlock(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	pthread_rwlock_destroy(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}

void machine__exit(struct machine *machine)
{
	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

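/*
 * Note: the guests rb-tree is keyed by pid and machines__add() does not
 * check for an existing guest with the same pid, so callers are expected
 * to go through machines__findnew() below. machines__find() falls back
 * to the pid == 0 "default" guest machine when no exact match exists.
 */
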
void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

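/*
 * The names produced by machine__mmap_name() ("[kernel.kallsyms]" and
 * friends) are what machine__process_kernel_mmap_event() later compares
 * kernel MMAP event filenames against, so the two must stay in sync.
 */
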
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there should never be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * The caller must eventually drop the thread->refcnt reference returned
 * by a successful lookup, or by inserting a new thread.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after the rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
		++machine->nr_threads;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = __machine__findnew_thread(machine, pid, tid);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct thread *th;

	pthread_rwlock_rdlock(&machine->threads_lock);
	th = ____machine__findnew_thread(machine, pid, tid, false);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

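/*
 * Locking/refcount discipline for the lookups above: the findnew
 * variants take threads_lock for writing (they may insert), while
 * machine__find_thread() takes it only for reading, and every
 * successful lookup returns a reference the caller must drop with
 * thread__put(), e.g.:
 *
 *	struct thread *th = machine__find_thread(machine, pid, tid);
 *
 *	if (th != NULL) {
 *		... use th ...
 *		thread__put(th);
 *	}
 */
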
struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	pthread_rwlock_unlock(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}

struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}

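/*
 * Ownership note for machine__findnew_module_map(): the returned map is
 * owned by machine->kmaps (the local references taken by map__new2() and
 * machine__findnew_module_dso() are dropped before returning), so
 * callers are not expected to map__put() it unless they take their own
 * reference.
 */
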
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = DSO__NAME_KALLSYMS;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	/* In case of renewing the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int __machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type, bool no_kcore)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type)
{
	return __machine__load_kallsyms(machine, filename, type, false);
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

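/*
 * For example, a /proc/version line such as
 *
 *	Linux version 4.13.0 (user@host) (gcc version 7.2.0) #1 SMP ...
 *
 * makes get_kernel_version() return a strdup()ed "4.13.0", which the
 * caller (machine__set_modules_path()) frees after building the
 * /lib/modules/<version> path.
 */
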
static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
					struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	addr = machine__get_running_kernel_start(machine, &name);
	if (!addr) {
	} else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		pthread_rwlock_rdlock(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when adding this dso to the
			 * linked list.
			 *
			 * However, we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		pthread_rwlock_unlock(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase_init(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	--machine->nr_threads;
	/*
	 * Move it first to the dead_threads list, then drop the reference.
	 * If this is the last reference, the thread__delete destructor will
	 * be called and it will be removed from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

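/*
 * A minimal sketch of driving the dispatcher above (assuming the caller
 * has already decoded an event and its sample from a perf.data stream):
 *
 *	if (machine__process_event(machine, event, &sample) < 0)
 *		pr_debug("failed to process event type %d\n",
 *			 event->header.type);
 */
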
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match,
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

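/*
 * The mem_info returned by sample__resolve_mem() is zalloc()ed here and
 * owned by the caller, which is expected to free it once the resolved
 * iaddr/daddr pair has been consumed.
 */
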
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    int nr_loop_iter,
			    int samples)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;
	return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
				       branch, flags, nr_loop_iter, samples);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
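/*
 * Entries are hashed by their 'from' address into a small byte-indexed
 * table (hence the BUG_ON() below: an index must fit in an unsigned
 * char). When a 'from' address repeats and the entries in between match
 * entry for entry, the repeated window is collapsed in place with
 * memmove() and the entry count shrinks accordingly.
 */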
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}

384b6055
KL
1794/*
1795 * Recolve LBR callstack chain sample
1796 * Return:
1797 * 1 on success get LBR callchain information
1798 * 0 no available LBR callchain information, should try fp
1799 * negative error code on other errors.
1800 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;

			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1) {
					ip = chain->ips[j];
				} else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else if (j > lbr_nr) {
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, 0, 0);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}
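
/*
 * Illustration (standalone, not part of machine.c) of the ORDER_CALLEE
 * index arithmetic above: with i kernel entries before the
 * PERF_CONTEXT_USER marker and lbr_nr LBR entries, print which source
 * each merged-chain slot j draws from. The sizes are example values.
 */
#include <stdio.h>

int main(void)
{
	int i = 3, lbr_nr = 4;			/* example sizes */
	int mix_chain_nr = i + 1 + lbr_nr + 1;	/* kernel + marker + LBR */
	int j;

	for (j = 0; j < mix_chain_nr; j++) {
		if (j < i + 1)
			printf("j=%d: chain->ips[%d] (kernel side/marker)\n", j, j);
		else if (j > i + 1)
			printf("j=%d: entries[%d].from\n", j, j - i - 2);
		else
			printf("j=%d: entries[0].to (newest branch target)\n", j);
	}
	return 0;
}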

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = chain->nr;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;
	int nr_loop_iter;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not
				 * longer than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr_loop_iter = nr;
		nr = remove_loops(be, nr);

		/*
		 * Get the number of iterations.
		 * It's only an approximation, but good enough in practice.
		 */
		if (nr_loop_iter > nr)
			nr_loop_iter = nr_loop_iter - nr + 1;
		else
			nr_loop_iter = 0;

		for (i = 0; i < nr; i++) {
			if (i == nr - 1)
				err = add_callchain_ip(thread, cursor, parent,
						       root_al,
						       NULL, be[i].to,
						       true, &be[i].flags,
						       nr_loop_iter, 1);
			else
				err = add_callchain_ip(thread, cursor, parent,
						       root_al,
						       NULL, be[i].to,
						       true, &be[i].flags,
						       0, 0);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       0, 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, 0, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
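
/*
 * A minimal standalone sketch of the "return address is one off"
 * adjustment above: a branch source counts as the call site of a chain
 * entry if it lies within 8 bytes below the return address. The helper
 * name and the addresses are invented for illustration.
 */
#include <stdio.h>

typedef unsigned long long u64_t;	/* stand-in for u64 */

static int overlaps_call_site(u64_t from, u64_t ret_addr)
{
	return from < ret_addr && from >= ret_addr - 8;
}

int main(void)
{
	/* call at 0x1000, return address 0x1005: overlap, so the
	 * duplicate chain entry is skipped via first_call++ */
	printf("%d\n", overlaps_call_site(0x1000, 0x1005));	/* 1 */
	/* a far-away branch does not overlap */
	printf("%d\n", overlaps_call_site(0x2000, 0x1005));	/* 0 */
	return 0;
}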

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym,
				       false, NULL, 0, 0);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}
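
/*
 * Sketch of the recording-side setup that satisfies the "can we do
 * dwarf post unwind?" test above (standalone, not part of machine.c).
 * The register mask and stack snapshot size are illustrative values,
 * not requirements of this file.
 */
#include <linux/perf_event.h>
#include <string.h>

static void setup_dwarf_unwind_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	/* both REGS_USER and STACK_USER are required by the test above */
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
	attr->sample_regs_user = ~0ULL;	/* arch-specific mask in practice */
	attr->sample_stack_user = 8192;	/* bytes of user stack to snapshot */
}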

int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}
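
/*
 * Toy illustration (standalone, the entry names are invented) of why
 * the two resolvers above run in opposite order: the callchain cursor
 * is append-only, so whichever half must appear first in the requested
 * order has to be appended first.
 */
#include <stdio.h>

int main(void)
{
	const char *synth[] = { "kernel_ip", "user_ip" };	/* from the sample */
	const char *dwarf[] = { "libfoo_fn", "main" };		/* from unwinding */
	int callee_order = 1;	/* 0 would model ORDER_CALLER */
	int i;

	if (callee_order) {
		for (i = 0; i < 2; i++)
			printf("%s\n", synth[i]);
		for (i = 0; i < 2; i++)
			printf("%s\n", dwarf[i]);
	} else {
		for (i = 0; i < 2; i++)
			printf("%s\n", dwarf[i]);
		for (i = 0; i < 2; i++)
			printf("%s\n", synth[i]);
	}
	return 0;
}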

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
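
/*
 * Minimal usage sketch for the iterator above (count_thread and the
 * commented caller are hypothetical): a non-zero return from the
 * callback stops the walk, so a pure counter always returns 0.
 */
static int count_thread(struct thread *thread __maybe_unused, void *priv)
{
	int *count = priv;

	(*count)++;
	return 0;
}

/*
 * int nr = 0;
 * machine__for_each_thread(machine, count_thread, &nr);
 */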

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}
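
/*
 * Usage sketch (note_sched_in and its caller are hypothetical): record
 * which task a context-switch event put on a CPU, then query it back
 * later; machine__get_current_tid() returns -1 while nothing is known.
 */
static void note_sched_in(struct machine *machine, int cpu,
			  pid_t pid, pid_t tid)
{
	if (machine__set_current_tid(machine, cpu, pid, tid) < 0)
		pr_err("failed to track tid for cpu %d\n", cpu);
}

/* ... later: pid_t tid = machine__get_current_tid(machine, cpu); */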

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}
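
/*
 * Sketch of how the default chosen above behaves (a simplified stand-in
 * for machine__kernel_ip() in machine.h, not a copy of it): with
 * kernel_start = 2^63 only genuinely high 64-bit kernel addresses
 * classify as kernel space, and on a 32-bit system every address
 * (< 2^32) classifies as user space.
 */
static bool toy_is_kernel_ip(u64 ip, u64 kernel_start)
{
	return ip >= kernel_start;
}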

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}

char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}
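
/*
 * Usage sketch (print_kernel_sym is hypothetical, printf stands in for
 * the caller's output path): resolve a kernel address to its symbol and
 * module. Note that the helper above rewrites *addrp to the unmapped
 * start address of the resolved symbol.
 */
static void print_kernel_sym(struct machine *machine, u64 addr)
{
	unsigned long long a = addr;
	char *mod = NULL;
	char *name = machine__resolve_kernel_addr(machine, &a, &mod);

	if (name == NULL)
		return;

	if (mod)
		printf("%s [%s] starts at %#llx\n", name, mod, a);
	else
		printf("%s starts at %#llx\n", name, a);
}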