/* tools/perf/util/machine.c */

#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
#include "linux/hash.h"

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->user_dsos);
	dsos__init(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	pthread_rwlock_init(&machine->threads_lock, NULL);
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

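/*
 * Illustrative sketch (not part of the original file): the typical
 * lifecycle of a host machine object using the helpers defined here.
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine != NULL) {
 *		... feed events via machine__process_event(machine, event, sample) ...
 *		machine__delete(machine);
 *	}
 */
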
static void dsos__delete(struct dsos *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

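/*
 * Note on the lookup above: guest machines live in an rb-tree keyed by
 * pid, while the host is kept separately in machines->host.  A machine
 * with pid 0 doubles as the "default guest" and is returned when no
 * exact match is found.
 */
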
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}

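/*
 * Locking convention for the lookup helpers below: the quadruple
 * underscore variant assumes machine->threads_lock is already held,
 * while machine__findnew_thread() and machine__find_thread() take the
 * lock themselves and return a reference the caller must drop with
 * thread__put().
 */
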
static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__delete(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = thread__get(__machine__findnew_thread(machine, pid, tid));
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct thread *th;

	pthread_rwlock_rdlock(&machine->threads_lock);
	th = thread__get(____machine__findnew_thread(machine, pid, tid, false));
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

static struct dso *
machine__module_dso(struct machine *machine, struct kmod_path *m,
		    const char *filename)
{
	struct dso *dso;

	dso = dsos__find(&machine->kernel_dsos, m->name, true);
	if (!dso) {
		dso = dsos__addnew(&machine->kernel_dsos, m->name);
		if (dso == NULL)
			return NULL;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map = NULL;
	struct dso *dso;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map)
		goto out;

	dso = machine__module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

out:
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
		     __dsos__fprintf(&machines->host.user_dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
		ret += __dsos__fprintf(&pos->user_dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
	       __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

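/*
 * For illustration (hypothetical /proc/version contents): given a line
 * like "Linux version 4.10.0-19-generic (buildd@host) ...",
 * get_kernel_version() returns the strdup()ed token "4.10.0-19-generic",
 * i.e. everything between the "Linux version " prefix and the next space.
 */
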
static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);

	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

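/*
 * Kernel MMAP records are dispatched here from the mmap/mmap2 handlers
 * below: filenames starting with '/' (or '[' when they do not match the
 * kernel mmap prefix) are treated as modules, while a match on the
 * "[kernel.kallsyms]"-style prefix sets up the kernel maps themselves.
 */
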
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
			if (is_kernel_module(dso->long_name))
				continue;

			kernel = dso;
			break;
		}

		if (kernel == NULL)
			kernel = __dsos__findnew(&machine->kernel_dsos,
						 kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	thread__insert_map(thread, map);
	thread__put(thread);
	return 0;

out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

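/*
 * machine__process_mmap_event() below handles the older PERF_RECORD_MMAP
 * record, which lacks the device, inode, protection and flags fields of
 * PERF_RECORD_MMAP2; hence the zeros passed to map__new().
 */
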
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem_map;

	thread__insert_map(thread, map);
	thread__put(thread);
	return 0;

out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(atomic_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event);
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static int add_callchain_ip(struct thread *thread,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (sort__has_parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(&callchain_cursor);
		}
	}

	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}

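/*
 * Note: when add_callchain_ip() is called with cpumode == NULL (as the
 * branch-stack code below does), the address is looked up across all
 * cpumodes via thread__find_cpumode_addr_location().  With a non-NULL
 * cpumode, PERF_CONTEXT_* markers in the chain update *cpumode in place
 * so that subsequent entries are resolved in the right address space.
 */
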
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}

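/*
 * Worked example for remove_loops() (illustrative addresses): for branch
 * entries with from-addresses [A, B, C, A, B, C, D], the second A hits
 * the same hash bucket as the first, the sequence A..C is found to
 * repeat, and one iteration is dropped by the memmove(), leaving
 * [A, B, C, D] with nr reduced by 3.
 */
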
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success get LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted callchain. skipping...\n");
			return 0;
		}

		for (j = 0; j < mix_chain_nr; j++) {
			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1)
					ip = lbr_stack->entries[j - i - 2].from;
				else
					ip = lbr_stack->entries[0].to;
			} else {
				if (j < lbr_nr)
					ip = lbr_stack->entries[lbr_nr - j - 1].from;
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else
					ip = lbr_stack->entries[0].to;
			}

			err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < PERF_MAX_STACK_DEPTH)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, parent, root_al,
					       NULL, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, parent, root_al,
						       NULL, be[i].from);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int thread__resolve_callchain(struct thread *thread,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = thread__resolve_callchain_sample(thread, evsel,
						   sample, parent,
						   root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor,
				   thread, sample, max_stack);
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

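/*
 * Minimal usage sketch for machine__for_each_thread() (hypothetical
 * callback, not part of this file):
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;	// non-zero would stop the iteration
 *	}
 *	...
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */
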
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}