/* tools/perf/util/machine.c */

#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
#include "linux/hash.h"

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
        INIT_LIST_HEAD(&dsos->head);
        dsos->root = RB_ROOT;
}
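
/*
 * Initialize the per-machine state (a host or guest kernel plus its user
 * space): kernel maps, DSOs and the thread rb tree. For guest machines
 * (pid != HOST_KERNEL_ID) a synthetic "[guest/<pid>]" thread is created.
 */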
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
        map_groups__init(&machine->kmaps, machine);
        RB_CLEAR_NODE(&machine->rb_node);
        dsos__init(&machine->dsos);

        machine->threads = RB_ROOT;
        pthread_rwlock_init(&machine->threads_lock, NULL);
        INIT_LIST_HEAD(&machine->dead_threads);
        machine->last_match = NULL;

        machine->vdso_info = NULL;

        machine->pid = pid;

        machine->symbol_filter = NULL;
        machine->id_hdr_size = 0;
        machine->comm_exec = false;
        machine->kernel_start = 0;

        machine->root_dir = strdup(root_dir);
        if (machine->root_dir == NULL)
                return -ENOMEM;

        if (pid != HOST_KERNEL_ID) {
                struct thread *thread = machine__findnew_thread(machine, -1,
                                                                pid);
                char comm[64];

                if (thread == NULL)
                        return -ENOMEM;

                snprintf(comm, sizeof(comm), "[guest/%d]", pid);
                thread__set_comm(thread, comm, 0);
                thread__put(thread);
        }

        machine->current_tid = NULL;

        return 0;
}

struct machine *machine__new_host(void)
{
        struct machine *machine = malloc(sizeof(*machine));

        if (machine != NULL) {
                machine__init(machine, "", HOST_KERNEL_ID);

                if (machine__create_kernel_maps(machine) < 0)
                        goto out_delete;
        }

        return machine;
out_delete:
        free(machine);
        return NULL;
}

static void dsos__delete(struct dsos *dsos)
{
        struct dso *pos, *n;

        list_for_each_entry_safe(pos, n, &dsos->head, node) {
                RB_CLEAR_NODE(&pos->rb_node);
                list_del(&pos->node);
                dso__delete(pos);
        }
}
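
/*
 * Tear down all threads of a machine: take the writer lock once and
 * remove every node of the thread rb tree with the non-locking helper.
 */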
void machine__delete_threads(struct machine *machine)
{
        struct rb_node *nd;

        pthread_rwlock_wrlock(&machine->threads_lock);
        nd = rb_first(&machine->threads);
        while (nd) {
                struct thread *t = rb_entry(nd, struct thread, rb_node);

                nd = rb_next(nd);
                __machine__remove_thread(machine, t, false);
        }
        pthread_rwlock_unlock(&machine->threads_lock);
}

void machine__exit(struct machine *machine)
{
        map_groups__exit(&machine->kmaps);
        dsos__delete(&machine->dsos);
        machine__exit_vdso(machine);
        zfree(&machine->root_dir);
        zfree(&machine->current_tid);
        pthread_rwlock_destroy(&machine->threads_lock);
}

void machine__delete(struct machine *machine)
{
        machine__exit(machine);
        free(machine);
}

void machines__init(struct machines *machines)
{
        machine__init(&machines->host, "", HOST_KERNEL_ID);
        machines->guests = RB_ROOT;
        machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
        machine__exit(&machines->host);
        /* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
                              const char *root_dir)
{
        struct rb_node **p = &machines->guests.rb_node;
        struct rb_node *parent = NULL;
        struct machine *pos, *machine = malloc(sizeof(*machine));

        if (machine == NULL)
                return NULL;

        if (machine__init(machine, root_dir, pid) != 0) {
                free(machine);
                return NULL;
        }

        machine->symbol_filter = machines->symbol_filter;

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct machine, rb_node);
                if (pid < pos->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&machine->rb_node, parent, p);
        rb_insert_color(&machine->rb_node, &machines->guests);

        return machine;
}

void machines__set_symbol_filter(struct machines *machines,
                                 symbol_filter_t symbol_filter)
{
        struct rb_node *nd;

        machines->symbol_filter = symbol_filter;
        machines->host.symbol_filter = symbol_filter;

        for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *machine = rb_entry(nd, struct machine, rb_node);

                machine->symbol_filter = symbol_filter;
        }
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
        struct rb_node *nd;

        machines->host.comm_exec = comm_exec;

        for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *machine = rb_entry(nd, struct machine, rb_node);

                machine->comm_exec = comm_exec;
        }
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
        struct rb_node **p = &machines->guests.rb_node;
        struct rb_node *parent = NULL;
        struct machine *machine;
        struct machine *default_machine = NULL;

        if (pid == HOST_KERNEL_ID)
                return &machines->host;

        while (*p != NULL) {
                parent = *p;
                machine = rb_entry(parent, struct machine, rb_node);
                if (pid < machine->pid)
                        p = &(*p)->rb_left;
                else if (pid > machine->pid)
                        p = &(*p)->rb_right;
                else
                        return machine;
                if (!machine->pid)
                        default_machine = machine;
        }

        return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
        char path[PATH_MAX];
        const char *root_dir = "";
        struct machine *machine = machines__find(machines, pid);

        if (machine && (machine->pid == pid))
                goto out;

        if ((pid != HOST_KERNEL_ID) &&
            (pid != DEFAULT_GUEST_KERNEL_ID) &&
            (symbol_conf.guestmount)) {
                sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
                if (access(path, R_OK)) {
                        static struct strlist *seen;

                        if (!seen)
                                seen = strlist__new(true, NULL);

                        if (!strlist__has_entry(seen, path)) {
                                pr_err("Can't access file %s\n", path);
                                strlist__add(seen, path);
                        }
                        machine = NULL;
                        goto out;
                }
                root_dir = path;
        }

        machine = machines__add(machines, pid, root_dir);
out:
        return machine;
}

void machines__process_guests(struct machines *machines,
                              machine__process_t process, void *data)
{
        struct rb_node *nd;

        for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                process(pos, data);
        }
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
        if (machine__is_host(machine))
                snprintf(bf, size, "[%s]", "kernel.kallsyms");
        else if (machine__is_default_guest(machine))
                snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
        else {
                snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
                         machine->pid);
        }

        return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
        struct rb_node *node;
        struct machine *machine;

        machines->host.id_hdr_size = id_hdr_size;

        for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
                machine = rb_entry(node, struct machine, rb_node);
                machine->id_hdr_size = id_hdr_size;
        }
}
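
/*
 * A thread can first be seen with an unknown pid (pid_ == -1). Once the
 * real pid is known, record it and switch the thread over to the map
 * groups of its thread group leader.
 */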
static void machine__update_thread_pid(struct machine *machine,
                                       struct thread *th, pid_t pid)
{
        struct thread *leader;

        if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
                return;

        th->pid_ = pid;

        if (th->pid_ == th->tid)
                return;

        leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
        if (!leader)
                goto out_err;

        if (!leader->mg)
                leader->mg = map_groups__new(machine);

        if (!leader->mg)
                goto out_err;

        if (th->mg == leader->mg)
                return;

        if (th->mg) {
                /*
                 * Maps are created from MMAP events which provide the pid and
                 * tid. Consequently there never should be any maps on a thread
                 * with an unknown pid. Just print an error if there are.
                 */
                if (!map_groups__empty(th->mg))
                        pr_err("Discarding thread maps for %d:%d\n",
                               th->pid_, th->tid);
                map_groups__put(th->mg);
        }

        th->mg = map_groups__get(leader->mg);

        return;

out_err:
        pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}
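
/*
 * This helper does no locking itself; the wrappers below take
 * machine->threads_lock (read lock for pure lookups, write lock when the
 * thread may be created) before calling it.
 */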
static struct thread *____machine__findnew_thread(struct machine *machine,
                                                  pid_t pid, pid_t tid,
                                                  bool create)
{
        struct rb_node **p = &machine->threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - TID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        th = machine->last_match;
        if (th != NULL) {
                if (th->tid == tid) {
                        machine__update_thread_pid(machine, th, pid);
                        return th;
                }

                machine->last_match = NULL;
        }

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->tid == tid) {
                        machine->last_match = th;
                        machine__update_thread_pid(machine, th, pid);
                        return th;
                }

                if (tid < th->tid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        if (!create)
                return NULL;

        th = thread__new(pid, tid);
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &machine->threads);

                /*
                 * We have to initialize map_groups separately
                 * after rb tree is updated.
                 *
                 * The reason is that we call machine__findnew_thread
                 * within thread__init_map_groups to find the thread
                 * leader and that would screw up the rb tree.
                 */
                if (thread__init_map_groups(th, machine)) {
                        rb_erase_init(&th->rb_node, &machine->threads);
                        RB_CLEAR_NODE(&th->rb_node);
                        thread__delete(th);
                        return NULL;
                }
                /*
                 * It is now in the rbtree, get a ref
                 */
                thread__get(th);
                machine->last_match = th;
        }

        return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
        return ____machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
                                       pid_t tid)
{
        struct thread *th;

        pthread_rwlock_wrlock(&machine->threads_lock);
        th = thread__get(__machine__findnew_thread(machine, pid, tid));
        pthread_rwlock_unlock(&machine->threads_lock);
        return th;
}
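
/*
 * Like machine__findnew_thread(), but only takes the reader lock and
 * returns NULL instead of creating the thread when it is not known yet.
 * The reference obtained via thread__get() must be dropped by the caller.
 */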
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
                                    pid_t tid)
{
        struct thread *th;

        pthread_rwlock_rdlock(&machine->threads_lock);
        th = thread__get(____machine__findnew_thread(machine, pid, tid, false));
        pthread_rwlock_unlock(&machine->threads_lock);
        return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
                                       struct thread *thread)
{
        if (machine->comm_exec)
                return thread__exec_comm(thread);
        else
                return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample)
{
        struct thread *thread = machine__findnew_thread(machine,
                                                        event->comm.pid,
                                                        event->comm.tid);
        bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
        int err = 0;

        if (exec)
                machine->comm_exec = true;

        if (dump_trace)
                perf_event__fprintf_comm(event, stdout);

        if (thread == NULL ||
            __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
                dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
                err = -1;
        }

        thread__put(thread);

        return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
                                union perf_event *event, struct perf_sample *sample __maybe_unused)
{
        dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
                    event->lost.id, event->lost.lost);
        return 0;
}
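
/*
 * Find or create the DSO for a kernel module, setting its symtab type
 * according to whether this is a host or a guest module and whether the
 * module file is compressed.
 */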
static struct dso *
machine__module_dso(struct machine *machine, struct kmod_path *m,
                    const char *filename)
{
        struct dso *dso;

        dso = dsos__find(&machine->dsos, m->name, true);
        if (!dso) {
                dso = dsos__addnew(&machine->dsos, m->name);
                if (dso == NULL)
                        return NULL;

                if (machine__is_host(machine))
                        dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
                else
                        dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

                /* _KMODULE_COMP should be next to _KMODULE */
                if (m->kmod && m->comp)
                        dso->symtab_type++;

                dso__set_short_name(dso, strdup(m->name), true);
                dso__set_long_name(dso, strdup(filename), true);
        }

        return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
                               union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_aux(event, stdout);
        return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
                                        union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_itrace_start(event, stdout);
        return 0;
}
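
/*
 * Create (or reuse) the map for a kernel module loaded at 'start': if a
 * map with the same module name already exists it is returned, otherwise
 * the module DSO is looked up and a new MAP__FUNCTION map is inserted
 * into the machine's kernel maps.
 */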
struct map *machine__new_module(struct machine *machine, u64 start,
                                const char *filename)
{
        struct map *map = NULL;
        struct dso *dso;
        struct kmod_path m;

        if (kmod_path__parse_name(&m, filename))
                return NULL;

        map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
                                       m.name);
        if (map)
                goto out;

        dso = machine__module_dso(machine, &m, filename);
        if (dso == NULL)
                goto out;

        map = map__new2(start, dso, MAP__FUNCTION);
        if (map == NULL)
                goto out;

        map_groups__insert(&machine->kmaps, map);

out:
        free(m.name);
        return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
        struct rb_node *nd;
        size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

        for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret += __dsos__fprintf(&pos->dsos.head, fp);
        }

        return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
                                     bool (skip)(struct dso *dso, int parm), int parm)
{
        return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
                                      bool (skip)(struct dso *dso, int parm), int parm)
{
        struct rb_node *nd;
        size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

        for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
        }
        return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
        int i;
        size_t printed = 0;
        struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

        if (kdso->has_build_id) {
                char filename[PATH_MAX];
                if (dso__build_id_filename(kdso, filename, sizeof(filename)))
                        printed += fprintf(fp, "[0] %s\n", filename);
        }

        for (i = 0; i < vmlinux_path__nr_entries; ++i)
                printed += fprintf(fp, "[%d] %s\n",
                                   i + kdso->has_build_id, vmlinux_path[i]);

        return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
        size_t ret = 0;
        struct rb_node *nd;

        pthread_rwlock_rdlock(&machine->threads_lock);

        for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
                struct thread *pos = rb_entry(nd, struct thread, rb_node);

                ret += thread__fprintf(pos, fp);
        }

        pthread_rwlock_unlock(&machine->threads_lock);

        return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
        const char *vmlinux_name = NULL;
        struct dso *kernel;

        if (machine__is_host(machine)) {
                vmlinux_name = symbol_conf.vmlinux_name;
                if (!vmlinux_name)
                        vmlinux_name = "[kernel.kallsyms]";

                kernel = machine__findnew_kernel(machine, vmlinux_name,
                                                 "[kernel]", DSO_TYPE_KERNEL);
        } else {
                char bf[PATH_MAX];

                if (machine__is_default_guest(machine))
                        vmlinux_name = symbol_conf.default_guest_vmlinux_name;
                if (!vmlinux_name)
                        vmlinux_name = machine__mmap_name(machine, bf,
                                                          sizeof(bf));

                kernel = machine__findnew_kernel(machine, vmlinux_name,
                                                 "[guest.kernel]",
                                                 DSO_TYPE_GUEST_KERNEL);
        }

        if (kernel != NULL && (!kernel->has_build_id))
                dso__read_running_kernel_build_id(kernel, machine);

        return kernel;
}

struct process_args {
        u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
                                           size_t bufsz)
{
        if (machine__is_default_guest(machine))
                scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
        else
                scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
                                             const char **symbol_name)
{
        char filename[PATH_MAX];
        int i;
        const char *name;
        u64 addr = 0;

        machine__get_kallsyms_filename(machine, filename, PATH_MAX);

        if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                return 0;

        for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
                addr = kallsyms__get_function_start(filename, name);
                if (addr)
                        break;
        }

        if (symbol_name)
                *symbol_name = name;

        return addr;
}
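
/*
 * Create one vmlinux map per map type, all starting at the running
 * kernel's start address and using the identity function to translate
 * between file and memory addresses.
 */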
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
        enum map_type type;
        u64 start = machine__get_running_kernel_start(machine, NULL);

        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;

                machine->vmlinux_maps[type] = map__new2(start, kernel, type);
                if (machine->vmlinux_maps[type] == NULL)
                        return -1;

                machine->vmlinux_maps[type]->map_ip =
                        machine->vmlinux_maps[type]->unmap_ip =
                                identity__map_ip;
                kmap = map__kmap(machine->vmlinux_maps[type]);
                if (!kmap)
                        return -1;

                kmap->kmaps = &machine->kmaps;
                map_groups__insert(&machine->kmaps,
                                   machine->vmlinux_maps[type]);
        }

        return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
        enum map_type type;

        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;

                if (machine->vmlinux_maps[type] == NULL)
                        continue;

                kmap = map__kmap(machine->vmlinux_maps[type]);
                map_groups__remove(&machine->kmaps,
                                   machine->vmlinux_maps[type]);
                if (kmap && kmap->ref_reloc_sym) {
                        /*
                         * ref_reloc_sym is shared among all maps, so free just
                         * on one of them.
                         */
                        if (type == MAP__FUNCTION) {
                                zfree((char **)&kmap->ref_reloc_sym->name);
                                zfree(&kmap->ref_reloc_sym);
                        } else
                                kmap->ref_reloc_sym = NULL;
                }

                machine->vmlinux_maps[type] = NULL;
        }
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
        int ret = 0;
        struct dirent **namelist = NULL;
        int i, items = 0;
        char path[PATH_MAX];
        pid_t pid;
        char *endp;

        if (symbol_conf.default_guest_vmlinux_name ||
            symbol_conf.default_guest_modules ||
            symbol_conf.default_guest_kallsyms) {
                machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
        }

        if (symbol_conf.guestmount) {
                items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
                if (items <= 0)
                        return -ENOENT;
                for (i = 0; i < items; i++) {
                        if (!isdigit(namelist[i]->d_name[0])) {
                                /* Filter out . and .. */
                                continue;
                        }
                        pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
                        if ((*endp != '\0') ||
                            (endp == namelist[i]->d_name) ||
                            (errno == ERANGE)) {
                                pr_debug("invalid directory (%s). Skipping.\n",
                                         namelist[i]->d_name);
                                continue;
                        }
                        sprintf(path, "%s/%s/proc/kallsyms",
                                symbol_conf.guestmount,
                                namelist[i]->d_name);
                        ret = access(path, R_OK);
                        if (ret) {
                                pr_debug("Can't access file %s\n", path);
                                goto failure;
                        }
                        machines__create_kernel_maps(machines, pid);
                }
failure:
                free(namelist);
        }

        return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
        struct rb_node *next = rb_first(&machines->guests);

        machine__destroy_kernel_maps(&machines->host);

        while (next) {
                struct machine *pos = rb_entry(next, struct machine, rb_node);

                next = rb_next(&pos->rb_node);
                rb_erase(&pos->rb_node, &machines->guests);
                machine__delete(pos);
        }
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
        struct machine *machine = machines__findnew(machines, pid);

        if (machine == NULL)
                return -1;

        return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
                           enum map_type type, symbol_filter_t filter)
{
        struct map *map = machine->vmlinux_maps[type];
        int ret = dso__load_kallsyms(map->dso, filename, map, filter);

        if (ret > 0) {
                dso__set_loaded(map->dso, type);
                /*
                 * Since /proc/kallsyms will have multiple sections for the
                 * kernel, with modules between them, fixup the end of all
                 * sections.
                 */
                __map_groups__fixup_end(&machine->kmaps, type);
        }

        return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
                               symbol_filter_t filter)
{
        struct map *map = machine->vmlinux_maps[type];
        int ret = dso__load_vmlinux_path(map->dso, map, filter);

        if (ret > 0)
                dso__set_loaded(map->dso, type);

        return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
        int i;

        for (i = 0; i < MAP__NR_TYPES; ++i)
                __map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
        char version[PATH_MAX];
        FILE *file;
        char *name, *tmp;
        const char *prefix = "Linux version ";

        sprintf(version, "%s/proc/version", root_dir);
        file = fopen(version, "r");
        if (!file)
                return NULL;

        version[0] = '\0';
        tmp = fgets(version, sizeof(version), file);
        fclose(file);

        name = strstr(version, prefix);
        if (!name)
                return NULL;
        name += strlen(prefix);
        tmp = strchr(name, ' ');
        if (tmp)
                *tmp = '\0';

        return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
        return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
                                       struct kmod_path *m)
{
        struct map *map;
        char *long_name;

        map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
        if (map == NULL)
                return 0;

        long_name = strdup(path);
        if (long_name == NULL)
                return -ENOMEM;

        dso__set_long_name(map->dso, long_name, true);
        dso__kernel_module_get_build_id(map->dso, "");

        /*
         * The full path name could reveal kmod compression, so we
         * need to update the symtab_type if needed.
         */
        if (m->comp && is_kmod_dso(map->dso))
                map->dso->symtab_type++;

        return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
                                            const char *dir_name, int depth)
{
        struct dirent *dent;
        DIR *dir = opendir(dir_name);
        int ret = 0;

        if (!dir) {
                pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
                return -1;
        }

        while ((dent = readdir(dir)) != NULL) {
                char path[PATH_MAX];
                struct stat st;

                /* sshfs might return bad dent->d_type, so we have to stat */
                snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
                if (stat(path, &st))
                        continue;

                if (S_ISDIR(st.st_mode)) {
                        if (!strcmp(dent->d_name, ".") ||
                            !strcmp(dent->d_name, ".."))
                                continue;

                        /* Do not follow top-level source and build symlinks */
                        if (depth == 0) {
                                if (!strcmp(dent->d_name, "source") ||
                                    !strcmp(dent->d_name, "build"))
                                        continue;
                        }

                        ret = map_groups__set_modules_path_dir(mg, path,
                                                               depth + 1);
                        if (ret < 0)
                                goto out;
                } else {
                        struct kmod_path m;

                        ret = kmod_path__parse_name(&m, dent->d_name);
                        if (ret)
                                goto out;

                        if (m.kmod)
                                ret = map_groups__set_module_path(mg, path, &m);

                        free(m.name);

                        if (ret)
                                goto out;
                }
        }

out:
        closedir(dir);
        return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
        char *version;
        char modules_path[PATH_MAX];

        version = get_kernel_version(machine->root_dir);
        if (!version)
                return -1;

        snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
                 machine->root_dir, version);
        free(version);

        return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
        struct machine *machine = arg;
        struct map *map;

        map = machine__new_module(machine, start, name);
        if (map == NULL)
                return -1;

        dso__kernel_module_get_build_id(map->dso, machine->root_dir);

        return 0;
}

static int machine__create_modules(struct machine *machine)
{
        const char *modules;
        char path[PATH_MAX];

        if (machine__is_default_guest(machine)) {
                modules = symbol_conf.default_guest_modules;
        } else {
                snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
                modules = path;
        }

        if (symbol__restricted_filename(modules, "/proc/modules"))
                return -1;

        if (modules__parse(modules, machine, machine__create_module))
                return -1;

        if (!machine__set_modules_path(machine))
                return 0;

        pr_debug("Problems setting modules path maps, continuing anyway...\n");

        return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
        struct dso *kernel = machine__get_kernel(machine);
        const char *name;
        u64 addr = machine__get_running_kernel_start(machine, &name);

        if (!addr)
                return -1;

        if (kernel == NULL ||
            __machine__create_kernel_maps(machine, kernel) < 0)
                return -1;

        if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
                if (machine__is_host(machine))
                        pr_debug("Problems creating module maps, "
                                 "continuing anyway...\n");
                else
                        pr_debug("Problems creating module maps for guest %d, "
                                 "continuing anyway...\n", machine->pid);
        }

        /*
         * Now that we have all the maps created, just set the ->end of them:
         */
        map_groups__fixup_end(&machine->kmaps);

        if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
                                             addr)) {
                machine__destroy_kernel_maps(machine);
                return -1;
        }

        return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
                                         union perf_event *event)
{
        int i;

        for (i = 0; i < MAP__NR_TYPES; i++) {
                machine->vmlinux_maps[i]->start = event->mmap.start;
                machine->vmlinux_maps[i]->end = (event->mmap.start +
                                                 event->mmap.len);
                /*
                 * Be a bit paranoid here, some perf.data files came with
                 * a zero sized synthesized MMAP event for the kernel.
                 */
                if (machine->vmlinux_maps[i]->end == 0)
                        machine->vmlinux_maps[i]->end = ~0ULL;
        }
}

static bool machine__uses_kcore(struct machine *machine)
{
        struct dso *dso;

        list_for_each_entry(dso, &machine->dsos.head, node) {
                if (dso__is_kcore(dso))
                        return true;
        }

        return false;
}
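
/*
 * Handle an MMAP event in kernel space: module mappings get a module map,
 * while the main kernel mapping (re)creates the vmlinux maps, sets their
 * length and records the ref_reloc symbol address from the event's pgoff.
 */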
static int machine__process_kernel_mmap_event(struct machine *machine,
                                              union perf_event *event)
{
        struct map *map;
        char kmmap_prefix[PATH_MAX];
        enum dso_kernel_type kernel_type;
        bool is_kernel_mmap;

        /* If we have maps from kcore then we do not need or want any others */
        if (machine__uses_kcore(machine))
                return 0;

        machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
        if (machine__is_host(machine))
                kernel_type = DSO_TYPE_KERNEL;
        else
                kernel_type = DSO_TYPE_GUEST_KERNEL;

        is_kernel_mmap = memcmp(event->mmap.filename,
                                kmmap_prefix,
                                strlen(kmmap_prefix) - 1) == 0;
        if (event->mmap.filename[0] == '/' ||
            (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
                map = machine__new_module(machine, event->mmap.start,
                                          event->mmap.filename);
                if (map == NULL)
                        goto out_problem;

                map->end = map->start + event->mmap.len;
        } else if (is_kernel_mmap) {
                const char *symbol_name = (event->mmap.filename +
                                           strlen(kmmap_prefix));
                /*
                 * Should be there already, from the build-id table in
                 * the header.
                 */
                struct dso *kernel = NULL;
                struct dso *dso;

                list_for_each_entry(dso, &machine->dsos.head, node) {
                        /*
                         * The cpumode passed to is_kernel_module() is not the
                         * cpumode of *this* event. If we insisted on passing
                         * the correct cpumode to is_kernel_module(), we would
                         * have to record the cpumode when adding this dso to
                         * the linked list.
                         *
                         * However we don't really need to pass the correct
                         * cpumode. We know the correct cpumode must be kernel
                         * mode (if not, we should not link it onto the
                         * kernel_dsos list).
                         *
                         * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
                         * is_kernel_module() treats it as a kernel cpumode.
                         */
                        if (!dso->kernel ||
                            is_kernel_module(dso->long_name,
                                             PERF_RECORD_MISC_CPUMODE_UNKNOWN))
                                continue;

                        kernel = dso;
                        break;
                }

                if (kernel == NULL)
                        kernel = machine__findnew_dso(machine, kmmap_prefix);
                if (kernel == NULL)
                        goto out_problem;

                kernel->kernel = kernel_type;
                if (__machine__create_kernel_maps(machine, kernel) < 0)
                        goto out_problem;

                if (strstr(kernel->long_name, "vmlinux"))
                        dso__set_short_name(kernel, "[kernel.vmlinux]", false);

                machine__set_kernel_mmap_len(machine, event);

                /*
                 * Avoid using a zero address (kptr_restrict) for the ref reloc
                 * symbol. Effectively having zero here means that at record
                 * time /proc/sys/kernel/kptr_restrict was non-zero.
                 */
                if (event->mmap.pgoff != 0) {
                        maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
                                                         symbol_name,
                                                         event->mmap.pgoff);
                }

                if (machine__is_default_guest(machine)) {
                        /*
                         * preload dso of guest kernel and modules
                         */
                        dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
                                  NULL);
                }
        }
        return 0;
out_problem:
        return -1;
}

int machine__process_mmap2_event(struct machine *machine,
                                 union perf_event *event,
                                 struct perf_sample *sample __maybe_unused)
{
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct thread *thread;
        struct map *map;
        enum map_type type;
        int ret = 0;

        if (dump_trace)
                perf_event__fprintf_mmap2(event, stdout);

        if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
            cpumode == PERF_RECORD_MISC_KERNEL) {
                ret = machine__process_kernel_mmap_event(machine, event);
                if (ret < 0)
                        goto out_problem;
                return 0;
        }

        thread = machine__findnew_thread(machine, event->mmap2.pid,
                                         event->mmap2.tid);
        if (thread == NULL)
                goto out_problem;

        if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
                type = MAP__VARIABLE;
        else
                type = MAP__FUNCTION;

        map = map__new(machine, event->mmap2.start,
                       event->mmap2.len, event->mmap2.pgoff,
                       event->mmap2.pid, event->mmap2.maj,
                       event->mmap2.min, event->mmap2.ino,
                       event->mmap2.ino_generation,
                       event->mmap2.prot,
                       event->mmap2.flags,
                       event->mmap2.filename, type, thread);

        if (map == NULL)
                goto out_problem_map;

        thread__insert_map(thread, map);
        thread__put(thread);
        map__put(map);
        return 0;

out_problem_map:
        thread__put(thread);
out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
        return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample __maybe_unused)
{
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct thread *thread;
        struct map *map;
        enum map_type type;
        int ret = 0;

        if (dump_trace)
                perf_event__fprintf_mmap(event, stdout);

        if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
            cpumode == PERF_RECORD_MISC_KERNEL) {
                ret = machine__process_kernel_mmap_event(machine, event);
                if (ret < 0)
                        goto out_problem;
                return 0;
        }

        thread = machine__findnew_thread(machine, event->mmap.pid,
                                         event->mmap.tid);
        if (thread == NULL)
                goto out_problem;

        if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
                type = MAP__VARIABLE;
        else
                type = MAP__FUNCTION;

        map = map__new(machine, event->mmap.start,
                       event->mmap.len, event->mmap.pgoff,
                       event->mmap.pid, 0, 0, 0, 0, 0, 0,
                       event->mmap.filename,
                       type, thread);

        if (map == NULL)
                goto out_problem_map;

        thread__insert_map(thread, map);
        thread__put(thread);
        map__put(map);
        return 0;

out_problem_map:
        thread__put(thread);
out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
        return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
        if (machine->last_match == th)
                machine->last_match = NULL;

        BUG_ON(atomic_read(&th->refcnt) == 0);
        if (lock)
                pthread_rwlock_wrlock(&machine->threads_lock);
        rb_erase_init(&th->rb_node, &machine->threads);
        RB_CLEAR_NODE(&th->rb_node);
        /*
         * Move it first to the dead_threads list, then drop the reference.
         * If this is the last reference, the thread__delete() destructor
         * will be called and it will be removed from the dead_threads list.
         */
        list_add_tail(&th->node, &machine->dead_threads);
        if (lock)
                pthread_rwlock_unlock(&machine->threads_lock);
        thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
        return __machine__remove_thread(machine, th, true);
}
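
/*
 * PERF_RECORD_FORK: drop any stale thread already known under the child's
 * tid, then create the child thread and link it to its parent.
 */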
int machine__process_fork_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample)
{
        struct thread *thread = machine__find_thread(machine,
                                                     event->fork.pid,
                                                     event->fork.tid);
        struct thread *parent = machine__findnew_thread(machine,
                                                        event->fork.ppid,
                                                        event->fork.ptid);
        int err = 0;

        /* if a thread currently exists for the thread id remove it */
        if (thread != NULL) {
                machine__remove_thread(machine, thread);
                thread__put(thread);
        }

        thread = machine__findnew_thread(machine, event->fork.pid,
                                         event->fork.tid);
        if (dump_trace)
                perf_event__fprintf_task(event, stdout);

        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent, sample->time) < 0) {
                dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
                err = -1;
        }
        thread__put(thread);
        thread__put(parent);

        return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample __maybe_unused)
{
        struct thread *thread = machine__find_thread(machine,
                                                     event->fork.pid,
                                                     event->fork.tid);

        if (dump_trace)
                perf_event__fprintf_task(event, stdout);

        if (thread != NULL) {
                thread__exited(thread);
                thread__put(thread);
        }

        return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
                           struct perf_sample *sample)
{
        int ret;

        switch (event->header.type) {
        case PERF_RECORD_COMM:
                ret = machine__process_comm_event(machine, event, sample); break;
        case PERF_RECORD_MMAP:
                ret = machine__process_mmap_event(machine, event, sample); break;
        case PERF_RECORD_MMAP2:
                ret = machine__process_mmap2_event(machine, event, sample); break;
        case PERF_RECORD_FORK:
                ret = machine__process_fork_event(machine, event, sample); break;
        case PERF_RECORD_EXIT:
                ret = machine__process_exit_event(machine, event, sample); break;
        case PERF_RECORD_LOST:
                ret = machine__process_lost_event(machine, event, sample); break;
        case PERF_RECORD_AUX:
                ret = machine__process_aux_event(machine, event); break;
        case PERF_RECORD_ITRACE_START:
                ret = machine__process_itrace_start_event(machine, event);
                break;
        default:
                ret = -1;
                break;
        }

        return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
        if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
                return 1;
        return 0;
}

static void ip__resolve_ams(struct thread *thread,
                            struct addr_map_symbol *ams,
                            u64 ip)
{
        struct addr_location al;

        memset(&al, 0, sizeof(al));
        /*
         * We cannot use the header.misc hint to determine whether a
         * branch stack address is user, kernel, guest, hypervisor.
         * Branches may straddle the kernel/user/hypervisor boundaries.
         * Thus, we have to try consecutively until we find a match
         * or else, the symbol is unknown
         */
        thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

        ams->addr = ip;
        ams->al_addr = al.addr;
        ams->sym = al.sym;
        ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
                             u8 m, struct addr_map_symbol *ams, u64 addr)
{
        struct addr_location al;

        memset(&al, 0, sizeof(al));

        thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
        if (al.map == NULL) {
                /*
                 * some shared data regions have execute bit set which puts
                 * their mapping in the MAP__FUNCTION type array.
                 * Check there as a fallback option before dropping the sample.
                 */
                thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
        }

        ams->addr = addr;
        ams->al_addr = al.addr;
        ams->sym = al.sym;
        ams->map = al.map;
}
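
/*
 * Resolve both the instruction address and the data address of a memory
 * access sample to map/symbol, for memory profiling reports.
 */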
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
                                     struct addr_location *al)
{
        struct mem_info *mi = zalloc(sizeof(*mi));

        if (!mi)
                return NULL;

        ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
        ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
        mi->data_src.val = sample->data_src;

        return mi;
}

static int add_callchain_ip(struct thread *thread,
                            struct symbol **parent,
                            struct addr_location *root_al,
                            u8 *cpumode,
                            u64 ip)
{
        struct addr_location al;

        al.filtered = 0;
        al.sym = NULL;
        if (!cpumode) {
                thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
                                                   ip, &al);
        } else {
                if (ip >= PERF_CONTEXT_MAX) {
                        switch (ip) {
                        case PERF_CONTEXT_HV:
                                *cpumode = PERF_RECORD_MISC_HYPERVISOR;
                                break;
                        case PERF_CONTEXT_KERNEL:
                                *cpumode = PERF_RECORD_MISC_KERNEL;
                                break;
                        case PERF_CONTEXT_USER:
                                *cpumode = PERF_RECORD_MISC_USER;
                                break;
                        default:
                                pr_debug("invalid callchain context: "
                                         "%"PRId64"\n", (s64) ip);
                                /*
                                 * It seems the callchain is corrupted.
                                 * Discard all.
                                 */
                                callchain_cursor_reset(&callchain_cursor);
                                return 1;
                        }
                        return 0;
                }
                thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
                                           ip, &al);
        }

        if (al.sym != NULL) {
                if (sort__has_parent && !*parent &&
                    symbol__match_regex(al.sym, &parent_regex))
                        *parent = al.sym;
                else if (have_ignore_callees && root_al &&
                         symbol__match_regex(al.sym, &ignore_callees_regex)) {
                        /* Treat this symbol as the root,
                           forgetting its callees. */
                        *root_al = al;
                        callchain_cursor_reset(&callchain_cursor);
                }
        }

        return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}
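
/*
 * Resolve a sample's branch stack into an array of branch_info entries,
 * with both the 'from' and 'to' addresses resolved to map and symbol.
 */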
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
                                           struct addr_location *al)
{
        unsigned int i;
        const struct branch_stack *bs = sample->branch_stack;
        struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

        if (!bi)
                return NULL;

        for (i = 0; i < bs->nr; i++) {
                ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
                ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
                bi[i].flags = bs->entries[i].flags;
        }
        return bi;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/*
 * Remove loops from the branch stack: when the same 'from' address shows
 * up again and the entries in between repeat, collapse the repeated
 * iterations so that only one pass through the loop body remains.
 */
static int remove_loops(struct branch_entry *l, int nr)
{
        int i, j, off;
        unsigned char chash[CHASHSZ];

        memset(chash, NO_ENTRY, sizeof(chash));

        BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

        for (i = 0; i < nr; i++) {
                int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

                /* no collision handling for now */
                if (chash[h] == NO_ENTRY) {
                        chash[h] = i;
                } else if (l[chash[h]].from == l[i].from) {
                        bool is_loop = true;
                        /* check if it is a real loop */
                        off = 0;
                        for (j = chash[h]; j < i && i + off < nr; j++, off++)
                                if (l[j].from != l[i + off].from) {
                                        is_loop = false;
                                        break;
                                }
                        if (is_loop) {
                                memmove(l + i, l + i + off,
                                        (nr - (i + off)) * sizeof(*l));
                                nr -= off;
                        }
                }
        }
        return nr;
}

/*
 * Resolve an LBR callstack chain sample.
 * Return:
 * 1 on success - got LBR callchain information
 * 0 when no LBR callchain information is available - should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
                                        struct perf_sample *sample,
                                        struct symbol **parent,
                                        struct addr_location *root_al,
                                        int max_stack)
{
        struct ip_callchain *chain = sample->callchain;
        int chain_nr = min(max_stack, (int)chain->nr);
        u8 cpumode = PERF_RECORD_MISC_USER;
        int i, j, err;
        u64 ip;

        for (i = 0; i < chain_nr; i++) {
                if (chain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        /* LBR only affects the user callchain */
        if (i != chain_nr) {
                struct branch_stack *lbr_stack = sample->branch_stack;
                int lbr_nr = lbr_stack->nr;
                /*
                 * The LBR callstack can only capture the user call chain.
                 * mix_chain_nr is the kernel call chain number plus the
                 * LBR user call chain number:
                 * i is the kernel call chain number,
                 * 1 is PERF_CONTEXT_USER,
                 * lbr_nr + 1 is the user call chain number.
                 * For details, please refer to the comments
                 * in callchain__printf.
                 */
                int mix_chain_nr = i + 1 + lbr_nr + 1;

                if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
                        pr_warning("corrupted callchain. skipping...\n");
                        return 0;
                }

                for (j = 0; j < mix_chain_nr; j++) {
                        if (callchain_param.order == ORDER_CALLEE) {
                                if (j < i + 1)
                                        ip = chain->ips[j];
                                else if (j > i + 1)
                                        ip = lbr_stack->entries[j - i - 2].from;
                                else
                                        ip = lbr_stack->entries[0].to;
                        } else {
                                if (j < lbr_nr)
                                        ip = lbr_stack->entries[lbr_nr - j - 1].from;
                                else if (j > lbr_nr)
                                        ip = chain->ips[i + 1 - (j - lbr_nr)];
                                else
                                        ip = lbr_stack->entries[0].to;
                        }

                        err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
                        if (err)
                                return (err < 0) ? err : 0;
                }
                return 1;
        }

        return 0;
}

static int thread__resolve_callchain_sample(struct thread *thread,
                                            struct perf_evsel *evsel,
                                            struct perf_sample *sample,
                                            struct symbol **parent,
                                            struct addr_location *root_al,
                                            int max_stack)
{
        struct branch_stack *branch = sample->branch_stack;
        struct ip_callchain *chain = sample->callchain;
        int chain_nr = min(max_stack, (int)chain->nr);
        u8 cpumode = PERF_RECORD_MISC_USER;
        int i, j, err;
        int skip_idx = -1;
        int first_call = 0;

        callchain_cursor_reset(&callchain_cursor);

        if (has_branch_callstack(evsel)) {
                err = resolve_lbr_callchain_sample(thread, sample, parent,
                                                   root_al, max_stack);
                if (err)
                        return (err < 0) ? err : 0;
        }

        /*
         * Based on DWARF debug information, some architectures skip
         * a callchain entry saved by the kernel.
         */
        if (chain->nr < PERF_MAX_STACK_DEPTH)
                skip_idx = arch_skip_callchain_idx(thread, chain);

        /*
         * Add branches to call stack for easier browsing. This gives
         * more context for a sample than just the callers.
         *
         * This uses individual histograms of paths compared to the
         * aggregated histograms the normal LBR mode uses.
         *
         * Limitations for now:
         * - No extra filters
         * - No annotations (should annotate somehow)
         */

        if (branch && callchain_param.branch_callstack) {
                int nr = min(max_stack, (int)branch->nr);
                struct branch_entry be[nr];

                if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
                        pr_warning("corrupted branch chain. skipping...\n");
                        goto check_calls;
                }

                for (i = 0; i < nr; i++) {
                        if (callchain_param.order == ORDER_CALLEE) {
                                be[i] = branch->entries[i];
                                /*
                                 * Check for overlap into the callchain.
                                 * The return address is one off compared to
                                 * the branch entry. To adjust for this
                                 * assume the calling instruction is not longer
                                 * than 8 bytes.
                                 */
                                if (i == skip_idx ||
                                    chain->ips[first_call] >= PERF_CONTEXT_MAX)
                                        first_call++;
                                else if (be[i].from < chain->ips[first_call] &&
                                         be[i].from >= chain->ips[first_call] - 8)
                                        first_call++;
                        } else
                                be[i] = branch->entries[branch->nr - i - 1];
                }

                nr = remove_loops(be, nr);

                for (i = 0; i < nr; i++) {
                        err = add_callchain_ip(thread, parent, root_al,
                                               NULL, be[i].to);
                        if (!err)
                                err = add_callchain_ip(thread, parent, root_al,
                                                       NULL, be[i].from);
                        if (err == -EINVAL)
                                break;
                        if (err)
                                return err;
                }
                chain_nr -= nr;
        }

check_calls:
        if (chain->nr > PERF_MAX_STACK_DEPTH) {
                pr_warning("corrupted callchain. skipping...\n");
                return 0;
        }

        for (i = first_call; i < chain_nr; i++) {
                u64 ip;

                if (callchain_param.order == ORDER_CALLEE)
                        j = i;
                else
                        j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
                if (j == skip_idx)
                        continue;
#endif
                ip = chain->ips[j];

                err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);

                if (err)
                        return (err < 0) ? err : 0;
        }

        return 0;
}
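
/* Callback for unwind__get_entries(): append each unwound frame to the cursor. */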
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
        struct callchain_cursor *cursor = arg;
        return callchain_cursor_append(cursor, entry->ip,
                                       entry->map, entry->sym);
}

int thread__resolve_callchain(struct thread *thread,
                              struct perf_evsel *evsel,
                              struct perf_sample *sample,
                              struct symbol **parent,
                              struct addr_location *root_al,
                              int max_stack)
{
        int ret = thread__resolve_callchain_sample(thread, evsel,
                                                   sample, parent,
                                                   root_al, max_stack);
        if (ret)
                return ret;

        /* Can we do dwarf post unwind? */
        if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
              (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
                return 0;

        /* Bail out if nothing was captured. */
        if ((!sample->user_regs.regs) ||
            (!sample->user_stack.size))
                return 0;

        return unwind__get_entries(unwind_entry, &callchain_cursor,
                                   thread, sample, max_stack);
}

int machine__for_each_thread(struct machine *machine,
                             int (*fn)(struct thread *thread, void *p),
                             void *priv)
{
        struct rb_node *nd;
        struct thread *thread;
        int rc = 0;

        for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
                thread = rb_entry(nd, struct thread, rb_node);
                rc = fn(thread, priv);
                if (rc != 0)
                        return rc;
        }

        list_for_each_entry(thread, &machine->dead_threads, node) {
                rc = fn(thread, priv);
                if (rc != 0)
                        return rc;
        }
        return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
                                  struct target *target, struct thread_map *threads,
                                  perf_event__handler_t process, bool data_mmap)
{
        if (target__has_task(target))
                return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
        else if (target__has_cpu(target))
                return perf_event__synthesize_threads(tool, process, machine, data_mmap);
        /* command specified */
        return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
        if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
                return -1;

        return machine->current_tid[cpu];
}
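
/*
 * Record which tid is currently running on 'cpu', allocating the per-cpu
 * table on first use and creating the thread so that thread->cpu is set.
 */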
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
                             pid_t tid)
{
        struct thread *thread;

        if (cpu < 0)
                return -EINVAL;

        if (!machine->current_tid) {
                int i;

                machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
                if (!machine->current_tid)
                        return -ENOMEM;
                for (i = 0; i < MAX_NR_CPUS; i++)
                        machine->current_tid[i] = -1;
        }

        if (cpu >= MAX_NR_CPUS) {
                pr_err("Requested CPU %d too large. ", cpu);
                pr_err("Consider raising MAX_NR_CPUS\n");
                return -EINVAL;
        }

        machine->current_tid[cpu] = tid;

        thread = machine__findnew_thread(machine, pid, tid);
        if (!thread)
                return -ENOMEM;

        thread->cpu = cpu;
        thread__put(thread);

        return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
        struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
        int err = 0;

        /*
         * The only addresses above 2^63 are kernel addresses of a 64-bit
         * kernel. Note that addresses are unsigned so that on a 32-bit system
         * all addresses including kernel addresses are less than 2^32. In
         * that case (32-bit system), if the kernel mapping is unknown, all
         * addresses will be assumed to be in user space - see
         * machine__kernel_ip().
         */
        machine->kernel_start = 1ULL << 63;
        if (map) {
                err = map__load(map, machine->symbol_filter);
                if (map->start)
                        machine->kernel_start = map->start;
        }
        return err;
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
        return __dsos__findnew(&machine->dsos, filename);
}