/* tools/perf/util/machine.c */
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
#include "linux/hash.h"

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
}

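/*
 * Initialize a struct machine: the thread rbtree, the user/kernel DSO
 * lists and the kernel map groups. For guests (pid != HOST_KERNEL_ID) a
 * "[guest/<pid>]" pseudo thread is also created, so that guest samples
 * have something to be attributed to. Returns 0 or -ENOMEM.
 */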
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->user_dsos);
	dsos__init(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__delete(struct dsos *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &machine->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

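/*
 * Look up a machine by guest pid: HOST_KERNEL_ID selects machines->host,
 * anything else is searched for in the guests rbtree. If there is no
 * exact match, the machine with pid 0 (the default guest), if it was
 * encountered during the walk, is returned as a fallback.
 */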
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

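/*
 * Find or create the machine for a guest pid. When symbol_conf.guestmount
 * is set, "<guestmount>/<pid>" must be readable and becomes the new
 * machine's root_dir; unreadable paths are reported only once and make
 * the lookup return NULL.
 */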
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

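/*
 * A thread can be discovered from a sample before its fork event is
 * processed, in which case it was created with pid_ == -1. Once the real
 * pid becomes known, fill it in and, if the thread turns out not to be
 * the group leader, share the leader's map groups with it.
 */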
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__delete(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}

static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th && th->tid == tid) {
		machine__update_thread_pid(machine, th, pid);
		return th;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase(&th->rb_node, &machine->threads);
			thread__delete(th);
			return NULL;
		}

		machine->last_match = th;
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
	bool compressed;

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (is_kernel_module(filename, &compressed) && compressed)
		dso->symtab_type++;

	map_groups__insert(&machine->kmaps, map);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
		     __dsos__fprintf(&machines->host.user_dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
		ret += __dsos__fprintf(&pos->user_dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
	       __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/*
 * Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

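/*
 * Create the kernel maps, one per map type (functions and variables),
 * with an identity ip<->map ip mapping, and insert them into the
 * machine's kernel map groups.
 */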
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

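/*
 * Create kernel maps for the default guest, if any of its paths were
 * configured, and for every guest found under symbol_conf.guestmount,
 * where each numeric directory name is taken to be a guest pid.
 */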
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

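/*
 * Extract the kernel release from <root_dir>/proc/version: the token
 * following "Linux version ", e.g. "3.16.0" out of a line like
 * "Linux version 3.16.0 (buildd@host) ...". Returns a strdup()ed string.
 */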
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

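/*
 * Recursively walk <root_dir>/lib/modules/<version> and, for each module
 * file found, set the long name of the matching "[module]" map's DSO to
 * the file's path, so that symbols can later be loaded from disk.
 */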
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.'),
			     dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL)
				continue;

			/* On some systems, modules are compressed like .ko.gz */
			if (is_supported_compression(dot + 1) &&
			    is_kmodule_extension(dot - 2))
				dot -= 3;

			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}
			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);
	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

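/*
 * Kernel space MMAP events describe either the kernel image itself (the
 * filename carries the "[kernel.kallsyms]" style prefix) or a module.
 * Modules get a new "[name]" map; the kernel mmap sets the vmlinux map
 * lengths and, via pgoff, the kallsyms ref_reloc_sym address.
 */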
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			/* On some systems, modules are compressed like .ko.gz */
			if (is_supported_compression(dot + 1))
				dot -= 3;
			if (!is_kmodule_extension(dot + 1))
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
			if (is_kernel_module(dso->long_name, NULL))
				continue;

			kernel = dso;
			break;
		}

		if (kernel == NULL)
			kernel = __dsos__findnew(&machine->kernel_dsos,
						 kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

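/*
 * Resolve one callchain entry to a map/symbol and append it to the
 * global callchain_cursor. For branch history entries the cpumode is
 * found by trying each address space in turn; for regular chain entries
 * it is tracked via the PERF_CONTEXT_* markers embedded in the chain.
 */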
static int add_callchain_ip(struct thread *thread,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    bool branch_history,
			    u64 ip)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (branch_history)
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	else {
		u8 cpumode = PERF_RECORD_MISC_USER;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (sort__has_parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/*
			 * Treat this symbol as the root,
			 * forgetting its callees.
			 */
			*root_al = al;
			callchain_cursor_reset(&callchain_cursor);
		}
	}

	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}

/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success: LBR callchain information obtained
 * 0: no LBR callchain information available, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i, j, err;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted callchain. skipping...\n");
			return 0;
		}

		for (j = 0; j < mix_chain_nr; j++) {
			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1)
					ip = lbr_stack->entries[j - i - 2].from;
				else
					ip = lbr_stack->entries[0].to;
			} else {
				if (j < lbr_nr)
					ip = lbr_stack->entries[lbr_nr - j - 1].from;
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else
					ip = lbr_stack->entries[0].to;
			}

			err = add_callchain_ip(thread, parent, root_al, false, ip);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

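/*
 * Resolve a sample's callchain: try the LBR call stack first if the
 * evsel recorded one, optionally splice in branch stack entries for
 * extra context, then walk the remaining PERF_CONTEXT_* delimited
 * chain entries.
 */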
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < PERF_MAX_STACK_DEPTH)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, parent, root_al,
					       true, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, parent, root_al,
						       true, be[i].from);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, parent, root_al, false, ip);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int thread__resolve_callchain(struct thread *thread,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = thread__resolve_callchain_sample(thread, evsel,
						   sample, parent,
						   root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor,
				   thread, sample, max_stack);
}

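/*
 * Call fn on every thread of the machine, live ones first, then those
 * parked on the dead_threads list, stopping early on the first non-zero
 * return value, which is also returned to the caller.
 */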
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

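/*
 * Per-cpu current-tid bookkeeping, so that events which carry only a cpu
 * (e.g. when decoding hardware traces) can still be attributed to a
 * thread. The array is allocated lazily, MAX_NR_CPUS entries wide, and
 * initialized to -1.
 */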
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}