/*
 * tools/perf/util/machine.c
 * (as of: "perf tools: Update mmap2 interface with protection and flag bits")
 */
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"

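/*
 * Initialize a struct machine: empty thread/dso containers, the kernel
 * map group and a copy of root_dir. For guest machines (pid !=
 * HOST_KERNEL_ID) a placeholder "[guest/PID]" thread is created so that
 * samples can be attributed to the guest.
 */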
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps);
	RB_CLEAR_NODE(&machine->rb_node);
	INIT_LIST_HEAD(&machine->user_dsos);
	INIT_LIST_HEAD(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->kmaps.machine = machine;
	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, 0,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__delete(struct list_head *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, dsos, node) {
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &machine->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	zfree(&machine->root_dir);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

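/*
 * Look up a guest machine by pid in the machines->guests rbtree. The
 * host machine (HOST_KERNEL_ID) is returned directly; if no exact match
 * is found, fall back to the default guest machine (pid 0), if any.
 */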
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

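/*
 * Format the synthetic mmap name for a machine's kernel:
 * "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms]" for the
 * default guest and "[guest.kernel.kallsyms.PID]" for other guests.
 */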
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

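/*
 * Find the thread with this tid in the machine's rbtree, optionally
 * creating it when 'create' is true. A one-entry cache (last_match)
 * short-circuits the common case of consecutive lookups for the same
 * tid. A non-zero pid updates the cached thread's pid_ if they differ.
 */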
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (machine->last_match && machine->last_match->tid == tid) {
		if (pid && pid != machine->last_match->pid_)
			machine->last_match->pid_ = pid;
		return machine->last_match;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			if (pid && pid != th->pid_)
				th->pid_ = pid;
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine))
			return NULL;
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
	map_groups__insert(&machine->kmaps, map);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
		     __dsos__fprintf(&machines->host.user_dsos, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
	       __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static int symbol__in_kernel(void *arg, const char *name,
			     char type __maybe_unused, u64 start)
{
	struct process_args *args = arg;

	if (strchr(name, '['))
		return 0;

	args->start = start;
	return 1;
}

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

/* Figure out the start address of kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
{
	char filename[PATH_MAX];
	struct process_args args;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
		return 0;

	return args.start;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_kernel_start_addr(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

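/*
 * Read <root_dir>/proc/version, strip the "Linux version " prefix and
 * everything after the release string, and return a strdup()'ed copy,
 * or NULL on failure. The caller owns the returned string.
 */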
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.'),
			     dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL || strcmp(dot, ".ko"))
				continue;
			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}
			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

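/*
 * Create the kernel maps for this machine: find a usable ref reloc
 * symbol (_text, falling back to _stext) in kallsyms, build the
 * vmlinux maps, load module maps if requested, and fix up map ends.
 */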
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	char filename[PATH_MAX];
	const char *name;
	u64 addr = 0;
	int i;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}
	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

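/*
 * Handle a kernel-space mmap event: an absolute path or a bracketed
 * name other than the kernel mmap prefix becomes a new module map;
 * events matching the kernel mmap prefix (re)create the vmlinux maps
 * and, when pgoff carries it, set the ref reloc symbol address.
 */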
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

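/*
 * MMAP2 differs from MMAP in that it also carries device numbers,
 * inode, inode generation and, with this change, the protection and
 * flags bits of the mapping, all of which are forwarded to map__new().
 */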
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct machine *machine, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, or hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match,
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct machine *machine, struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
				   &al);
	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->machine, al->thread, al->cpumode,
			 &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

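/*
 * Walk a PERF_SAMPLE_CALLCHAIN entry, appending each resolved ip to the
 * global callchain_cursor. PERF_CONTEXT_* markers embedded in the chain
 * switch the cpumode used to resolve the subsequent addresses; an
 * unknown marker means the chain is corrupted and is discarded whole.
 */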
static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent,
					     struct addr_location *root_al,
					     int max_stack)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain_nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = 0;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))
				*parent = al.sym;
			else if (have_ignore_callees && root_al &&
				 symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/*
				 * Treat this symbol as the root,
				 * forgetting its callees.
				 */
				*root_al = al;
				callchain_cursor_reset(&callchain_cursor);
			}
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent,
			       struct addr_location *root_al,
			       int max_stack)
{
	int ret;

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent,
						root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, sample, max_stack);
}

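/*
 * Iterate over all threads, live (rbtree) and dead (list), calling
 * fn(thread, priv) on each; stop early and return the first non-zero
 * value fn returns. A minimal, hypothetical caller that counts every
 * thread might look like:
 *
 *	static int count_thread(struct thread *t __maybe_unused, void *priv)
 *	{
 *		(*(int *)priv)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */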
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}