#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"

#include "sane_ctype.h"
#include <symbol/kallsyms.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	pthread_rwlock_init(&dsos->lock, NULL);
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine->threads = RB_ROOT;
	pthread_rwlock_init(&machine->threads_lock, NULL);
	machine->nr_threads = 0;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	return 0;
}

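/*
 * Usage sketch (illustrative, not part of the original file): a machine
 * embedded in another struct or on the stack is set up with machine__init()
 * and must be torn down with machine__exit(), so root_dir, threads and DSOs
 * are released:
 *
 *	struct machine machine;
 *
 *	if (machine__init(&machine, "", HOST_KERNEL_ID) < 0)
 *		return -1;
 *	...
 *	machine__exit(&machine);
 */
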
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) MAP__FUNCTION will go away when we stop loading separate maps for
	 *    functions and data objects.
	 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	pthread_rwlock_wrlock(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	pthread_rwlock_unlock(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	pthread_rwlock_destroy(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}

void machine__exit(struct machine *machine)
{
	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

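/*
 * For heap-allocated machines the pairing is machine__new_host() (or
 * machines__add()) with machine__delete(), which runs machine__exit() and
 * then frees the struct. A minimal sketch, error handling trimmed:
 *
 *	struct machine *host = machine__new_host();
 *
 *	if (host != NULL) {
 *		machine__process_event(host, event, &sample);
 *		machine__delete(host);
 *	}
 */
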
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

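/*
 * With --guestmount, machines__findnew() above expects one directory per
 * guest, named after its pid and mirroring the guest filesystem, e.g.
 * (hypothetical mount point):
 *
 *	/guests/1234/proc/kallsyms
 *	/guests/1234/lib/modules/...
 *
 * so the machine for pid 1234 gets root_dir "/guests/1234" and all later
 * /proc and module lookups are done relative to it.
 */
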
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

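/*
 * For reference, the names built by machine__mmap_name(), later matched
 * against MMAP events in machine__process_kernel_mmap_event():
 *
 *	host machine           -> "[kernel.kallsyms]"
 *	default guest          -> "[guest.kernel.kallsyms]"
 *	guest with pid 1234    -> "[guest.kernel.kallsyms.1234]"
 */
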
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there should never be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after the rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
		++machine->nr_threads;
	}

	return th;
}

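/*
 * Callers own the reference returned by the lookup above, so the typical
 * pattern is (sketch only, use() is a hypothetical consumer):
 *
 *	struct thread *thread = machine__findnew_thread(machine, pid, tid);
 *
 *	if (thread == NULL)
 *		return -1;
 *	use(thread);
 *	thread__put(thread);
 *
 * The rbtree holds its own reference (the thread__get() after
 * rb_insert_color() above), so this thread__put() doesn't free a thread
 * that is still in the tree.
 */
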
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = __machine__findnew_thread(machine, pid, tid);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct thread *th;

	pthread_rwlock_rdlock(&machine->threads_lock);
	th = ____machine__findnew_thread(machine, pid, tid, false);
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	pthread_rwlock_unlock(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}

struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}

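/*
 * kmod_path__parse_name() reduces both module paths and "[name]" maps to
 * the canonical dso name used as the key above, e.g. (hypothetical module):
 *
 *	"/lib/modules/4.13.0/kernel/fs/xfs/xfs.ko"    -> m.name = "[xfs]"
 *	"/lib/modules/4.13.0/kernel/fs/xfs/xfs.ko.gz" -> also sets m.comp
 *
 * which is how a later MMAP event for "[xfs]" and the on-disk .ko end up
 * sharing a single dso.
 */
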
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = DSO__NAME_KALLSYMS;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}

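/*
 * The lookup above scans /proc/kallsyms lines of the form (addresses are
 * illustrative, and read as 0 when kptr_restrict is set, hence the
 * symbol__restricted_filename() check):
 *
 *	ffffffff81000000 T _text
 *	ffffffff81000000 T _stext
 */
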
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;
	u64 start = 0;

	if (machine__get_running_kernel_start(machine, NULL, &start))
		return -1;

	/* If the kernel map is being renewed, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

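/*
 * With the guestmount layout sketched near machines__findnew() above,
 * scandir() sees one numeric entry per guest, e.g. "1234" and "5678";
 * anything not starting with a digit (".", "..", stray files) is skipped
 * before the strtol() conversion.
 */
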
void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int __machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type, bool no_kcore)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type)
{
	return __machine__load_kallsyms(machine, filename, type, false);
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

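/*
 * Example input/output for get_kernel_version() (version string is
 * illustrative):
 *
 *	/proc/version:	"Linux version 4.13.0-16-generic (buildd@host) ..."
 *	returned:	"4.13.0-16-generic"
 *
 * i.e. the word following the "Linux version " prefix, up to the first
 * space.
 */
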
static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

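/*
 * The symtab_type++ above relies on the dso_binary_type enum placing each
 * *_KMODULE value directly before its *_KMODULE_COMP twin, so bumping by
 * one turns e.g. DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE into
 * DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP for compressed modules.
 */
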
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
			machine__destroy_kernel_maps(machine);
			return -1;
		}
	}

	return 0;
}

1222
1223 static void machine__set_kernel_mmap_len(struct machine *machine,
1224 union perf_event *event)
1225 {
1226 int i;
1227
1228 for (i = 0; i < MAP__NR_TYPES; i++) {
1229 machine->vmlinux_maps[i]->start = event->mmap.start;
1230 machine->vmlinux_maps[i]->end = (event->mmap.start +
1231 event->mmap.len);
1232 /*
1233 * Be a bit paranoid here, some perf.data file came with
1234 * a zero sized synthesized MMAP event for the kernel.
1235 */
1236 if (machine->vmlinux_maps[i]->end == 0)
1237 machine->vmlinux_maps[i]->end = ~0ULL;
1238 }
1239 }
1240
1241 static bool machine__uses_kcore(struct machine *machine)
1242 {
1243 struct dso *dso;
1244
1245 list_for_each_entry(dso, &machine->dsos.head, node) {
1246 if (dso__is_kcore(dso))
1247 return true;
1248 }
1249
1250 return false;
1251 }
1252
1253 static int machine__process_kernel_mmap_event(struct machine *machine,
1254 union perf_event *event)
1255 {
1256 struct map *map;
1257 char kmmap_prefix[PATH_MAX];
1258 enum dso_kernel_type kernel_type;
1259 bool is_kernel_mmap;
1260
1261 /* If we have maps from kcore then we do not need or want any others */
1262 if (machine__uses_kcore(machine))
1263 return 0;
1264
1265 machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
1266 if (machine__is_host(machine))
1267 kernel_type = DSO_TYPE_KERNEL;
1268 else
1269 kernel_type = DSO_TYPE_GUEST_KERNEL;
1270
1271 is_kernel_mmap = memcmp(event->mmap.filename,
1272 kmmap_prefix,
1273 strlen(kmmap_prefix) - 1) == 0;
1274 if (event->mmap.filename[0] == '/' ||
1275 (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
1276 map = machine__findnew_module_map(machine, event->mmap.start,
1277 event->mmap.filename);
1278 if (map == NULL)
1279 goto out_problem;
1280
1281 map->end = map->start + event->mmap.len;
1282 } else if (is_kernel_mmap) {
1283 const char *symbol_name = (event->mmap.filename +
1284 strlen(kmmap_prefix));
1285 /*
1286 * Should be there already, from the build-id table in
1287 * the header.
1288 */
1289 struct dso *kernel = NULL;
1290 struct dso *dso;
1291
1292 pthread_rwlock_rdlock(&machine->dsos.lock);
1293
1294 list_for_each_entry(dso, &machine->dsos.head, node) {
1295
1296 /*
1297 * The cpumode passed to is_kernel_module is not the
1298 * cpumode of *this* event. If we insist on passing
1299 * correct cpumode to is_kernel_module, we should
1300 * record the cpumode when we adding this dso to the
1301 * linked list.
1302 *
1303 * However we don't really need passing correct
1304 * cpumode. We know the correct cpumode must be kernel
1305 * mode (if not, we should not link it onto kernel_dsos
1306 * list).
1307 *
1308 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1309 * is_kernel_module() treats it as a kernel cpumode.
1310 */
1311
1312 if (!dso->kernel ||
1313 is_kernel_module(dso->long_name,
1314 PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1315 continue;
1316
1317
1318 kernel = dso;
1319 break;
1320 }
1321
1322 pthread_rwlock_unlock(&machine->dsos.lock);
1323
1324 if (kernel == NULL)
1325 kernel = machine__findnew_dso(machine, kmmap_prefix);
1326 if (kernel == NULL)
1327 goto out_problem;
1328
1329 kernel->kernel = kernel_type;
1330 if (__machine__create_kernel_maps(machine, kernel) < 0) {
1331 dso__put(kernel);
1332 goto out_problem;
1333 }
1334
1335 if (strstr(kernel->long_name, "vmlinux"))
1336 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1337
1338 machine__set_kernel_mmap_len(machine, event);
1339
1340 /*
1341 * Avoid using a zero address (kptr_restrict) for the ref reloc
1342 * symbol. Effectively having zero here means that at record
1343 * time /proc/sys/kernel/kptr_restrict was non zero.
1344 */
1345 if (event->mmap.pgoff != 0) {
1346 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
1347 symbol_name,
1348 event->mmap.pgoff);
1349 }
1350
1351 if (machine__is_default_guest(machine)) {
1352 /*
1353 * preload dso of guest kernel and modules
1354 */
1355 dso__load(kernel, machine__kernel_map(machine));
1356 }
1357 }
1358 return 0;
1359 out_problem:
1360 return -1;
1361 }
1362
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase_init(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	--machine->nr_threads;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    int nr_loop_iter,
			    int samples)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/*
			 * Treat this symbol as the root,
			 * forgetting its callees.
			 */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;
	return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
				       branch, flags, nr_loop_iter, samples);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}

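/*
 * Worked example for remove_loops(), with symbolic 'from' addresses: given
 * the entries
 *
 *	A B A B D
 *
 * the second A hashes to the same slot as the first, the spans starting at
 * each copy compare equal (A B == A B), so one loop iteration is
 * memmove()d away, leaving
 *
 *	A B D
 *
 * i.e. the loop body is kept once and only the repetitions are dropped.
 */
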
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success, got LBR callchain information
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get the user call chain.
		 * The mix_chain_nr is the kernel call chain
		 * number plus the LBR user call chain number.
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;

			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, 0, 0);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

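/*
 * Shape of the mixed chain built above for ORDER_CALLEE (illustrative):
 *
 *	chain->ips[0..i-1]           kernel part of the sampled callchain
 *	chain->ips[i]                PERF_CONTEXT_USER (consumed as a
 *	                             cpumode switch, not appended)
 *	lbr_stack->entries[0].to     current user ip
 *	entries[0..lbr_nr-1].from    caller, caller's caller, ...
 *
 * hence mix_chain_nr = i + 1 + lbr_nr + 1 iterations in total.
 */
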
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = chain->nr;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;
	int nr_loop_iter;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is no longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr_loop_iter = nr;
		nr = remove_loops(be, nr);

		/*
		 * Get the number of iterations.
		 * It's only an approximation, but good enough in practice.
		 */
		if (nr_loop_iter > nr)
			nr_loop_iter = nr_loop_iter - nr + 1;
		else
			nr_loop_iter = 0;

		for (i = 0; i < nr; i++) {
			if (i == nr - 1)
				err = add_callchain_ip(thread, cursor, parent,
						       root_al,
						       NULL, be[i].to,
						       true, &be[i].flags,
						       nr_loop_iter, 1);
			else
				err = add_callchain_ip(thread, cursor, parent,
						       root_al,
						       NULL, be[i].to,
						       true, &be[i].flags,
						       0, 0);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       0, 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, 0, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym,
				       false, NULL, 0, 0);
}

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}

int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

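/*
 * Minimal callback sketch for machine__for_each_thread() (count_thread is
 * hypothetical); note dead threads are visited too:
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	machine__for_each_thread(machine, count_thread, &n);
 *
 * A non-zero return from the callback aborts the walk and is propagated.
 */
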
int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}

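/*
 * machine__kernel_ip() (in machine.h) compares instruction pointers against
 * the value set here, e.g. on x86_64 (address illustrative):
 *
 *	kernel_start = 0xffffffff81000000	after a successful map__load()
 *	kernel_start = 1ULL << 63		fallback, splitting the
 *						address space in half
 *
 * so any ip >= kernel_start is treated as a kernel address.
 */
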
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}

char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}