// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
#include "thread.h"
#include "util.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

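/*
 * Threads are kept in THREADS__TABLE_SIZE buckets, each with its own
 * rb-tree and rwsem; machine__threads(machine, tid) picks the bucket,
 * which spreads both lookups and lock contention across buckets.
 */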
static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT_CACHED;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}

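/*
 * machine__init() owns the strings it sets up: root_dir and mmap_name
 * are allocated here and freed again on the error path below (and
 * later by machine__exit()).
 */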
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

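/*
 * Typical lifetime of a host machine, as a rough sketch:
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine != NULL) {
 *		... resolve events/samples against this machine ...
 *		machine__delete(machine);
 *	}
 */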
struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first_cached(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		struct thread *thread, *n;
		/*
		 * Forget about the dead: at this point whatever threads were
		 * left on the dead lists better have a reference count taken
		 * by whoever is using them. When those references are dropped
		 * and the count finally hits zero, thread__put() will see that
		 * the thread is not on the dead threads list, will not try to
		 * remove it from there, and will just call thread__delete()
		 * straight away.
		 */
		list_for_each_entry_safe(thread, n, &threads->dead, node)
			list_del_init(&thread->node);

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

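/*
 * Guest machines live in an rb-tree keyed by pid; the host machine is
 * kept separately in machines->host and is not part of that tree.
 */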
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

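/*
 * When symbol_conf.guestmount is set, each guest's root filesystem is
 * expected to be mounted under <guestmount>/<pid>, so e.g. a guest's
 * kallsyms ends up at <guestmount>/<pid>/proc/kallsyms.
 */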
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 */
static struct thread*
__threads__get_last_match(struct threads *threads, struct machine *machine,
			  int pid, int tid)
{
	struct thread *th;

	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	return NULL;
}

static struct thread*
threads__get_last_match(struct threads *threads, struct machine *machine,
			int pid, int tid)
{
	struct thread *th = NULL;

	if (perf_singlethreaded)
		th = __threads__get_last_match(threads, machine, pid, tid);

	return th;
}

static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
	threads->last_match = th;
}

static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
	if (perf_singlethreaded)
		__threads__set_last_match(threads, th);
}

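/*
 * Note that the last_match cache is only consulted and updated when
 * perf_singlethreaded is set: a shared, unsynchronized cache slot
 * could not be kept coherent with multiple reader threads, so it is
 * simply bypassed in that case.
 */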
/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;
	bool leftmost = true;

	th = threads__get_last_match(threads, machine, pid, tid);
	if (th)
		return th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads__set_last_match(threads, th);
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);

		/*
		 * We have to initialize map_groups separately after the rb
		 * tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_map_groups to find the thread leader, and that
		 * would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_cached(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads__set_last_match(threads, th);
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

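/*
 * Unlike machine__findnew_thread() above, which takes the bucket's
 * write lock because it may insert a new thread, the pure lookup
 * below only needs the read lock.
 */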
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map;

	map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
	if (!map) {
		map = dso__new_map(event->ksymbol.name);
		if (!map)
			return -ENOMEM;

		map->start = event->ksymbol.addr;
		map->end = map->start + event->ksymbol.len;
		map_groups__insert(&machine->kmaps, map);
	}

	sym = symbol__new(map->map_ip(map, map->start),
			  event->ksymbol.len,
			  0, 0, event->ksymbol.name);
	if (!sym)
		return -ENOMEM;
	dso__insert_symbol(map->dso, sym);
	return 0;
}

static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct map *map;

	map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
	if (map)
		map_groups__remove(&machine->kmaps, map);

	return 0;
}

int machine__process_ksymbol(struct machine *machine __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}

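/*
 * Module maps are looked up by the short module name parsed out of the
 * path by kmod_path__parse_name(), so repeated MMAP records for the
 * same module end up reusing a single map and dso.
 */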
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, m.name);
	if (map)
		goto out;

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret = fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}

int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	kmap->kmaps = &machine->kmaps;
	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	map_groups__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	for (map = maps__first(maps); map; map = map__next(map)) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = map_groups__find(kmaps, map->pgoff);
		if (dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}

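/*
 * The kernel map created below initially uses identity address
 * translation and a zero start address; machine__create_kernel_maps()
 * later fixes up the range once the running kernel's start is known.
 */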
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	struct kmap *kmap;
	struct map *map;

	/* In case of renewal of the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
	map = machine__kernel_map(machine);
	kmap = map__kmap(map);
	if (!kmap)
		return -1;

	kmap->kmaps = &machine->kmaps;
	map_groups__insert(&machine->kmaps, map);

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	map_groups__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first_cached(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		map_groups__fixup_end(&machine->kmaps);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	char *long_name;
	struct map *map = map_groups__find_by_name(mg, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name can reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso)) {
		map->dso->symtab_type++;
		map->dso->comp = m->comp;
	}

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       u64 *size __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end = end;
	/*
	 * Be a bit paranoid here: some perf.data files come with
	 * a zero-sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}

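/*
 * Resizing a map in place would leave the kmaps tree ordered by stale
 * addresses, so the kernel map is taken out, updated and re-inserted.
 */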
static void machine__update_kernel_mmap(struct machine *machine,
					u64 start, u64 end)
{
	struct map *map = machine__kernel_map(machine);

	map__get(map);
	map_groups__remove(&machine->kmaps, map);

	machine__set_kernel_mmap(machine, start, end);

	map_groups__insert(&machine->kmaps, map);
	map__put(map);
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * We have a real start address now, so re-order the kmaps;
		 * assume it's the last in the kmaps.
		 */
		machine__update_kernel_mmap(machine, start, end);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		map = map__next(machine__kernel_map(machine));
		if (map)
			machine__set_kernel_mmap(machine, start, map->start);
	}

out_put:
	dso__put(kernel);
	return ret;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     union perf_event *event)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(event->mmap.filename);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     union perf_event *event)
{
	struct map *kernel_map = machine__kernel_map(machine);
	struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
	struct extra_kernel_map xm = {
		.start = event->mmap.start,
		.end = event->mmap.start + event->mmap.len,
		.pgoff = event->mmap.pgoff,
	};

	if (kernel == NULL)
		return -1;

	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);

	return machine__create_extra_kernel_map(machine, kernel, &xm);
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * correct cpumode to is_kernel_module, we should
			 * record the cpumode when adding this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__update_kernel_mmap(machine, event->mmap.start,
					    event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
		return machine__process_extra_kernel_map(machine, event);
	}
	return 0;
out_problem:
	return -1;
}

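/*
 * MMAP2 records are a superset of MMAP: they additionally carry device,
 * inode, protection and flags. The two handlers below differ only in
 * how much of that information is forwarded to map__new().
 */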
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
		prot = PROT_EXEC;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       0, 0, 0, 0, prot, 0,
		       event->mmap.filename,
		       thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads__set_last_match(threads, NULL);

	if (lock)
		down_write(&threads->lock);

	BUG_ON(refcount_read(&th->refcnt) == 0);

	rb_erase_cached(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference;
	 * if this is the last reference, the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);

	/*
	 * We need to do the put here because if this is the last refcount,
	 * then we will be touching the threads->dead head when removing the
	 * thread.
	 */
	thread__put(th);

	if (lock)
		up_write(&threads->lock);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

162f0bef
FW
1792int machine__process_fork_event(struct machine *machine, union perf_event *event,
1793 struct perf_sample *sample)
b0a7d1a0 1794{
d75e6097
JO
1795 struct thread *thread = machine__find_thread(machine,
1796 event->fork.pid,
1797 event->fork.tid);
314add6b
AH
1798 struct thread *parent = machine__findnew_thread(machine,
1799 event->fork.ppid,
1800 event->fork.ptid);
4f8f382e 1801 bool do_maps_clone = true;
b91fc39f 1802 int err = 0;
b0a7d1a0 1803
5cb73340
AH
1804 if (dump_trace)
1805 perf_event__fprintf_task(event, stdout);
1806
1807 /*
1808 * There may be an existing thread that is not actually the parent,
1809 * either because we are processing events out of order, or because the
1810 * (fork) event that would have removed the thread was lost. Assume the
1811 * latter case and continue on as best we can.
1812 */
1813 if (parent->pid_ != (pid_t)event->fork.ppid) {
1814 dump_printf("removing erroneous parent thread %d/%d\n",
1815 parent->pid_, parent->tid);
1816 machine__remove_thread(machine, parent);
1817 thread__put(parent);
1818 parent = machine__findnew_thread(machine, event->fork.ppid,
1819 event->fork.ptid);
1820 }
1821
236a3bbd 1822 /* if a thread currently exists for the thread id remove it */
b91fc39f 1823 if (thread != NULL) {
236a3bbd 1824 machine__remove_thread(machine, thread);
b91fc39f
ACM
1825 thread__put(thread);
1826 }
236a3bbd 1827
314add6b
AH
1828 thread = machine__findnew_thread(machine, event->fork.pid,
1829 event->fork.tid);
4f8f382e
DM
1830 /*
1831 * When synthesizing FORK events, we are trying to create thread
1832 * objects for the already running tasks on the machine.
1833 *
1834 * Normally, for a kernel FORK event, we want to clone the parent's
1835 * maps because that is what the kernel just did.
1836 *
1837 * But when synthesizing, this should not be done. If we do, we end up
1838 * with overlapping maps as we process the sythesized MMAP2 events that
1839 * get delivered shortly thereafter.
1840 *
1841 * Use the FORK event misc flags in an internal way to signal this
1842 * situation, so we can elide the map clone when appropriate.
1843 */
1844 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1845 do_maps_clone = false;
b0a7d1a0
ACM
1846
1847 if (thread == NULL || parent == NULL ||
4f8f382e 1848 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
b0a7d1a0 1849 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
b91fc39f 1850 err = -1;
b0a7d1a0 1851 }
b91fc39f
ACM
1852 thread__put(thread);
1853 thread__put(parent);
b0a7d1a0 1854
b91fc39f 1855 return err;
b0a7d1a0
ACM
1856}
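/*
 * Synthesis-side sketch (an assumption about the producing tool, not shown
 * in this file): a tool synthesizing FORK events for already running tasks
 * can reuse the misc field to suppress the map clone handled above:
 *
 *	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
 *
 * machine__process_fork_event() then sets do_maps_clone = false and lets the
 * synthesized MMAP2 events rebuild the thread's maps.
 */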
1857
162f0bef
FW
1858int machine__process_exit_event(struct machine *machine, union perf_event *event,
1859 struct perf_sample *sample __maybe_unused)
b0a7d1a0 1860{
d75e6097
JO
1861 struct thread *thread = machine__find_thread(machine,
1862 event->fork.pid,
1863 event->fork.tid);
b0a7d1a0
ACM
1864
1865 if (dump_trace)
1866 perf_event__fprintf_task(event, stdout);
1867
b91fc39f 1868 if (thread != NULL) {
236a3bbd 1869 thread__exited(thread);
b91fc39f
ACM
1870 thread__put(thread);
1871 }
b0a7d1a0
ACM
1872
1873 return 0;
1874}
1875
162f0bef
FW
1876int machine__process_event(struct machine *machine, union perf_event *event,
1877 struct perf_sample *sample)
b0a7d1a0
ACM
1878{
1879 int ret;
1880
1881 switch (event->header.type) {
1882 case PERF_RECORD_COMM:
162f0bef 1883 ret = machine__process_comm_event(machine, event, sample); break;
b0a7d1a0 1884 case PERF_RECORD_MMAP:
162f0bef 1885 ret = machine__process_mmap_event(machine, event, sample); break;
f3b3614a
HB
1886 case PERF_RECORD_NAMESPACES:
1887 ret = machine__process_namespaces_event(machine, event, sample); break;
5c5e854b 1888 case PERF_RECORD_MMAP2:
162f0bef 1889 ret = machine__process_mmap2_event(machine, event, sample); break;
b0a7d1a0 1890 case PERF_RECORD_FORK:
162f0bef 1891 ret = machine__process_fork_event(machine, event, sample); break;
b0a7d1a0 1892 case PERF_RECORD_EXIT:
162f0bef 1893 ret = machine__process_exit_event(machine, event, sample); break;
b0a7d1a0 1894 case PERF_RECORD_LOST:
162f0bef 1895 ret = machine__process_lost_event(machine, event, sample); break;
4a96f7a0
AH
1896 case PERF_RECORD_AUX:
1897 ret = machine__process_aux_event(machine, event); break;
0ad21f68 1898 case PERF_RECORD_ITRACE_START:
ceb92913 1899 ret = machine__process_itrace_start_event(machine, event); break;
c4937a91
KL
1900 case PERF_RECORD_LOST_SAMPLES:
1901 ret = machine__process_lost_samples_event(machine, event, sample); break;
0286039f
AH
1902 case PERF_RECORD_SWITCH:
1903 case PERF_RECORD_SWITCH_CPU_WIDE:
1904 ret = machine__process_switch_event(machine, event); break;
9aa0bfa3
SL
1905 case PERF_RECORD_KSYMBOL:
1906 ret = machine__process_ksymbol(machine, event, sample); break;
45178a92 1907 case PERF_RECORD_BPF_EVENT:
3f604b5f 1908 ret = machine__process_bpf(machine, event, sample); break;
b0a7d1a0
ACM
1909 default:
1910 ret = -1;
1911 break;
1912 }
1913
1914 return ret;
1915}
3f067dca 1916
b21484f1 1917static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
3f067dca 1918{
a7c3899c 1919 if (!regexec(regex, sym->name, 0, NULL, 0))
3f067dca 1920 return 1;
3f067dca
ACM
1921 return 0;
1922}
1923
bb871a9c 1924static void ip__resolve_ams(struct thread *thread,
3f067dca
ACM
1925 struct addr_map_symbol *ams,
1926 u64 ip)
1927{
1928 struct addr_location al;
3f067dca
ACM
1929
1930 memset(&al, 0, sizeof(al));
52a3cb8c
ACM
1931 /*
1932 * We cannot use the header.misc hint to determine whether a
1933 * branch stack address is user, kernel, guest, hypervisor.
1934 * Branches may straddle the kernel/user/hypervisor boundaries.
 1935	 * Thus, we have to try each cpumode consecutively until we find
 1936	 * a match; otherwise, the symbol is unknown.
1937 */
26bd9331 1938 thread__find_cpumode_addr_location(thread, ip, &al);
3f067dca 1939
3f067dca
ACM
1940 ams->addr = ip;
1941 ams->al_addr = al.addr;
1942 ams->sym = al.sym;
1943 ams->map = al.map;
8780fb25 1944 ams->phys_addr = 0;
3f067dca
ACM
1945}
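/*
 * Conceptual sketch of the cpumode fallback used by ip__resolve_ams() above
 * (the exact order lives in thread__find_cpumode_addr_location(), not here):
 *
 *	for each m in { USER, KERNEL, GUEST_USER, GUEST_KERNEL }:
 *		thread__find_symbol(thread, m, ip, &al);
 *		if (al.map != NULL)
 *			break;
 */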
1946
bb871a9c 1947static void ip__resolve_data(struct thread *thread,
8780fb25
KL
1948 u8 m, struct addr_map_symbol *ams,
1949 u64 addr, u64 phys_addr)
98a3b32c
SE
1950{
1951 struct addr_location al;
1952
1953 memset(&al, 0, sizeof(al));
1954
117d3c24 1955 thread__find_symbol(thread, m, addr, &al);
06b2afc0 1956
98a3b32c
SE
1957 ams->addr = addr;
1958 ams->al_addr = al.addr;
1959 ams->sym = al.sym;
1960 ams->map = al.map;
8780fb25 1961 ams->phys_addr = phys_addr;
98a3b32c
SE
1962}
1963
e80faac0
ACM
1964struct mem_info *sample__resolve_mem(struct perf_sample *sample,
1965 struct addr_location *al)
98a3b32c 1966{
9f87498f 1967 struct mem_info *mi = mem_info__new();
98a3b32c
SE
1968
1969 if (!mi)
1970 return NULL;
1971
bb871a9c 1972 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
8780fb25
KL
1973 ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
1974 sample->addr, sample->phys_addr);
98a3b32c
SE
1975 mi->data_src.val = sample->data_src;
1976
1977 return mi;
1978}
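/*
 * Usage sketch (hypothetical caller): resolve the memory access of a sample
 * and drop the reference when done; mem_info is refcounted, so the
 * counterpart to mem_info__new() here is mem_info__put():
 *
 *	struct mem_info *mi = sample__resolve_mem(sample, al);
 *	if (mi != NULL) {
 *		// mi->iaddr/mi->daddr now carry resolved map/symbol info
 *		mem_info__put(mi);
 *	}
 */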
1979
40a342cd
MW
1980static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
1981{
21ac9d54
MW
1982 char *srcline = NULL;
1983
40a342cd 1984 if (!map || callchain_param.key == CCKEY_FUNCTION)
21ac9d54
MW
1985 return srcline;
1986
1987 srcline = srcline__tree_find(&map->dso->srclines, ip);
1988 if (!srcline) {
1989 bool show_sym = false;
1990 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
1991
1992 srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
935f5a9d 1993 sym, show_sym, show_addr, ip);
21ac9d54
MW
1994 srcline__tree_insert(&map->dso->srclines, ip, srcline);
1995 }
40a342cd 1996
21ac9d54 1997 return srcline;
40a342cd
MW
1998}
1999
c4ee0625
JY
2000struct iterations {
2001 int nr_loop_iter;
2002 u64 cycles;
2003};
2004
37592b8a 2005static int add_callchain_ip(struct thread *thread,
91d7b2de 2006 struct callchain_cursor *cursor,
37592b8a
AK
2007 struct symbol **parent,
2008 struct addr_location *root_al,
73dbcd65 2009 u8 *cpumode,
410024db
JY
2010 u64 ip,
2011 bool branch,
2012 struct branch_flags *flags,
c4ee0625 2013 struct iterations *iter,
b851dd49 2014 u64 branch_from)
37592b8a
AK
2015{
2016 struct addr_location al;
c4ee0625
JY
2017 int nr_loop_iter = 0;
2018 u64 iter_cycles = 0;
40a342cd 2019 const char *srcline = NULL;
37592b8a
AK
2020
2021 al.filtered = 0;
2022 al.sym = NULL;
ceb8e626 2023 al.srcline = NULL;
73dbcd65 2024 if (!cpumode) {
26bd9331 2025 thread__find_cpumode_addr_location(thread, ip, &al);
73dbcd65 2026 } else {
2e77784b
KL
2027 if (ip >= PERF_CONTEXT_MAX) {
2028 switch (ip) {
2029 case PERF_CONTEXT_HV:
73dbcd65 2030 *cpumode = PERF_RECORD_MISC_HYPERVISOR;
2e77784b
KL
2031 break;
2032 case PERF_CONTEXT_KERNEL:
73dbcd65 2033 *cpumode = PERF_RECORD_MISC_KERNEL;
2e77784b
KL
2034 break;
2035 case PERF_CONTEXT_USER:
73dbcd65 2036 *cpumode = PERF_RECORD_MISC_USER;
2e77784b
KL
2037 break;
2038 default:
2039 pr_debug("invalid callchain context: "
2040 "%"PRId64"\n", (s64) ip);
2041 /*
2042 * It seems the callchain is corrupted.
2043 * Discard all.
2044 */
91d7b2de 2045 callchain_cursor_reset(cursor);
2e77784b
KL
2046 return 1;
2047 }
2048 return 0;
2049 }
4546263d 2050 thread__find_symbol(thread, *cpumode, ip, &al);
2e77784b
KL
2051 }
2052
37592b8a 2053 if (al.sym != NULL) {
de7e6a7c 2054 if (perf_hpp_list.parent && !*parent &&
37592b8a
AK
2055 symbol__match_regex(al.sym, &parent_regex))
2056 *parent = al.sym;
2057 else if (have_ignore_callees && root_al &&
2058 symbol__match_regex(al.sym, &ignore_callees_regex)) {
2059 /* Treat this symbol as the root,
2060 forgetting its callees. */
2061 *root_al = al;
91d7b2de 2062 callchain_cursor_reset(cursor);
37592b8a
AK
2063 }
2064 }
2065
b49a8fe5
NK
2066 if (symbol_conf.hide_unresolved && al.sym == NULL)
2067 return 0;
c4ee0625
JY
2068
2069 if (iter) {
2070 nr_loop_iter = iter->nr_loop_iter;
2071 iter_cycles = iter->cycles;
2072 }
2073
40a342cd 2074 srcline = callchain_srcline(al.map, al.sym, al.addr);
19610184 2075 return callchain_cursor_append(cursor, ip, al.map, al.sym,
c4ee0625 2076 branch, flags, nr_loop_iter,
40a342cd 2077 iter_cycles, branch_from, srcline);
37592b8a
AK
2078}
2079
644f2df2
ACM
2080struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2081 struct addr_location *al)
3f067dca 2082{
3f067dca 2083 unsigned int i;
644f2df2
ACM
2084 const struct branch_stack *bs = sample->branch_stack;
2085 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
3f067dca 2086
3f067dca
ACM
2087 if (!bi)
2088 return NULL;
2089
2090 for (i = 0; i < bs->nr; i++) {
bb871a9c
ACM
2091 ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
2092 ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
3f067dca
ACM
2093 bi[i].flags = bs->entries[i].flags;
2094 }
2095 return bi;
2096}
2097
c4ee0625
JY
2098static void save_iterations(struct iterations *iter,
2099 struct branch_entry *be, int nr)
2100{
2101 int i;
2102
a3366db0 2103 iter->nr_loop_iter++;
c4ee0625
JY
2104 iter->cycles = 0;
2105
2106 for (i = 0; i < nr; i++)
2107 iter->cycles += be[i].flags.cycles;
2108}
2109
8b7bad58
AK
2110#define CHASHSZ 127
2111#define CHASHBITS 7
2112#define NO_ENTRY 0xff
2113
2114#define PERF_MAX_BRANCH_DEPTH 127
2115
2116/* Remove loops. */
c4ee0625
JY
2117static int remove_loops(struct branch_entry *l, int nr,
2118 struct iterations *iter)
8b7bad58
AK
2119{
2120 int i, j, off;
2121 unsigned char chash[CHASHSZ];
2122
2123 memset(chash, NO_ENTRY, sizeof(chash));
2124
2125 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2126
2127 for (i = 0; i < nr; i++) {
2128 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2129
2130 /* no collision handling for now */
2131 if (chash[h] == NO_ENTRY) {
2132 chash[h] = i;
2133 } else if (l[chash[h]].from == l[i].from) {
2134 bool is_loop = true;
2135 /* check if it is a real loop */
2136 off = 0;
2137 for (j = chash[h]; j < i && i + off < nr; j++, off++)
2138 if (l[j].from != l[i + off].from) {
2139 is_loop = false;
2140 break;
2141 }
2142 if (is_loop) {
c4ee0625
JY
2143 j = nr - (i + off);
2144 if (j > 0) {
2145 save_iterations(iter + i + off,
2146 l + i, off);
2147
2148 memmove(iter + i, iter + i + off,
2149 j * sizeof(*iter));
2150
2151 memmove(l + i, l + i + off,
2152 j * sizeof(*l));
2153 }
2154
8b7bad58
AK
2155 nr -= off;
2156 }
2157 }
2158 }
2159 return nr;
2160}
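/*
 * Worked example for remove_loops() (hypothetical branch history, keyed by
 * the 'from' addresses): the sequence A B C A B C A B C D collapses to
 * A B C D; each removed repetition bumps nr_loop_iter in the matching
 * iterations slot and the cycle counts of the removed entries are recorded
 * there, so the callchain can still report how often the loop body ran.
 */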
2161
384b6055
KL
2162/*
 2163 * Resolve LBR callstack chain sample
 2164 * Return:
 2165 *   1 on success: got LBR callchain information
 2166 *   0 if no LBR callchain information is available, should try fp
2167 * negative error code on other errors.
2168 */
2169static int resolve_lbr_callchain_sample(struct thread *thread,
91d7b2de 2170 struct callchain_cursor *cursor,
384b6055
KL
2171 struct perf_sample *sample,
2172 struct symbol **parent,
2173 struct addr_location *root_al,
2174 int max_stack)
3f067dca 2175{
384b6055 2176 struct ip_callchain *chain = sample->callchain;
18ef15c6 2177 int chain_nr = min(max_stack, (int)chain->nr), i;
73dbcd65 2178 u8 cpumode = PERF_RECORD_MISC_USER;
b851dd49 2179 u64 ip, branch_from = 0;
384b6055
KL
2180
2181 for (i = 0; i < chain_nr; i++) {
2182 if (chain->ips[i] == PERF_CONTEXT_USER)
2183 break;
2184 }
2185
2186 /* LBR only affects the user callchain */
2187 if (i != chain_nr) {
2188 struct branch_stack *lbr_stack = sample->branch_stack;
410024db
JY
2189 int lbr_nr = lbr_stack->nr, j, k;
2190 bool branch;
2191 struct branch_flags *flags;
384b6055
KL
2192 /*
 2193		 * The LBR callstack can only capture the user call chain.
 2194		 * mix_chain_nr is the kernel call chain length plus the
 2195		 * LBR user call chain length:
 2196		 * i is the number of kernel call chain entries,
 2197		 * 1 is for the PERF_CONTEXT_USER marker,
 2198		 * lbr_nr + 1 is the number of user call chain entries.
 2199		 * For details, please refer to the comments
 2200		 * in callchain__printf().
2201 */
2202 int mix_chain_nr = i + 1 + lbr_nr + 1;
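		/*
		 * Worked example (hypothetical numbers): with i = 2 kernel
		 * entries and lbr_nr = 8 LBR entries, mix_chain_nr =
		 * 2 + 1 + 8 + 1 = 12 entries are walked by the loop below.
		 */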
2203
384b6055 2204 for (j = 0; j < mix_chain_nr; j++) {
18ef15c6 2205 int err;
410024db
JY
2206 branch = false;
2207 flags = NULL;
2208
384b6055
KL
2209 if (callchain_param.order == ORDER_CALLEE) {
2210 if (j < i + 1)
2211 ip = chain->ips[j];
410024db
JY
2212 else if (j > i + 1) {
2213 k = j - i - 2;
2214 ip = lbr_stack->entries[k].from;
2215 branch = true;
2216 flags = &lbr_stack->entries[k].flags;
2217 } else {
384b6055 2218 ip = lbr_stack->entries[0].to;
410024db
JY
2219 branch = true;
2220 flags = &lbr_stack->entries[0].flags;
b851dd49
JY
2221 branch_from =
2222 lbr_stack->entries[0].from;
410024db 2223 }
384b6055 2224 } else {
410024db
JY
2225 if (j < lbr_nr) {
2226 k = lbr_nr - j - 1;
2227 ip = lbr_stack->entries[k].from;
2228 branch = true;
2229 flags = &lbr_stack->entries[k].flags;
2230 }
384b6055
KL
2231 else if (j > lbr_nr)
2232 ip = chain->ips[i + 1 - (j - lbr_nr)];
410024db 2233 else {
384b6055 2234 ip = lbr_stack->entries[0].to;
410024db
JY
2235 branch = true;
2236 flags = &lbr_stack->entries[0].flags;
b851dd49
JY
2237 branch_from =
2238 lbr_stack->entries[0].from;
410024db 2239 }
384b6055
KL
2240 }
2241
410024db
JY
2242 err = add_callchain_ip(thread, cursor, parent,
2243 root_al, &cpumode, ip,
c4ee0625 2244 branch, flags, NULL,
b851dd49 2245 branch_from);
384b6055
KL
2246 if (err)
2247 return (err < 0) ? err : 0;
2248 }
2249 return 1;
2250 }
2251
2252 return 0;
2253}
2254
e9024d51
DM
2255static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2256 struct callchain_cursor *cursor,
2257 struct symbol **parent,
2258 struct addr_location *root_al,
2259 u8 *cpumode, int ent)
2260{
2261 int err = 0;
2262
2263 while (--ent >= 0) {
2264 u64 ip = chain->ips[ent];
2265
2266 if (ip >= PERF_CONTEXT_MAX) {
2267 err = add_callchain_ip(thread, cursor, parent,
2268 root_al, cpumode, ip,
2269 false, NULL, NULL, 0);
2270 break;
2271 }
2272 }
2273 return err;
2274}
2275
384b6055 2276static int thread__resolve_callchain_sample(struct thread *thread,
91d7b2de 2277 struct callchain_cursor *cursor,
32dcd021 2278 struct evsel *evsel,
384b6055
KL
2279 struct perf_sample *sample,
2280 struct symbol **parent,
2281 struct addr_location *root_al,
2282 int max_stack)
2283{
2284 struct branch_stack *branch = sample->branch_stack;
2285 struct ip_callchain *chain = sample->callchain;
b49a821e 2286 int chain_nr = 0;
73dbcd65 2287 u8 cpumode = PERF_RECORD_MISC_USER;
bf8bddbf 2288 int i, j, err, nr_entries;
8b7bad58
AK
2289 int skip_idx = -1;
2290 int first_call = 0;
2291
b49a821e
JY
2292 if (chain)
2293 chain_nr = chain->nr;
2294
acf2abbd 2295 if (perf_evsel__has_branch_callstack(evsel)) {
91d7b2de 2296 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
384b6055
KL
2297 root_al, max_stack);
2298 if (err)
2299 return (err < 0) ? err : 0;
2300 }
2301
8b7bad58
AK
2302 /*
2303 * Based on DWARF debug information, some architectures skip
2304 * a callchain entry saved by the kernel.
2305 */
bf8bddbf 2306 skip_idx = arch_skip_callchain_idx(thread, chain);
3f067dca 2307
8b7bad58
AK
2308 /*
2309 * Add branches to call stack for easier browsing. This gives
2310 * more context for a sample than just the callers.
2311 *
2312 * This uses individual histograms of paths compared to the
2313 * aggregated histograms the normal LBR mode uses.
2314 *
2315 * Limitations for now:
2316 * - No extra filters
2317 * - No annotations (should annotate somehow)
2318 */
2319
2320 if (branch && callchain_param.branch_callstack) {
2321 int nr = min(max_stack, (int)branch->nr);
2322 struct branch_entry be[nr];
c4ee0625 2323 struct iterations iter[nr];
8b7bad58
AK
2324
2325 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2326 pr_warning("corrupted branch chain. skipping...\n");
2327 goto check_calls;
2328 }
2329
2330 for (i = 0; i < nr; i++) {
2331 if (callchain_param.order == ORDER_CALLEE) {
2332 be[i] = branch->entries[i];
b49a821e
JY
2333
2334 if (chain == NULL)
2335 continue;
2336
8b7bad58
AK
2337 /*
2338 * Check for overlap into the callchain.
2339 * The return address is one off compared to
2340 * the branch entry. To adjust for this
 2341				 * assume the calling instruction is no longer
 2342				 * than 8 bytes.
2343 */
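				/*
				 * Numeric illustration (hypothetical
				 * addresses): with be[i].from == 0x1000 and
				 * chain->ips[first_call] == 0x1005, the branch
				 * source lies within 8 bytes below the
				 * recorded return address, so both describe
				 * the same call and first_call is advanced
				 * past the duplicate.
				 */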
2344 if (i == skip_idx ||
2345 chain->ips[first_call] >= PERF_CONTEXT_MAX)
2346 first_call++;
2347 else if (be[i].from < chain->ips[first_call] &&
2348 be[i].from >= chain->ips[first_call] - 8)
2349 first_call++;
2350 } else
2351 be[i] = branch->entries[branch->nr - i - 1];
2352 }
2353
c4ee0625
JY
2354 memset(iter, 0, sizeof(struct iterations) * nr);
2355 nr = remove_loops(be, nr, iter);
410024db 2356
8b7bad58 2357 for (i = 0; i < nr; i++) {
c4ee0625
JY
2358 err = add_callchain_ip(thread, cursor, parent,
2359 root_al,
2360 NULL, be[i].to,
2361 true, &be[i].flags,
2362 NULL, be[i].from);
410024db 2363
8b7bad58 2364 if (!err)
91d7b2de 2365 err = add_callchain_ip(thread, cursor, parent, root_al,
410024db
JY
2366 NULL, be[i].from,
2367 true, &be[i].flags,
c4ee0625 2368 &iter[i], 0);
8b7bad58
AK
2369 if (err == -EINVAL)
2370 break;
2371 if (err)
2372 return err;
2373 }
b49a821e
JY
2374
2375 if (chain_nr == 0)
2376 return 0;
2377
8b7bad58
AK
2378 chain_nr -= nr;
2379 }
2380
2381check_calls:
169780ef 2382 if (chain && callchain_param.order != ORDER_CALLEE) {
e9024d51
DM
2383 err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2384 &cpumode, chain->nr - first_call);
2385 if (err)
2386 return (err < 0) ? err : 0;
2387 }
bf8bddbf 2388 for (i = first_call, nr_entries = 0;
a29d5c9b 2389 i < chain_nr && nr_entries < max_stack; i++) {
3f067dca 2390 u64 ip;
3f067dca
ACM
2391
2392 if (callchain_param.order == ORDER_CALLEE)
a60335ba 2393 j = i;
3f067dca 2394 else
a60335ba
SB
2395 j = chain->nr - i - 1;
2396
2397#ifdef HAVE_SKIP_CALLCHAIN_IDX
2398 if (j == skip_idx)
2399 continue;
2400#endif
2401 ip = chain->ips[j];
bf8bddbf
ACM
2402 if (ip < PERF_CONTEXT_MAX)
2403 ++nr_entries;
e9024d51
DM
2404 else if (callchain_param.order != ORDER_CALLEE) {
2405 err = find_prev_cpumode(chain, thread, cursor, parent,
2406 root_al, &cpumode, j);
2407 if (err)
2408 return (err < 0) ? err : 0;
2409 continue;
2410 }
a29d5c9b 2411
410024db
JY
2412 err = add_callchain_ip(thread, cursor, parent,
2413 root_al, &cpumode, ip,
c4ee0625 2414 false, NULL, NULL, 0);
3f067dca 2415
3f067dca 2416 if (err)
2e77784b 2417 return (err < 0) ? err : 0;
3f067dca
ACM
2418 }
2419
2420 return 0;
2421}
2422
11ea2515
MW
2423static int append_inlines(struct callchain_cursor *cursor,
2424 struct map *map, struct symbol *sym, u64 ip)
2425{
2426 struct inline_node *inline_node;
2427 struct inline_list *ilist;
2428 u64 addr;
b38775cf 2429 int ret = 1;
11ea2515
MW
2430
2431 if (!symbol_conf.inline_name || !map || !sym)
b38775cf 2432 return ret;
11ea2515 2433
7a8a8fcf
MW
2434 addr = map__map_ip(map, ip);
2435 addr = map__rip_2objdump(map, addr);
11ea2515
MW
2436
2437 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2438 if (!inline_node) {
2439 inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2440 if (!inline_node)
b38775cf 2441 return ret;
11ea2515
MW
2442 inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2443 }
2444
2445 list_for_each_entry(ilist, &inline_node->val, list) {
b38775cf
MW
2446 ret = callchain_cursor_append(cursor, ip, map,
2447 ilist->symbol, false,
2448 NULL, 0, 0, 0, ilist->srcline);
11ea2515
MW
2449
2450 if (ret != 0)
2451 return ret;
2452 }
2453
b38775cf 2454 return ret;
11ea2515
MW
2455}
2456
3f067dca
ACM
2457static int unwind_entry(struct unwind_entry *entry, void *arg)
2458{
2459 struct callchain_cursor *cursor = arg;
40a342cd 2460 const char *srcline = NULL;
ff4ce288 2461 u64 addr = entry->ip;
b49a8fe5
NK
2462
2463 if (symbol_conf.hide_unresolved && entry->sym == NULL)
2464 return 0;
40a342cd 2465
11ea2515
MW
2466 if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
2467 return 0;
2468
2a9d5050
SD
2469 /*
2470 * Convert entry->ip from a virtual address to an offset in
2471 * its corresponding binary.
2472 */
ff4ce288
MW
2473 if (entry->map)
2474 addr = map__map_ip(entry->map, entry->ip);
2a9d5050
SD
2475
2476 srcline = callchain_srcline(entry->map, entry->sym, addr);
3f067dca 2477 return callchain_cursor_append(cursor, entry->ip,
410024db 2478 entry->map, entry->sym,
40a342cd 2479 false, NULL, 0, 0, 0, srcline);
3f067dca
ACM
2480}
2481
9919a65e
CP
2482static int thread__resolve_callchain_unwind(struct thread *thread,
2483 struct callchain_cursor *cursor,
32dcd021 2484 struct evsel *evsel,
9919a65e
CP
2485 struct perf_sample *sample,
2486 int max_stack)
3f067dca 2487{
3f067dca 2488 /* Can we do dwarf post unwind? */
1fc632ce
JO
2489 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2490 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
3f067dca
ACM
2491 return 0;
2492
2493 /* Bail out if nothing was captured. */
2494 if ((!sample->user_regs.regs) ||
2495 (!sample->user_stack.size))
2496 return 0;
2497
91d7b2de 2498 return unwind__get_entries(unwind_entry, cursor,
352ea45a 2499 thread, sample, max_stack);
9919a65e 2500}
3f067dca 2501
9919a65e
CP
2502int thread__resolve_callchain(struct thread *thread,
2503 struct callchain_cursor *cursor,
32dcd021 2504 struct evsel *evsel,
9919a65e
CP
2505 struct perf_sample *sample,
2506 struct symbol **parent,
2507 struct addr_location *root_al,
2508 int max_stack)
2509{
2510 int ret = 0;
2511
914eb9ca 2512 callchain_cursor_reset(cursor);
9919a65e
CP
2513
2514 if (callchain_param.order == ORDER_CALLEE) {
2515 ret = thread__resolve_callchain_sample(thread, cursor,
2516 evsel, sample,
2517 parent, root_al,
2518 max_stack);
2519 if (ret)
2520 return ret;
2521 ret = thread__resolve_callchain_unwind(thread, cursor,
2522 evsel, sample,
2523 max_stack);
2524 } else {
2525 ret = thread__resolve_callchain_unwind(thread, cursor,
2526 evsel, sample,
2527 max_stack);
2528 if (ret)
2529 return ret;
2530 ret = thread__resolve_callchain_sample(thread, cursor,
2531 evsel, sample,
2532 parent, root_al,
2533 max_stack);
2534 }
2535
2536 return ret;
3f067dca 2537}
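/*
 * Usage sketch (hypothetical caller, using the global cursor declared in
 * callchain.h): resolve, commit, then iterate:
 *
 *	if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
 *				      sample, NULL, NULL, max_stack) == 0) {
 *		callchain_cursor_commit(&callchain_cursor);
 *		// walk with callchain_cursor_current()/callchain_cursor_advance()
 *	}
 */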
35feee19
DA
2538
2539int machine__for_each_thread(struct machine *machine,
2540 int (*fn)(struct thread *thread, void *p),
2541 void *priv)
2542{
91e467bc 2543 struct threads *threads;
35feee19
DA
2544 struct rb_node *nd;
2545 struct thread *thread;
2546 int rc = 0;
91e467bc 2547 int i;
35feee19 2548
91e467bc
KL
2549 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2550 threads = &machine->threads[i];
f3acb3a8
DB
2551 for (nd = rb_first_cached(&threads->entries); nd;
2552 nd = rb_next(nd)) {
91e467bc
KL
2553 thread = rb_entry(nd, struct thread, rb_node);
2554 rc = fn(thread, priv);
2555 if (rc != 0)
2556 return rc;
2557 }
35feee19 2558
91e467bc
KL
2559 list_for_each_entry(thread, &threads->dead, node) {
2560 rc = fn(thread, priv);
2561 if (rc != 0)
2562 return rc;
2563 }
35feee19
DA
2564 }
2565 return rc;
2566}
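/*
 * Usage sketch (hypothetical callback, not part of this file): count every
 * thread, live or dead, on a machine; a non-zero return from the callback
 * stops the iteration early:
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */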
58d925dc 2567
a5499b37
AH
2568int machines__for_each_thread(struct machines *machines,
2569 int (*fn)(struct thread *thread, void *p),
2570 void *priv)
2571{
2572 struct rb_node *nd;
2573 int rc = 0;
2574
2575 rc = machine__for_each_thread(&machines->host, fn, priv);
2576 if (rc != 0)
2577 return rc;
2578
f3acb3a8 2579 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
a5499b37
AH
2580 struct machine *machine = rb_entry(nd, struct machine, rb_node);
2581
2582 rc = machine__for_each_thread(machine, fn, priv);
2583 if (rc != 0)
2584 return rc;
2585 }
2586 return rc;
2587}
2588
b9d266ba
AH
2589pid_t machine__get_current_tid(struct machine *machine, int cpu)
2590{
0a3c5537 2591 int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
8c727469
KM
2592
2593 if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
b9d266ba
AH
2594 return -1;
2595
2596 return machine->current_tid[cpu];
2597}
2598
2599int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2600 pid_t tid)
2601{
2602 struct thread *thread;
0a3c5537 2603 int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
b9d266ba
AH
2604
2605 if (cpu < 0)
2606 return -EINVAL;
2607
2608 if (!machine->current_tid) {
2609 int i;
2610
8c727469 2611 machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
b9d266ba
AH
2612 if (!machine->current_tid)
2613 return -ENOMEM;
8c727469 2614 for (i = 0; i < nr_cpus; i++)
b9d266ba
AH
2615 machine->current_tid[i] = -1;
2616 }
2617
8c727469 2618 if (cpu >= nr_cpus) {
b9d266ba
AH
2619 pr_err("Requested CPU %d too large. ", cpu);
2620 pr_err("Consider raising MAX_NR_CPUS\n");
2621 return -EINVAL;
2622 }
2623
2624 machine->current_tid[cpu] = tid;
2625
2626 thread = machine__findnew_thread(machine, pid, tid);
2627 if (!thread)
2628 return -ENOMEM;
2629
2630 thread->cpu = cpu;
b91fc39f 2631 thread__put(thread);
b9d266ba
AH
2632
2633 return 0;
2634}
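/*
 * Usage sketch (hypothetical values): record that thread 1234/1234 is
 * currently running on CPU 2, then read it back:
 *
 *	machine__set_current_tid(machine, 2, 1234, 1234);
 *	pid_t tid = machine__get_current_tid(machine, 2);	// -> 1234
 */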
fbe2af45 2635
dbbd34a6
AH
2636/*
2637 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
2638 * normalized arch is needed.
2639 */
2640bool machine__is(struct machine *machine, const char *arch)
2641{
2642 return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
2643}
2644
9cecca32
AH
2645int machine__nr_cpus_avail(struct machine *machine)
2646{
2647 return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
2648}
2649
fbe2af45
AH
2650int machine__get_kernel_start(struct machine *machine)
2651{
a5e813c6 2652 struct map *map = machine__kernel_map(machine);
fbe2af45
AH
2653 int err = 0;
2654
2655 /*
2656 * The only addresses above 2^63 are kernel addresses of a 64-bit
2657 * kernel. Note that addresses are unsigned so that on a 32-bit system
2658 * all addresses including kernel addresses are less than 2^32. In
2659 * that case (32-bit system), if the kernel mapping is unknown, all
2660 * addresses will be assumed to be in user space - see
2661 * machine__kernel_ip().
2662 */
2663 machine->kernel_start = 1ULL << 63;
2664 if (map) {
be39db9f 2665 err = map__load(map);
19422a9f
AH
2666 /*
2667 * On x86_64, PTI entry trampolines are less than the
2668 * start of kernel text, but still above 2^63. So leave
2669 * kernel_start = 1ULL << 63 for x86_64.
2670 */
2671 if (!err && !machine__is(machine, "x86_64"))
fbe2af45
AH
2672 machine->kernel_start = map->start;
2673 }
2674 return err;
2675}
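/*
 * Illustration (sketch): with kernel_start = 1ULL << 63, the
 * machine__kernel_ip() check in machine.h reduces to testing the top
 * address bit, e.g.:
 *
 *	machine__kernel_ip(machine, 0xffffffff81000000ULL);	// true: kernel
 *	machine__kernel_ip(machine, 0x00007f1234560000ULL);	// false: user
 */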
aa7cc2ae 2676
8e80ad99
AH
2677u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
2678{
2679 u8 addr_cpumode = cpumode;
2680 bool kernel_ip;
2681
2682 if (!machine->single_address_space)
2683 goto out;
2684
2685 kernel_ip = machine__kernel_ip(machine, addr);
2686 switch (cpumode) {
2687 case PERF_RECORD_MISC_KERNEL:
2688 case PERF_RECORD_MISC_USER:
2689 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
2690 PERF_RECORD_MISC_USER;
2691 break;
2692 case PERF_RECORD_MISC_GUEST_KERNEL:
2693 case PERF_RECORD_MISC_GUEST_USER:
2694 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
2695 PERF_RECORD_MISC_GUEST_USER;
2696 break;
2697 default:
2698 break;
2699 }
2700out:
2701 return addr_cpumode;
2702}
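/*
 * Example (sketch, hypothetical address): on a machine where
 * machine->single_address_space is set, a sample flagged as user but
 * carrying a kernel address is reclassified:
 *
 *	u8 m = machine__addr_cpumode(machine, PERF_RECORD_MISC_USER,
 *				     0xfff0000000001234ULL);
 *	// m == PERF_RECORD_MISC_KERNEL
 */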
2703
aa7cc2ae
ACM
2704struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
2705{
e8807844 2706 return dsos__findnew(&machine->dsos, filename);
aa7cc2ae 2707}
c3168b0d
ACM
2708
2709char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
2710{
2711 struct machine *machine = vmachine;
2712 struct map *map;
107cad95 2713 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
c3168b0d
ACM
2714
2715 if (sym == NULL)
2716 return NULL;
2717
2718 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
2719 *addrp = map->unmap_ip(map, sym->start);
2720 return sym->name;
2721}