tools/perf/util/machine.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
76b31a29 2#include <dirent.h>
a43783ae 3#include <errno.h>
fd20e811 4#include <inttypes.h>
1eae20c1 5#include <regex.h>
f2a39fe8 6#include <stdlib.h>
3f067dca 7#include "callchain.h"
b0a7d1a0 8#include "debug.h"
4a3cec84 9#include "dso.h"
f2a39fe8 10#include "env.h"
b0a7d1a0 11#include "event.h"
12#include "evsel.h"
13#include "hist.h"
14#include "machine.h"
15#include "map.h"
16#include "map_symbol.h"
17#include "branch.h"
18#include "mem-events.h"
97b9d866 19#include "srcline.h"
daecf9e0 20#include "symbol.h"
3f067dca 21#include "sort.h"
69d2591a 22#include "strlist.h"
aeb00b1a 23#include "target.h"
9d2f8e22 24#include "thread.h"
97b9d866 25#include "util.h"
d027b640 26#include "vdso.h"
9d2f8e22 27#include <stdbool.h>
28#include <sys/types.h>
29#include <sys/stat.h>
30#include <unistd.h>
3f067dca 31#include "unwind.h"
8b7bad58 32#include "linux/hash.h"
f3b3614a 33#include "asm/bug.h"
45178a92 34#include "bpf-event.h"
20f2be1d 35#include <internal/lib.h> // page_size
d1277aa3 36#include "cgroup.h"
9d2f8e22 37
3052ba56 38#include <linux/ctype.h>
3d689ed6 39#include <symbol/kallsyms.h>
0f476f2b 40#include <linux/mman.h>
8520a98d 41#include <linux/string.h>
7f7c536f 42#include <linux/zalloc.h>
3d689ed6 43
44static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
45
46static struct dso *machine__kernel_dso(struct machine *machine)
47{
48 return machine->vmlinux_map->dso;
49}
50
51static void dsos__init(struct dsos *dsos)
52{
53 INIT_LIST_HEAD(&dsos->head);
54 dsos->root = RB_ROOT;
0a7c74ea 55 init_rwsem(&dsos->lock);
56}
57
58static void machine__threads_init(struct machine *machine)
59{
60 int i;
61
62 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
63 struct threads *threads = &machine->threads[i];
f3acb3a8 64 threads->entries = RB_ROOT_CACHED;
0a7c74ea 65 init_rwsem(&threads->lock);
66 threads->nr = 0;
67 INIT_LIST_HEAD(&threads->dead);
68 threads->last_match = NULL;
69 }
70}
71
72static int machine__set_mmap_name(struct machine *machine)
73{
74 if (machine__is_host(machine))
75 machine->mmap_name = strdup("[kernel.kallsyms]");
76 else if (machine__is_default_guest(machine))
77 machine->mmap_name = strdup("[guest.kernel.kallsyms]");
78 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
79 machine->pid) < 0)
80 machine->mmap_name = NULL;
81
82 return machine->mmap_name ? 0 : -ENOMEM;
83}
84
85int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
86{
87 int err = -ENOMEM;
88
93b0ba3c 89 memset(machine, 0, sizeof(*machine));
79b6bb73 90 maps__init(&machine->kmaps, machine);
69d2591a 91 RB_CLEAR_NODE(&machine->rb_node);
3d39ac53 92 dsos__init(&machine->dsos);
69d2591a 93
91e467bc 94 machine__threads_init(machine);
69d2591a 95
d027b640 96 machine->vdso_info = NULL;
4cde998d 97 machine->env = NULL;
d027b640 98
99 machine->pid = pid;
100
14bd6d20 101 machine->id_hdr_size = 0;
caf8a0d0 102 machine->kptr_restrict_warned = false;
cfe1c414 103 machine->comm_exec = false;
fbe2af45 104 machine->kernel_start = 0;
3183f8ca 105 machine->vmlinux_map = NULL;
cc1121ab 106
107 machine->root_dir = strdup(root_dir);
108 if (machine->root_dir == NULL)
109 return -ENOMEM;
110
111 if (machine__set_mmap_name(machine))
112 goto out;
113
69d2591a 114 if (pid != HOST_KERNEL_ID) {
1fcb8768 115 struct thread *thread = machine__findnew_thread(machine, -1,
314add6b 116 pid);
117 char comm[64];
118
119 if (thread == NULL)
81f981d7 120 goto out;
121
122 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
162f0bef 123 thread__set_comm(thread, comm, 0);
b91fc39f 124 thread__put(thread);
125 }
126
b9d266ba 127 machine->current_tid = NULL;
81f981d7 128 err = 0;
b9d266ba 129
81f981d7 130out:
8c7f1bb3 131 if (err) {
81f981d7 132 zfree(&machine->root_dir);
133 zfree(&machine->mmap_name);
134 }
135 return 0;
136}
137
138struct machine *machine__new_host(void)
139{
140 struct machine *machine = malloc(sizeof(*machine));
141
142 if (machine != NULL) {
143 machine__init(machine, "", HOST_KERNEL_ID);
144
145 if (machine__create_kernel_maps(machine) < 0)
146 goto out_delete;
147 }
148
149 return machine;
150out_delete:
151 free(machine);
152 return NULL;
153}
154
155struct machine *machine__new_kallsyms(void)
156{
157 struct machine *machine = machine__new_host();
158 /*
159 * FIXME:
adba1634 160 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
161 * ask for not using the kcore parsing code, once this one is fixed
162 * to create a map per module.
163 */
329f0ade 164 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
165 machine__delete(machine);
166 machine = NULL;
167 }
168
169 return machine;
170}
171
d3a7c489 172static void dsos__purge(struct dsos *dsos)
173{
174 struct dso *pos, *n;
175
0a7c74ea 176 down_write(&dsos->lock);
e8807844 177
8fa7d87f 178 list_for_each_entry_safe(pos, n, &dsos->head, node) {
4598a0a6 179 RB_CLEAR_NODE(&pos->rb_node);
e266a753 180 pos->root = NULL;
181 list_del_init(&pos->node);
182 dso__put(pos);
69d2591a 183 }
e8807844 184
0a7c74ea 185 up_write(&dsos->lock);
d3a7c489 186}
e8807844 187
188static void dsos__exit(struct dsos *dsos)
189{
190 dsos__purge(dsos);
0a7c74ea 191 exit_rwsem(&dsos->lock);
192}
193
194void machine__delete_threads(struct machine *machine)
195{
b91fc39f 196 struct rb_node *nd;
91e467bc 197 int i;
3f067dca 198
199 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
200 struct threads *threads = &machine->threads[i];
0a7c74ea 201 down_write(&threads->lock);
f3acb3a8 202 nd = rb_first_cached(&threads->entries);
203 while (nd) {
204 struct thread *t = rb_entry(nd, struct thread, rb_node);
3f067dca 205
206 nd = rb_next(nd);
207 __machine__remove_thread(machine, t, false);
208 }
0a7c74ea 209 up_write(&threads->lock);
210 }
211}
212
213void machine__exit(struct machine *machine)
214{
215 int i;
216
217 if (machine == NULL)
218 return;
219
ebe9729c 220 machine__destroy_kernel_maps(machine);
79b6bb73 221 maps__exit(&machine->kmaps);
e8807844 222 dsos__exit(&machine->dsos);
9a4388c7 223 machine__exit_vdso(machine);
04662523 224 zfree(&machine->root_dir);
8c7f1bb3 225 zfree(&machine->mmap_name);
b9d266ba 226 zfree(&machine->current_tid);
227
228 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
229 struct threads *threads = &machine->threads[i];
230 struct thread *thread, *n;
231 /*
232 * Forget about the dead, at this point whatever threads were
233 * left in the dead lists better have a reference count taken
234 * by who is using them, and then, when they drop those references
235 * and it finally hits zero, thread__put() will check and see that
236 * its not in the dead threads list and will not try to remove it
237 * from there, just calling thread__delete() straight away.
238 */
239 list_for_each_entry_safe(thread, n, &threads->dead, node)
240 list_del_init(&thread->node);
241
0a7c74ea 242 exit_rwsem(&threads->lock);
91e467bc 243 }
244}
245
246void machine__delete(struct machine *machine)
247{
248 if (machine) {
249 machine__exit(machine);
250 free(machine);
251 }
252}
253
254void machines__init(struct machines *machines)
255{
256 machine__init(&machines->host, "", HOST_KERNEL_ID);
f3acb3a8 257 machines->guests = RB_ROOT_CACHED;
258}
259
260void machines__exit(struct machines *machines)
261{
262 machine__exit(&machines->host);
263 /* XXX exit guest */
264}
265
266struct machine *machines__add(struct machines *machines, pid_t pid,
267 const char *root_dir)
268{
f3acb3a8 269 struct rb_node **p = &machines->guests.rb_root.rb_node;
270 struct rb_node *parent = NULL;
271 struct machine *pos, *machine = malloc(sizeof(*machine));
f3acb3a8 272 bool leftmost = true;
273
274 if (machine == NULL)
275 return NULL;
276
277 if (machine__init(machine, root_dir, pid) != 0) {
278 free(machine);
279 return NULL;
280 }
281
282 while (*p != NULL) {
283 parent = *p;
284 pos = rb_entry(parent, struct machine, rb_node);
285 if (pid < pos->pid)
286 p = &(*p)->rb_left;
f3acb3a8 287 else {
69d2591a 288 p = &(*p)->rb_right;
289 leftmost = false;
290 }
291 }
292
293 rb_link_node(&machine->rb_node, parent, p);
f3acb3a8 294 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
295
296 return machine;
297}
298
299void machines__set_comm_exec(struct machines *machines, bool comm_exec)
300{
301 struct rb_node *nd;
302
303 machines->host.comm_exec = comm_exec;
304
f3acb3a8 305 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
306 struct machine *machine = rb_entry(nd, struct machine, rb_node);
307
308 machine->comm_exec = comm_exec;
309 }
310}
311
876650e6 312struct machine *machines__find(struct machines *machines, pid_t pid)
69d2591a 313{
f3acb3a8 314 struct rb_node **p = &machines->guests.rb_root.rb_node;
315 struct rb_node *parent = NULL;
316 struct machine *machine;
317 struct machine *default_machine = NULL;
318
319 if (pid == HOST_KERNEL_ID)
320 return &machines->host;
321
322 while (*p != NULL) {
323 parent = *p;
324 machine = rb_entry(parent, struct machine, rb_node);
325 if (pid < machine->pid)
326 p = &(*p)->rb_left;
327 else if (pid > machine->pid)
328 p = &(*p)->rb_right;
329 else
330 return machine;
331 if (!machine->pid)
332 default_machine = machine;
333 }
334
335 return default_machine;
336}
337
876650e6 338struct machine *machines__findnew(struct machines *machines, pid_t pid)
339{
340 char path[PATH_MAX];
341 const char *root_dir = "";
342 struct machine *machine = machines__find(machines, pid);
343
344 if (machine && (machine->pid == pid))
345 goto out;
346
347 if ((pid != HOST_KERNEL_ID) &&
348 (pid != DEFAULT_GUEST_KERNEL_ID) &&
349 (symbol_conf.guestmount)) {
350 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
351 if (access(path, R_OK)) {
352 static struct strlist *seen;
353
354 if (!seen)
4a77e218 355 seen = strlist__new(NULL, NULL);
356
357 if (!strlist__has_entry(seen, path)) {
358 pr_err("Can't access file %s\n", path);
359 strlist__add(seen, path);
360 }
361 machine = NULL;
362 goto out;
363 }
364 root_dir = path;
365 }
366
367 machine = machines__add(machines, pid, root_dir);
368out:
369 return machine;
370}
371
372void machines__process_guests(struct machines *machines,
373 machine__process_t process, void *data)
374{
375 struct rb_node *nd;
376
f3acb3a8 377 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
378 struct machine *pos = rb_entry(nd, struct machine, rb_node);
379 process(pos, data);
380 }
381}
382
876650e6 383void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
384{
385 struct rb_node *node;
386 struct machine *machine;
387
388 machines->host.id_hdr_size = id_hdr_size;
389
390 for (node = rb_first_cached(&machines->guests); node;
391 node = rb_next(node)) {
392 machine = rb_entry(node, struct machine, rb_node);
393 machine->id_hdr_size = id_hdr_size;
394 }
395
396 return;
397}
398
399static void machine__update_thread_pid(struct machine *machine,
400 struct thread *th, pid_t pid)
401{
402 struct thread *leader;
403
404 if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
405 return;
406
407 th->pid_ = pid;
408
409 if (th->pid_ == th->tid)
410 return;
411
b91fc39f 412 leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
413 if (!leader)
414 goto out_err;
415
416 if (!leader->maps)
417 leader->maps = maps__new(machine);
29ce3612 418
fe87797d 419 if (!leader->maps)
420 goto out_err;
421
fe87797d 422 if (th->maps == leader->maps)
423 return;
424
fe87797d 425 if (th->maps) {
426 /*
427 * Maps are created from MMAP events which provide the pid and
428 * tid. Consequently there never should be any maps on a thread
429 * with an unknown pid. Just print an error if there are.
430 */
fe87797d 431 if (!maps__empty(th->maps))
432 pr_err("Discarding thread maps for %d:%d\n",
433 th->pid_, th->tid);
fe87797d 434 maps__put(th->maps);
435 }
436
fe87797d 437 th->maps = maps__get(leader->maps);
438out_put:
439 thread__put(leader);
29ce3612 440 return;
441out_err:
442 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
abd82868 443 goto out_put;
444}
445
abd82868 446/*
447 * Front-end cache - TID lookups come in blocks,
448 * so most of the time we dont have to look up
449 * the full rbtree:
abd82868 450 */
f8b2ebb5 451static struct thread*
452__threads__get_last_match(struct threads *threads, struct machine *machine,
453 int pid, int tid)
9d2f8e22 454{
455 struct thread *th;
456
91e467bc 457 th = threads->last_match;
458 if (th != NULL) {
459 if (th->tid == tid) {
460 machine__update_thread_pid(machine, th, pid);
abd82868 461 return thread__get(th);
462 }
463
91e467bc 464 threads->last_match = NULL;
99d725fc 465 }
9d2f8e22 466
467 return NULL;
468}
469
470static struct thread*
471threads__get_last_match(struct threads *threads, struct machine *machine,
472 int pid, int tid)
473{
474 struct thread *th = NULL;
475
476 if (perf_singlethreaded)
477 th = __threads__get_last_match(threads, machine, pid, tid);
478
479 return th;
480}
481
67fda0f3 482static void
b57334b9 483__threads__set_last_match(struct threads *threads, struct thread *th)
484{
485 threads->last_match = th;
486}
487
488static void
489threads__set_last_match(struct threads *threads, struct thread *th)
490{
491 if (perf_singlethreaded)
492 __threads__set_last_match(threads, th);
493}
494
495/*
496 * Caller must eventually drop thread->refcnt returned with a successful
497 * lookup/new thread inserted.
498 */
499static struct thread *____machine__findnew_thread(struct machine *machine,
500 struct threads *threads,
501 pid_t pid, pid_t tid,
502 bool create)
503{
f3acb3a8 504 struct rb_node **p = &threads->entries.rb_root.rb_node;
505 struct rb_node *parent = NULL;
506 struct thread *th;
f3acb3a8 507 bool leftmost = true;
508
509 th = threads__get_last_match(threads, machine, pid, tid);
510 if (th)
511 return th;
512
513 while (*p != NULL) {
514 parent = *p;
515 th = rb_entry(parent, struct thread, rb_node);
516
38051234 517 if (th->tid == tid) {
67fda0f3 518 threads__set_last_match(threads, th);
29ce3612 519 machine__update_thread_pid(machine, th, pid);
abd82868 520 return thread__get(th);
521 }
522
38051234 523 if (tid < th->tid)
9d2f8e22 524 p = &(*p)->rb_left;
f3acb3a8 525 else {
9d2f8e22 526 p = &(*p)->rb_right;
527 leftmost = false;
528 }
529 }
530
531 if (!create)
532 return NULL;
533
99d725fc 534 th = thread__new(pid, tid);
535 if (th != NULL) {
536 rb_link_node(&th->rb_node, parent, p);
f3acb3a8 537 rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
538
539 /*
79b6bb73 540 * We have to initialize maps separately after rb tree is updated.
541 *
542 * The reason is that we call machine__findnew_thread
79b6bb73 543 * within thread__init_maps to find the thread
544 * leader and that would screwed the rb tree.
545 */
79b6bb73 546 if (thread__init_maps(th, machine)) {
f3acb3a8 547 rb_erase_cached(&th->rb_node, &threads->entries);
b91fc39f 548 RB_CLEAR_NODE(&th->rb_node);
abd82868 549 thread__put(th);
cddcef60 550 return NULL;
418029b7 551 }
552 /*
553 * It is now in the rbtree, get a ref
554 */
555 thread__get(th);
67fda0f3 556 threads__set_last_match(threads, th);
91e467bc 557 ++threads->nr;
558 }
559
560 return th;
561}
562
563struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
564{
75e45e43 565 return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
566}
567
568struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
569 pid_t tid)
9d2f8e22 570{
91e467bc 571 struct threads *threads = machine__threads(machine, tid);
572 struct thread *th;
573
0a7c74ea 574 down_write(&threads->lock);
abd82868 575 th = __machine__findnew_thread(machine, pid, tid);
0a7c74ea 576 up_write(&threads->lock);
b91fc39f 577 return th;
578}
579
580struct thread *machine__find_thread(struct machine *machine, pid_t pid,
581 pid_t tid)
9d2f8e22 582{
91e467bc 583 struct threads *threads = machine__threads(machine, tid);
b91fc39f 584 struct thread *th;
91e467bc 585
0a7c74ea 586 down_read(&threads->lock);
75e45e43 587 th = ____machine__findnew_thread(machine, threads, pid, tid, false);
0a7c74ea 588 up_read(&threads->lock);
b91fc39f 589 return th;
9d2f8e22 590}
b0a7d1a0 591
592struct comm *machine__thread_exec_comm(struct machine *machine,
593 struct thread *thread)
594{
595 if (machine->comm_exec)
596 return thread__exec_comm(thread);
597 else
598 return thread__comm(thread);
599}
600
601int machine__process_comm_event(struct machine *machine, union perf_event *event,
602 struct perf_sample *sample)
b0a7d1a0 603{
604 struct thread *thread = machine__findnew_thread(machine,
605 event->comm.pid,
606 event->comm.tid);
65de51f9 607 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
b91fc39f 608 int err = 0;
b0a7d1a0 609
610 if (exec)
611 machine->comm_exec = true;
612
613 if (dump_trace)
614 perf_event__fprintf_comm(event, stdout);
615
616 if (thread == NULL ||
617 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
b0a7d1a0 618 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
b91fc39f 619 err = -1;
620 }
621
622 thread__put(thread);
623
624 return err;
625}
626
627int machine__process_namespaces_event(struct machine *machine __maybe_unused,
628 union perf_event *event,
629 struct perf_sample *sample __maybe_unused)
630{
631 struct thread *thread = machine__findnew_thread(machine,
632 event->namespaces.pid,
633 event->namespaces.tid);
634 int err = 0;
635
636 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
637 "\nWARNING: kernel seems to support more namespaces than perf"
638 " tool.\nTry updating the perf tool..\n\n");
639
640 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
641 "\nWARNING: perf tool seems to support more namespaces than"
642 " the kernel.\nTry updating the kernel..\n\n");
643
644 if (dump_trace)
645 perf_event__fprintf_namespaces(event, stdout);
646
647 if (thread == NULL ||
648 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
649 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
650 err = -1;
651 }
652
653 thread__put(thread);
654
655 return err;
656}
657
d1277aa3 658int machine__process_cgroup_event(struct machine *machine,
659 union perf_event *event,
660 struct perf_sample *sample __maybe_unused)
661{
662 struct cgroup *cgrp;
663
664 if (dump_trace)
665 perf_event__fprintf_cgroup(event, stdout);
666
667 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
668 if (cgrp == NULL)
669 return -ENOMEM;
670
671 return 0;
672}
673
b0a7d1a0 674int machine__process_lost_event(struct machine *machine __maybe_unused,
162f0bef 675 union perf_event *event, struct perf_sample *sample __maybe_unused)
b0a7d1a0 676{
5290ed69 677 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
678 event->lost.id, event->lost.lost);
679 return 0;
680}
681
682int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
683 union perf_event *event, struct perf_sample *sample)
684{
a2e254d8 685 dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
686 sample->id, event->lost_samples.lost);
687 return 0;
688}
689
690static struct dso *machine__findnew_module_dso(struct machine *machine,
691 struct kmod_path *m,
692 const char *filename)
693{
694 struct dso *dso;
da17ea33 695
0a7c74ea 696 down_write(&machine->dsos.lock);
697
698 dso = __dsos__find(&machine->dsos, m->name, true);
da17ea33 699 if (!dso) {
e8807844 700 dso = __dsos__addnew(&machine->dsos, m->name);
da17ea33 701 if (dso == NULL)
e8807844 702 goto out_unlock;
da17ea33 703
6b335e8f 704 dso__set_module_info(dso, m, machine);
ca33380a 705 dso__set_long_name(dso, strdup(filename), true);
1c695c88 706 dso->kernel = DSO_SPACE__KERNEL;
707 }
708
d3a7c489 709 dso__get(dso);
e8807844 710out_unlock:
0a7c74ea 711 up_write(&machine->dsos.lock);
712 return dso;
713}
714
715int machine__process_aux_event(struct machine *machine __maybe_unused,
716 union perf_event *event)
717{
718 if (dump_trace)
719 perf_event__fprintf_aux(event, stdout);
720 return 0;
721}
722
723int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
724 union perf_event *event)
725{
726 if (dump_trace)
727 perf_event__fprintf_itrace_start(event, stdout);
728 return 0;
729}
730
731int machine__process_switch_event(struct machine *machine __maybe_unused,
732 union perf_event *event)
733{
734 if (dump_trace)
735 perf_event__fprintf_switch(event, stdout);
736 return 0;
737}
738
739static int machine__process_ksymbol_register(struct machine *machine,
740 union perf_event *event,
741 struct perf_sample *sample __maybe_unused)
742{
743 struct symbol *sym;
79b6bb73 744 struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);
9aa0bfa3 745
9aa0bfa3 746 if (!map) {
747 struct dso *dso = dso__new(event->ksymbol.name);
748
749 if (dso) {
1c695c88 750 dso->kernel = DSO_SPACE__KERNEL;
751 map = map__new2(0, dso);
752 }
753
754 if (!dso || !map) {
755 dso__put(dso);
9aa0bfa3 756 return -ENOMEM;
4a4eb615 757 }
9aa0bfa3 758
759 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
760 map->dso->binary_type = DSO_BINARY_TYPE__OOL;
761 map->dso->data.file_size = event->ksymbol.len;
762 dso__set_loaded(map->dso);
763 }
764
765 map->start = event->ksymbol.addr;
766 map->end = map->start + event->ksymbol.len;
79b6bb73 767 maps__insert(&machine->kmaps, map);
7eddf7e7 768 dso__set_loaded(dso);
769
770 if (is_bpf_image(event->ksymbol.name)) {
771 dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
772 dso__set_long_name(dso, "", false);
773 }
774 }
775
8529f2e6 776 sym = symbol__new(map->map_ip(map, map->start),
777 event->ksymbol.len,
778 0, 0, event->ksymbol.name);
779 if (!sym)
780 return -ENOMEM;
781 dso__insert_symbol(map->dso, sym);
782 return 0;
783}
784
785static int machine__process_ksymbol_unregister(struct machine *machine,
786 union perf_event *event,
787 struct perf_sample *sample __maybe_unused)
788{
789 struct map *map;
790
79b6bb73 791 map = maps__find(&machine->kmaps, event->ksymbol.addr);
9aa0bfa3 792 if (map)
79b6bb73 793 maps__remove(&machine->kmaps, map);
794
795 return 0;
796}
797
798int machine__process_ksymbol(struct machine *machine __maybe_unused,
799 union perf_event *event,
800 struct perf_sample *sample)
801{
802 if (dump_trace)
803 perf_event__fprintf_ksymbol(event, stdout);
804
ebdba16e 805 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
806 return machine__process_ksymbol_unregister(machine, event,
807 sample);
808 return machine__process_ksymbol_register(machine, event, sample);
809}
810
811int machine__process_text_poke(struct machine *machine, union perf_event *event,
812 struct perf_sample *sample __maybe_unused)
813{
814 struct map *map = maps__find(&machine->kmaps, event->text_poke.addr);
815 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
816
817 if (dump_trace)
7eeb9855 818 perf_event__fprintf_text_poke(event, machine, stdout);
819
820 if (!event->text_poke.new_len)
821 return 0;
822
823 if (cpumode != PERF_RECORD_MISC_KERNEL) {
824 pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
825 return 0;
826 }
827
828 if (map && map->dso) {
829 u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
830 int ret;
831
832 /*
833 * Kernel maps might be changed when loading symbols so loading
834 * must be done prior to using kernel maps.
835 */
836 map__load(map);
837 ret = dso__data_write_cache_addr(map->dso, map, machine,
838 event->text_poke.addr,
839 new_bytes,
840 event->text_poke.new_len);
841 if (ret != event->text_poke.new_len)
842 pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
843 event->text_poke.addr);
844 } else {
845 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
846 event->text_poke.addr);
847 }
848
849 return 0;
850}
851
852static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
853 const char *filename)
3f067dca 854{
ca33380a 855 struct map *map = NULL;
ca33380a 856 struct kmod_path m;
a94ab91a 857 struct dso *dso;
3f067dca 858
ca33380a 859 if (kmod_path__parse_name(&m, filename))
860 return NULL;
861
9f2de315 862 dso = machine__findnew_module_dso(machine, &m, filename);
863 if (dso == NULL)
864 goto out;
865
3183f8ca 866 map = map__new2(start, dso);
3f067dca 867 if (map == NULL)
ca33380a 868 goto out;
3f067dca 869
79b6bb73 870 maps__insert(&machine->kmaps, map);
ca33380a 871
79b6bb73 872 /* Put the map here because maps__insert alread got it */
9afcb420 873 map__put(map);
ca33380a 874out:
875 /* put the dso here, corresponding to machine__findnew_module_dso */
876 dso__put(dso);
d8f9da24 877 zfree(&m.name);
878 return map;
879}
880
876650e6 881size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
882{
883 struct rb_node *nd;
3d39ac53 884 size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
3f067dca 885
f3acb3a8 886 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3f067dca 887 struct machine *pos = rb_entry(nd, struct machine, rb_node);
3d39ac53 888 ret += __dsos__fprintf(&pos->dsos.head, fp);
889 }
890
891 return ret;
892}
893
8fa7d87f 894size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
895 bool (skip)(struct dso *dso, int parm), int parm)
896{
3d39ac53 897 return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
898}
899
876650e6 900size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
901 bool (skip)(struct dso *dso, int parm), int parm)
902{
903 struct rb_node *nd;
876650e6 904 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
3f067dca 905
f3acb3a8 906 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
907 struct machine *pos = rb_entry(nd, struct machine, rb_node);
908 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
909 }
910 return ret;
911}
912
913size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
914{
915 int i;
916 size_t printed = 0;
93730f85 917 struct dso *kdso = machine__kernel_dso(machine);
918
919 if (kdso->has_build_id) {
920 char filename[PATH_MAX];
921 if (dso__build_id_filename(kdso, filename, sizeof(filename),
922 false))
923 printed += fprintf(fp, "[0] %s\n", filename);
924 }
925
926 for (i = 0; i < vmlinux_path__nr_entries; ++i)
927 printed += fprintf(fp, "[%d] %s\n",
928 i + kdso->has_build_id, vmlinux_path[i]);
929
930 return printed;
931}
932
933size_t machine__fprintf(struct machine *machine, FILE *fp)
934{
3f067dca 935 struct rb_node *nd;
936 size_t ret;
937 int i;
3f067dca 938
939 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
940 struct threads *threads = &machine->threads[i];
941
942 down_read(&threads->lock);
d2c11034 943
91e467bc 944 ret = fprintf(fp, "Threads: %u\n", threads->nr);
3f067dca 945
946 for (nd = rb_first_cached(&threads->entries); nd;
947 nd = rb_next(nd)) {
91e467bc 948 struct thread *pos = rb_entry(nd, struct thread, rb_node);
3f067dca 949
950 ret += thread__fprintf(pos, fp);
951 }
b91fc39f 952
0a7c74ea 953 up_read(&threads->lock);
91e467bc 954 }
955 return ret;
956}
957
958static struct dso *machine__get_kernel(struct machine *machine)
959{
8c7f1bb3 960 const char *vmlinux_name = machine->mmap_name;
961 struct dso *kernel;
962
963 if (machine__is_host(machine)) {
964 if (symbol_conf.vmlinux_name)
965 vmlinux_name = symbol_conf.vmlinux_name;
966
459ce518 967 kernel = machine__findnew_kernel(machine, vmlinux_name,
1c695c88 968 "[kernel]", DSO_SPACE__KERNEL);
3f067dca 969 } else {
970 if (symbol_conf.default_guest_vmlinux_name)
971 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
972
973 kernel = machine__findnew_kernel(machine, vmlinux_name,
974 "[guest.kernel]",
1c695c88 975 DSO_SPACE__KERNEL_GUEST);
976 }
977
978 if (kernel != NULL && (!kernel->has_build_id))
979 dso__read_running_kernel_build_id(kernel, machine);
980
981 return kernel;
982}
983
984struct process_args {
985 u64 start;
986};
987
988void machine__get_kallsyms_filename(struct machine *machine, char *buf,
989 size_t bufsz)
990{
991 if (machine__is_default_guest(machine))
992 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
993 else
994 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
995}
996
997const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
998
999/* Figure out the start address of kernel map from /proc/kallsyms.
1000 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
1001 * symbol_name if it's not that important.
1002 */
b843f62a 1003static int machine__get_running_kernel_start(struct machine *machine,
1004 const char **symbol_name,
1005 u64 *start, u64 *end)
3f067dca 1006{
15a0a870 1007 char filename[PATH_MAX];
b843f62a 1008 int i, err = -1;
1009 const char *name;
1010 u64 addr = 0;
3f067dca 1011
15a0a870 1012 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
1013
1014 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1015 return 0;
1016
a93f0e55 1017 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
1018 err = kallsyms__get_function_start(filename, name, &addr);
1019 if (!err)
1020 break;
1021 }
1022
1023 if (err)
1024 return -1;
1025
1026 if (symbol_name)
1027 *symbol_name = name;
3f067dca 1028
b843f62a 1029 *start = addr;
1030
1031 err = kallsyms__get_function_start(filename, "_etext", &addr);
1032 if (!err)
1033 *end = addr;
1034
b843f62a 1035 return 0;
1036}
1037
1038int machine__create_extra_kernel_map(struct machine *machine,
1039 struct dso *kernel,
1040 struct extra_kernel_map *xm)
1041{
1042 struct kmap *kmap;
1043 struct map *map;
1044
1045 map = map__new2(xm->start, kernel);
1046 if (!map)
1047 return -1;
1048
1049 map->end = xm->end;
1050 map->pgoff = xm->pgoff;
1051
1052 kmap = map__kmap(map);
1053
5759a682 1054 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
4d99e413 1055
79b6bb73 1056 maps__insert(&machine->kmaps, map);
4d99e413 1057
1058 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1059 kmap->name, map->start, map->end);
1060
1061 map__put(map);
1062
1063 return 0;
1064}
1065
1066static u64 find_entry_trampoline(struct dso *dso)
1067{
1068 /* Duplicates are removed so lookup all aliases */
1069 const char *syms[] = {
1070 "_entry_trampoline",
1071 "__entry_trampoline_start",
1072 "entry_SYSCALL_64_trampoline",
1073 };
1074 struct symbol *sym = dso__first_symbol(dso);
1075 unsigned int i;
1076
1077 for (; sym; sym = dso__next_symbol(sym)) {
1078 if (sym->binding != STB_GLOBAL)
1079 continue;
1080 for (i = 0; i < ARRAY_SIZE(syms); i++) {
1081 if (!strcmp(sym->name, syms[i]))
1082 return sym->start;
1083 }
1084 }
1085
1086 return 0;
1087}
1088
1089/*
1090 * These values can be used for kernels that do not have symbols for the entry
1091 * trampolines in kallsyms.
1092 */
1093#define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
1094#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
1095#define X86_64_ENTRY_TRAMPOLINE 0x6000
1096
1097/* Map x86_64 PTI entry trampolines */
1098int machine__map_x86_64_entry_trampolines(struct machine *machine,
1099 struct dso *kernel)
1100{
79b6bb73 1101 struct maps *kmaps = &machine->kmaps;
4d99e413 1102 int nr_cpus_avail, cpu;
1103 bool found = false;
1104 struct map *map;
1105 u64 pgoff;
1106
1107 /*
1108 * In the vmlinux case, pgoff is a virtual address which must now be
1109 * mapped to a vmlinux offset.
1110 */
79b6bb73 1111 maps__for_each_entry(kmaps, map) {
1112 struct kmap *kmap = __map__kmap(map);
1113 struct map *dest_map;
1114
1115 if (!kmap || !is_entry_trampoline(kmap->name))
1116 continue;
1117
79b6bb73 1118 dest_map = maps__find(kmaps, map->pgoff);
1119 if (dest_map != map)
1120 map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
1121 found = true;
1122 }
1123 if (found || machine->trampolines_mapped)
1124 return 0;
4d99e413 1125
1c5aae77 1126 pgoff = find_entry_trampoline(kernel);
1127 if (!pgoff)
1128 return 0;
1129
1130 nr_cpus_avail = machine__nr_cpus_avail(machine);
1131
1132 /* Add a 1 page map for each CPU's entry trampoline */
1133 for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1134 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1135 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1136 X86_64_ENTRY_TRAMPOLINE;
1137 struct extra_kernel_map xm = {
1138 .start = va,
1139 .end = va + page_size,
1140 .pgoff = pgoff,
1141 };
1142
1143 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1144
1145 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1146 return -1;
1147 }
1148
1149 machine->trampolines_mapped = nr_cpus_avail;
1150
1151 return 0;
1152}
1153
1154int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1155 struct dso *kernel __maybe_unused)
1156{
1157 return 0;
1158}
1159
1160static int
1161__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
3f067dca 1162{
1163 /* In case of renewal the kernel map, destroy previous one */
1164 machine__destroy_kernel_maps(machine);
1165
1166 machine->vmlinux_map = map__new2(0, kernel);
1167 if (machine->vmlinux_map == NULL)
1168 return -1;
3f067dca 1169
3183f8ca 1170 machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
484214f4 1171 maps__insert(&machine->kmaps, machine->vmlinux_map);
1172 return 0;
1173}
1174
1175void machine__destroy_kernel_maps(struct machine *machine)
1176{
1177 struct kmap *kmap;
1178 struct map *map = machine__kernel_map(machine);
3f067dca 1179
1180 if (map == NULL)
1181 return;
3f067dca 1182
3183f8ca 1183 kmap = map__kmap(map);
79b6bb73 1184 maps__remove(&machine->kmaps, map);
1185 if (kmap && kmap->ref_reloc_sym) {
1186 zfree((char **)&kmap->ref_reloc_sym->name);
1187 zfree(&kmap->ref_reloc_sym);
3f067dca 1188 }
1189
1190 map__zput(machine->vmlinux_map);
1191}
1192
876650e6 1193int machines__create_guest_kernel_maps(struct machines *machines)
1194{
1195 int ret = 0;
1196 struct dirent **namelist = NULL;
1197 int i, items = 0;
1198 char path[PATH_MAX];
1199 pid_t pid;
1200 char *endp;
1201
1202 if (symbol_conf.default_guest_vmlinux_name ||
1203 symbol_conf.default_guest_modules ||
1204 symbol_conf.default_guest_kallsyms) {
1205 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1206 }
1207
1208 if (symbol_conf.guestmount) {
1209 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1210 if (items <= 0)
1211 return -ENOENT;
1212 for (i = 0; i < items; i++) {
1213 if (!isdigit(namelist[i]->d_name[0])) {
1214 /* Filter out . and .. */
1215 continue;
1216 }
1217 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1218 if ((*endp != '\0') ||
1219 (endp == namelist[i]->d_name) ||
1220 (errno == ERANGE)) {
1221 pr_debug("invalid directory (%s). Skipping.\n",
1222 namelist[i]->d_name);
1223 continue;
1224 }
1225 sprintf(path, "%s/%s/proc/kallsyms",
1226 symbol_conf.guestmount,
1227 namelist[i]->d_name);
1228 ret = access(path, R_OK);
1229 if (ret) {
1230 pr_debug("Can't access file %s\n", path);
1231 goto failure;
1232 }
1233 machines__create_kernel_maps(machines, pid);
1234 }
1235failure:
1236 free(namelist);
1237 }
1238
1239 return ret;
1240}
1241
876650e6 1242void machines__destroy_kernel_maps(struct machines *machines)
3f067dca 1243{
f3acb3a8 1244 struct rb_node *next = rb_first_cached(&machines->guests);
1245
1246 machine__destroy_kernel_maps(&machines->host);
1247
1248 while (next) {
1249 struct machine *pos = rb_entry(next, struct machine, rb_node);
1250
1251 next = rb_next(&pos->rb_node);
f3acb3a8 1252 rb_erase_cached(&pos->rb_node, &machines->guests);
1253 machine__delete(pos);
1254 }
1255}
1256
876650e6 1257int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1258{
1259 struct machine *machine = machines__findnew(machines, pid);
1260
1261 if (machine == NULL)
1262 return -1;
1263
1264 return machine__create_kernel_maps(machine);
1265}
1266
3183f8ca 1267int machine__load_kallsyms(struct machine *machine, const char *filename)
3f067dca 1268{
a5e813c6 1269 struct map *map = machine__kernel_map(machine);
e8f3879f 1270 int ret = __dso__load_kallsyms(map->dso, filename, map, true);
1271
1272 if (ret > 0) {
3183f8ca 1273 dso__set_loaded(map->dso);
1274 /*
1275 * Since /proc/kallsyms will have multiple sessions for the
1276 * kernel, with modules between them, fixup the end of all
1277 * sections.
1278 */
79b6bb73 1279 maps__fixup_end(&machine->kmaps);
1280 }
1281
1282 return ret;
1283}
1284
1d1a2654 1285int machine__load_vmlinux_path(struct machine *machine)
3f067dca 1286{
a5e813c6 1287 struct map *map = machine__kernel_map(machine);
be39db9f 1288 int ret = dso__load_vmlinux_path(map->dso, map);
3f067dca 1289
39b12f78 1290 if (ret > 0)
3183f8ca 1291 dso__set_loaded(map->dso);
1292
1293 return ret;
1294}
1295
1296static char *get_kernel_version(const char *root_dir)
1297{
1298 char version[PATH_MAX];
1299 FILE *file;
1300 char *name, *tmp;
1301 const char *prefix = "Linux version ";
1302
1303 sprintf(version, "%s/proc/version", root_dir);
1304 file = fopen(version, "r");
1305 if (!file)
1306 return NULL;
1307
1308 tmp = fgets(version, sizeof(version), file);
1309 fclose(file);
1310 if (!tmp)
1311 return NULL;
1312
1313 name = strstr(version, prefix);
1314 if (!name)
1315 return NULL;
1316 name += strlen(prefix);
1317 tmp = strchr(name, ' ');
1318 if (tmp)
1319 *tmp = '\0';
1320
1321 return strdup(name);
1322}
1323
1324static bool is_kmod_dso(struct dso *dso)
1325{
1326 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1327 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1328}
1329
9a29ceee 1330static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
bb58a8a4 1331{
bb58a8a4 1332 char *long_name;
9a29ceee 1333 struct map *map = maps__find_by_name(maps, m->name);
bb58a8a4 1334
1335 if (map == NULL)
1336 return 0;
1337
1338 long_name = strdup(path);
1339 if (long_name == NULL)
1340 return -ENOMEM;
1341
1342 dso__set_long_name(map->dso, long_name, true);
1343 dso__kernel_module_get_build_id(map->dso, "");
1344
1345 /*
1346 * Full name could reveal us kmod compression, so
1347 * we need to update the symtab_type if needed.
1348 */
2af52475 1349 if (m->comp && is_kmod_dso(map->dso)) {
bb58a8a4 1350 map->dso->symtab_type++;
1351 map->dso->comp = m->comp;
1352 }
1353
1354 return 0;
1355}
1356
9a29ceee 1357static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1358{
1359 struct dirent *dent;
1360 DIR *dir = opendir(dir_name);
1361 int ret = 0;
1362
1363 if (!dir) {
1364 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1365 return -1;
1366 }
1367
1368 while ((dent = readdir(dir)) != NULL) {
1369 char path[PATH_MAX];
1370 struct stat st;
1371
1372 /*sshfs might return bad dent->d_type, so we have to stat*/
1373 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
1374 if (stat(path, &st))
1375 continue;
1376
1377 if (S_ISDIR(st.st_mode)) {
1378 if (!strcmp(dent->d_name, ".") ||
1379 !strcmp(dent->d_name, ".."))
1380 continue;
1381
1382 /* Do not follow top-level source and build symlinks */
1383 if (depth == 0) {
1384 if (!strcmp(dent->d_name, "source") ||
1385 !strcmp(dent->d_name, "build"))
1386 continue;
1387 }
1388
9a29ceee 1389 ret = maps__set_modules_path_dir(maps, path, depth + 1);
1390 if (ret < 0)
1391 goto out;
1392 } else {
bb58a8a4 1393 struct kmod_path m;
3f067dca 1394
1395 ret = kmod_path__parse_name(&m, dent->d_name);
1396 if (ret)
1397 goto out;
c00c48fc 1398
bb58a8a4 1399 if (m.kmod)
9a29ceee 1400 ret = maps__set_module_path(maps, path, &m);
c00c48fc 1401
d8f9da24 1402 zfree(&m.name);
3f067dca 1403
bb58a8a4 1404 if (ret)
3f067dca 1405 goto out;
1406 }
1407 }
1408
1409out:
1410 closedir(dir);
1411 return ret;
1412}
1413
1414static int machine__set_modules_path(struct machine *machine)
1415{
1416 char *version;
1417 char modules_path[PATH_MAX];
1418
1419 version = get_kernel_version(machine->root_dir);
1420 if (!version)
1421 return -1;
1422
61d4290c 1423 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1424 machine->root_dir, version);
1425 free(version);
1426
79b6bb73 1427 return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
3f067dca 1428}
203d8a4a 1429int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
12a6d294 1430 u64 *size __maybe_unused,
1431 const char *name __maybe_unused)
1432{
1433 return 0;
1434}
3f067dca 1435
1436static int machine__create_module(void *arg, const char *name, u64 start,
1437 u64 size)
3f067dca 1438{
316d70d6 1439 struct machine *machine = arg;
3f067dca 1440 struct map *map;
316d70d6 1441
12a6d294 1442 if (arch__fix_module_text_start(&start, &size, name) < 0)
1443 return -1;
1444
a94ab91a 1445 map = machine__addnew_module_map(machine, start, name);
1446 if (map == NULL)
1447 return -1;
9ad4652b 1448 map->end = start + size;
1449
1450 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1451
1452 return 0;
1453}
1454
1455static int machine__create_modules(struct machine *machine)
1456{
1457 const char *modules;
1458 char path[PATH_MAX];
1459
f4be904d 1460 if (machine__is_default_guest(machine)) {
3f067dca 1461 modules = symbol_conf.default_guest_modules;
1462 } else {
1463 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1464 modules = path;
1465 }
1466
aa7fe3b0 1467 if (symbol__restricted_filename(modules, "/proc/modules"))
1468 return -1;
1469
316d70d6 1470 if (modules__parse(modules, machine, machine__create_module))
1471 return -1;
1472
1473 if (!machine__set_modules_path(machine))
1474 return 0;
3f067dca 1475
316d70d6 1476 pr_debug("Problems setting modules path maps, continuing anyway...\n");
3f067dca 1477
8f76fcd9 1478 return 0;
1479}
1480
1481static void machine__set_kernel_mmap(struct machine *machine,
1482 u64 start, u64 end)
1483{
1484 machine->vmlinux_map->start = start;
1485 machine->vmlinux_map->end = end;
1486 /*
1487 * Be a bit paranoid here, some perf.data file came with
1488 * a zero sized synthesized MMAP event for the kernel.
1489 */
1490 if (start == 0 && end == 0)
1491 machine->vmlinux_map->end = ~0ULL;
1492}
1493
1494static void machine__update_kernel_mmap(struct machine *machine,
1495 u64 start, u64 end)
1496{
1497 struct map *map = machine__kernel_map(machine);
1498
1499 map__get(map);
79b6bb73 1500 maps__remove(&machine->kmaps, map);
1501
1502 machine__set_kernel_mmap(machine, start, end);
1503
79b6bb73 1504 maps__insert(&machine->kmaps, map);
1505 map__put(map);
1506}
1507
1508int machine__create_kernel_maps(struct machine *machine)
1509{
1510 struct dso *kernel = machine__get_kernel(machine);
b843f62a 1511 const char *name = NULL;
ee05d217 1512 struct map *map;
ed9adb20 1513 u64 start = 0, end = ~0ULL;
1514 int ret;
1515
45e90056 1516 if (kernel == NULL)
5512cf24 1517 return -1;
3f067dca 1518
1154c957 1519 ret = __machine__create_kernel_maps(machine, kernel);
1154c957 1520 if (ret < 0)
1c5aae77 1521 goto out_put;
1522
1523 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1524 if (machine__is_host(machine))
1525 pr_debug("Problems creating module maps, "
1526 "continuing anyway...\n");
1527 else
1528 pr_debug("Problems creating module maps for guest %d, "
1529 "continuing anyway...\n", machine->pid);
1530 }
1531
ed9adb20 1532 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
3f938ee2 1533 if (name &&
ed9adb20 1534 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
3f938ee2 1535 machine__destroy_kernel_maps(machine);
1536 ret = -1;
1537 goto out_put;
3f938ee2 1538 }
ee05d217 1539
1540 /*
1541 * we have a real start address now, so re-order the kmaps
1542 * assume it's the last in the kmaps
1543 */
ed9adb20 1544 machine__update_kernel_mmap(machine, start, end);
1545 }
1546
1547 if (machine__create_extra_kernel_maps(machine, kernel))
1548 pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1549
1550 if (end == ~0ULL) {
1551 /* update end address of the kernel map using adjacent module address */
1552 map = map__next(machine__kernel_map(machine));
1553 if (map)
1554 machine__set_kernel_mmap(machine, start, map->start);
1555 }
1556
1557out_put:
1558 dso__put(kernel);
1559 return ret;
1560}
1561
1562static bool machine__uses_kcore(struct machine *machine)
1563{
1564 struct dso *dso;
1565
3d39ac53 1566 list_for_each_entry(dso, &machine->dsos.head, node) {
1567 if (dso__is_kcore(dso))
1568 return true;
1569 }
1570
1571 return false;
1572}
1573
1574static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1575 union perf_event *event)
1576{
1577 return machine__is(machine, "x86_64") &&
1578 is_entry_trampoline(event->mmap.filename);
1579}
1580
1581static int machine__process_extra_kernel_map(struct machine *machine,
1582 union perf_event *event)
1583{
93730f85 1584 struct dso *kernel = machine__kernel_dso(machine);
1585 struct extra_kernel_map xm = {
1586 .start = event->mmap.start,
1587 .end = event->mmap.start + event->mmap.len,
1588 .pgoff = event->mmap.pgoff,
1589 };
1590
1591 if (kernel == NULL)
1592 return -1;
1593
1594 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1595
1596 return machine__create_extra_kernel_map(machine, kernel, &xm);
1597}
1598
1599static int machine__process_kernel_mmap_event(struct machine *machine,
1600 union perf_event *event)
1601{
1602 struct map *map;
1c695c88 1603 enum dso_space_type dso_space;
1604 bool is_kernel_mmap;
1605
1606 /* If we have maps from kcore then we do not need or want any others */
1607 if (machine__uses_kcore(machine))
1608 return 0;
1609
b0a7d1a0 1610 if (machine__is_host(machine))
1c695c88 1611 dso_space = DSO_SPACE__KERNEL;
b0a7d1a0 1612 else
1c695c88 1613 dso_space = DSO_SPACE__KERNEL_GUEST;
1614
1615 is_kernel_mmap = memcmp(event->mmap.filename,
1616 machine->mmap_name,
1617 strlen(machine->mmap_name) - 1) == 0;
1618 if (event->mmap.filename[0] == '/' ||
1619 (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
1620 map = machine__addnew_module_map(machine, event->mmap.start,
1621 event->mmap.filename);
1622 if (map == NULL)
1623 goto out_problem;
1624
1625 map->end = map->start + event->mmap.len;
1626 } else if (is_kernel_mmap) {
1627 const char *symbol_name = (event->mmap.filename +
8c7f1bb3 1628 strlen(machine->mmap_name));
1629 /*
1630 * Should be there already, from the build-id table in
1631 * the header.
1632 */
1633 struct dso *kernel = NULL;
1634 struct dso *dso;
1635
0a7c74ea 1636 down_read(&machine->dsos.lock);
e8807844 1637
3d39ac53 1638 list_for_each_entry(dso, &machine->dsos.head, node) {
1639
1640 /*
1641 * The cpumode passed to is_kernel_module is not the
1642 * cpumode of *this* event. If we insist on passing
1643 * correct cpumode to is_kernel_module, we should
1644 * record the cpumode when we adding this dso to the
1645 * linked list.
1646 *
1647 * However we don't really need passing correct
1648 * cpumode. We know the correct cpumode must be kernel
1649 * mode (if not, we should not link it onto kernel_dsos
1650 * list).
1651 *
1652 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1653 * is_kernel_module() treats it as a kernel cpumode.
1654 */
1655
1656 if (!dso->kernel ||
1657 is_kernel_module(dso->long_name,
1658 PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1659 continue;
1660
1f121b03 1661
1662 kernel = dso;
1663 break;
1664 }
1665
0a7c74ea 1666 up_read(&machine->dsos.lock);
e8807844 1667
b837a8bd 1668 if (kernel == NULL)
8c7f1bb3 1669 kernel = machine__findnew_dso(machine, machine->mmap_name);
1670 if (kernel == NULL)
1671 goto out_problem;
1672
1c695c88 1673 kernel->kernel = dso_space;
1674 if (__machine__create_kernel_maps(machine, kernel) < 0) {
1675 dso__put(kernel);
b0a7d1a0 1676 goto out_problem;
d3a7c489 1677 }
b0a7d1a0 1678
1679 if (strstr(kernel->long_name, "vmlinux"))
1680 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
96d78059 1681
977c7a6d 1682 machine__update_kernel_mmap(machine, event->mmap.start,
05db6ff7 1683 event->mmap.start + event->mmap.len);
1684
1685 /*
1686 * Avoid using a zero address (kptr_restrict) for the ref reloc
1687 * symbol. Effectively having zero here means that at record
1688 * time /proc/sys/kernel/kptr_restrict was non zero.
1689 */
1690 if (event->mmap.pgoff != 0) {
1691 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1692 symbol_name,
1693 event->mmap.pgoff);
1694 }
1695
1696 if (machine__is_default_guest(machine)) {
1697 /*
1698 * preload dso of guest kernel and modules
1699 */
be39db9f 1700 dso__load(kernel, machine__kernel_map(machine));
b0a7d1a0 1701 }
1702 } else if (perf_event__is_extra_kernel_mmap(machine, event)) {
1703 return machine__process_extra_kernel_map(machine, event);
1704 }
1705 return 0;
1706out_problem:
1707 return -1;
1708}
1709
5c5e854b 1710int machine__process_mmap2_event(struct machine *machine,
162f0bef 1711 union perf_event *event,
473398a2 1712 struct perf_sample *sample)
5c5e854b 1713{
1714 struct thread *thread;
1715 struct map *map;
1716 struct dso_id dso_id = {
1717 .maj = event->mmap2.maj,
1718 .min = event->mmap2.min,
1719 .ino = event->mmap2.ino,
1720 .ino_generation = event->mmap2.ino_generation,
1721 };
1722 int ret = 0;
1723
1724 if (dump_trace)
1725 perf_event__fprintf_mmap2(event, stdout);
1726
1727 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1728 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1729 ret = machine__process_kernel_mmap_event(machine, event);
1730 if (ret < 0)
1731 goto out_problem;
1732 return 0;
1733 }
1734
1735 thread = machine__findnew_thread(machine, event->mmap2.pid,
11c9abf2 1736 event->mmap2.tid);
1737 if (thread == NULL)
1738 goto out_problem;
1739
2a03068c 1740 map = map__new(machine, event->mmap2.start,
5c5e854b 1741 event->mmap2.len, event->mmap2.pgoff,
4a7380a5 1742 &dso_id, event->mmap2.prot,
7ef80703 1743 event->mmap2.flags,
3183f8ca 1744 event->mmap2.filename, thread);
1745
1746 if (map == NULL)
b91fc39f 1747 goto out_problem_map;
5c5e854b 1748
1749 ret = thread__insert_map(thread, map);
1750 if (ret)
1751 goto out_problem_insert;
1752
b91fc39f 1753 thread__put(thread);
84c2cafa 1754 map__put(map);
1755 return 0;
1756
1757out_problem_insert:
1758 map__put(map);
1759out_problem_map:
1760 thread__put(thread);
1761out_problem:
1762 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1763 return 0;
1764}
1765
162f0bef 1766int machine__process_mmap_event(struct machine *machine, union perf_event *event,
473398a2 1767 struct perf_sample *sample)
b0a7d1a0 1768{
1769 struct thread *thread;
1770 struct map *map;
0f476f2b 1771 u32 prot = 0;
1772 int ret = 0;
1773
1774 if (dump_trace)
1775 perf_event__fprintf_mmap(event, stdout);
1776
1777 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1778 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1779 ret = machine__process_kernel_mmap_event(machine, event);
1780 if (ret < 0)
1781 goto out_problem;
1782 return 0;
1783 }
1784
314add6b 1785 thread = machine__findnew_thread(machine, event->mmap.pid,
11c9abf2 1786 event->mmap.tid);
1787 if (thread == NULL)
1788 goto out_problem;
bad40917 1789
3183f8ca 1790 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
0f476f2b 1791 prot = PROT_EXEC;
bad40917 1792
2a03068c 1793 map = map__new(machine, event->mmap.start,
b0a7d1a0 1794 event->mmap.len, event->mmap.pgoff,
4a7380a5 1795 NULL, prot, 0, event->mmap.filename, thread);
bad40917 1796
b0a7d1a0 1797 if (map == NULL)
b91fc39f 1798 goto out_problem_map;
b0a7d1a0 1799
1800 ret = thread__insert_map(thread, map);
1801 if (ret)
1802 goto out_problem_insert;
1803
b91fc39f 1804 thread__put(thread);
84c2cafa 1805 map__put(map);
1806 return 0;
1807
1808out_problem_insert:
1809 map__put(map);
1810out_problem_map:
1811 thread__put(thread);
1812out_problem:
1813 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1814 return 0;
1815}
1816
b91fc39f 1817static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
236a3bbd 1818{
1819 struct threads *threads = machine__threads(machine, th->tid);
1820
1821 if (threads->last_match == th)
67fda0f3 1822 threads__set_last_match(threads, NULL);
f3b623b8 1823
b91fc39f 1824 if (lock)
0a7c74ea 1825 down_write(&threads->lock);
1826
1827 BUG_ON(refcount_read(&th->refcnt) == 0);
1828
f3acb3a8 1829 rb_erase_cached(&th->rb_node, &threads->entries);
b91fc39f 1830 RB_CLEAR_NODE(&th->rb_node);
91e467bc 1831 --threads->nr;
236a3bbd 1832 /*
1833 * Move it first to the dead_threads list, then drop the reference,
1834 * if this is the last reference, then the thread__delete destructor
1835 * will be called and we will remove it from the dead_threads list.
236a3bbd 1836 */
91e467bc 1837 list_add_tail(&th->node, &threads->dead);
1838
1839 /*
1840 * We need to do the put here because if this is the last refcount,
1841 * then we will be touching the threads->dead head when removing the
1842 * thread.
1843 */
1844 thread__put(th);
1845
b91fc39f 1846 if (lock)
0a7c74ea 1847 up_write(&threads->lock);
1848}
1849
1850void machine__remove_thread(struct machine *machine, struct thread *th)
1851{
1852 return __machine__remove_thread(machine, th, true);
1853}
1854
162f0bef
FW
1855int machine__process_fork_event(struct machine *machine, union perf_event *event,
1856 struct perf_sample *sample)
b0a7d1a0 1857{
d75e6097
JO
1858 struct thread *thread = machine__find_thread(machine,
1859 event->fork.pid,
1860 event->fork.tid);
314add6b
AH
1861 struct thread *parent = machine__findnew_thread(machine,
1862 event->fork.ppid,
1863 event->fork.ptid);
4f8f382e 1864 bool do_maps_clone = true;
b91fc39f 1865 int err = 0;
b0a7d1a0 1866
5cb73340
AH
1867 if (dump_trace)
1868 perf_event__fprintf_task(event, stdout);
1869
1870 /*
1871 * There may be an existing thread that is not actually the parent,
1872 * either because we are processing events out of order, or because the
1873 * (fork) event that would have removed the thread was lost. Assume the
1874 * latter case and continue on as best we can.
1875 */
1876 if (parent->pid_ != (pid_t)event->fork.ppid) {
1877 dump_printf("removing erroneous parent thread %d/%d\n",
1878 parent->pid_, parent->tid);
1879 machine__remove_thread(machine, parent);
1880 thread__put(parent);
1881 parent = machine__findnew_thread(machine, event->fork.ppid,
1882 event->fork.ptid);
1883 }
1884
236a3bbd 1885 /* if a thread currently exists for the thread id remove it */
b91fc39f 1886 if (thread != NULL) {
236a3bbd 1887 machine__remove_thread(machine, thread);
b91fc39f
ACM
1888 thread__put(thread);
1889 }
236a3bbd 1890
314add6b
AH
1891 thread = machine__findnew_thread(machine, event->fork.pid,
1892 event->fork.tid);
4f8f382e
DM
1893 /*
1894 * When synthesizing FORK events, we are trying to create thread
1895 * objects for the already running tasks on the machine.
1896 *
1897 * Normally, for a kernel FORK event, we want to clone the parent's
1898 * maps because that is what the kernel just did.
1899 *
1900 * But when synthesizing, this should not be done. If we do, we end up
1901 * with overlapping maps as we process the synthesized MMAP2 events that
1902 * get delivered shortly thereafter.
1903 *
1904 * Use the FORK event misc flags in an internal way to signal this
1905 * situation, so we can elide the map clone when appropriate.
1906 */
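 /*
  * As a concrete example of that convention, the synthesizing side
  * (perf_event__synthesize_fork() in util/synthetic-events.c) sets
  * PERF_RECORD_MISC_FORK_EXEC on the FORK records it emits, which is what
  * the check below keys on.
  */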
1907 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1908 do_maps_clone = false;
b0a7d1a0
ACM
1909
1910 if (thread == NULL || parent == NULL ||
4f8f382e 1911 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
b0a7d1a0 1912 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
b91fc39f 1913 err = -1;
b0a7d1a0 1914 }
b91fc39f
ACM
1915 thread__put(thread);
1916 thread__put(parent);
b0a7d1a0 1917
b91fc39f 1918 return err;
b0a7d1a0
ACM
1919}
1920
162f0bef
FW
1921int machine__process_exit_event(struct machine *machine, union perf_event *event,
1922 struct perf_sample *sample __maybe_unused)
b0a7d1a0 1923{
d75e6097
JO
1924 struct thread *thread = machine__find_thread(machine,
1925 event->fork.pid,
1926 event->fork.tid);
b0a7d1a0
ACM
1927
1928 if (dump_trace)
1929 perf_event__fprintf_task(event, stdout);
1930
b91fc39f 1931 if (thread != NULL) {
236a3bbd 1932 thread__exited(thread);
b91fc39f
ACM
1933 thread__put(thread);
1934 }
b0a7d1a0
ACM
1935
1936 return 0;
1937}
1938
162f0bef
FW
1939int machine__process_event(struct machine *machine, union perf_event *event,
1940 struct perf_sample *sample)
b0a7d1a0
ACM
1941{
1942 int ret;
1943
1944 switch (event->header.type) {
1945 case PERF_RECORD_COMM:
162f0bef 1946 ret = machine__process_comm_event(machine, event, sample); break;
b0a7d1a0 1947 case PERF_RECORD_MMAP:
162f0bef 1948 ret = machine__process_mmap_event(machine, event, sample); break;
f3b3614a
HB
1949 case PERF_RECORD_NAMESPACES:
1950 ret = machine__process_namespaces_event(machine, event, sample); break;
ba78c1c5
NK
1951 case PERF_RECORD_CGROUP:
1952 ret = machine__process_cgroup_event(machine, event, sample); break;
5c5e854b 1953 case PERF_RECORD_MMAP2:
162f0bef 1954 ret = machine__process_mmap2_event(machine, event, sample); break;
b0a7d1a0 1955 case PERF_RECORD_FORK:
162f0bef 1956 ret = machine__process_fork_event(machine, event, sample); break;
b0a7d1a0 1957 case PERF_RECORD_EXIT:
162f0bef 1958 ret = machine__process_exit_event(machine, event, sample); break;
b0a7d1a0 1959 case PERF_RECORD_LOST:
162f0bef 1960 ret = machine__process_lost_event(machine, event, sample); break;
4a96f7a0
AH
1961 case PERF_RECORD_AUX:
1962 ret = machine__process_aux_event(machine, event); break;
0ad21f68 1963 case PERF_RECORD_ITRACE_START:
ceb92913 1964 ret = machine__process_itrace_start_event(machine, event); break;
c4937a91
KL
1965 case PERF_RECORD_LOST_SAMPLES:
1966 ret = machine__process_lost_samples_event(machine, event, sample); break;
0286039f
AH
1967 case PERF_RECORD_SWITCH:
1968 case PERF_RECORD_SWITCH_CPU_WIDE:
1969 ret = machine__process_switch_event(machine, event); break;
9aa0bfa3
SL
1970 case PERF_RECORD_KSYMBOL:
1971 ret = machine__process_ksymbol(machine, event, sample); break;
45178a92 1972 case PERF_RECORD_BPF_EVENT:
3f604b5f 1973 ret = machine__process_bpf(machine, event, sample); break;
246eba8e
AH
1974 case PERF_RECORD_TEXT_POKE:
1975 ret = machine__process_text_poke(machine, event, sample); break;
b0a7d1a0
ACM
1976 default:
1977 ret = -1;
1978 break;
1979 }
1980
1981 return ret;
1982}
3f067dca 1983
b21484f1 1984static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
3f067dca 1985{
a7c3899c 1986 if (!regexec(regex, sym->name, 0, NULL, 0))
3f067dca 1987 return 1;
3f067dca
ACM
1988 return 0;
1989}
1990
bb871a9c 1991static void ip__resolve_ams(struct thread *thread,
3f067dca
ACM
1992 struct addr_map_symbol *ams,
1993 u64 ip)
1994{
1995 struct addr_location al;
3f067dca
ACM
1996
1997 memset(&al, 0, sizeof(al));
52a3cb8c
ACM
1998 /*
1999 * We cannot use the header.misc hint to determine whether a
2000 * branch stack address is user, kernel, guest, hypervisor.
2001 * Branches may straddle the kernel/user/hypervisor boundaries.
2002 * Thus, we have to try consecutively until we find a match
2003 * or else the symbol is unknown.
2004 */
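 /*
  * A rough sketch of that probing (the exact order lives in
  * thread__find_cpumode_addr_location()):
  *
  *	thread__find_symbol(thread, PERF_RECORD_MISC_KERNEL, ip, &al);
  *	thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
  *	... guest kernel/user ...
  *
  * stopping at the first cpumode that yields a map.
  */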
26bd9331 2005 thread__find_cpumode_addr_location(thread, ip, &al);
3f067dca 2006
3f067dca
ACM
2007 ams->addr = ip;
2008 ams->al_addr = al.addr;
f2eaea09 2009 ams->ms.maps = al.maps;
d46a4cdf
ACM
2010 ams->ms.sym = al.sym;
2011 ams->ms.map = al.map;
8780fb25 2012 ams->phys_addr = 0;
3f067dca
ACM
2013}
2014
bb871a9c 2015static void ip__resolve_data(struct thread *thread,
8780fb25
KL
2016 u8 m, struct addr_map_symbol *ams,
2017 u64 addr, u64 phys_addr)
98a3b32c
SE
2018{
2019 struct addr_location al;
2020
2021 memset(&al, 0, sizeof(al));
2022
117d3c24 2023 thread__find_symbol(thread, m, addr, &al);
06b2afc0 2024
98a3b32c
SE
2025 ams->addr = addr;
2026 ams->al_addr = al.addr;
f2eaea09 2027 ams->ms.maps = al.maps;
d46a4cdf
ACM
2028 ams->ms.sym = al.sym;
2029 ams->ms.map = al.map;
8780fb25 2030 ams->phys_addr = phys_addr;
98a3b32c
SE
2031}
2032
e80faac0
ACM
2033struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2034 struct addr_location *al)
98a3b32c 2035{
9f87498f 2036 struct mem_info *mi = mem_info__new();
98a3b32c
SE
2037
2038 if (!mi)
2039 return NULL;
2040
bb871a9c 2041 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
8780fb25
KL
2042 ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2043 sample->addr, sample->phys_addr);
98a3b32c
SE
2044 mi->data_src.val = sample->data_src;
2045
2046 return mi;
2047}
2048
5f0fef8a 2049static char *callchain_srcline(struct map_symbol *ms, u64 ip)
40a342cd 2050{
5f0fef8a 2051 struct map *map = ms->map;
21ac9d54
MW
2052 char *srcline = NULL;
2053
40a342cd 2054 if (!map || callchain_param.key == CCKEY_FUNCTION)
21ac9d54
MW
2055 return srcline;
2056
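	/*
	 * srcline results are cached per DSO in an rbtree keyed by address
	 * (srcline__tree_find()/srcline__tree_insert()), so repeated
	 * addresses do not pay for the expensive lookup again.
	 */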
2057 srcline = srcline__tree_find(&map->dso->srclines, ip);
2058 if (!srcline) {
2059 bool show_sym = false;
2060 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2061
2062 srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
5f0fef8a 2063 ms->sym, show_sym, show_addr, ip);
21ac9d54
MW
2064 srcline__tree_insert(&map->dso->srclines, ip, srcline);
2065 }
40a342cd 2066
21ac9d54 2067 return srcline;
40a342cd
MW
2068}
2069
c4ee0625
JY
2070struct iterations {
2071 int nr_loop_iter;
2072 u64 cycles;
2073};
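/*
 * Illustrative flow: remove_loops() further below collapses repeated branch
 * sequences; each collapse bumps nr_loop_iter and records the cycle counts of
 * the removed entries, and add_callchain_ip() forwards both values to
 * callchain_cursor_append().
 */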
2074
37592b8a 2075static int add_callchain_ip(struct thread *thread,
91d7b2de 2076 struct callchain_cursor *cursor,
37592b8a
AK
2077 struct symbol **parent,
2078 struct addr_location *root_al,
73dbcd65 2079 u8 *cpumode,
410024db
JY
2080 u64 ip,
2081 bool branch,
2082 struct branch_flags *flags,
c4ee0625 2083 struct iterations *iter,
b851dd49 2084 u64 branch_from)
37592b8a 2085{
5f0fef8a 2086 struct map_symbol ms;
37592b8a 2087 struct addr_location al;
c4ee0625
JY
2088 int nr_loop_iter = 0;
2089 u64 iter_cycles = 0;
40a342cd 2090 const char *srcline = NULL;
37592b8a
AK
2091
2092 al.filtered = 0;
2093 al.sym = NULL;
73dbcd65 2094 if (!cpumode) {
26bd9331 2095 thread__find_cpumode_addr_location(thread, ip, &al);
73dbcd65 2096 } else {
2e77784b
KL
2097 if (ip >= PERF_CONTEXT_MAX) {
2098 switch (ip) {
2099 case PERF_CONTEXT_HV:
73dbcd65 2100 *cpumode = PERF_RECORD_MISC_HYPERVISOR;
2e77784b
KL
2101 break;
2102 case PERF_CONTEXT_KERNEL:
73dbcd65 2103 *cpumode = PERF_RECORD_MISC_KERNEL;
2e77784b
KL
2104 break;
2105 case PERF_CONTEXT_USER:
73dbcd65 2106 *cpumode = PERF_RECORD_MISC_USER;
2e77784b
KL
2107 break;
2108 default:
2109 pr_debug("invalid callchain context: "
2110 "%"PRId64"\n", (s64) ip);
2111 /*
2112 * It seems the callchain is corrupted.
2113 * Discard all.
2114 */
91d7b2de 2115 callchain_cursor_reset(cursor);
2e77784b
KL
2116 return 1;
2117 }
2118 return 0;
2119 }
4546263d 2120 thread__find_symbol(thread, *cpumode, ip, &al);
2e77784b
KL
2121 }
2122
37592b8a 2123 if (al.sym != NULL) {
de7e6a7c 2124 if (perf_hpp_list.parent && !*parent &&
37592b8a
AK
2125 symbol__match_regex(al.sym, &parent_regex))
2126 *parent = al.sym;
2127 else if (have_ignore_callees && root_al &&
2128 symbol__match_regex(al.sym, &ignore_callees_regex)) {
2129 /* Treat this symbol as the root,
2130 forgetting its callees. */
2131 *root_al = al;
91d7b2de 2132 callchain_cursor_reset(cursor);
37592b8a
AK
2133 }
2134 }
2135
b49a8fe5
NK
2136 if (symbol_conf.hide_unresolved && al.sym == NULL)
2137 return 0;
c4ee0625
JY
2138
2139 if (iter) {
2140 nr_loop_iter = iter->nr_loop_iter;
2141 iter_cycles = iter->cycles;
2142 }
2143
f2eaea09 2144 ms.maps = al.maps;
5f0fef8a
ACM
2145 ms.map = al.map;
2146 ms.sym = al.sym;
2147 srcline = callchain_srcline(&ms, al.addr);
2148 return callchain_cursor_append(cursor, ip, &ms,
c4ee0625 2149 branch, flags, nr_loop_iter,
40a342cd 2150 iter_cycles, branch_from, srcline);
37592b8a
AK
2151}
2152
644f2df2
ACM
2153struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2154 struct addr_location *al)
3f067dca 2155{
3f067dca 2156 unsigned int i;
644f2df2 2157 const struct branch_stack *bs = sample->branch_stack;
42bbabed 2158 struct branch_entry *entries = perf_sample__branch_entries(sample);
644f2df2 2159 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
3f067dca 2160
3f067dca
ACM
2161 if (!bi)
2162 return NULL;
2163
2164 for (i = 0; i < bs->nr; i++) {
42bbabed
KL
2165 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2166 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2167 bi[i].flags = entries[i].flags;
3f067dca
ACM
2168 }
2169 return bi;
2170}
2171
c4ee0625
JY
2172static void save_iterations(struct iterations *iter,
2173 struct branch_entry *be, int nr)
2174{
2175 int i;
2176
a3366db0 2177 iter->nr_loop_iter++;
c4ee0625
JY
2178 iter->cycles = 0;
2179
2180 for (i = 0; i < nr; i++)
2181 iter->cycles += be[i].flags.cycles;
2182}
2183
8b7bad58
AK
2184#define CHASHSZ 127
2185#define CHASHBITS 7
2186#define NO_ENTRY 0xff
2187
2188#define PERF_MAX_BRANCH_DEPTH 127
2189
2190/* Remove loops. */
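/*
 * Sketch of the idea: repeated "from" sequences are found with a small hash
 * table keyed on the branch source address and collapsed in place. For
 * instance, the from-address sequence
 *
 *	A B C A B C D
 *
 * becomes A B C D, with the iterations slot of the entry following the
 * removed copy recording one extra loop iteration plus the cycles of the
 * removed entries.
 */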
c4ee0625
JY
2191static int remove_loops(struct branch_entry *l, int nr,
2192 struct iterations *iter)
8b7bad58
AK
2193{
2194 int i, j, off;
2195 unsigned char chash[CHASHSZ];
2196
2197 memset(chash, NO_ENTRY, sizeof(chash));
2198
2199 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2200
2201 for (i = 0; i < nr; i++) {
2202 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2203
2204 /* no collision handling for now */
2205 if (chash[h] == NO_ENTRY) {
2206 chash[h] = i;
2207 } else if (l[chash[h]].from == l[i].from) {
2208 bool is_loop = true;
2209 /* check if it is a real loop */
2210 off = 0;
2211 for (j = chash[h]; j < i && i + off < nr; j++, off++)
2212 if (l[j].from != l[i + off].from) {
2213 is_loop = false;
2214 break;
2215 }
2216 if (is_loop) {
c4ee0625
JY
2217 j = nr - (i + off);
2218 if (j > 0) {
2219 save_iterations(iter + i + off,
2220 l + i, off);
2221
2222 memmove(iter + i, iter + i + off,
2223 j * sizeof(*iter));
2224
2225 memmove(l + i, l + i + off,
2226 j * sizeof(*l));
2227 }
2228
8b7bad58
AK
2229 nr -= off;
2230 }
2231 }
2232 }
2233 return nr;
2234}
2235
dd3e249a
KL
2236static int lbr_callchain_add_kernel_ip(struct thread *thread,
2237 struct callchain_cursor *cursor,
2238 struct perf_sample *sample,
2239 struct symbol **parent,
2240 struct addr_location *root_al,
2241 u64 branch_from,
2242 bool callee, int end)
2243{
2244 struct ip_callchain *chain = sample->callchain;
2245 u8 cpumode = PERF_RECORD_MISC_USER;
2246 int err, i;
2247
2248 if (callee) {
2249 for (i = 0; i < end + 1; i++) {
2250 err = add_callchain_ip(thread, cursor, parent,
2251 root_al, &cpumode, chain->ips[i],
2252 false, NULL, NULL, branch_from);
2253 if (err)
2254 return err;
2255 }
2256 return 0;
2257 }
2258
2259 for (i = end; i >= 0; i--) {
2260 err = add_callchain_ip(thread, cursor, parent,
2261 root_al, &cpumode, chain->ips[i],
2262 false, NULL, NULL, branch_from);
2263 if (err)
2264 return err;
2265 }
2266
2267 return 0;
2268}
2269
7f1d3931
KL
2270static void save_lbr_cursor_node(struct thread *thread,
2271 struct callchain_cursor *cursor,
2272 int idx)
2273{
2274 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2275
2276 if (!lbr_stitch)
2277 return;
2278
2279 if (cursor->pos == cursor->nr) {
2280 lbr_stitch->prev_lbr_cursor[idx].valid = false;
2281 return;
2282 }
2283
2284 if (!cursor->curr)
2285 cursor->curr = cursor->first;
2286 else
2287 cursor->curr = cursor->curr->next;
2288 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2289 sizeof(struct callchain_cursor_node));
2290
2291 lbr_stitch->prev_lbr_cursor[idx].valid = true;
2292 cursor->pos++;
2293}
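/*
 * save_lbr_cursor_node() mirrors each resolved LBR entry into
 * lbr_stitch->prev_lbr_cursor[idx]; has_stitched_lbr() later copies those
 * saved nodes so they can be spliced into the next sample's callchain.
 */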
2294
e2b23483
KL
2295static int lbr_callchain_add_lbr_ip(struct thread *thread,
2296 struct callchain_cursor *cursor,
2297 struct perf_sample *sample,
2298 struct symbol **parent,
2299 struct addr_location *root_al,
2300 u64 *branch_from,
2301 bool callee)
2302{
2303 struct branch_stack *lbr_stack = sample->branch_stack;
2304 struct branch_entry *entries = perf_sample__branch_entries(sample);
2305 u8 cpumode = PERF_RECORD_MISC_USER;
2306 int lbr_nr = lbr_stack->nr;
2307 struct branch_flags *flags;
2308 int err, i;
2309 u64 ip;
2310
7f1d3931
KL
2311 /*
2312 * The curr and pos fields are not used in the writing session. They are
2313 * cleared in callchain_cursor_commit() when the writing session is closed.
2314 * Use curr and pos here to track the current cursor node.
2315 */
2316 if (thread->lbr_stitch) {
2317 cursor->curr = NULL;
2318 cursor->pos = cursor->nr;
2319 if (cursor->nr) {
2320 cursor->curr = cursor->first;
2321 for (i = 0; i < (int)(cursor->nr - 1); i++)
2322 cursor->curr = cursor->curr->next;
2323 }
2324 }
2325
e2b23483
KL
2326 if (callee) {
2327 /* Add LBR ip from first entries.to */
2328 ip = entries[0].to;
2329 flags = &entries[0].flags;
2330 *branch_from = entries[0].from;
2331 err = add_callchain_ip(thread, cursor, parent,
2332 root_al, &cpumode, ip,
2333 true, flags, NULL,
2334 *branch_from);
2335 if (err)
2336 return err;
2337
7f1d3931
KL
2338 /*
2339 * The number of cursor nodes increases.
2340 * Move the current cursor node, but there is no need to save the
2341 * cursor node for entry 0: it is impossible to stitch the whole
2342 * LBR stack of the previous sample anyway.
2343 */
2344 if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
2345 if (!cursor->curr)
2346 cursor->curr = cursor->first;
2347 else
2348 cursor->curr = cursor->curr->next;
2349 cursor->pos++;
2350 }
2351
e2b23483
KL
2352 /* Add LBR ip from entries.from one by one. */
2353 for (i = 0; i < lbr_nr; i++) {
2354 ip = entries[i].from;
2355 flags = &entries[i].flags;
2356 err = add_callchain_ip(thread, cursor, parent,
2357 root_al, &cpumode, ip,
2358 true, flags, NULL,
2359 *branch_from);
2360 if (err)
2361 return err;
7f1d3931 2362 save_lbr_cursor_node(thread, cursor, i);
e2b23483
KL
2363 }
2364 return 0;
2365 }
2366
2367 /* Add LBR ip from entries.from one by one. */
2368 for (i = lbr_nr - 1; i >= 0; i--) {
2369 ip = entries[i].from;
2370 flags = &entries[i].flags;
2371 err = add_callchain_ip(thread, cursor, parent,
2372 root_al, &cpumode, ip,
2373 true, flags, NULL,
2374 *branch_from);
2375 if (err)
2376 return err;
7f1d3931 2377 save_lbr_cursor_node(thread, cursor, i);
e2b23483
KL
2378 }
2379
2380 /* Add LBR ip from first entries.to */
2381 ip = entries[0].to;
2382 flags = &entries[0].flags;
2383 *branch_from = entries[0].from;
2384 err = add_callchain_ip(thread, cursor, parent,
2385 root_al, &cpumode, ip,
2386 true, flags, NULL,
2387 *branch_from);
2388 if (err)
2389 return err;
2390
2391 return 0;
2392}
2393
ff165628
KL
2394static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2395 struct callchain_cursor *cursor)
2396{
2397 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2398 struct callchain_cursor_node *cnode;
2399 struct stitch_list *stitch_node;
2400 int err;
2401
2402 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2403 cnode = &stitch_node->cursor;
2404
2405 err = callchain_cursor_append(cursor, cnode->ip,
2406 &cnode->ms,
2407 cnode->branch,
2408 &cnode->branch_flags,
2409 cnode->nr_loop_iter,
2410 cnode->iter_cycles,
2411 cnode->branch_from,
2412 cnode->srcline);
2413 if (err)
2414 return err;
2415 }
2416 return 0;
2417}
2418
2419static struct stitch_list *get_stitch_node(struct thread *thread)
2420{
2421 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2422 struct stitch_list *stitch_node;
2423
2424 if (!list_empty(&lbr_stitch->free_lists)) {
2425 stitch_node = list_first_entry(&lbr_stitch->free_lists,
2426 struct stitch_list, node);
2427 list_del(&stitch_node->node);
2428
2429 return stitch_node;
2430 }
2431
2432 return malloc(sizeof(struct stitch_list));
2433}
2434
2435static bool has_stitched_lbr(struct thread *thread,
2436 struct perf_sample *cur,
2437 struct perf_sample *prev,
2438 unsigned int max_lbr,
2439 bool callee)
2440{
2441 struct branch_stack *cur_stack = cur->branch_stack;
2442 struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2443 struct branch_stack *prev_stack = prev->branch_stack;
2444 struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2445 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2446 int i, j, nr_identical_branches = 0;
2447 struct stitch_list *stitch_node;
2448 u64 cur_base, distance;
2449
2450 if (!cur_stack || !prev_stack)
2451 return false;
2452
2453 /* Find the physical index of the base-of-stack for current sample. */
2454 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2455
2456 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2457 (max_lbr + prev_stack->hw_idx - cur_base);
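	/*
	 * Purely illustrative numbers: with max_lbr = 32, a current sample
	 * with nr = 8 and hw_idx = 5 gives cur_base = 32 - 8 + 5 + 1 = 30; a
	 * previous sample with hw_idx = 3 then takes the wrap-around branch,
	 * giving distance = 32 + 3 - 30 = 5.
	 */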
2458 /* Previous sample has shorter stack. Nothing can be stitched. */
2459 if (distance + 1 > prev_stack->nr)
2460 return false;
2461
2462 /*
2463 * Check if there are identical LBRs between two samples.
2464 * Identical LBRs must have the same from, to and flags values. Also,
2465 * they have to be saved in the same LBR registers (same physical
2466 * index).
2467 *
2468 * Start from the base-of-stack of the current sample.
2469 */
2470 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2471 if ((prev_entries[i].from != cur_entries[j].from) ||
2472 (prev_entries[i].to != cur_entries[j].to) ||
2473 (prev_entries[i].flags.value != cur_entries[j].flags.value))
2474 break;
2475 nr_identical_branches++;
2476 }
2477
2478 if (!nr_identical_branches)
2479 return false;
2480
2481 /*
2482 * Save the LBRs between the base-of-stack of previous sample
2483 * and the base-of-stack of current sample into lbr_stitch->lists.
2484 * These LBRs will be stitched later.
2485 */
2486 for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2487
2488 if (!lbr_stitch->prev_lbr_cursor[i].valid)
2489 continue;
2490
2491 stitch_node = get_stitch_node(thread);
2492 if (!stitch_node)
2493 return false;
2494
2495 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2496 sizeof(struct callchain_cursor_node));
2497
2498 if (callee)
2499 list_add(&stitch_node->node, &lbr_stitch->lists);
2500 else
2501 list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2502 }
2503
2504 return true;
2505}
2506
7f1d3931 2507static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
9c6c3f47
KL
2508{
2509 if (thread->lbr_stitch)
2510 return true;
2511
2512 thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
2513 if (!thread->lbr_stitch)
2514 goto err;
2515
7f1d3931
KL
2516 thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2517 if (!thread->lbr_stitch->prev_lbr_cursor)
2518 goto free_lbr_stitch;
2519
ff165628
KL
2520 INIT_LIST_HEAD(&thread->lbr_stitch->lists);
2521 INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
2522
7f1d3931
KL
2523 return true;
2524
2525free_lbr_stitch:
2526 zfree(&thread->lbr_stitch);
9c6c3f47
KL
2527err:
2528 pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2529 thread->lbr_stitch_enable = false;
2530 return false;
2531}
2532
384b6055
KL
2533/*
2534 * Resolve LBR callstack chain sample
2535 * Return:
2536 * 1 on success: LBR callchain information was resolved
2537 * 0 if no LBR callchain information is available; the caller should try fp
2538 * negative error code on other errors.
2539 */
2540static int resolve_lbr_callchain_sample(struct thread *thread,
91d7b2de 2541 struct callchain_cursor *cursor,
384b6055
KL
2542 struct perf_sample *sample,
2543 struct symbol **parent,
2544 struct addr_location *root_al,
7f1d3931
KL
2545 int max_stack,
2546 unsigned int max_lbr)
3f067dca 2547{
ff165628 2548 bool callee = (callchain_param.order == ORDER_CALLEE);
384b6055 2549 struct ip_callchain *chain = sample->callchain;
18ef15c6 2550 int chain_nr = min(max_stack, (int)chain->nr), i;
9c6c3f47 2551 struct lbr_stitch *lbr_stitch;
ff165628 2552 bool stitched_lbr = false;
e2b23483 2553 u64 branch_from = 0;
e48b8311 2554 int err;
384b6055
KL
2555
2556 for (i = 0; i < chain_nr; i++) {
2557 if (chain->ips[i] == PERF_CONTEXT_USER)
2558 break;
2559 }
2560
2561 /* LBR only affects the user callchain */
f8603267
KL
2562 if (i == chain_nr)
2563 return 0;
2564
9c6c3f47 2565 if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
7f1d3931 2566 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
9c6c3f47
KL
2567 lbr_stitch = thread->lbr_stitch;
2568
ff165628
KL
2569 stitched_lbr = has_stitched_lbr(thread, sample,
2570 &lbr_stitch->prev_sample,
2571 max_lbr, callee);
2572
2573 if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2574 list_replace_init(&lbr_stitch->lists,
2575 &lbr_stitch->free_lists);
2576 }
9c6c3f47
KL
2577 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2578 }
2579
ff165628 2580 if (callee) {
e48b8311 2581 /* Add kernel ip */
dd3e249a
KL
2582 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2583 parent, root_al, branch_from,
2584 true, i);
2585 if (err)
2586 goto error;
2587
e2b23483
KL
2588 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2589 root_al, &branch_from, true);
e48b8311
KL
2590 if (err)
2591 goto error;
384b6055 2592
ff165628
KL
2593 if (stitched_lbr) {
2594 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2595 if (err)
2596 goto error;
2597 }
2598
e48b8311 2599 } else {
ff165628
KL
2600 if (stitched_lbr) {
2601 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2602 if (err)
2603 goto error;
2604 }
e2b23483
KL
2605 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2606 root_al, &branch_from, false);
f8603267 2607 if (err)
e48b8311
KL
2608 goto error;
2609
2610 /* Add kernel ip */
dd3e249a
KL
2611 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2612 parent, root_al, branch_from,
2613 false, i);
2614 if (err)
2615 goto error;
f8603267
KL
2616 }
2617 return 1;
e48b8311
KL
2618
2619error:
2620 return (err < 0) ? err : 0;
384b6055
KL
2621}
2622
e9024d51
DM
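/*
 * Scan chain->ips[] from index 'ent' towards 0 and feed the first
 * PERF_CONTEXT_* marker found to add_callchain_ip(), which translates it
 * into *cpumode. This is needed when the chain is resolved in caller order,
 * because the marker that governs an entry sits at a lower index than the
 * entry itself.
 */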
2623static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2624 struct callchain_cursor *cursor,
2625 struct symbol **parent,
2626 struct addr_location *root_al,
2627 u8 *cpumode, int ent)
2628{
2629 int err = 0;
2630
2631 while (--ent >= 0) {
2632 u64 ip = chain->ips[ent];
2633
2634 if (ip >= PERF_CONTEXT_MAX) {
2635 err = add_callchain_ip(thread, cursor, parent,
2636 root_al, cpumode, ip,
2637 false, NULL, NULL, 0);
2638 break;
2639 }
2640 }
2641 return err;
2642}
2643
384b6055 2644static int thread__resolve_callchain_sample(struct thread *thread,
91d7b2de 2645 struct callchain_cursor *cursor,
32dcd021 2646 struct evsel *evsel,
384b6055
KL
2647 struct perf_sample *sample,
2648 struct symbol **parent,
2649 struct addr_location *root_al,
2650 int max_stack)
2651{
2652 struct branch_stack *branch = sample->branch_stack;
42bbabed 2653 struct branch_entry *entries = perf_sample__branch_entries(sample);
384b6055 2654 struct ip_callchain *chain = sample->callchain;
b49a821e 2655 int chain_nr = 0;
73dbcd65 2656 u8 cpumode = PERF_RECORD_MISC_USER;
bf8bddbf 2657 int i, j, err, nr_entries;
8b7bad58
AK
2658 int skip_idx = -1;
2659 int first_call = 0;
2660
b49a821e
JY
2661 if (chain)
2662 chain_nr = chain->nr;
2663
4f138a9e 2664 if (evsel__has_branch_callstack(evsel)) {
6e6d1d65 2665 struct perf_env *env = evsel__env(evsel);
7f1d3931 2666
91d7b2de 2667 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
7f1d3931
KL
2668 root_al, max_stack,
2669 !env ? 0 : env->max_branches);
384b6055
KL
2670 if (err)
2671 return (err < 0) ? err : 0;
2672 }
2673
8b7bad58
AK
2674 /*
2675 * Based on DWARF debug information, some architectures skip
2676 * a callchain entry saved by the kernel.
2677 */
bf8bddbf 2678 skip_idx = arch_skip_callchain_idx(thread, chain);
3f067dca 2679
8b7bad58
AK
2680 /*
2681 * Add branches to call stack for easier browsing. This gives
2682 * more context for a sample than just the callers.
2683 *
2684 * This uses individual histograms of paths compared to the
2685 * aggregated histograms the normal LBR mode uses.
2686 *
2687 * Limitations for now:
2688 * - No extra filters
2689 * - No annotations (should annotate somehow)
2690 */
2691
2692 if (branch && callchain_param.branch_callstack) {
2693 int nr = min(max_stack, (int)branch->nr);
2694 struct branch_entry be[nr];
c4ee0625 2695 struct iterations iter[nr];
8b7bad58
AK
2696
2697 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2698 pr_warning("corrupted branch chain. skipping...\n");
2699 goto check_calls;
2700 }
2701
2702 for (i = 0; i < nr; i++) {
2703 if (callchain_param.order == ORDER_CALLEE) {
42bbabed 2704 be[i] = entries[i];
b49a821e
JY
2705
2706 if (chain == NULL)
2707 continue;
2708
8b7bad58
AK
2709 /*
2710 * Check for overlap into the callchain.
2711 * The return address is one off compared to
2712 * the branch entry. To adjust for this,
2713 * assume the calling instruction is not longer
2714 * than 8 bytes.
2715 */
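 /*
  * E.g. an x86-64 near call (e8 + rel32) is 5 bytes, so a return address
  * lying a few bytes past be[i].from belongs to the same frame and must
  * not be counted twice.
  */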
2716 if (i == skip_idx ||
2717 chain->ips[first_call] >= PERF_CONTEXT_MAX)
2718 first_call++;
2719 else if (be[i].from < chain->ips[first_call] &&
2720 be[i].from >= chain->ips[first_call] - 8)
2721 first_call++;
2722 } else
42bbabed 2723 be[i] = entries[branch->nr - i - 1];
8b7bad58
AK
2724 }
2725
c4ee0625
JY
2726 memset(iter, 0, sizeof(struct iterations) * nr);
2727 nr = remove_loops(be, nr, iter);
410024db 2728
8b7bad58 2729 for (i = 0; i < nr; i++) {
c4ee0625
JY
2730 err = add_callchain_ip(thread, cursor, parent,
2731 root_al,
2732 NULL, be[i].to,
2733 true, &be[i].flags,
2734 NULL, be[i].from);
410024db 2735
8b7bad58 2736 if (!err)
91d7b2de 2737 err = add_callchain_ip(thread, cursor, parent, root_al,
410024db
JY
2738 NULL, be[i].from,
2739 true, &be[i].flags,
c4ee0625 2740 &iter[i], 0);
8b7bad58
AK
2741 if (err == -EINVAL)
2742 break;
2743 if (err)
2744 return err;
2745 }
b49a821e
JY
2746
2747 if (chain_nr == 0)
2748 return 0;
2749
8b7bad58
AK
2750 chain_nr -= nr;
2751 }
2752
2753check_calls:
aceb9826 2754 if (chain && callchain_param.order != ORDER_CALLEE) {
e9024d51
DM
2755 err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2756 &cpumode, chain->nr - first_call);
2757 if (err)
2758 return (err < 0) ? err : 0;
2759 }
bf8bddbf 2760 for (i = first_call, nr_entries = 0;
a29d5c9b 2761 i < chain_nr && nr_entries < max_stack; i++) {
3f067dca 2762 u64 ip;
3f067dca
ACM
2763
2764 if (callchain_param.order == ORDER_CALLEE)
a60335ba 2765 j = i;
3f067dca 2766 else
a60335ba
SB
2767 j = chain->nr - i - 1;
2768
2769#ifdef HAVE_SKIP_CALLCHAIN_IDX
2770 if (j == skip_idx)
2771 continue;
2772#endif
2773 ip = chain->ips[j];
bf8bddbf
ACM
2774 if (ip < PERF_CONTEXT_MAX)
2775 ++nr_entries;
e9024d51
DM
2776 else if (callchain_param.order != ORDER_CALLEE) {
2777 err = find_prev_cpumode(chain, thread, cursor, parent,
2778 root_al, &cpumode, j);
2779 if (err)
2780 return (err < 0) ? err : 0;
2781 continue;
2782 }
a29d5c9b 2783
410024db
JY
2784 err = add_callchain_ip(thread, cursor, parent,
2785 root_al, &cpumode, ip,
c4ee0625 2786 false, NULL, NULL, 0);
3f067dca 2787
3f067dca 2788 if (err)
2e77784b 2789 return (err < 0) ? err : 0;
3f067dca
ACM
2790 }
2791
2792 return 0;
2793}
2794
c1529738 2795static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
11ea2515 2796{
c1529738
ACM
2797 struct symbol *sym = ms->sym;
2798 struct map *map = ms->map;
11ea2515
MW
2799 struct inline_node *inline_node;
2800 struct inline_list *ilist;
2801 u64 addr;
b38775cf 2802 int ret = 1;
11ea2515
MW
2803
2804 if (!symbol_conf.inline_name || !map || !sym)
b38775cf 2805 return ret;
11ea2515 2806
7a8a8fcf
MW
2807 addr = map__map_ip(map, ip);
2808 addr = map__rip_2objdump(map, addr);
11ea2515
MW
2809
2810 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2811 if (!inline_node) {
2812 inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2813 if (!inline_node)
b38775cf 2814 return ret;
11ea2515
MW
2815 inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2816 }
2817
2818 list_for_each_entry(ilist, &inline_node->val, list) {
5f0fef8a 2819 struct map_symbol ilist_ms = {
77b91c1a 2820 .maps = ms->maps,
5f0fef8a
ACM
2821 .map = map,
2822 .sym = ilist->symbol,
2823 };
2824 ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
b38775cf 2825 NULL, 0, 0, 0, ilist->srcline);
11ea2515
MW
2826
2827 if (ret != 0)
2828 return ret;
2829 }
2830
b38775cf 2831 return ret;
11ea2515
MW
2832}
2833
3f067dca
ACM
2834static int unwind_entry(struct unwind_entry *entry, void *arg)
2835{
2836 struct callchain_cursor *cursor = arg;
40a342cd 2837 const char *srcline = NULL;
ff4ce288 2838 u64 addr = entry->ip;
b49a8fe5 2839
c1529738 2840 if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
b49a8fe5 2841 return 0;
40a342cd 2842
c1529738 2843 if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
11ea2515
MW
2844 return 0;
2845
2a9d5050
SD
2846 /*
2847 * Convert entry->ip from a virtual address to an offset in
2848 * its corresponding binary.
2849 */
c1529738
ACM
2850 if (entry->ms.map)
2851 addr = map__map_ip(entry->ms.map, entry->ip);
2a9d5050 2852
5f0fef8a
ACM
2853 srcline = callchain_srcline(&entry->ms, addr);
2854 return callchain_cursor_append(cursor, entry->ip, &entry->ms,
40a342cd 2855 false, NULL, 0, 0, 0, srcline);
3f067dca
ACM
2856}
2857
9919a65e
CP
2858static int thread__resolve_callchain_unwind(struct thread *thread,
2859 struct callchain_cursor *cursor,
32dcd021 2860 struct evsel *evsel,
9919a65e
CP
2861 struct perf_sample *sample,
2862 int max_stack)
3f067dca 2863{
3f067dca 2864 /* Can we do dwarf post unwind? */
1fc632ce
JO
2865 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2866 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
3f067dca
ACM
2867 return 0;
2868
2869 /* Bail out if nothing was captured. */
2870 if ((!sample->user_regs.regs) ||
2871 (!sample->user_stack.size))
2872 return 0;
2873
91d7b2de 2874 return unwind__get_entries(unwind_entry, cursor,
352ea45a 2875 thread, sample, max_stack);
9919a65e 2876}
3f067dca 2877
9919a65e
CP
2878int thread__resolve_callchain(struct thread *thread,
2879 struct callchain_cursor *cursor,
32dcd021 2880 struct evsel *evsel,
9919a65e
CP
2881 struct perf_sample *sample,
2882 struct symbol **parent,
2883 struct addr_location *root_al,
2884 int max_stack)
2885{
2886 int ret = 0;
2887
914eb9ca 2888 callchain_cursor_reset(cursor);
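	/*
	 * Both resolvers append to the same cursor, so they run in the order
	 * matching callchain_param.order: for ORDER_CALLEE the sampled
	 * callchain (PERF_SAMPLE_CALLCHAIN / LBR) is resolved first and the
	 * DWARF stack unwind is appended after it; otherwise the order is
	 * reversed.
	 */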
9919a65e
CP
2889
2890 if (callchain_param.order == ORDER_CALLEE) {
2891 ret = thread__resolve_callchain_sample(thread, cursor,
2892 evsel, sample,
2893 parent, root_al,
2894 max_stack);
2895 if (ret)
2896 return ret;
2897 ret = thread__resolve_callchain_unwind(thread, cursor,
2898 evsel, sample,
2899 max_stack);
2900 } else {
2901 ret = thread__resolve_callchain_unwind(thread, cursor,
2902 evsel, sample,
2903 max_stack);
2904 if (ret)
2905 return ret;
2906 ret = thread__resolve_callchain_sample(thread, cursor,
2907 evsel, sample,
2908 parent, root_al,
2909 max_stack);
2910 }
2911
2912 return ret;
3f067dca 2913}
35feee19
DA
2914
2915int machine__for_each_thread(struct machine *machine,
2916 int (*fn)(struct thread *thread, void *p),
2917 void *priv)
2918{
91e467bc 2919 struct threads *threads;
35feee19
DA
2920 struct rb_node *nd;
2921 struct thread *thread;
2922 int rc = 0;
91e467bc 2923 int i;
35feee19 2924
91e467bc
KL
2925 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2926 threads = &machine->threads[i];
f3acb3a8
DB
2927 for (nd = rb_first_cached(&threads->entries); nd;
2928 nd = rb_next(nd)) {
91e467bc
KL
2929 thread = rb_entry(nd, struct thread, rb_node);
2930 rc = fn(thread, priv);
2931 if (rc != 0)
2932 return rc;
2933 }
35feee19 2934
91e467bc
KL
2935 list_for_each_entry(thread, &threads->dead, node) {
2936 rc = fn(thread, priv);
2937 if (rc != 0)
2938 return rc;
2939 }
35feee19
DA
2940 }
2941 return rc;
2942}
58d925dc 2943
a5499b37
AH
2944int machines__for_each_thread(struct machines *machines,
2945 int (*fn)(struct thread *thread, void *p),
2946 void *priv)
2947{
2948 struct rb_node *nd;
2949 int rc = 0;
2950
2951 rc = machine__for_each_thread(&machines->host, fn, priv);
2952 if (rc != 0)
2953 return rc;
2954
f3acb3a8 2955 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
a5499b37
AH
2956 struct machine *machine = rb_entry(nd, struct machine, rb_node);
2957
2958 rc = machine__for_each_thread(machine, fn, priv);
2959 if (rc != 0)
2960 return rc;
2961 }
2962 return rc;
2963}
2964
b9d266ba
AH
2965pid_t machine__get_current_tid(struct machine *machine, int cpu)
2966{
8c727469
KM
2967 int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2968
2969 if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
b9d266ba
AH
2970 return -1;
2971
2972 return machine->current_tid[cpu];
2973}
2974
2975int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2976 pid_t tid)
2977{
2978 struct thread *thread;
8c727469 2979 int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
b9d266ba
AH
2980
2981 if (cpu < 0)
2982 return -EINVAL;
2983
2984 if (!machine->current_tid) {
2985 int i;
2986
8c727469 2987 machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
b9d266ba
AH
2988 if (!machine->current_tid)
2989 return -ENOMEM;
8c727469 2990 for (i = 0; i < nr_cpus; i++)
b9d266ba
AH
2991 machine->current_tid[i] = -1;
2992 }
2993
8c727469 2994 if (cpu >= nr_cpus) {
b9d266ba
AH
2995 pr_err("Requested CPU %d too large. ", cpu);
2996 pr_err("Consider raising MAX_NR_CPUS\n");
2997 return -EINVAL;
2998 }
2999
3000 machine->current_tid[cpu] = tid;
3001
3002 thread = machine__findnew_thread(machine, pid, tid);
3003 if (!thread)
3004 return -ENOMEM;
3005
3006 thread->cpu = cpu;
b91fc39f 3007 thread__put(thread);
b9d266ba
AH
3008
3009 return 0;
3010}
fbe2af45 3011
dbbd34a6
AH
3012/*
3013 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
3014 * normalized arch is needed.
3015 */
3016bool machine__is(struct machine *machine, const char *arch)
3017{
3018 return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3019}
3020
9cecca32
AH
3021int machine__nr_cpus_avail(struct machine *machine)
3022{
3023 return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3024}
3025
fbe2af45
AH
3026int machine__get_kernel_start(struct machine *machine)
3027{
a5e813c6 3028 struct map *map = machine__kernel_map(machine);
fbe2af45
AH
3029 int err = 0;
3030
3031 /*
3032 * The only addresses above 2^63 are kernel addresses of a 64-bit
3033 * kernel. Note that addresses are unsigned so that on a 32-bit system
3034 * all addresses including kernel addresses are less than 2^32. In
3035 * that case (32-bit system), if the kernel mapping is unknown, all
3036 * addresses will be assumed to be in user space - see
3037 * machine__kernel_ip().
3038 */
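	/*
	 * For reference, machine__kernel_ip() (machine.h) just compares an
	 * address against machine->kernel_start, so with this 1ULL << 63
	 * default any address with the top bit set is treated as a kernel
	 * address.
	 */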
3039 machine->kernel_start = 1ULL << 63;
3040 if (map) {
be39db9f 3041 err = map__load(map);
19422a9f
AH
3042 /*
3043 * On x86_64, PTI entry trampolines are less than the
3044 * start of kernel text, but still above 2^63. So leave
3045 * kernel_start = 1ULL << 63 for x86_64.
3046 */
3047 if (!err && !machine__is(machine, "x86_64"))
fbe2af45
AH
3048 machine->kernel_start = map->start;
3049 }
3050 return err;
3051}
aa7cc2ae 3052
8e80ad99
AH
3053u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3054{
3055 u8 addr_cpumode = cpumode;
3056 bool kernel_ip;
3057
3058 if (!machine->single_address_space)
3059 goto out;
3060
3061 kernel_ip = machine__kernel_ip(machine, addr);
3062 switch (cpumode) {
3063 case PERF_RECORD_MISC_KERNEL:
3064 case PERF_RECORD_MISC_USER:
3065 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3066 PERF_RECORD_MISC_USER;
3067 break;
3068 case PERF_RECORD_MISC_GUEST_KERNEL:
3069 case PERF_RECORD_MISC_GUEST_USER:
3070 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3071 PERF_RECORD_MISC_GUEST_USER;
3072 break;
3073 default:
3074 break;
3075 }
3076out:
3077 return addr_cpumode;
3078}
3079
0e3149f8
ACM
3080struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3081{
3082 return dsos__findnew_id(&machine->dsos, filename, id);
3083}
3084
aa7cc2ae
ACM
3085struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3086{
0e3149f8 3087 return machine__findnew_dso_id(machine, filename, NULL);
aa7cc2ae 3088}
c3168b0d
ACM
3089
3090char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3091{
3092 struct machine *machine = vmachine;
3093 struct map *map;
107cad95 3094 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
c3168b0d
ACM
3095
3096 if (sym == NULL)
3097 return NULL;
3098
3099 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
3100 *addrp = map->unmap_ip(map, sym->start);
3101 return sym->name;
3102}