tools/perf/util/auxtrace.c
1/*
2 * auxtrace.c: AUX area trace support
3 * Copyright (c) 2013-2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#include <inttypes.h>
17#include <sys/types.h>
18#include <sys/mman.h>
19#include <stdbool.h>
20#include <string.h>
21#include <limits.h>
22#include <errno.h>
23
24#include <linux/kernel.h>
25#include <linux/perf_event.h>
26#include <linux/types.h>
27#include <linux/bitops.h>
28#include <linux/log2.h>
29#include <linux/string.h>
30
31#include <sys/param.h>
32#include <stdlib.h>
33#include <stdio.h>
34#include <linux/list.h>
35
36#include "../perf.h"
37#include "util.h"
38#include "evlist.h"
39#include "dso.h"
40#include "map.h"
41#include "pmu.h"
42#include "evsel.h"
43#include "cpumap.h"
44#include "thread_map.h"
45#include "asm/bug.h"
46#include "auxtrace.h"
47
48#include <linux/hash.h>
49
50#include "event.h"
51#include "session.h"
52#include "debug.h"
53#include <subcmd/parse-options.h>
54
55#include "cs-etm.h"
56#include "intel-pt.h"
57#include "intel-bts.h"
58#include "arm-spe.h"
59
60#include "sane_ctype.h"
61#include "symbol/kallsyms.h"
62
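/*
 * Decoding is skipped when no itrace synthesis options were set up, or when
 * the user passed --no-itrace (itrace_synth_opts->dont_decode).
 */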
63static bool auxtrace__dont_decode(struct perf_session *session)
64{
65 return !session->itrace_synth_opts ||
66 session->itrace_synth_opts->dont_decode;
67}
68
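/*
 * Map the AUX area of one perf event mmap: record the buffer geometry in
 * struct auxtrace_mmap and publish the AUX offset and size via the
 * perf_event_mmap_page so the kernel starts filling the buffer.
 */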
69int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
70 struct auxtrace_mmap_params *mp,
71 void *userpg, int fd)
72{
73 struct perf_event_mmap_page *pc = userpg;
74
75 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
76
77 mm->userpg = userpg;
78 mm->mask = mp->mask;
79 mm->len = mp->len;
80 mm->prev = 0;
81 mm->idx = mp->idx;
82 mm->tid = mp->tid;
83 mm->cpu = mp->cpu;
84
85 if (!mp->len) {
86 mm->base = NULL;
87 return 0;
88 }
89
90#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
91 pr_err("Cannot use AUX area tracing mmaps\n");
92 return -1;
93#endif
94
95 pc->aux_offset = mp->offset;
96 pc->aux_size = mp->len;
97
98 mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
99 if (mm->base == MAP_FAILED) {
100 pr_debug2("failed to mmap AUX area\n");
101 mm->base = NULL;
102 return -1;
103 }
104
105 return 0;
106}
107
108void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
109{
110 if (mm->base) {
111 munmap(mm->base, mm->len);
112 mm->base = NULL;
113 }
114}
115
116void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
117 off_t auxtrace_offset,
118 unsigned int auxtrace_pages,
119 bool auxtrace_overwrite)
120{
121 if (auxtrace_pages) {
122 mp->offset = auxtrace_offset;
123 mp->len = auxtrace_pages * (size_t)page_size;
124 mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
125 mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
126 pr_debug2("AUX area mmap length %zu\n", mp->len);
127 } else {
128 mp->len = 0;
129 }
130}
131
132void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
133 struct perf_evlist *evlist, int idx,
134 bool per_cpu)
135{
136 mp->idx = idx;
137
138 if (per_cpu) {
139 mp->cpu = evlist->cpus->map[idx];
140 if (evlist->threads)
141 mp->tid = thread_map__pid(evlist->threads, 0);
142 else
143 mp->tid = -1;
144 } else {
145 mp->cpu = -1;
146 mp->tid = thread_map__pid(evlist->threads, idx);
147 }
148}
149
150#define AUXTRACE_INIT_NR_QUEUES 32
151
152static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
153{
154 struct auxtrace_queue *queue_array;
155 unsigned int max_nr_queues, i;
156
157 max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
158 if (nr_queues > max_nr_queues)
159 return NULL;
160
161 queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
162 if (!queue_array)
163 return NULL;
164
165 for (i = 0; i < nr_queues; i++) {
166 INIT_LIST_HEAD(&queue_array[i].head);
167 queue_array[i].priv = NULL;
168 }
169
170 return queue_array;
171}
172
173int auxtrace_queues__init(struct auxtrace_queues *queues)
174{
175 queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
176 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
177 if (!queues->queue_array)
178 return -ENOMEM;
179 return 0;
180}
181
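/*
 * Grow the queue array by doubling until it covers new_nr_queues, splicing
 * the existing queues' buffer lists and private data into the new array.
 */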
182static int auxtrace_queues__grow(struct auxtrace_queues *queues,
183 unsigned int new_nr_queues)
184{
185 unsigned int nr_queues = queues->nr_queues;
186 struct auxtrace_queue *queue_array;
187 unsigned int i;
188
189 if (!nr_queues)
190 nr_queues = AUXTRACE_INIT_NR_QUEUES;
191
192 while (nr_queues && nr_queues < new_nr_queues)
193 nr_queues <<= 1;
194
195 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
196 return -EINVAL;
197
198 queue_array = auxtrace_alloc_queue_array(nr_queues);
199 if (!queue_array)
200 return -ENOMEM;
201
202 for (i = 0; i < queues->nr_queues; i++) {
203 list_splice_tail(&queues->queue_array[i].head,
204 &queue_array[i].head);
205 queue_array[i].priv = queues->queue_array[i].priv;
206 }
207
208 queues->nr_queues = nr_queues;
209 queues->queue_array = queue_array;
210
211 return 0;
212}
213
214static void *auxtrace_copy_data(u64 size, struct perf_session *session)
215{
216 int fd = perf_data__fd(session->data);
217 void *p;
218 ssize_t ret;
219
220 if (size > SSIZE_MAX)
221 return NULL;
222
223 p = malloc(size);
224 if (!p)
225 return NULL;
226
227 ret = readn(fd, p, size);
228 if (ret != (ssize_t)size) {
229 free(p);
230 return NULL;
231 }
232
233 return p;
234}
235
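/*
 * Add a buffer to the queue selected by idx, growing the queue array if
 * necessary. All buffers in a queue must belong to the same cpu/tid.
 */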
236static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
237 unsigned int idx,
238 struct auxtrace_buffer *buffer)
239{
240 struct auxtrace_queue *queue;
241 int err;
242
243 if (idx >= queues->nr_queues) {
244 err = auxtrace_queues__grow(queues, idx + 1);
245 if (err)
246 return err;
247 }
248
249 queue = &queues->queue_array[idx];
250
251 if (!queue->set) {
252 queue->set = true;
253 queue->tid = buffer->tid;
254 queue->cpu = buffer->cpu;
255 } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
256 pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
257 queue->cpu, queue->tid, buffer->cpu, buffer->tid);
258 return -EINVAL;
259 }
260
261 buffer->buffer_nr = queues->next_buffer_nr++;
262
263 list_add_tail(&buffer->list, &queue->head);
264
265 queues->new_data = true;
266 queues->populated = true;
267
268 return 0;
269}
270
271/* Limit buffers to 32MiB on 32-bit */
272#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
273
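/*
 * On 32-bit, buffers above BUFFER_LIMIT_FOR_32_BIT cannot be mmapped in one
 * piece, so queue them as a chain of consecutive 32MiB buffers instead.
 */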
274static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
275 unsigned int idx,
276 struct auxtrace_buffer *buffer)
277{
278 u64 sz = buffer->size;
279 bool consecutive = false;
280 struct auxtrace_buffer *b;
281 int err;
282
283 while (sz > BUFFER_LIMIT_FOR_32_BIT) {
284 b = memdup(buffer, sizeof(struct auxtrace_buffer));
285 if (!b)
286 return -ENOMEM;
287 b->size = BUFFER_LIMIT_FOR_32_BIT;
288 b->consecutive = consecutive;
289 err = auxtrace_queues__add_buffer(queues, idx, b);
290 if (err) {
291 auxtrace_buffer__free(b);
292 return err;
293 }
294 buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
295 sz -= BUFFER_LIMIT_FOR_32_BIT;
296 consecutive = true;
297 }
298
299 buffer->size = sz;
300 buffer->consecutive = consecutive;
301
302 return 0;
303}
304
305static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
306 struct perf_session *session,
307 unsigned int idx,
308 struct auxtrace_buffer *buffer)
309{
310 if (session->one_mmap) {
311 buffer->data = buffer->data_offset - session->one_mmap_offset +
312 session->one_mmap_addr;
313 } else if (perf_data__is_pipe(session->data)) {
314 buffer->data = auxtrace_copy_data(buffer->size, session);
315 if (!buffer->data)
316 return -ENOMEM;
317 buffer->data_needs_freeing = true;
318 } else if (BITS_PER_LONG == 32 &&
319 buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
320 int err;
321
322 err = auxtrace_queues__split_buffer(queues, idx, buffer);
323 if (err)
324 return err;
325 }
326
327 return auxtrace_queues__add_buffer(queues, idx, buffer);
328}
329
330static bool filter_cpu(struct perf_session *session, int cpu)
331{
332 unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
333
334 return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
335}
336
337int auxtrace_queues__add_event(struct auxtrace_queues *queues,
338 struct perf_session *session,
339 union perf_event *event, off_t data_offset,
340 struct auxtrace_buffer **buffer_ptr)
341{
342 struct auxtrace_buffer *buffer;
343 unsigned int idx;
344 int err;
345
346 if (filter_cpu(session, event->auxtrace.cpu))
347 return 0;
348
349 buffer = zalloc(sizeof(struct auxtrace_buffer));
350 if (!buffer)
351 return -ENOMEM;
352
353 buffer->pid = -1;
354 buffer->tid = event->auxtrace.tid;
355 buffer->cpu = event->auxtrace.cpu;
356 buffer->data_offset = data_offset;
357 buffer->offset = event->auxtrace.offset;
358 buffer->reference = event->auxtrace.reference;
359 buffer->size = event->auxtrace.size;
360 idx = event->auxtrace.idx;
361
362 err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
363 if (err)
364 goto out_err;
365
366 if (buffer_ptr)
367 *buffer_ptr = buffer;
368
369 return 0;
370
371out_err:
372 auxtrace_buffer__free(buffer);
373 return err;
374}
375
376static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
377 struct perf_session *session,
378 off_t file_offset, size_t sz)
379{
380 union perf_event *event;
381 int err;
382 char buf[PERF_SAMPLE_MAX_SIZE];
383
384 err = perf_session__peek_event(session, file_offset, buf,
385 PERF_SAMPLE_MAX_SIZE, &event, NULL);
386 if (err)
387 return err;
388
389 if (event->header.type == PERF_RECORD_AUXTRACE) {
390 if (event->header.size < sizeof(struct auxtrace_event) ||
391 event->header.size != sz) {
392 err = -EINVAL;
393 goto out;
394 }
395 file_offset += event->header.size;
396 err = auxtrace_queues__add_event(queues, session, event,
397 file_offset, NULL);
398 }
399out:
400 return err;
401}
402
403void auxtrace_queues__free(struct auxtrace_queues *queues)
404{
405 unsigned int i;
406
407 for (i = 0; i < queues->nr_queues; i++) {
408 while (!list_empty(&queues->queue_array[i].head)) {
409 struct auxtrace_buffer *buffer;
410
411 buffer = list_entry(queues->queue_array[i].head.next,
412 struct auxtrace_buffer, list);
413 list_del(&buffer->list);
414 auxtrace_buffer__free(buffer);
415 }
416 }
417
418 zfree(&queues->queue_array);
419 queues->nr_queues = 0;
420}
421
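/*
 * The auxtrace heap is a min-heap of (queue_nr, ordinal) pairs ordered by
 * ordinal, typically a timestamp, so that queues can be processed in time
 * order. auxtrace_heapify() sifts a new entry up to its position.
 */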
422static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
423 unsigned int pos, unsigned int queue_nr,
424 u64 ordinal)
425{
426 unsigned int parent;
427
428 while (pos) {
429 parent = (pos - 1) >> 1;
430 if (heap_array[parent].ordinal <= ordinal)
431 break;
432 heap_array[pos] = heap_array[parent];
433 pos = parent;
434 }
435 heap_array[pos].queue_nr = queue_nr;
436 heap_array[pos].ordinal = ordinal;
437}
438
439int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
440 u64 ordinal)
441{
442 struct auxtrace_heap_item *heap_array;
443
444 if (queue_nr >= heap->heap_sz) {
445 unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;
446
447 while (heap_sz <= queue_nr)
448 heap_sz <<= 1;
449 heap_array = realloc(heap->heap_array,
450 heap_sz * sizeof(struct auxtrace_heap_item));
451 if (!heap_array)
452 return -ENOMEM;
453 heap->heap_array = heap_array;
454 heap->heap_sz = heap_sz;
455 }
456
457 auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);
458
459 return 0;
460}
461
462void auxtrace_heap__free(struct auxtrace_heap *heap)
463{
464 zfree(&heap->heap_array);
465 heap->heap_cnt = 0;
466 heap->heap_sz = 0;
467}
468
469void auxtrace_heap__pop(struct auxtrace_heap *heap)
470{
471 unsigned int pos, last, heap_cnt = heap->heap_cnt;
472 struct auxtrace_heap_item *heap_array;
473
474 if (!heap_cnt)
475 return;
476
477 heap->heap_cnt -= 1;
478
479 heap_array = heap->heap_array;
480
481 pos = 0;
482 while (1) {
483 unsigned int left, right;
484
485 left = (pos << 1) + 1;
486 if (left >= heap_cnt)
487 break;
488 right = left + 1;
489 if (right >= heap_cnt) {
490 heap_array[pos] = heap_array[left];
491 return;
492 }
493 if (heap_array[left].ordinal < heap_array[right].ordinal) {
494 heap_array[pos] = heap_array[left];
495 pos = left;
496 } else {
497 heap_array[pos] = heap_array[right];
498 pos = right;
499 }
500 }
501
502 last = heap_cnt - 1;
503 auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
504 heap_array[last].ordinal);
505}
506
507size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
508 struct perf_evlist *evlist)
509{
510 if (itr)
511 return itr->info_priv_size(itr, evlist);
512 return 0;
513}
514
515static int auxtrace_not_supported(void)
516{
517 pr_err("AUX area tracing is not supported on this architecture\n");
518 return -EINVAL;
519}
520
521int auxtrace_record__info_fill(struct auxtrace_record *itr,
522 struct perf_session *session,
523 struct auxtrace_info_event *auxtrace_info,
524 size_t priv_size)
525{
526 if (itr)
527 return itr->info_fill(itr, session, auxtrace_info, priv_size);
528 return auxtrace_not_supported();
529}
530
531void auxtrace_record__free(struct auxtrace_record *itr)
532{
533 if (itr)
534 itr->free(itr);
535}
536
537int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
538{
539 if (itr && itr->snapshot_start)
540 return itr->snapshot_start(itr);
541 return 0;
542}
543
544int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
545{
546 if (itr && itr->snapshot_finish)
547 return itr->snapshot_finish(itr);
548 return 0;
549}
550
551int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
552 struct auxtrace_mmap *mm,
553 unsigned char *data, u64 *head, u64 *old)
554{
555 if (itr && itr->find_snapshot)
556 return itr->find_snapshot(itr, idx, mm, data, head, old);
557 return 0;
558}
559
560int auxtrace_record__options(struct auxtrace_record *itr,
561 struct perf_evlist *evlist,
562 struct record_opts *opts)
563{
564 if (itr)
565 return itr->recording_options(itr, evlist, opts);
566 return 0;
567}
568
569u64 auxtrace_record__reference(struct auxtrace_record *itr)
570{
571 if (itr)
572 return itr->reference(itr);
573 return 0;
574}
575
576int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
577 struct record_opts *opts, const char *str)
578{
579 if (!str)
580 return 0;
581
582 if (itr)
583 return itr->parse_snapshot_options(itr, opts, str);
584
585 pr_err("No AUX area tracing to snapshot\n");
586 return -EINVAL;
587}
588
589struct auxtrace_record *__weak
590auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
591{
592 *err = 0;
593 return NULL;
594}
595
596static int auxtrace_index__alloc(struct list_head *head)
597{
598 struct auxtrace_index *auxtrace_index;
599
600 auxtrace_index = malloc(sizeof(struct auxtrace_index));
601 if (!auxtrace_index)
602 return -ENOMEM;
603
604 auxtrace_index->nr = 0;
605 INIT_LIST_HEAD(&auxtrace_index->list);
606
607 list_add_tail(&auxtrace_index->list, head);
608
609 return 0;
610}
611
612void auxtrace_index__free(struct list_head *head)
613{
614 struct auxtrace_index *auxtrace_index, *n;
615
616 list_for_each_entry_safe(auxtrace_index, n, head, list) {
617 list_del(&auxtrace_index->list);
618 free(auxtrace_index);
619 }
620}
621
622static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
623{
624 struct auxtrace_index *auxtrace_index;
625 int err;
626
627 if (list_empty(head)) {
628 err = auxtrace_index__alloc(head);
629 if (err)
630 return NULL;
631 }
632
633 auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);
634
635 if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
636 err = auxtrace_index__alloc(head);
637 if (err)
638 return NULL;
639 auxtrace_index = list_entry(head->prev, struct auxtrace_index,
640 list);
641 }
642
643 return auxtrace_index;
644}
645
646int auxtrace_index__auxtrace_event(struct list_head *head,
647 union perf_event *event, off_t file_offset)
648{
649 struct auxtrace_index *auxtrace_index;
650 size_t nr;
651
652 auxtrace_index = auxtrace_index__last(head);
653 if (!auxtrace_index)
654 return -ENOMEM;
655
656 nr = auxtrace_index->nr;
657 auxtrace_index->entries[nr].file_offset = file_offset;
658 auxtrace_index->entries[nr].sz = event->header.size;
659 auxtrace_index->nr += 1;
660
661 return 0;
662}
663
664static int auxtrace_index__do_write(int fd,
665 struct auxtrace_index *auxtrace_index)
666{
667 struct auxtrace_index_entry ent;
668 size_t i;
669
670 for (i = 0; i < auxtrace_index->nr; i++) {
671 ent.file_offset = auxtrace_index->entries[i].file_offset;
672 ent.sz = auxtrace_index->entries[i].sz;
673 if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
674 return -errno;
675 }
676 return 0;
677}
678
679int auxtrace_index__write(int fd, struct list_head *head)
680{
681 struct auxtrace_index *auxtrace_index;
682 u64 total = 0;
683 int err;
684
685 list_for_each_entry(auxtrace_index, head, list)
686 total += auxtrace_index->nr;
687
688 if (writen(fd, &total, sizeof(total)) != sizeof(total))
689 return -errno;
690
691 list_for_each_entry(auxtrace_index, head, list) {
692 err = auxtrace_index__do_write(fd, auxtrace_index);
693 if (err)
694 return err;
695 }
696
697 return 0;
698}
699
700static int auxtrace_index__process_entry(int fd, struct list_head *head,
701 bool needs_swap)
702{
703 struct auxtrace_index *auxtrace_index;
704 struct auxtrace_index_entry ent;
705 size_t nr;
706
707 if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
708 return -1;
709
710 auxtrace_index = auxtrace_index__last(head);
711 if (!auxtrace_index)
712 return -1;
713
714 nr = auxtrace_index->nr;
715 if (needs_swap) {
716 auxtrace_index->entries[nr].file_offset =
717 bswap_64(ent.file_offset);
718 auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
719 } else {
720 auxtrace_index->entries[nr].file_offset = ent.file_offset;
721 auxtrace_index->entries[nr].sz = ent.sz;
722 }
723
724 auxtrace_index->nr = nr + 1;
725
726 return 0;
727}
728
729int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
730 bool needs_swap)
731{
732 struct list_head *head = &session->auxtrace_index;
733 u64 nr;
734
735 if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
736 return -1;
737
738 if (needs_swap)
739 nr = bswap_64(nr);
740
741 if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
742 return -1;
743
744 while (nr--) {
745 int err;
746
747 err = auxtrace_index__process_entry(fd, head, needs_swap);
748 if (err)
749 return -1;
750 }
751
752 return 0;
753}
754
755static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
756 struct perf_session *session,
757 struct auxtrace_index_entry *ent)
758{
759 return auxtrace_queues__add_indexed_event(queues, session,
760 ent->file_offset, ent->sz);
761}
762
763int auxtrace_queues__process_index(struct auxtrace_queues *queues,
764 struct perf_session *session)
765{
766 struct auxtrace_index *auxtrace_index;
767 struct auxtrace_index_entry *ent;
768 size_t i;
769 int err;
770
771 if (auxtrace__dont_decode(session))
772 return 0;
773
774 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
775 for (i = 0; i < auxtrace_index->nr; i++) {
776 ent = &auxtrace_index->entries[i];
777 err = auxtrace_queues__process_index_entry(queues,
778 session,
779 ent);
780 if (err)
781 return err;
782 }
783 }
784 return 0;
785}
786
787struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
788 struct auxtrace_buffer *buffer)
789{
790 if (buffer) {
791 if (list_is_last(&buffer->list, &queue->head))
792 return NULL;
793 return list_entry(buffer->list.next, struct auxtrace_buffer,
794 list);
795 } else {
796 if (list_empty(&queue->head))
797 return NULL;
798 return list_entry(queue->head.next, struct auxtrace_buffer,
799 list);
800 }
801}
802
803void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
804{
805 size_t adj = buffer->data_offset & (page_size - 1);
806 size_t size = buffer->size + adj;
807 off_t file_offset = buffer->data_offset - adj;
808 void *addr;
809
810 if (buffer->data)
811 return buffer->data;
812
813 addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
814 if (addr == MAP_FAILED)
815 return NULL;
816
817 buffer->mmap_addr = addr;
818 buffer->mmap_size = size;
819
820 buffer->data = addr + adj;
821
822 return buffer->data;
823}
824
825void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
826{
827 if (!buffer->data || !buffer->mmap_addr)
828 return;
829 munmap(buffer->mmap_addr, buffer->mmap_size);
830 buffer->mmap_addr = NULL;
831 buffer->mmap_size = 0;
832 buffer->data = NULL;
833 buffer->use_data = NULL;
834}
835
836void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
837{
838 auxtrace_buffer__put_data(buffer);
839 if (buffer->data_needs_freeing) {
840 buffer->data_needs_freeing = false;
841 zfree(&buffer->data);
842 buffer->use_data = NULL;
843 buffer->size = 0;
844 }
845}
846
847void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
848{
849 auxtrace_buffer__drop_data(buffer);
850 free(buffer);
851}
852
853void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
854 int code, int cpu, pid_t pid, pid_t tid, u64 ip,
855 const char *msg)
856{
857 size_t size;
858
859 memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));
860
861 auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
862 auxtrace_error->type = type;
863 auxtrace_error->code = code;
864 auxtrace_error->cpu = cpu;
865 auxtrace_error->pid = pid;
866 auxtrace_error->tid = tid;
867 auxtrace_error->ip = ip;
868 strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
869
870 size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
871 strlen(auxtrace_error->msg) + 1;
872 auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
873}
874
875int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
876 struct perf_tool *tool,
877 struct perf_session *session,
878 perf_event__handler_t process)
879{
880 union perf_event *ev;
881 size_t priv_size;
882 int err;
883
884 pr_debug2("Synthesizing auxtrace information\n");
885 priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
886 ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
887 if (!ev)
888 return -ENOMEM;
889
890 ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
891 ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
892 priv_size;
893 err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
894 priv_size);
895 if (err)
896 goto out_free;
897
898 err = process(tool, ev, NULL, NULL);
899out_free:
900 free(ev);
901 return err;
902}
903
904int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
905 union perf_event *event,
906 struct perf_session *session)
907{
908 enum auxtrace_type type = event->auxtrace_info.type;
909
910 if (dump_trace)
911 fprintf(stdout, " type: %u\n", type);
912
913 switch (type) {
914 case PERF_AUXTRACE_INTEL_PT:
915 return intel_pt_process_auxtrace_info(event, session);
916 case PERF_AUXTRACE_INTEL_BTS:
917 return intel_bts_process_auxtrace_info(event, session);
918 case PERF_AUXTRACE_ARM_SPE:
919 return arm_spe_process_auxtrace_info(event, session);
920 case PERF_AUXTRACE_CS_ETM:
921 return cs_etm__process_auxtrace_info(event, session);
922 case PERF_AUXTRACE_UNKNOWN:
923 default:
924 return -EINVAL;
925 }
926}
927
928s64 perf_event__process_auxtrace(struct perf_tool *tool,
929 union perf_event *event,
930 struct perf_session *session)
931{
932 s64 err;
933
934 if (dump_trace)
935 fprintf(stdout, " size: %#"PRIx64" offset: %#"PRIx64" ref: %#"PRIx64" idx: %u tid: %d cpu: %d\n",
936 event->auxtrace.size, event->auxtrace.offset,
937 event->auxtrace.reference, event->auxtrace.idx,
938 event->auxtrace.tid, event->auxtrace.cpu);
939
940 if (auxtrace__dont_decode(session))
941 return event->auxtrace.size;
942
943 if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
944 return -EINVAL;
945
946 err = session->auxtrace->process_auxtrace_event(session, event, tool);
947 if (err < 0)
948 return err;
949
950 return event->auxtrace.size;
951}
952
953#define PERF_ITRACE_DEFAULT_PERIOD_TYPE PERF_ITRACE_PERIOD_NANOSECS
954#define PERF_ITRACE_DEFAULT_PERIOD 100000
955#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16
956#define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024
957#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64
958#define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024
959
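/*
 * Defaults used when --itrace is given without arguments: synthesize
 * instructions, branches, transactions, ptwrites, power events and errors.
 */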
960void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
961{
962 synth_opts->instructions = true;
963 synth_opts->branches = true;
964 synth_opts->transactions = true;
965 synth_opts->ptwrites = true;
966 synth_opts->pwr_events = true;
967 synth_opts->errors = true;
968 synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
969 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
970 synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
971 synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
972 synth_opts->initial_skip = 0;
973}
974
975/*
976 * Please check tools/perf/Documentation/perf-script.txt for information
977 * about the options parsed here. That documentation is added by a later
978 * cset, when support for these options is introduced in 'perf script'.
979 */
980int itrace_parse_synth_opts(const struct option *opt, const char *str,
981 int unset)
982{
983 struct itrace_synth_opts *synth_opts = opt->value;
984 const char *p;
985 char *endptr;
986 bool period_type_set = false;
987 bool period_set = false;
988
989 synth_opts->set = true;
990
991 if (unset) {
992 synth_opts->dont_decode = true;
993 return 0;
994 }
995
996 if (!str) {
997 itrace_synth_opts__set_default(synth_opts);
998 return 0;
999 }
1000
1001 for (p = str; *p;) {
1002 switch (*p++) {
1003 case 'i':
1004 synth_opts->instructions = true;
1005 while (*p == ' ' || *p == ',')
1006 p += 1;
1007 if (isdigit(*p)) {
1008 synth_opts->period = strtoull(p, &endptr, 10);
1009 period_set = true;
1010 p = endptr;
1011 while (*p == ' ' || *p == ',')
1012 p += 1;
1013 switch (*p++) {
1014 case 'i':
1015 synth_opts->period_type =
1016 PERF_ITRACE_PERIOD_INSTRUCTIONS;
1017 period_type_set = true;
1018 break;
1019 case 't':
1020 synth_opts->period_type =
1021 PERF_ITRACE_PERIOD_TICKS;
1022 period_type_set = true;
1023 break;
1024 case 'm':
1025 synth_opts->period *= 1000;
1026 /* Fall through */
1027 case 'u':
1028 synth_opts->period *= 1000;
1029 /* Fall through */
1030 case 'n':
1031 if (*p++ != 's')
1032 goto out_err;
1033 synth_opts->period_type =
1034 PERF_ITRACE_PERIOD_NANOSECS;
1035 period_type_set = true;
1036 break;
1037 case '\0':
1038 goto out;
1039 default:
1040 goto out_err;
1041 }
1042 }
1043 break;
1044 case 'b':
1045 synth_opts->branches = true;
1046 break;
1047 case 'x':
1048 synth_opts->transactions = true;
1049 break;
1050 case 'w':
1051 synth_opts->ptwrites = true;
1052 break;
1053 case 'p':
1054 synth_opts->pwr_events = true;
1055 break;
1056 case 'e':
1057 synth_opts->errors = true;
1058 break;
1059 case 'd':
1060 synth_opts->log = true;
1061 break;
1062 case 'c':
1063 synth_opts->branches = true;
1064 synth_opts->calls = true;
1065 break;
1066 case 'r':
1067 synth_opts->branches = true;
1068 synth_opts->returns = true;
1069 break;
1070 case 'g':
1071 synth_opts->callchain = true;
1072 synth_opts->callchain_sz =
1073 PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
1074 while (*p == ' ' || *p == ',')
1075 p += 1;
1076 if (isdigit(*p)) {
1077 unsigned int val;
1078
1079 val = strtoul(p, &endptr, 10);
1080 p = endptr;
1081 if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
1082 goto out_err;
1083 synth_opts->callchain_sz = val;
1084 }
1085 break;
1086 case 'l':
1087 synth_opts->last_branch = true;
1088 synth_opts->last_branch_sz =
1089 PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
1090 while (*p == ' ' || *p == ',')
1091 p += 1;
1092 if (isdigit(*p)) {
1093 unsigned int val;
1094
1095 val = strtoul(p, &endptr, 10);
1096 p = endptr;
1097 if (!val ||
1098 val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
1099 goto out_err;
1100 synth_opts->last_branch_sz = val;
1101 }
1102 break;
1103 case 's':
1104 synth_opts->initial_skip = strtoul(p, &endptr, 10);
1105 if (p == endptr)
1106 goto out_err;
1107 p = endptr;
1108 break;
1109 case ' ':
1110 case ',':
1111 break;
1112 default:
1113 goto out_err;
1114 }
1115 }
1116out:
1117 if (synth_opts->instructions) {
1118 if (!period_type_set)
1119 synth_opts->period_type =
1120 PERF_ITRACE_DEFAULT_PERIOD_TYPE;
1121 if (!period_set)
1122 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
1123 }
1124
1125 return 0;
1126
1127out_err:
1128 pr_err("Bad Instruction Tracing options '%s'\n", str);
1129 return -EINVAL;
1130}
1131
1132static const char * const auxtrace_error_type_name[] = {
1133 [PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
1134};
1135
1136static const char *auxtrace_error_name(int type)
1137{
1138 const char *error_type_name = NULL;
1139
1140 if (type < PERF_AUXTRACE_ERROR_MAX)
1141 error_type_name = auxtrace_error_type_name[type];
1142 if (!error_type_name)
1143 error_type_name = "unknown AUX";
1144 return error_type_name;
1145}
1146
1147size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
1148{
1149 struct auxtrace_error_event *e = &event->auxtrace_error;
1150 int ret;
1151
1152 ret = fprintf(fp, " %s error type %u",
1153 auxtrace_error_name(e->type), e->type);
1154 ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
1155 e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
1156 return ret;
1157}
1158
1159void perf_session__auxtrace_error_inc(struct perf_session *session,
1160 union perf_event *event)
1161{
1162 struct auxtrace_error_event *e = &event->auxtrace_error;
1163
1164 if (e->type < PERF_AUXTRACE_ERROR_MAX)
1165 session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
1166}
1167
1168void events_stats__auxtrace_error_warn(const struct events_stats *stats)
1169{
1170 int i;
1171
1172 for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
1173 if (!stats->nr_auxtrace_errors[i])
1174 continue;
1175 ui__warning("%u %s errors\n",
1176 stats->nr_auxtrace_errors[i],
1177 auxtrace_error_name(i));
1178 }
1179}
1180
1181int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
1182 union perf_event *event,
1183 struct perf_session *session)
1184{
1185 if (auxtrace__dont_decode(session))
1186 return 0;
1187
1188 perf_event__fprintf_auxtrace_error(event, stdout);
1189 return 0;
1190}
1191
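/*
 * Read new data (or a snapshot) from the AUX area ring buffer, wrap it in a
 * PERF_RECORD_AUXTRACE event and hand it to fn(). Returns 1 if data was
 * processed, 0 if there was nothing new, negative on error.
 */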
1192static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
1193 struct auxtrace_record *itr,
1194 struct perf_tool *tool, process_auxtrace_t fn,
1195 bool snapshot, size_t snapshot_size)
1196{
1197 u64 head, old = mm->prev, offset, ref;
1198 unsigned char *data = mm->base;
1199 size_t size, head_off, old_off, len1, len2, padding;
1200 union perf_event ev;
1201 void *data1, *data2;
1202
1203 if (snapshot) {
1204 head = auxtrace_mmap__read_snapshot_head(mm);
1205 if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
1206 &head, &old))
1207 return -1;
1208 } else {
1209 head = auxtrace_mmap__read_head(mm);
1210 }
1211
1212 if (old == head)
1213 return 0;
1214
1215 pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
1216 mm->idx, old, head, head - old);
1217
1218 if (mm->mask) {
1219 head_off = head & mm->mask;
1220 old_off = old & mm->mask;
1221 } else {
1222 head_off = head % mm->len;
1223 old_off = old % mm->len;
1224 }
1225
1226 if (head_off > old_off)
1227 size = head_off - old_off;
1228 else
1229 size = mm->len - (old_off - head_off);
1230
1231 if (snapshot && size > snapshot_size)
1232 size = snapshot_size;
1233
1234 ref = auxtrace_record__reference(itr);
1235
1236 if (head > old || size <= head || mm->mask) {
1237 offset = head - size;
1238 } else {
1239 /*
1240 * When the buffer size is not a power of 2, 'head' wraps at the
1241 * highest multiple of the buffer size, so we have to subtract
1242 * the remainder here.
1243 */
1244 u64 rem = (0ULL - mm->len) % mm->len;
1245
1246 offset = head - size - rem;
1247 }
1248
1249 if (size > head_off) {
1250 len1 = size - head_off;
1251 data1 = &data[mm->len - len1];
1252 len2 = head_off;
1253 data2 = &data[0];
1254 } else {
1255 len1 = size;
1256 data1 = &data[head_off - len1];
1257 len2 = 0;
1258 data2 = NULL;
1259 }
1260
1261 if (itr->alignment) {
1262 unsigned int unwanted = len1 % itr->alignment;
1263
1264 len1 -= unwanted;
1265 size -= unwanted;
1266 }
1267
1268 /* padding must be written by fn() e.g. record__process_auxtrace() */
1269 padding = size & 7;
1270 if (padding)
1271 padding = 8 - padding;
1272
1273 memset(&ev, 0, sizeof(ev));
1274 ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
1275 ev.auxtrace.header.size = sizeof(ev.auxtrace);
1276 ev.auxtrace.size = size + padding;
1277 ev.auxtrace.offset = offset;
1278 ev.auxtrace.reference = ref;
1279 ev.auxtrace.idx = mm->idx;
1280 ev.auxtrace.tid = mm->tid;
1281 ev.auxtrace.cpu = mm->cpu;
1282
1283 if (fn(tool, &ev, data1, len1, data2, len2))
1284 return -1;
1285
1286 mm->prev = head;
1287
1288 if (!snapshot) {
1289 auxtrace_mmap__write_tail(mm, head);
1290 if (itr->read_finish) {
1291 int err;
1292
1293 err = itr->read_finish(itr, mm->idx);
1294 if (err < 0)
1295 return err;
1296 }
1297 }
1298
1299 return 1;
1300}
1301
1302int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
1303 struct perf_tool *tool, process_auxtrace_t fn)
1304{
1305 return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
1306}
1307
1308int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
1309 struct auxtrace_record *itr,
1310 struct perf_tool *tool, process_auxtrace_t fn,
1311 size_t snapshot_size)
1312{
1313 return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
1314}
1315
1316/**
1317 * struct auxtrace_cache - hash table to implement a cache
1318 * @hashtable: the hashtable
1319 * @sz: hashtable size (number of hlists)
1320 * @entry_size: size of an entry
1321 * @limit: limit the number of entries to this maximum, when reached the cache
1322 * is dropped and caching begins again with an empty cache
1323 * @cnt: current number of entries
1324 * @bits: hashtable size (@sz = 2^@bits)
1325 */
1326struct auxtrace_cache {
1327 struct hlist_head *hashtable;
1328 size_t sz;
1329 size_t entry_size;
1330 size_t limit;
1331 size_t cnt;
1332 unsigned int bits;
1333};
1334
1335struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
1336 unsigned int limit_percent)
1337{
1338 struct auxtrace_cache *c;
1339 struct hlist_head *ht;
1340 size_t sz, i;
1341
1342 c = zalloc(sizeof(struct auxtrace_cache));
1343 if (!c)
1344 return NULL;
1345
1346 sz = 1UL << bits;
1347
1348 ht = calloc(sz, sizeof(struct hlist_head));
1349 if (!ht)
1350 goto out_free;
1351
1352 for (i = 0; i < sz; i++)
1353 INIT_HLIST_HEAD(&ht[i]);
1354
1355 c->hashtable = ht;
1356 c->sz = sz;
1357 c->entry_size = entry_size;
1358 c->limit = (c->sz * limit_percent) / 100;
1359 c->bits = bits;
1360
1361 return c;
1362
1363out_free:
1364 free(c);
1365 return NULL;
1366}
1367
1368static void auxtrace_cache__drop(struct auxtrace_cache *c)
1369{
1370 struct auxtrace_cache_entry *entry;
1371 struct hlist_node *tmp;
1372 size_t i;
1373
1374 if (!c)
1375 return;
1376
1377 for (i = 0; i < c->sz; i++) {
1378 hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
1379 hlist_del(&entry->hash);
1380 auxtrace_cache__free_entry(c, entry);
1381 }
1382 }
1383
1384 c->cnt = 0;
1385}
1386
1387void auxtrace_cache__free(struct auxtrace_cache *c)
1388{
1389 if (!c)
1390 return;
1391
1392 auxtrace_cache__drop(c);
1393 free(c->hashtable);
1394 free(c);
1395}
1396
1397void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
1398{
1399 return malloc(c->entry_size);
1400}
1401
1402void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
1403 void *entry)
1404{
1405 free(entry);
1406}
1407
1408int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
1409 struct auxtrace_cache_entry *entry)
1410{
1411 if (c->limit && ++c->cnt > c->limit)
1412 auxtrace_cache__drop(c);
1413
1414 entry->key = key;
1415 hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
1416
1417 return 0;
1418}
1419
1420void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
1421{
1422 struct auxtrace_cache_entry *entry;
1423 struct hlist_head *hlist;
1424
1425 if (!c)
1426 return NULL;
1427
1428 hlist = &c->hashtable[hash_32(key, c->bits)];
1429 hlist_for_each_entry(entry, hlist, hash) {
1430 if (entry->key == key)
1431 return entry;
1432 }
1433
1434 return NULL;
1435}
1436
1437static void addr_filter__free_str(struct addr_filter *filt)
1438{
1439 free(filt->str);
1440 filt->action = NULL;
1441 filt->sym_from = NULL;
1442 filt->sym_to = NULL;
1443 filt->filename = NULL;
1444 filt->str = NULL;
1445}
1446
1447static struct addr_filter *addr_filter__new(void)
1448{
1449 struct addr_filter *filt = zalloc(sizeof(*filt));
1450
1451 if (filt)
1452 INIT_LIST_HEAD(&filt->list);
1453
1454 return filt;
1455}
1456
1457static void addr_filter__free(struct addr_filter *filt)
1458{
1459 if (filt)
1460 addr_filter__free_str(filt);
1461 free(filt);
1462}
1463
1464static void addr_filters__add(struct addr_filters *filts,
1465 struct addr_filter *filt)
1466{
1467 list_add_tail(&filt->list, &filts->head);
1468 filts->cnt += 1;
1469}
1470
1471static void addr_filters__del(struct addr_filters *filts,
1472 struct addr_filter *filt)
1473{
1474 list_del_init(&filt->list);
1475 filts->cnt -= 1;
1476}
1477
1478void addr_filters__init(struct addr_filters *filts)
1479{
1480 INIT_LIST_HEAD(&filts->head);
1481 filts->cnt = 0;
1482}
1483
1484void addr_filters__exit(struct addr_filters *filts)
1485{
1486 struct addr_filter *filt, *n;
1487
1488 list_for_each_entry_safe(filt, n, &filts->head, list) {
1489 addr_filters__del(filts, filt);
1490 addr_filter__free(filt);
1491 }
1492}
1493
1494static int parse_num_or_str(char **inp, u64 *num, const char **str,
1495 const char *str_delim)
1496{
1497 *inp += strspn(*inp, " ");
1498
1499 if (isdigit(**inp)) {
1500 char *endptr;
1501
1502 if (!num)
1503 return -EINVAL;
1504 errno = 0;
1505 *num = strtoull(*inp, &endptr, 0);
1506 if (errno)
1507 return -errno;
1508 if (endptr == *inp)
1509 return -EINVAL;
1510 *inp = endptr;
1511 } else {
1512 size_t n;
1513
1514 if (!str)
1515 return -EINVAL;
1516 *inp += strspn(*inp, " ");
1517 *str = *inp;
1518 n = strcspn(*inp, str_delim);
1519 if (!n)
1520 return -EINVAL;
1521 *inp += n;
1522 if (**inp) {
1523 **inp = '\0';
1524 *inp += 1;
1525 }
1526 }
1527 return 0;
1528}
1529
1530static int parse_action(struct addr_filter *filt)
1531{
1532 if (!strcmp(filt->action, "filter")) {
1533 filt->start = true;
1534 filt->range = true;
1535 } else if (!strcmp(filt->action, "start")) {
1536 filt->start = true;
1537 } else if (!strcmp(filt->action, "stop")) {
1538 filt->start = false;
1539 } else if (!strcmp(filt->action, "tracestop")) {
1540 filt->start = false;
1541 filt->range = true;
1542 filt->action += 5; /* Change 'tracestop' to 'stop' */
1543 } else {
1544 return -EINVAL;
1545 }
1546 return 0;
1547}
1548
1549static int parse_sym_idx(char **inp, int *idx)
1550{
1551 *idx = -1;
1552
1553 *inp += strspn(*inp, " ");
1554
1555 if (**inp != '#')
1556 return 0;
1557
1558 *inp += 1;
1559
1560 if (**inp == 'g' || **inp == 'G') {
1561 *inp += 1;
1562 *idx = 0;
1563 } else {
1564 unsigned long num;
1565 char *endptr;
1566
1567 errno = 0;
1568 num = strtoul(*inp, &endptr, 0);
1569 if (errno)
1570 return -errno;
1571 if (endptr == *inp || num > INT_MAX)
1572 return -EINVAL;
1573 *inp = endptr;
1574 *idx = num;
1575 }
1576
1577 return 0;
1578}
1579
1580static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
1581{
1582 int err = parse_num_or_str(inp, num, str, " ");
1583
1584 if (!err && *str)
1585 err = parse_sym_idx(inp, idx);
1586
1587 return err;
1588}
1589
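/*
 * Parse one address filter of the form:
 *   filter|start|stop|tracestop <addr or sym>[#n] [/ <size or sym>[#n]] [@filename]
 * advancing *filter_inp past the text consumed.
 */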
1590static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
1591{
1592 char *fstr;
1593 int err;
1594
1595 filt->str = fstr = strdup(*filter_inp);
1596 if (!fstr)
1597 return -ENOMEM;
1598
1599 err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
1600 if (err)
1601 goto out_err;
1602
1603 err = parse_action(filt);
1604 if (err)
1605 goto out_err;
1606
1607 err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
1608 &filt->sym_from_idx);
1609 if (err)
1610 goto out_err;
1611
1612 fstr += strspn(fstr, " ");
1613
1614 if (*fstr == '/') {
1615 fstr += 1;
1616 err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
1617 &filt->sym_to_idx);
1618 if (err)
1619 goto out_err;
1620 filt->range = true;
1621 }
1622
1623 fstr += strspn(fstr, " ");
1624
1625 if (*fstr == '@') {
1626 fstr += 1;
1627 err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
1628 if (err)
1629 goto out_err;
1630 }
1631
1632 fstr += strspn(fstr, " ,");
1633
1634 *filter_inp += fstr - filt->str;
1635
1636 return 0;
1637
1638out_err:
1639 addr_filter__free_str(filt);
1640
1641 return err;
1642}
1643
1644int addr_filters__parse_bare_filter(struct addr_filters *filts,
1645 const char *filter)
1646{
1647 struct addr_filter *filt;
1648 const char *fstr = filter;
1649 int err;
1650
1651 while (*fstr) {
1652 filt = addr_filter__new();
1653 err = parse_one_filter(filt, &fstr);
1654 if (err) {
1655 addr_filter__free(filt);
1656 addr_filters__exit(filts);
1657 return err;
1658 }
1659 addr_filters__add(filts, filt);
1660 }
1661
1662 return 0;
1663}
1664
1665struct sym_args {
1666 const char *name;
1667 u64 start;
1668 u64 size;
1669 int idx;
1670 int cnt;
1671 bool started;
1672 bool global;
1673 bool selected;
1674 bool duplicate;
1675 bool near;
1676};
1677
1678static bool kern_sym_match(struct sym_args *args, const char *name, char type)
1679{
1680 /* A function with the same name, and global or the n'th found or any */
1681 return symbol_type__is_a(type, MAP__FUNCTION) &&
1682 !strcmp(name, args->name) &&
1683 ((args->global && isupper(type)) ||
1684 (args->selected && ++(args->cnt) == args->idx) ||
1685 (!args->global && !args->selected));
1686}
1687
1688static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
1689{
1690 struct sym_args *args = arg;
1691
1692 if (args->started) {
1693 if (!args->size)
1694 args->size = start - args->start;
1695 if (args->selected) {
1696 if (args->size)
1697 return 1;
1698 } else if (kern_sym_match(args, name, type)) {
1699 args->duplicate = true;
1700 return 1;
1701 }
1702 } else if (kern_sym_match(args, name, type)) {
1703 args->started = true;
1704 args->start = start;
1705 }
1706
1707 return 0;
1708}
1709
1710static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
1711{
1712 struct sym_args *args = arg;
1713
1714 if (kern_sym_match(args, name, type)) {
1715 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
1716 ++args->cnt, start, type, name);
1717 args->near = true;
1718 } else if (args->near) {
1719 args->near = false;
1720 pr_err("\t\twhich is near\t\t%s\n", name);
1721 }
1722
1723 return 0;
1724}
1725
1726static int sym_not_found_error(const char *sym_name, int idx)
1727{
1728 if (idx > 0) {
1729 pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
1730 idx, sym_name);
1731 } else if (!idx) {
1732 pr_err("Global symbol '%s' not found.\n", sym_name);
1733 } else {
1734 pr_err("Symbol '%s' not found.\n", sym_name);
1735 }
1736 pr_err("Note that symbols must be functions.\n");
1737
1738 return -EINVAL;
1739}
1740
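/*
 * Find a kernel function symbol in /proc/kallsyms, reporting duplicates and
 * honouring the #n / #g occurrence selectors.
 */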
1741static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
1742{
1743 struct sym_args args = {
1744 .name = sym_name,
1745 .idx = idx,
1746 .global = !idx,
1747 .selected = idx > 0,
1748 };
1749 int err;
1750
1751 *start = 0;
1752 *size = 0;
1753
1754 err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
1755 if (err < 0) {
1756 pr_err("Failed to parse /proc/kallsyms\n");
1757 return err;
1758 }
1759
1760 if (args.duplicate) {
1761 pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
1762 args.cnt = 0;
1763 kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
1764 pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
1765 sym_name);
1766 pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
1767 return -EINVAL;
1768 }
1769
1770 if (!args.started) {
1771 pr_err("Kernel symbol lookup: ");
1772 return sym_not_found_error(sym_name, idx);
1773 }
1774
1775 *start = args.start;
1776 *size = args.size;
1777
1778 return 0;
1779}
1780
1781static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
1782 char type, u64 start)
1783{
1784 struct sym_args *args = arg;
1785
1786 if (!symbol_type__is_a(type, MAP__FUNCTION))
1787 return 0;
1788
1789 if (!args->started) {
1790 args->started = true;
1791 args->start = start;
1792 }
1793 /* Don't know exactly where the kernel ends, so we add a page */
1794 args->size = round_up(start, page_size) + page_size - args->start;
1795
1796 return 0;
1797}
1798
1799static int addr_filter__entire_kernel(struct addr_filter *filt)
1800{
1801 struct sym_args args = { .started = false };
1802 int err;
1803
1804 err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
1805 if (err < 0 || !args.started) {
1806 pr_err("Failed to parse /proc/kallsyms\n");
1807 return err;
1808 }
1809
1810 filt->addr = args.start;
1811 filt->size = args.size;
1812
1813 return 0;
1814}
1815
1816static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
1817{
1818 if (start + size >= filt->addr)
1819 return 0;
1820
1821 if (filt->sym_from) {
1822 pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
1823 filt->sym_to, start, filt->sym_from, filt->addr);
1824 } else {
1825 pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
1826 filt->sym_to, start, filt->addr);
1827 }
1828
1829 return -EINVAL;
1830}
1831
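/*
 * Resolve sym_from/sym_to against /proc/kallsyms ("*" selects the entire
 * kernel) and convert the filter into an address and size.
 */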
1832static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
1833{
1834 bool no_size = false;
1835 u64 start, size;
1836 int err;
1837
1838 if (symbol_conf.kptr_restrict) {
1839 pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
1840 return -EINVAL;
1841 }
1842
1843 if (filt->sym_from && !strcmp(filt->sym_from, "*"))
1844 return addr_filter__entire_kernel(filt);
1845
1846 if (filt->sym_from) {
1847 err = find_kern_sym(filt->sym_from, &start, &size,
1848 filt->sym_from_idx);
1849 if (err)
1850 return err;
1851 filt->addr = start;
1852 if (filt->range && !filt->size && !filt->sym_to) {
1853 filt->size = size;
1854 no_size = !size;
1855 }
1856 }
1857
1858 if (filt->sym_to) {
1859 err = find_kern_sym(filt->sym_to, &start, &size,
1860 filt->sym_to_idx);
1861 if (err)
1862 return err;
1863
1864 err = check_end_after_start(filt, start, size);
1865 if (err)
1866 return err;
1867 filt->size = start + size - filt->addr;
1868 no_size = !size;
1869 }
1870
1871 /* The very last symbol in kallsyms does not imply a particular size */
1872 if (no_size) {
1873 pr_err("Cannot determine size of symbol '%s'\n",
1874 filt->sym_to ? filt->sym_to : filt->sym_from);
1875 return -EINVAL;
1876 }
1877
1878 return 0;
1879}
1880
1881static struct dso *load_dso(const char *name)
1882{
1883 struct map *map;
1884 struct dso *dso;
1885
1886 map = dso__new_map(name);
1887 if (!map)
1888 return NULL;
1889
1890 map__load(map);
1891
1892 dso = dso__get(map->dso);
1893
1894 map__put(map);
1895
1896 return dso;
1897}
1898
1899static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
1900 int idx)
1901{
1902 /* Same name, and global or the n'th found or any */
1903 return !arch__compare_symbol_names(name, sym->name) &&
1904 ((!idx && sym->binding == STB_GLOBAL) ||
1905 (idx > 0 && ++*cnt == idx) ||
1906 idx < 0);
1907}
1908
1909static void print_duplicate_syms(struct dso *dso, const char *sym_name)
1910{
1911 struct symbol *sym;
1912 bool near = false;
1913 int cnt = 0;
1914
1915 pr_err("Multiple symbols with name '%s'\n", sym_name);
1916
1917 sym = dso__first_symbol(dso, MAP__FUNCTION);
1918 while (sym) {
1919 if (dso_sym_match(sym, sym_name, &cnt, -1)) {
1920 pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
1921 ++cnt, sym->start,
1922 sym->binding == STB_GLOBAL ? 'g' :
1923 sym->binding == STB_LOCAL ? 'l' : 'w',
1924 sym->name);
1925 near = true;
1926 } else if (near) {
1927 near = false;
1928 pr_err("\t\twhich is near\t\t%s\n", sym->name);
1929 }
1930 sym = dso__next_symbol(sym);
1931 }
1932
1933 pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
1934 sym_name);
1935 pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
1936}
1937
1938static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
1939 u64 *size, int idx)
1940{
1941 struct symbol *sym;
1942 int cnt = 0;
1943
1944 *start = 0;
1945 *size = 0;
1946
1947 sym = dso__first_symbol(dso, MAP__FUNCTION);
1948 while (sym) {
1949 if (*start) {
1950 if (!*size)
1951 *size = sym->start - *start;
1952 if (idx > 0) {
1953 if (*size)
1954 return 1;
1955 } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
1956 print_duplicate_syms(dso, sym_name);
1957 return -EINVAL;
1958 }
1959 } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
1960 *start = sym->start;
1961 *size = sym->end - sym->start;
1962 }
1963 sym = dso__next_symbol(sym);
1964 }
1965
1966 if (!*start)
1967 return sym_not_found_error(sym_name, idx);
1968
1969 return 0;
1970}
1971
1972static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
1973{
1974 struct symbol *first_sym = dso__first_symbol(dso, MAP__FUNCTION);
1975 struct symbol *last_sym = dso__last_symbol(dso, MAP__FUNCTION);
1976
1977 if (!first_sym || !last_sym) {
1978 pr_err("Failed to determine filter for %s\nNo symbols found.\n",
1979 filt->filename);
1980 return -EINVAL;
1981 }
1982
1983 filt->addr = first_sym->start;
1984 filt->size = last_sym->end - first_sym->start;
1985
1986 return 0;
1987}
1988
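/*
 * Resolve the symbols named in a filter: against the kernel if no file name
 * was given, otherwise against the symbols of the named DSO.
 */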
1989static int addr_filter__resolve_syms(struct addr_filter *filt)
1990{
1991 u64 start, size;
1992 struct dso *dso;
1993 int err = 0;
1994
1995 if (!filt->sym_from && !filt->sym_to)
1996 return 0;
1997
1998 if (!filt->filename)
1999 return addr_filter__resolve_kernel_syms(filt);
2000
2001 dso = load_dso(filt->filename);
2002 if (!dso) {
2003 pr_err("Failed to load symbols from: %s\n", filt->filename);
2004 return -EINVAL;
2005 }
2006
2007 if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
2008 err = addr_filter__entire_dso(filt, dso);
2009 goto put_dso;
2010 }
2011
2012 if (filt->sym_from) {
2013 err = find_dso_sym(dso, filt->sym_from, &start, &size,
2014 filt->sym_from_idx);
2015 if (err)
2016 goto put_dso;
2017 filt->addr = start;
2018 if (filt->range && !filt->size && !filt->sym_to)
2019 filt->size = size;
2020 }
2021
2022 if (filt->sym_to) {
2023 err = find_dso_sym(dso, filt->sym_to, &start, &size,
2024 filt->sym_to_idx);
2025 if (err)
2026 goto put_dso;
2027
2028 err = check_end_after_start(filt, start, size);
2029 if (err)
2030 return err;
2031
2032 filt->size = start + size - filt->addr;
2033 }
2034
2035put_dso:
2036 dso__put(dso);
2037
2038 return err;
2039}
2040
2041static char *addr_filter__to_str(struct addr_filter *filt)
2042{
2043 char filename_buf[PATH_MAX];
2044 const char *at = "";
2045 const char *fn = "";
2046 char *filter;
2047 int err;
2048
2049 if (filt->filename) {
2050 at = "@";
2051 fn = realpath(filt->filename, filename_buf);
2052 if (!fn)
2053 return NULL;
2054 }
2055
2056 if (filt->range) {
2057 err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
2058 filt->action, filt->addr, filt->size, at, fn);
2059 } else {
2060 err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
2061 filt->action, filt->addr, at, fn);
2062 }
2063
2064 return err < 0 ? NULL : filter;
2065}
2066
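/*
 * Rewrite an evsel's address filter: parse the bare filters, resolve symbol
 * names to addresses, and append the resulting address-based filters,
 * checking against the PMU's nr_addr_filters limit.
 */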
2067static int parse_addr_filter(struct perf_evsel *evsel, const char *filter,
2068 int max_nr)
2069{
2070 struct addr_filters filts;
2071 struct addr_filter *filt;
2072 int err;
2073
2074 addr_filters__init(&filts);
2075
2076 err = addr_filters__parse_bare_filter(&filts, filter);
2077 if (err)
2078 goto out_exit;
2079
2080 if (filts.cnt > max_nr) {
2081 pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
2082 filts.cnt, max_nr);
2083 err = -EINVAL;
2084 goto out_exit;
2085 }
2086
2087 list_for_each_entry(filt, &filts.head, list) {
2088 char *new_filter;
2089
2090 err = addr_filter__resolve_syms(filt);
2091 if (err)
2092 goto out_exit;
2093
2094 new_filter = addr_filter__to_str(filt);
2095 if (!new_filter) {
2096 err = -ENOMEM;
2097 goto out_exit;
2098 }
2099
2100 if (perf_evsel__append_addr_filter(evsel, new_filter)) {
2101 err = -ENOMEM;
2102 goto out_exit;
2103 }
2104 }
2105
2106out_exit:
2107 addr_filters__exit(&filts);
2108
2109 if (err) {
2110 pr_err("Failed to parse address filter: '%s'\n", filter);
2111 pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
2112 pr_err("Where multiple filters are separated by space or comma.\n");
2113 }
2114
2115 return err;
2116}
2117
2118static struct perf_pmu *perf_evsel__find_pmu(struct perf_evsel *evsel)
2119{
2120 struct perf_pmu *pmu = NULL;
2121
2122 while ((pmu = perf_pmu__scan(pmu)) != NULL) {
2123 if (pmu->type == evsel->attr.type)
2124 break;
2125 }
2126
2127 return pmu;
2128}
2129
2130static int perf_evsel__nr_addr_filter(struct perf_evsel *evsel)
2131{
2132 struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);
2133 int nr_addr_filters = 0;
2134
2135 if (!pmu)
2136 return 0;
2137
2138 perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);
2139
2140 return nr_addr_filters;
2141}
2142
2143int auxtrace_parse_filters(struct perf_evlist *evlist)
2144{
2145 struct perf_evsel *evsel;
2146 char *filter;
2147 int err, max_nr;
2148
2149 evlist__for_each_entry(evlist, evsel) {
2150 filter = evsel->filter;
2151 max_nr = perf_evsel__nr_addr_filter(evsel);
2152 if (!filter || !max_nr)
2153 continue;
2154 evsel->filter = NULL;
2155 err = parse_addr_filter(evsel, filter, max_nr);
2156 free(filter);
2157 if (err)
2158 return err;
2159 pr_debug("Address filter: %s\n", evsel->filter);
2160 }
2161
2162 return 0;
2163}