/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <linux/list.h>

#include "../perf.h"
#include "util.h"
#include "evlist.h"
#include "cpumap.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "session.h"
#include "debug.h"
#include "parse-options.h"

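/*
 * Map the kernel's AUX area ring buffer into this process. The mmap offset
 * and size are first advertised to the kernel via the perf_event_mmap_page,
 * then the area itself is mapped. A zero-length request leaves mm->base
 * NULL, which simply means AUX area tracing is not in use for this mmap.
 */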
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pr_err("Cannot use AUX area tracing mmaps\n");
	return -1;
#endif

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu;

	if (!mp->len) {
		mm->base = NULL;
		return 0;
	}

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

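/*
 * Set up the parameters for an AUX area mmap. When the length is a power of
 * 2, a mask is calculated so that ring buffer offsets can be computed with a
 * cheap bitwise AND instead of a modulo.
 */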
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

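/*
 * Record which cpu/tid an AUX area mmap belongs to: when tracing per-cpu,
 * the index selects a cpu from the evlist's cpu map; otherwise it selects a
 * thread from the thread map.
 */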
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->cpus->map[idx];
		if (evlist->threads)
			mp->tid = thread_map__pid(evlist->threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = thread_map__pid(evlist->threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

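/*
 * Allocate an array of queues, guarding against multiplication overflow in
 * the allocation size, and initialize each queue's buffer list.
 */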
static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

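/*
 * Grow the queue array to at least new_nr_queues, doubling the size until it
 * is big enough, then splice each old queue's buffer list onto the
 * corresponding new queue.
 */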
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		/* Preserve each queue's identity across the resize */
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	/* The old array's buffer lists have been spliced away, so free it */
	free(queues->queue_array);
	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data_file__fd(session->file);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

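/*
 * Append a buffer to the queue with the given index, growing the queue array
 * if necessary. Buffers are numbered in the order they are added, and each
 * queue accepts data for only one cpu/tid pairing.
 */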
static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
		return -EINVAL;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

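/*
 * On 32-bit platforms a buffer that is too big to map in one piece is carved
 * into BUFFER_LIMIT_FOR_32_BIT-sized chunks. Each chunk after the first is
 * flagged 'consecutive' to record that it continues the previous one with no
 * gap in the trace data.
 */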
static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__add_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

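/*
 * How the buffer data will be accessed depends on how the perf.data file was
 * opened: if the whole file is mapped in one go, point straight into that
 * mapping; if it is a pipe, the data must be copied out now because it cannot
 * be re-read later; otherwise leave the data on disk to be mapped on demand,
 * splitting over-sized buffers on 32-bit platforms first.
 */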
static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
					     struct perf_session *session,
					     unsigned int idx,
					     struct auxtrace_buffer *buffer)
{
	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data_file__is_pipe(session->file)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			return -ENOMEM;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		int err;

		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			return err;
	}

	return auxtrace_queues__add_buffer(queues, idx, buffer);
}

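/*
 * Create an auxtrace_buffer describing a PERF_RECORD_AUXTRACE event's trace
 * data and add it to the queue selected by the event's idx field.
 */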
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer *buffer;
	unsigned int idx;
	int err;

	buffer = zalloc(sizeof(struct auxtrace_buffer));
	if (!buffer)
		return -ENOMEM;

	buffer->pid = -1;
	buffer->tid = event->auxtrace.tid;
	buffer->cpu = event->auxtrace.cpu;
	buffer->data_offset = data_offset;
	buffer->offset = event->auxtrace.offset;
	buffer->reference = event->auxtrace.reference;
	buffer->size = event->auxtrace.size;
	idx = event->auxtrace.idx;

	err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
	if (err)
		goto out_err;

	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_err:
	auxtrace_buffer__free(buffer);
	return err;
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct auxtrace_event) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

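/*
 * The auxtrace heap is a binary min-heap ordered by 'ordinal' (typically a
 * timestamp), used to merge trace data from multiple queues in time order.
 * auxtrace_heapify() inserts an entry at position 'pos' and sifts it up
 * towards the root while its ordinal is smaller than its parent's.
 */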
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

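/*
 * Remove the smallest entry (the root): walk down the tree pulling the
 * smaller child up into the hole at each level, then re-insert the last
 * entry at the final hole position with auxtrace_heapify().
 */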
void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;

	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

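/*
 * The auxtrace_record__* functions below wrap the optional callbacks of the
 * architecture/PMU-specific trace recorder ('itr'). A NULL 'itr', or a
 * callback the recorder does not provide, falls back to a harmless default.
 */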
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
{
	if (itr)
		return itr->info_priv_size(itr);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct perf_evlist *evlist,
			     struct record_opts *opts)
{
	if (itr)
		return itr->recording_options(itr, evlist, opts);
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	if (itr)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

struct auxtrace_record *__weak
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

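/*
 * The auxtrace index is kept as a list of fixed-size chunks, each holding up
 * to PERF_AUXTRACE_INDEX_ENTRY_COUNT entries. Return the last chunk that
 * still has room, allocating a new one when the list is empty or full.
 */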
static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

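/*
 * Read the on-disk index: a u64 entry count followed by that many
 * auxtrace_index_entry records, byte-swapping as needed. The size check
 * guards against a corrupt or truncated count.
 */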
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

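/*
 * Iterate a queue's buffer list: pass NULL to get the first buffer, or a
 * buffer to get the one after it. Returns NULL at the end of the list.
 */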
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

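/*
 * Make a buffer's trace data available, mmapping it from the perf.data file
 * if it is not in memory already. mmap offsets must be page-aligned, so the
 * requested range is extended down to a page boundary and 'data' points
 * 'adj' bytes into the mapping.
 */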
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->ip = ip;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr);
	ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
				      union perf_event *event,
				      struct perf_session *session __maybe_unused)
{
	enum auxtrace_type type = event->auxtrace_info.type;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}
}

s64 perf_event__process_auxtrace(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_session *session)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRIx64" offset: %#"PRIx64" ref: %#"PRIx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
{
	synth_opts->instructions = true;
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->errors = true;
	synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
	synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
}

/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here. That documentation is added by the cset
 * that introduces support for these options in 'perf script'.
 */
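/*
 * Summary of the option string grammar handled below:
 *   i[period][unit]	synthesize instruction events; the optional period is
 *			in instructions ('i'), ticks ('t') or time ('ms',
 *			'us' or 'ns', stored internally as nanoseconds)
 *   b			synthesize branch events
 *   x			synthesize transaction events
 *   e			synthesize error events
 *   d			create a debug log
 *   c			synthesize branches at call sites only
 *   r			synthesize branches at return sites only
 *   g[depth]		synthesize a call chain for each sample
 * Spaces and commas are accepted as separators.
 */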
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!synth_opts->period_type)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!synth_opts->period)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);
	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

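/*
 * Read new data out of the AUX area ring buffer and hand it to fn() to be
 * written to the perf.data file as a PERF_RECORD_AUXTRACE event followed by
 * the raw trace bytes. The data between the old and new head positions may
 * wrap around the end of the buffer, so it is passed as up to two pieces
 * (data1/len1 and data2/len2). Returns 1 if data was processed, 0 if there
 * was none, and negative on error.
 */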
static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & 7;
	if (padding)
		padding = 8 - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	free(c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

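/*
 * Insert an entry keyed by 'key'. If the configured limit is exceeded, the
 * entire cache is dropped first, as described above, so the cache never does
 * any eviction finer-grained than "start again empty".
 */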
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}