/*
 * CTF writing support via babeltrace.
 *
 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
12 #include <linux/compiler.h>
13 #include <linux/kernel.h>
14 #include <babeltrace/ctf-writer/writer.h>
15 #include <babeltrace/ctf-writer/clock.h>
16 #include <babeltrace/ctf-writer/stream.h>
17 #include <babeltrace/ctf-writer/event.h>
18 #include <babeltrace/ctf-writer/event-types.h>
19 #include <babeltrace/ctf-writer/event-fields.h>
20 #include <babeltrace/ctf-ir/utils.h>
21 #include <babeltrace/ctf/events.h>
22 #include <traceevent/event-parse.h>
24 #include "data-convert-bt.h"
33 #include "sane_ctype.h"
35 #define pr_N(n, fmt, ...) \
36 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
38 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
39 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
41 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
44 struct bt_ctf_event_class
*event_class
;
50 struct bt_ctf_stream
*stream
;
56 /* writer primitives */
57 struct bt_ctf_writer
*writer
;
58 struct ctf_stream
**stream
;
60 struct bt_ctf_stream_class
*stream_class
;
61 struct bt_ctf_clock
*clock
;
66 struct bt_ctf_field_type
*s64
;
67 struct bt_ctf_field_type
*u64
;
68 struct bt_ctf_field_type
*s32
;
69 struct bt_ctf_field_type
*u32
;
70 struct bt_ctf_field_type
*string
;
71 struct bt_ctf_field_type
*u32_hex
;
72 struct bt_ctf_field_type
*u64_hex
;
74 struct bt_ctf_field_type
*array
[6];
76 struct bt_ctf_event_class
*comm_class
;
77 struct bt_ctf_event_class
*exit_class
;
78 struct bt_ctf_event_class
*fork_class
;
82 struct perf_tool tool
;
83 struct ctf_writer writer
;
89 /* Ordered events configured queue size. */
93 static int value_set(struct bt_ctf_field_type
*type
,
94 struct bt_ctf_event
*event
,
95 const char *name
, u64 val
)
97 struct bt_ctf_field
*field
;
98 bool sign
= bt_ctf_field_type_integer_get_signed(type
);
101 field
= bt_ctf_field_create(type
);
103 pr_err("failed to create a field %s\n", name
);
108 ret
= bt_ctf_field_signed_integer_set_value(field
, val
);
110 pr_err("failed to set field value %s\n", name
);
114 ret
= bt_ctf_field_unsigned_integer_set_value(field
, val
);
116 pr_err("failed to set field value %s\n", name
);
121 ret
= bt_ctf_event_set_payload(event
, name
, field
);
123 pr_err("failed to set payload %s\n", name
);
127 pr2(" SET [%s = %" PRIu64
"]\n", name
, val
);
130 bt_ctf_field_put(field
);
134 #define __FUNC_VALUE_SET(_name, _val_type) \
135 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \
136 struct bt_ctf_event *event, \
140 struct bt_ctf_field_type *type = cw->data._name; \
141 return value_set(type, event, name, (u64) val); \
144 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
150 __FUNC_VALUE_SET(u64_hex
, u64
)
152 static int string_set_value(struct bt_ctf_field
*field
, const char *string
);
153 static __maybe_unused
int
154 value_set_string(struct ctf_writer
*cw
, struct bt_ctf_event
*event
,
155 const char *name
, const char *string
)
157 struct bt_ctf_field_type
*type
= cw
->data
.string
;
158 struct bt_ctf_field
*field
;
161 field
= bt_ctf_field_create(type
);
163 pr_err("failed to create a field %s\n", name
);
167 ret
= string_set_value(field
, string
);
169 pr_err("failed to set value %s\n", name
);
173 ret
= bt_ctf_event_set_payload(event
, name
, field
);
175 pr_err("failed to set payload %s\n", name
);
178 bt_ctf_field_put(field
);
182 static struct bt_ctf_field_type
*
183 get_tracepoint_field_type(struct ctf_writer
*cw
, struct format_field
*field
)
185 unsigned long flags
= field
->flags
;
187 if (flags
& FIELD_IS_STRING
)
188 return cw
->data
.string
;
190 if (!(flags
& FIELD_IS_SIGNED
)) {
191 /* unsigned long are mostly pointers */
192 if (flags
& FIELD_IS_LONG
|| flags
& FIELD_IS_POINTER
)
193 return cw
->data
.u64_hex
;
196 if (flags
& FIELD_IS_SIGNED
) {
197 if (field
->size
== 8)
203 if (field
->size
== 8)
/*
 * Sign-extend a 'size'-byte integer read into an unsigned long long,
 * so that negative tracepoint values survive widening to 64 bits.
 * For size == 8 (or any unexpected size) the value is returned as-is.
 */
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Directly set value_mask for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	case 8:
		/*
		 * For 64 bit value, return it self. There is no need
		 * to fill high bits.
		 */
		/* Fall through */
	default:
		return value_int;
	}

	/* If it is a positive value, don't adjust. */
	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;

	/* Fill upper part of value_int with 1 to make it a negative long long. */
	return (value_int & value_mask) | ~value_mask;
}
/*
 * Store 'string' into a CTF string field.  Non-printable bytes are
 * escaped as "\xNN"; the escaped copy is built lazily, only when the
 * first unprintable character is found.  Returns the babeltrace
 * set-value result.
 */
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	size_t len = strlen(string), i, p;
	int err;

	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				/*
				 * Worst case: each remaining byte expands to
				 * 4 chars ("\xNN"), plus NUL terminator.
				 */
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);
			}
			/* 4 bytes of escape sequence; p already counts one. */
			strncat(buffer + p, numstr, 4);
			p += 3;
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}
284 static int add_tracepoint_field_value(struct ctf_writer
*cw
,
285 struct bt_ctf_event_class
*event_class
,
286 struct bt_ctf_event
*event
,
287 struct perf_sample
*sample
,
288 struct format_field
*fmtf
)
290 struct bt_ctf_field_type
*type
;
291 struct bt_ctf_field
*array_field
;
292 struct bt_ctf_field
*field
;
293 const char *name
= fmtf
->name
;
294 void *data
= sample
->raw_data
;
295 unsigned long flags
= fmtf
->flags
;
296 unsigned int n_items
;
303 offset
= fmtf
->offset
;
305 if (flags
& FIELD_IS_STRING
)
306 flags
&= ~FIELD_IS_ARRAY
;
308 if (flags
& FIELD_IS_DYNAMIC
) {
309 unsigned long long tmp_val
;
311 tmp_val
= pevent_read_number(fmtf
->event
->pevent
,
318 if (flags
& FIELD_IS_ARRAY
) {
320 type
= bt_ctf_event_class_get_field_by_name(
322 array_field
= bt_ctf_field_create(type
);
323 bt_ctf_field_type_put(type
);
325 pr_err("Failed to create array type %s\n", name
);
329 len
= fmtf
->size
/ fmtf
->arraylen
;
330 n_items
= fmtf
->arraylen
;
336 type
= get_tracepoint_field_type(cw
, fmtf
);
338 for (i
= 0; i
< n_items
; i
++) {
339 if (flags
& FIELD_IS_ARRAY
)
340 field
= bt_ctf_field_array_get_field(array_field
, i
);
342 field
= bt_ctf_field_create(type
);
345 pr_err("failed to create a field %s\n", name
);
349 if (flags
& FIELD_IS_STRING
)
350 ret
= string_set_value(field
, data
+ offset
+ i
* len
);
352 unsigned long long value_int
;
354 value_int
= pevent_read_number(
356 data
+ offset
+ i
* len
, len
);
358 if (!(flags
& FIELD_IS_SIGNED
))
359 ret
= bt_ctf_field_unsigned_integer_set_value(
362 ret
= bt_ctf_field_signed_integer_set_value(
363 field
, adjust_signedness(value_int
, len
));
367 pr_err("failed to set file value %s\n", name
);
370 if (!(flags
& FIELD_IS_ARRAY
)) {
371 ret
= bt_ctf_event_set_payload(event
, name
, field
);
373 pr_err("failed to set payload %s\n", name
);
377 bt_ctf_field_put(field
);
379 if (flags
& FIELD_IS_ARRAY
) {
380 ret
= bt_ctf_event_set_payload(event
, name
, array_field
);
382 pr_err("Failed add payload array %s\n", name
);
385 bt_ctf_field_put(array_field
);
390 bt_ctf_field_put(field
);
394 static int add_tracepoint_fields_values(struct ctf_writer
*cw
,
395 struct bt_ctf_event_class
*event_class
,
396 struct bt_ctf_event
*event
,
397 struct format_field
*fields
,
398 struct perf_sample
*sample
)
400 struct format_field
*field
;
403 for (field
= fields
; field
; field
= field
->next
) {
404 ret
= add_tracepoint_field_value(cw
, event_class
, event
, sample
,
412 static int add_tracepoint_values(struct ctf_writer
*cw
,
413 struct bt_ctf_event_class
*event_class
,
414 struct bt_ctf_event
*event
,
415 struct perf_evsel
*evsel
,
416 struct perf_sample
*sample
)
418 struct format_field
*common_fields
= evsel
->tp_format
->format
.common_fields
;
419 struct format_field
*fields
= evsel
->tp_format
->format
.fields
;
422 ret
= add_tracepoint_fields_values(cw
, event_class
, event
,
423 common_fields
, sample
);
425 ret
= add_tracepoint_fields_values(cw
, event_class
, event
,
432 add_bpf_output_values(struct bt_ctf_event_class
*event_class
,
433 struct bt_ctf_event
*event
,
434 struct perf_sample
*sample
)
436 struct bt_ctf_field_type
*len_type
, *seq_type
;
437 struct bt_ctf_field
*len_field
, *seq_field
;
438 unsigned int raw_size
= sample
->raw_size
;
439 unsigned int nr_elements
= raw_size
/ sizeof(u32
);
443 if (nr_elements
* sizeof(u32
) != raw_size
)
444 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
445 raw_size
, nr_elements
* sizeof(u32
) - raw_size
);
447 len_type
= bt_ctf_event_class_get_field_by_name(event_class
, "raw_len");
448 len_field
= bt_ctf_field_create(len_type
);
450 pr_err("failed to create 'raw_len' for bpf output event\n");
455 ret
= bt_ctf_field_unsigned_integer_set_value(len_field
, nr_elements
);
457 pr_err("failed to set field value for raw_len\n");
460 ret
= bt_ctf_event_set_payload(event
, "raw_len", len_field
);
462 pr_err("failed to set payload to raw_len\n");
466 seq_type
= bt_ctf_event_class_get_field_by_name(event_class
, "raw_data");
467 seq_field
= bt_ctf_field_create(seq_type
);
469 pr_err("failed to create 'raw_data' for bpf output event\n");
474 ret
= bt_ctf_field_sequence_set_length(seq_field
, len_field
);
476 pr_err("failed to set length of 'raw_data'\n");
480 for (i
= 0; i
< nr_elements
; i
++) {
481 struct bt_ctf_field
*elem_field
=
482 bt_ctf_field_sequence_get_field(seq_field
, i
);
484 ret
= bt_ctf_field_unsigned_integer_set_value(elem_field
,
485 ((u32
*)(sample
->raw_data
))[i
]);
487 bt_ctf_field_put(elem_field
);
489 pr_err("failed to set raw_data[%d]\n", i
);
494 ret
= bt_ctf_event_set_payload(event
, "raw_data", seq_field
);
496 pr_err("failed to set payload for raw_data\n");
499 bt_ctf_field_put(seq_field
);
501 bt_ctf_field_type_put(seq_type
);
503 bt_ctf_field_put(len_field
);
505 bt_ctf_field_type_put(len_type
);
509 static int add_generic_values(struct ctf_writer
*cw
,
510 struct bt_ctf_event
*event
,
511 struct perf_evsel
*evsel
,
512 struct perf_sample
*sample
)
514 u64 type
= evsel
->attr
.sample_type
;
519 * PERF_SAMPLE_TIME - not needed as we have it in
521 * PERF_SAMPLE_READ - TODO
522 * PERF_SAMPLE_CALLCHAIN - TODO
523 * PERF_SAMPLE_RAW - tracepoint fields are handled separately
524 * PERF_SAMPLE_BRANCH_STACK - TODO
525 * PERF_SAMPLE_REGS_USER - TODO
526 * PERF_SAMPLE_STACK_USER - TODO
529 if (type
& PERF_SAMPLE_IP
) {
530 ret
= value_set_u64_hex(cw
, event
, "perf_ip", sample
->ip
);
535 if (type
& PERF_SAMPLE_TID
) {
536 ret
= value_set_s32(cw
, event
, "perf_tid", sample
->tid
);
540 ret
= value_set_s32(cw
, event
, "perf_pid", sample
->pid
);
545 if ((type
& PERF_SAMPLE_ID
) ||
546 (type
& PERF_SAMPLE_IDENTIFIER
)) {
547 ret
= value_set_u64(cw
, event
, "perf_id", sample
->id
);
552 if (type
& PERF_SAMPLE_STREAM_ID
) {
553 ret
= value_set_u64(cw
, event
, "perf_stream_id", sample
->stream_id
);
558 if (type
& PERF_SAMPLE_PERIOD
) {
559 ret
= value_set_u64(cw
, event
, "perf_period", sample
->period
);
564 if (type
& PERF_SAMPLE_WEIGHT
) {
565 ret
= value_set_u64(cw
, event
, "perf_weight", sample
->weight
);
570 if (type
& PERF_SAMPLE_DATA_SRC
) {
571 ret
= value_set_u64(cw
, event
, "perf_data_src",
577 if (type
& PERF_SAMPLE_TRANSACTION
) {
578 ret
= value_set_u64(cw
, event
, "perf_transaction",
579 sample
->transaction
);
587 static int ctf_stream__flush(struct ctf_stream
*cs
)
592 err
= bt_ctf_stream_flush(cs
->stream
);
594 pr_err("CTF stream %d flush failed\n", cs
->cpu
);
596 pr("Flush stream for cpu %d (%u samples)\n",
605 static struct ctf_stream
*ctf_stream__create(struct ctf_writer
*cw
, int cpu
)
607 struct ctf_stream
*cs
;
608 struct bt_ctf_field
*pkt_ctx
= NULL
;
609 struct bt_ctf_field
*cpu_field
= NULL
;
610 struct bt_ctf_stream
*stream
= NULL
;
613 cs
= zalloc(sizeof(*cs
));
615 pr_err("Failed to allocate ctf stream\n");
619 stream
= bt_ctf_writer_create_stream(cw
->writer
, cw
->stream_class
);
621 pr_err("Failed to create CTF stream\n");
625 pkt_ctx
= bt_ctf_stream_get_packet_context(stream
);
627 pr_err("Failed to obtain packet context\n");
631 cpu_field
= bt_ctf_field_structure_get_field(pkt_ctx
, "cpu_id");
632 bt_ctf_field_put(pkt_ctx
);
634 pr_err("Failed to obtain cpu field\n");
638 ret
= bt_ctf_field_unsigned_integer_set_value(cpu_field
, (u32
) cpu
);
640 pr_err("Failed to update CPU number\n");
644 bt_ctf_field_put(cpu_field
);
652 bt_ctf_field_put(cpu_field
);
654 bt_ctf_stream_put(stream
);
660 static void ctf_stream__delete(struct ctf_stream
*cs
)
663 bt_ctf_stream_put(cs
->stream
);
668 static struct ctf_stream
*ctf_stream(struct ctf_writer
*cw
, int cpu
)
670 struct ctf_stream
*cs
= cw
->stream
[cpu
];
673 cs
= ctf_stream__create(cw
, cpu
);
674 cw
->stream
[cpu
] = cs
;
680 static int get_sample_cpu(struct ctf_writer
*cw
, struct perf_sample
*sample
,
681 struct perf_evsel
*evsel
)
685 if (evsel
->attr
.sample_type
& PERF_SAMPLE_CPU
)
688 if (cpu
> cw
->stream_cnt
) {
689 pr_err("Event was recorded for CPU %d, limit is at %d.\n",
690 cpu
, cw
->stream_cnt
);
697 #define STREAM_FLUSH_COUNT 100000
700 * Currently we have no other way to determine the
701 * time for the stream flush other than keep track
702 * of the number of events and check it against
705 static bool is_flush_needed(struct ctf_stream
*cs
)
707 return cs
->count
>= STREAM_FLUSH_COUNT
;
710 static int process_sample_event(struct perf_tool
*tool
,
711 union perf_event
*_event
,
712 struct perf_sample
*sample
,
713 struct perf_evsel
*evsel
,
714 struct machine
*machine __maybe_unused
)
716 struct convert
*c
= container_of(tool
, struct convert
, tool
);
717 struct evsel_priv
*priv
= evsel
->priv
;
718 struct ctf_writer
*cw
= &c
->writer
;
719 struct ctf_stream
*cs
;
720 struct bt_ctf_event_class
*event_class
;
721 struct bt_ctf_event
*event
;
724 if (WARN_ONCE(!priv
, "Failed to setup all events.\n"))
727 event_class
= priv
->event_class
;
731 c
->events_size
+= _event
->header
.size
;
733 pr_time2(sample
->time
, "sample %" PRIu64
"\n", c
->events_count
);
735 event
= bt_ctf_event_create(event_class
);
737 pr_err("Failed to create an CTF event\n");
741 bt_ctf_clock_set_time(cw
->clock
, sample
->time
);
743 ret
= add_generic_values(cw
, event
, evsel
, sample
);
747 if (evsel
->attr
.type
== PERF_TYPE_TRACEPOINT
) {
748 ret
= add_tracepoint_values(cw
, event_class
, event
,
754 if (perf_evsel__is_bpf_output(evsel
)) {
755 ret
= add_bpf_output_values(event_class
, event
, sample
);
760 cs
= ctf_stream(cw
, get_sample_cpu(cw
, sample
, evsel
));
762 if (is_flush_needed(cs
))
763 ctf_stream__flush(cs
);
766 bt_ctf_stream_append_event(cs
->stream
, event
);
769 bt_ctf_event_put(event
);
773 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \
775 ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
780 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) \
781 static int process_##_name##_event(struct perf_tool *tool, \
782 union perf_event *_event, \
783 struct perf_sample *sample, \
784 struct machine *machine) \
786 struct convert *c = container_of(tool, struct convert, tool);\
787 struct ctf_writer *cw = &c->writer; \
788 struct bt_ctf_event_class *event_class = cw->_name##_class;\
789 struct bt_ctf_event *event; \
790 struct ctf_stream *cs; \
793 c->non_sample_count++; \
794 c->events_size += _event->header.size; \
795 event = bt_ctf_event_create(event_class); \
797 pr_err("Failed to create an CTF event\n"); \
801 bt_ctf_clock_set_time(cw->clock, sample->time); \
803 cs = ctf_stream(cw, 0); \
805 if (is_flush_needed(cs)) \
806 ctf_stream__flush(cs); \
809 bt_ctf_stream_append_event(cs->stream, event); \
811 bt_ctf_event_put(event); \
813 return perf_event__process_##_name(tool, _event, sample, machine);\
816 __FUNC_PROCESS_NON_SAMPLE(comm
,
817 __NON_SAMPLE_SET_FIELD(comm
, u32
, pid
);
818 __NON_SAMPLE_SET_FIELD(comm
, u32
, tid
);
819 __NON_SAMPLE_SET_FIELD(comm
, string
, comm
);
821 __FUNC_PROCESS_NON_SAMPLE(fork
,
822 __NON_SAMPLE_SET_FIELD(fork
, u32
, pid
);
823 __NON_SAMPLE_SET_FIELD(fork
, u32
, ppid
);
824 __NON_SAMPLE_SET_FIELD(fork
, u32
, tid
);
825 __NON_SAMPLE_SET_FIELD(fork
, u32
, ptid
);
826 __NON_SAMPLE_SET_FIELD(fork
, u64
, time
);
829 __FUNC_PROCESS_NON_SAMPLE(exit
,
830 __NON_SAMPLE_SET_FIELD(fork
, u32
, pid
);
831 __NON_SAMPLE_SET_FIELD(fork
, u32
, ppid
);
832 __NON_SAMPLE_SET_FIELD(fork
, u32
, tid
);
833 __NON_SAMPLE_SET_FIELD(fork
, u32
, ptid
);
834 __NON_SAMPLE_SET_FIELD(fork
, u64
, time
);
836 #undef __NON_SAMPLE_SET_FIELD
837 #undef __FUNC_PROCESS_NON_SAMPLE
/*
 * If dup < 0, add a prefix. Else, add _dupl_X suffix.
 *
 * Returns a freshly malloc'd name (caller frees), or NULL on failure
 * or when dup >= 10 (give up on de-duplication).  Ownership note: when
 * 'name' is a previous intermediate (i.e. name != orig_name) it is
 * freed here.
 */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;
	/*
	 * Add '_' prefix to potential keyword. According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updating may require us to use '$'.
	 */
	if (dup < 0)
		len = strlen(name) + sizeof("_");
	else
		len = strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return new_name;
}
875 static int event_class_add_field(struct bt_ctf_event_class
*event_class
,
876 struct bt_ctf_field_type
*type
,
877 struct format_field
*field
)
879 struct bt_ctf_field_type
*t
= NULL
;
884 /* alias was already assigned */
885 if (field
->alias
!= field
->name
)
886 return bt_ctf_event_class_add_field(event_class
, type
,
887 (char *)field
->alias
);
891 /* If 'name' is a keywork, add prefix. */
892 if (bt_ctf_validate_identifier(name
))
893 name
= change_name(name
, field
->name
, -1);
896 pr_err("Failed to fix invalid identifier.");
899 while ((t
= bt_ctf_event_class_get_field_by_name(event_class
, name
))) {
900 bt_ctf_field_type_put(t
);
901 name
= change_name(name
, field
->name
, dup
++);
903 pr_err("Failed to create dup name for '%s'\n", field
->name
);
908 ret
= bt_ctf_event_class_add_field(event_class
, type
, name
);
915 static int add_tracepoint_fields_types(struct ctf_writer
*cw
,
916 struct format_field
*fields
,
917 struct bt_ctf_event_class
*event_class
)
919 struct format_field
*field
;
922 for (field
= fields
; field
; field
= field
->next
) {
923 struct bt_ctf_field_type
*type
;
924 unsigned long flags
= field
->flags
;
926 pr2(" field '%s'\n", field
->name
);
928 type
= get_tracepoint_field_type(cw
, field
);
933 * A string is an array of chars. For this we use the string
934 * type and don't care that it is an array. What we don't
935 * support is an array of strings.
937 if (flags
& FIELD_IS_STRING
)
938 flags
&= ~FIELD_IS_ARRAY
;
940 if (flags
& FIELD_IS_ARRAY
)
941 type
= bt_ctf_field_type_array_create(type
, field
->arraylen
);
943 ret
= event_class_add_field(event_class
, type
, field
);
945 if (flags
& FIELD_IS_ARRAY
)
946 bt_ctf_field_type_put(type
);
949 pr_err("Failed to add field '%s': %d\n",
958 static int add_tracepoint_types(struct ctf_writer
*cw
,
959 struct perf_evsel
*evsel
,
960 struct bt_ctf_event_class
*class)
962 struct format_field
*common_fields
= evsel
->tp_format
->format
.common_fields
;
963 struct format_field
*fields
= evsel
->tp_format
->format
.fields
;
966 ret
= add_tracepoint_fields_types(cw
, common_fields
, class);
968 ret
= add_tracepoint_fields_types(cw
, fields
, class);
973 static int add_bpf_output_types(struct ctf_writer
*cw
,
974 struct bt_ctf_event_class
*class)
976 struct bt_ctf_field_type
*len_type
= cw
->data
.u32
;
977 struct bt_ctf_field_type
*seq_base_type
= cw
->data
.u32_hex
;
978 struct bt_ctf_field_type
*seq_type
;
981 ret
= bt_ctf_event_class_add_field(class, len_type
, "raw_len");
985 seq_type
= bt_ctf_field_type_sequence_create(seq_base_type
, "raw_len");
989 return bt_ctf_event_class_add_field(class, seq_type
, "raw_data");
992 static int add_generic_types(struct ctf_writer
*cw
, struct perf_evsel
*evsel
,
993 struct bt_ctf_event_class
*event_class
)
995 u64 type
= evsel
->attr
.sample_type
;
999 * PERF_SAMPLE_TIME - not needed as we have it in
1001 * PERF_SAMPLE_READ - TODO
1002 * PERF_SAMPLE_CALLCHAIN - TODO
1003 * PERF_SAMPLE_RAW - tracepoint fields and BPF output
1004 * are handled separately
1005 * PERF_SAMPLE_BRANCH_STACK - TODO
1006 * PERF_SAMPLE_REGS_USER - TODO
1007 * PERF_SAMPLE_STACK_USER - TODO
1010 #define ADD_FIELD(cl, t, n) \
1012 pr2(" field '%s'\n", n); \
1013 if (bt_ctf_event_class_add_field(cl, t, n)) { \
1014 pr_err("Failed to add field '%s';\n", n); \
1019 if (type
& PERF_SAMPLE_IP
)
1020 ADD_FIELD(event_class
, cw
->data
.u64_hex
, "perf_ip");
1022 if (type
& PERF_SAMPLE_TID
) {
1023 ADD_FIELD(event_class
, cw
->data
.s32
, "perf_tid");
1024 ADD_FIELD(event_class
, cw
->data
.s32
, "perf_pid");
1027 if ((type
& PERF_SAMPLE_ID
) ||
1028 (type
& PERF_SAMPLE_IDENTIFIER
))
1029 ADD_FIELD(event_class
, cw
->data
.u64
, "perf_id");
1031 if (type
& PERF_SAMPLE_STREAM_ID
)
1032 ADD_FIELD(event_class
, cw
->data
.u64
, "perf_stream_id");
1034 if (type
& PERF_SAMPLE_PERIOD
)
1035 ADD_FIELD(event_class
, cw
->data
.u64
, "perf_period");
1037 if (type
& PERF_SAMPLE_WEIGHT
)
1038 ADD_FIELD(event_class
, cw
->data
.u64
, "perf_weight");
1040 if (type
& PERF_SAMPLE_DATA_SRC
)
1041 ADD_FIELD(event_class
, cw
->data
.u64
, "perf_data_src");
1043 if (type
& PERF_SAMPLE_TRANSACTION
)
1044 ADD_FIELD(event_class
, cw
->data
.u64
, "perf_transaction");
1050 static int add_event(struct ctf_writer
*cw
, struct perf_evsel
*evsel
)
1052 struct bt_ctf_event_class
*event_class
;
1053 struct evsel_priv
*priv
;
1054 const char *name
= perf_evsel__name(evsel
);
1057 pr("Adding event '%s' (type %d)\n", name
, evsel
->attr
.type
);
1059 event_class
= bt_ctf_event_class_create(name
);
1063 ret
= add_generic_types(cw
, evsel
, event_class
);
1067 if (evsel
->attr
.type
== PERF_TYPE_TRACEPOINT
) {
1068 ret
= add_tracepoint_types(cw
, evsel
, event_class
);
1073 if (perf_evsel__is_bpf_output(evsel
)) {
1074 ret
= add_bpf_output_types(cw
, event_class
);
1079 ret
= bt_ctf_stream_class_add_event_class(cw
->stream_class
, event_class
);
1081 pr("Failed to add event class into stream.\n");
1085 priv
= malloc(sizeof(*priv
));
1089 priv
->event_class
= event_class
;
1094 bt_ctf_event_class_put(event_class
);
1095 pr_err("Failed to add event '%s'.\n", name
);
1099 static int setup_events(struct ctf_writer
*cw
, struct perf_session
*session
)
1101 struct perf_evlist
*evlist
= session
->evlist
;
1102 struct perf_evsel
*evsel
;
1105 evlist__for_each_entry(evlist
, evsel
) {
1106 ret
= add_event(cw
, evsel
);
1113 #define __NON_SAMPLE_ADD_FIELD(t, n) \
1115 pr2(" field '%s'\n", #n); \
1116 if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1117 pr_err("Failed to add field '%s';\n", #n);\
1122 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \
1123 static int add_##_name##_event(struct ctf_writer *cw) \
1125 struct bt_ctf_event_class *event_class; \
1128 pr("Adding "#_name" event\n"); \
1129 event_class = bt_ctf_event_class_create("perf_" #_name);\
1134 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1136 pr("Failed to add event class '"#_name"' into stream.\n");\
1140 cw->_name##_class = event_class; \
1141 bt_ctf_event_class_put(event_class); \
1145 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm
,
1146 __NON_SAMPLE_ADD_FIELD(u32
, pid
);
1147 __NON_SAMPLE_ADD_FIELD(u32
, tid
);
1148 __NON_SAMPLE_ADD_FIELD(string
, comm
);
1151 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork
,
1152 __NON_SAMPLE_ADD_FIELD(u32
, pid
);
1153 __NON_SAMPLE_ADD_FIELD(u32
, ppid
);
1154 __NON_SAMPLE_ADD_FIELD(u32
, tid
);
1155 __NON_SAMPLE_ADD_FIELD(u32
, ptid
);
1156 __NON_SAMPLE_ADD_FIELD(u64
, time
);
1159 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit
,
1160 __NON_SAMPLE_ADD_FIELD(u32
, pid
);
1161 __NON_SAMPLE_ADD_FIELD(u32
, ppid
);
1162 __NON_SAMPLE_ADD_FIELD(u32
, tid
);
1163 __NON_SAMPLE_ADD_FIELD(u32
, ptid
);
1164 __NON_SAMPLE_ADD_FIELD(u64
, time
);
1167 #undef __NON_SAMPLE_ADD_FIELD
1168 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1170 static int setup_non_sample_events(struct ctf_writer
*cw
,
1171 struct perf_session
*session __maybe_unused
)
1175 ret
= add_comm_event(cw
);
1178 ret
= add_exit_event(cw
);
1181 ret
= add_fork_event(cw
);
1187 static void cleanup_events(struct perf_session
*session
)
1189 struct perf_evlist
*evlist
= session
->evlist
;
1190 struct perf_evsel
*evsel
;
1192 evlist__for_each_entry(evlist
, evsel
) {
1193 struct evsel_priv
*priv
;
1196 bt_ctf_event_class_put(priv
->event_class
);
1197 zfree(&evsel
->priv
);
1200 perf_evlist__delete(evlist
);
1201 session
->evlist
= NULL
;
1204 static int setup_streams(struct ctf_writer
*cw
, struct perf_session
*session
)
1206 struct ctf_stream
**stream
;
1207 struct perf_header
*ph
= &session
->header
;
1211 * Try to get the number of cpus used in the data file,
1212 * if not present fallback to the MAX_CPUS.
1214 ncpus
= ph
->env
.nr_cpus_avail
?: MAX_CPUS
;
1216 stream
= zalloc(sizeof(*stream
) * ncpus
);
1218 pr_err("Failed to allocate streams.\n");
1222 cw
->stream
= stream
;
1223 cw
->stream_cnt
= ncpus
;
1227 static void free_streams(struct ctf_writer
*cw
)
1231 for (cpu
= 0; cpu
< cw
->stream_cnt
; cpu
++)
1232 ctf_stream__delete(cw
->stream
[cpu
]);
1237 static int ctf_writer__setup_env(struct ctf_writer
*cw
,
1238 struct perf_session
*session
)
1240 struct perf_header
*header
= &session
->header
;
1241 struct bt_ctf_writer
*writer
= cw
->writer
;
1243 #define ADD(__n, __v) \
1245 if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
1249 ADD("host", header
->env
.hostname
);
1250 ADD("sysname", "Linux");
1251 ADD("release", header
->env
.os_release
);
1252 ADD("version", header
->env
.version
);
1253 ADD("machine", header
->env
.arch
);
1254 ADD("domain", "kernel");
1255 ADD("tracer_name", "perf");
1261 static int ctf_writer__setup_clock(struct ctf_writer
*cw
)
1263 struct bt_ctf_clock
*clock
= cw
->clock
;
1265 bt_ctf_clock_set_description(clock
, "perf clock");
1267 #define SET(__n, __v) \
1269 if (bt_ctf_clock_set_##__n(clock, __v)) \
1273 SET(frequency
, 1000000000);
1277 SET(is_absolute
, 0);
1283 static struct bt_ctf_field_type
*create_int_type(int size
, bool sign
, bool hex
)
1285 struct bt_ctf_field_type
*type
;
1287 type
= bt_ctf_field_type_integer_create(size
);
1292 bt_ctf_field_type_integer_set_signed(type
, 1))
1296 bt_ctf_field_type_integer_set_base(type
, BT_CTF_INTEGER_BASE_HEXADECIMAL
))
1299 #if __BYTE_ORDER == __BIG_ENDIAN
1300 bt_ctf_field_type_set_byte_order(type
, BT_CTF_BYTE_ORDER_BIG_ENDIAN
);
1302 bt_ctf_field_type_set_byte_order(type
, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN
);
1305 pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1306 size
, sign
? "un" : "", hex
? "hex" : "");
1310 bt_ctf_field_type_put(type
);
1314 static void ctf_writer__cleanup_data(struct ctf_writer
*cw
)
1318 for (i
= 0; i
< ARRAY_SIZE(cw
->data
.array
); i
++)
1319 bt_ctf_field_type_put(cw
->data
.array
[i
]);
1322 static int ctf_writer__init_data(struct ctf_writer
*cw
)
1324 #define CREATE_INT_TYPE(type, size, sign, hex) \
1326 (type) = create_int_type(size, sign, hex); \
1331 CREATE_INT_TYPE(cw
->data
.s64
, 64, true, false);
1332 CREATE_INT_TYPE(cw
->data
.u64
, 64, false, false);
1333 CREATE_INT_TYPE(cw
->data
.s32
, 32, true, false);
1334 CREATE_INT_TYPE(cw
->data
.u32
, 32, false, false);
1335 CREATE_INT_TYPE(cw
->data
.u32_hex
, 32, false, true);
1336 CREATE_INT_TYPE(cw
->data
.u64_hex
, 64, false, true);
1338 cw
->data
.string
= bt_ctf_field_type_string_create();
1339 if (cw
->data
.string
)
1343 ctf_writer__cleanup_data(cw
);
1344 pr_err("Failed to create data types.\n");
1348 static void ctf_writer__cleanup(struct ctf_writer
*cw
)
1350 ctf_writer__cleanup_data(cw
);
1352 bt_ctf_clock_put(cw
->clock
);
1354 bt_ctf_stream_class_put(cw
->stream_class
);
1355 bt_ctf_writer_put(cw
->writer
);
1357 /* and NULL all the pointers */
1358 memset(cw
, 0, sizeof(*cw
));
1361 static int ctf_writer__init(struct ctf_writer
*cw
, const char *path
)
1363 struct bt_ctf_writer
*writer
;
1364 struct bt_ctf_stream_class
*stream_class
;
1365 struct bt_ctf_clock
*clock
;
1366 struct bt_ctf_field_type
*pkt_ctx_type
;
1370 writer
= bt_ctf_writer_create(path
);
1374 cw
->writer
= writer
;
1377 clock
= bt_ctf_clock_create("perf_clock");
1379 pr("Failed to create CTF clock.\n");
1385 if (ctf_writer__setup_clock(cw
)) {
1386 pr("Failed to setup CTF clock.\n");
1390 /* CTF stream class */
1391 stream_class
= bt_ctf_stream_class_create("perf_stream");
1392 if (!stream_class
) {
1393 pr("Failed to create CTF stream class.\n");
1397 cw
->stream_class
= stream_class
;
1399 /* CTF clock stream setup */
1400 if (bt_ctf_stream_class_set_clock(stream_class
, clock
)) {
1401 pr("Failed to assign CTF clock to stream class.\n");
1405 if (ctf_writer__init_data(cw
))
1408 /* Add cpu_id for packet context */
1409 pkt_ctx_type
= bt_ctf_stream_class_get_packet_context_type(stream_class
);
1413 ret
= bt_ctf_field_type_structure_add_field(pkt_ctx_type
, cw
->data
.u32
, "cpu_id");
1414 bt_ctf_field_type_put(pkt_ctx_type
);
1418 /* CTF clock writer setup */
1419 if (bt_ctf_writer_add_clock(writer
, clock
)) {
1420 pr("Failed to assign CTF clock to writer.\n");
1427 ctf_writer__cleanup(cw
);
1429 pr_err("Failed to setup CTF writer.\n");
1433 static int ctf_writer__flush_streams(struct ctf_writer
*cw
)
1437 for (cpu
= 0; cpu
< cw
->stream_cnt
&& !ret
; cpu
++)
1438 ret
= ctf_stream__flush(cw
->stream
[cpu
]);
1443 static int convert__config(const char *var
, const char *value
, void *cb
)
1445 struct convert
*c
= cb
;
1447 if (!strcmp(var
, "convert.queue-size"))
1448 return perf_config_u64(&c
->queue_size
, var
, value
);
1453 int bt_convert__perf2ctf(const char *input
, const char *path
,
1454 struct perf_data_convert_opts
*opts
)
1456 struct perf_session
*session
;
1457 struct perf_data_file file
= {
1459 .mode
= PERF_DATA_MODE_READ
,
1460 .force
= opts
->force
,
1462 struct convert c
= {
1464 .sample
= process_sample_event
,
1465 .mmap
= perf_event__process_mmap
,
1466 .mmap2
= perf_event__process_mmap2
,
1467 .comm
= perf_event__process_comm
,
1468 .exit
= perf_event__process_exit
,
1469 .fork
= perf_event__process_fork
,
1470 .lost
= perf_event__process_lost
,
1471 .tracing_data
= perf_event__process_tracing_data
,
1472 .build_id
= perf_event__process_build_id
,
1473 .namespaces
= perf_event__process_namespaces
,
1474 .ordered_events
= true,
1475 .ordering_requires_timestamps
= true,
1478 struct ctf_writer
*cw
= &c
.writer
;
1482 c
.tool
.comm
= process_comm_event
;
1483 c
.tool
.exit
= process_exit_event
;
1484 c
.tool
.fork
= process_fork_event
;
1487 err
= perf_config(convert__config
, &c
);
1492 if (ctf_writer__init(cw
, path
))
1496 /* perf.data session */
1497 session
= perf_session__new(&file
, 0, &c
.tool
);
1502 ordered_events__set_alloc_size(&session
->ordered_events
,
1506 /* CTF writer env/clock setup */
1507 if (ctf_writer__setup_env(cw
, session
))
1510 /* CTF events setup */
1511 if (setup_events(cw
, session
))
1514 if (opts
->all
&& setup_non_sample_events(cw
, session
))
1517 if (setup_streams(cw
, session
))
1520 err
= perf_session__process_events(session
);
1522 err
= ctf_writer__flush_streams(cw
);
1524 pr_err("Error during conversion.\n");
1527 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1531 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64
" samples",
1532 (double) c
.events_size
/ 1024.0 / 1024.0,
1535 if (!c
.non_sample_count
)
1536 fprintf(stderr
, ") ]\n");
1538 fprintf(stderr
, ", %" PRIu64
" non-samples) ]\n", c
.non_sample_count
);
1540 cleanup_events(session
);
1541 perf_session__delete(session
);
1542 ctf_writer__cleanup(cw
);
1547 perf_session__delete(session
);
1549 ctf_writer__cleanup(cw
);
1550 pr_err("Error during conversion setup.\n");