tools/perf/util/data-convert-bt.c
1 /*
2 * CTF writing support via babeltrace.
3 *
4 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
5 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
10 #include <linux/compiler.h>
11 #include <babeltrace/ctf-writer/writer.h>
12 #include <babeltrace/ctf-writer/clock.h>
13 #include <babeltrace/ctf-writer/stream.h>
14 #include <babeltrace/ctf-writer/event.h>
15 #include <babeltrace/ctf-writer/event-types.h>
16 #include <babeltrace/ctf-writer/event-fields.h>
17 #include <babeltrace/ctf-ir/utils.h>
18 #include <babeltrace/ctf/events.h>
19 #include <traceevent/event-parse.h>
20 #include "asm/bug.h"
21 #include "data-convert-bt.h"
22 #include "session.h"
23 #include "util.h"
24 #include "debug.h"
25 #include "tool.h"
26 #include "evlist.h"
27 #include "evsel.h"
28 #include "machine.h"
29 #include "config.h"
30
31 #define pr_N(n, fmt, ...) \
32 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
33
34 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
35 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
36
37 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
38
39 struct evsel_priv {
40 struct bt_ctf_event_class *event_class;
41 };
42
43 #define MAX_CPUS 4096
44
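/*
 * One CTF stream is kept per CPU seen in the perf.data file; 'count'
 * tracks how many events were appended since the last flush.
 */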
45 struct ctf_stream {
46 struct bt_ctf_stream *stream;
47 int cpu;
48 u32 count;
49 };
50
51 struct ctf_writer {
52 /* writer primitives */
53 struct bt_ctf_writer *writer;
54 struct ctf_stream **stream;
55 int stream_cnt;
56 struct bt_ctf_stream_class *stream_class;
57 struct bt_ctf_clock *clock;
58
59 /* data types */
60 union {
61 struct {
62 struct bt_ctf_field_type *s64;
63 struct bt_ctf_field_type *u64;
64 struct bt_ctf_field_type *s32;
65 struct bt_ctf_field_type *u32;
66 struct bt_ctf_field_type *string;
67 struct bt_ctf_field_type *u32_hex;
68 struct bt_ctf_field_type *u64_hex;
69 };
70 struct bt_ctf_field_type *array[6];
71 } data;
72 struct bt_ctf_event_class *comm_class;
73 struct bt_ctf_event_class *exit_class;
74 struct bt_ctf_event_class *fork_class;
75 };
76
77 struct convert {
78 struct perf_tool tool;
79 struct ctf_writer writer;
80
81 u64 events_size;
82 u64 events_count;
83 u64 non_sample_count;
84
85 /* Configured queue size for ordered events. */
86 u64 queue_size;
87 };
88
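/*
 * Create an integer field of the given type, store 'val' in it (honouring
 * the type's signedness) and attach it to the event payload under 'name'.
 */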
89 static int value_set(struct bt_ctf_field_type *type,
90 struct bt_ctf_event *event,
91 const char *name, u64 val)
92 {
93 struct bt_ctf_field *field;
94 bool sign = bt_ctf_field_type_integer_get_signed(type);
95 int ret;
96
97 field = bt_ctf_field_create(type);
98 if (!field) {
99 pr_err("failed to create a field %s\n", name);
100 return -1;
101 }
102
103 if (sign) {
104 ret = bt_ctf_field_signed_integer_set_value(field, val);
105 if (ret) {
106 pr_err("failed to set field value %s\n", name);
107 goto err;
108 }
109 } else {
110 ret = bt_ctf_field_unsigned_integer_set_value(field, val);
111 if (ret) {
112 pr_err("failed to set field value %s\n", name);
113 goto err;
114 }
115 }
116
117 ret = bt_ctf_event_set_payload(event, name, field);
118 if (ret) {
119 pr_err("failed to set payload %s\n", name);
120 goto err;
121 }
122
123 pr2(" SET [%s = %" PRIu64 "]\n", name, val);
124
125 err:
126 bt_ctf_field_put(field);
127 return ret;
128 }
129
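/*
 * Generate typed wrappers (value_set_s32, value_set_u64, value_set_u64_hex,
 * ...) that pick the matching field type from cw->data and defer to
 * value_set() above.
 */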
130 #define __FUNC_VALUE_SET(_name, _val_type) \
131 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \
132 struct bt_ctf_event *event, \
133 const char *name, \
134 _val_type val) \
135 { \
136 struct bt_ctf_field_type *type = cw->data._name; \
137 return value_set(type, event, name, (u64) val); \
138 }
139
140 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
141
142 FUNC_VALUE_SET(s32)
143 FUNC_VALUE_SET(u32)
144 FUNC_VALUE_SET(s64)
145 FUNC_VALUE_SET(u64)
146 __FUNC_VALUE_SET(u64_hex, u64)
147
148 static int string_set_value(struct bt_ctf_field *field, const char *string);
149 static __maybe_unused int
150 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
151 const char *name, const char *string)
152 {
153 struct bt_ctf_field_type *type = cw->data.string;
154 struct bt_ctf_field *field;
155 int ret = 0;
156
157 field = bt_ctf_field_create(type);
158 if (!field) {
159 pr_err("failed to create a field %s\n", name);
160 return -1;
161 }
162
163 ret = string_set_value(field, string);
164 if (ret) {
165 pr_err("failed to set value %s\n", name);
166 goto err_put_field;
167 }
168
169 ret = bt_ctf_event_set_payload(event, name, field);
170 if (ret)
171 pr_err("failed to set payload %s\n", name);
172
173 err_put_field:
174 bt_ctf_field_put(field);
175 return ret;
176 }
177
178 static struct bt_ctf_field_type*
179 get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
180 {
181 unsigned long flags = field->flags;
182
183 if (flags & FIELD_IS_STRING)
184 return cw->data.string;
185
186 if (!(flags & FIELD_IS_SIGNED)) {
187 /* unsigned longs are mostly pointers */
188 if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
189 return cw->data.u64_hex;
190 }
191
192 if (flags & FIELD_IS_SIGNED) {
193 if (field->size == 8)
194 return cw->data.s64;
195 else
196 return cw->data.s32;
197 }
198
199 if (field->size == 8)
200 return cw->data.u64;
201 else
202 return cw->data.u32;
203 }
204
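/* Sign-extend a value of 'size' bytes so it reads back as a 64-bit signed integer. */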
205 static unsigned long long adjust_signedness(unsigned long long value_int, int size)
206 {
207 unsigned long long value_mask;
208
209 /*
210 * value_mask = (1 << (size * 8 - 1)) - 1.
211 * Directly set value_mask for code readers.
212 */
213 switch (size) {
214 case 1:
215 value_mask = 0x7fULL;
216 break;
217 case 2:
218 value_mask = 0x7fffULL;
219 break;
220 case 4:
221 value_mask = 0x7fffffffULL;
222 break;
223 case 8:
224 /*
225 * For a 64-bit value, return it as is. There is no need
226 * to fill the high bits.
227 */
228 /* Fall through */
229 default:
230 /* BUG! */
231 return value_int;
232 }
233
234 /* If it is a positive value, don't adjust. */
235 if ((value_int & (~0ULL - value_mask)) == 0)
236 return value_int;
237
238 /* Fill the upper bits of value_int with ones to make it a negative long long. */
239 return (value_int & value_mask) | ~value_mask;
240 }
241
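/*
 * Store a string into a CTF string field, escaping any non-printable bytes
 * as "\xNN"; if the escape buffer cannot be allocated, a placeholder string
 * is written instead.
 */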
242 static int string_set_value(struct bt_ctf_field *field, const char *string)
243 {
244 char *buffer = NULL;
245 size_t len = strlen(string), i, p;
246 int err;
247
248 for (i = p = 0; i < len; i++, p++) {
249 if (isprint(string[i])) {
250 if (!buffer)
251 continue;
252 buffer[p] = string[i];
253 } else {
254 char numstr[5];
255
256 snprintf(numstr, sizeof(numstr), "\\x%02x",
257 (unsigned int)(string[i]) & 0xff);
258
259 if (!buffer) {
260 buffer = zalloc(i + (len - i) * 4 + 2);
261 if (!buffer) {
262 pr_err("failed to set unprintable string '%s'\n", string);
263 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
264 }
265 if (i > 0)
266 strncpy(buffer, string, i);
267 }
268 strncat(buffer + p, numstr, 4);
269 p += 3;
270 }
271 }
272
273 if (!buffer)
274 return bt_ctf_field_string_set_value(field, string);
275 err = bt_ctf_field_string_set_value(field, buffer);
276 free(buffer);
277 return err;
278 }
279
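/*
 * Write one tracepoint field into the CTF event payload.  Dynamic fields
 * carry their real length and offset packed into a 32-bit value, arrays are
 * emitted element by element, strings are escaped via string_set_value(),
 * and narrow signed integers are sign-extended first.
 */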
280 static int add_tracepoint_field_value(struct ctf_writer *cw,
281 struct bt_ctf_event_class *event_class,
282 struct bt_ctf_event *event,
283 struct perf_sample *sample,
284 struct format_field *fmtf)
285 {
286 struct bt_ctf_field_type *type;
287 struct bt_ctf_field *array_field;
288 struct bt_ctf_field *field;
289 const char *name = fmtf->name;
290 void *data = sample->raw_data;
291 unsigned long flags = fmtf->flags;
292 unsigned int n_items;
293 unsigned int i;
294 unsigned int offset;
295 unsigned int len;
296 int ret;
297
298 name = fmtf->alias;
299 offset = fmtf->offset;
300 len = fmtf->size;
301 if (flags & FIELD_IS_STRING)
302 flags &= ~FIELD_IS_ARRAY;
303
304 if (flags & FIELD_IS_DYNAMIC) {
305 unsigned long long tmp_val;
306
307 tmp_val = pevent_read_number(fmtf->event->pevent,
308 data + offset, len);
309 offset = tmp_val;
310 len = offset >> 16;
311 offset &= 0xffff;
312 }
313
314 if (flags & FIELD_IS_ARRAY) {
315
316 type = bt_ctf_event_class_get_field_by_name(
317 event_class, name);
318 array_field = bt_ctf_field_create(type);
319 bt_ctf_field_type_put(type);
320 if (!array_field) {
321 pr_err("Failed to create array type %s\n", name);
322 return -1;
323 }
324
325 len = fmtf->size / fmtf->arraylen;
326 n_items = fmtf->arraylen;
327 } else {
328 n_items = 1;
329 array_field = NULL;
330 }
331
332 type = get_tracepoint_field_type(cw, fmtf);
333
334 for (i = 0; i < n_items; i++) {
335 if (flags & FIELD_IS_ARRAY)
336 field = bt_ctf_field_array_get_field(array_field, i);
337 else
338 field = bt_ctf_field_create(type);
339
340 if (!field) {
341 pr_err("failed to create a field %s\n", name);
342 return -1;
343 }
344
345 if (flags & FIELD_IS_STRING)
346 ret = string_set_value(field, data + offset + i * len);
347 else {
348 unsigned long long value_int;
349
350 value_int = pevent_read_number(
351 fmtf->event->pevent,
352 data + offset + i * len, len);
353
354 if (!(flags & FIELD_IS_SIGNED))
355 ret = bt_ctf_field_unsigned_integer_set_value(
356 field, value_int);
357 else
358 ret = bt_ctf_field_signed_integer_set_value(
359 field, adjust_signedness(value_int, len));
360 }
361
362 if (ret) {
363 pr_err("failed to set file value %s\n", name);
364 goto err_put_field;
365 }
366 if (!(flags & FIELD_IS_ARRAY)) {
367 ret = bt_ctf_event_set_payload(event, name, field);
368 if (ret) {
369 pr_err("failed to set payload %s\n", name);
370 goto err_put_field;
371 }
372 }
373 bt_ctf_field_put(field);
374 }
375 if (flags & FIELD_IS_ARRAY) {
376 ret = bt_ctf_event_set_payload(event, name, array_field);
377 if (ret) {
378 pr_err("Failed add payload array %s\n", name);
379 return -1;
380 }
381 bt_ctf_field_put(array_field);
382 }
383 return 0;
384
385 err_put_field:
386 bt_ctf_field_put(field);
387 return -1;
388 }
389
390 static int add_tracepoint_fields_values(struct ctf_writer *cw,
391 struct bt_ctf_event_class *event_class,
392 struct bt_ctf_event *event,
393 struct format_field *fields,
394 struct perf_sample *sample)
395 {
396 struct format_field *field;
397 int ret;
398
399 for (field = fields; field; field = field->next) {
400 ret = add_tracepoint_field_value(cw, event_class, event, sample,
401 field);
402 if (ret)
403 return -1;
404 }
405 return 0;
406 }
407
408 static int add_tracepoint_values(struct ctf_writer *cw,
409 struct bt_ctf_event_class *event_class,
410 struct bt_ctf_event *event,
411 struct perf_evsel *evsel,
412 struct perf_sample *sample)
413 {
414 struct format_field *common_fields = evsel->tp_format->format.common_fields;
415 struct format_field *fields = evsel->tp_format->format.fields;
416 int ret;
417
418 ret = add_tracepoint_fields_values(cw, event_class, event,
419 common_fields, sample);
420 if (!ret)
421 ret = add_tracepoint_fields_values(cw, event_class, event,
422 fields, sample);
423
424 return ret;
425 }
426
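/*
 * BPF output events are written as a 'raw_len' word count followed by a
 * 'raw_data' sequence of u32 words copied from the sample's raw data.
 */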
427 static int
428 add_bpf_output_values(struct bt_ctf_event_class *event_class,
429 struct bt_ctf_event *event,
430 struct perf_sample *sample)
431 {
432 struct bt_ctf_field_type *len_type, *seq_type;
433 struct bt_ctf_field *len_field, *seq_field;
434 unsigned int raw_size = sample->raw_size;
435 unsigned int nr_elements = raw_size / sizeof(u32);
436 unsigned int i;
437 int ret;
438
439 if (nr_elements * sizeof(u32) != raw_size)
440 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n",
441 raw_size, raw_size - nr_elements * sizeof(u32));
442
443 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
444 len_field = bt_ctf_field_create(len_type);
445 if (!len_field) {
446 pr_err("failed to create 'raw_len' for bpf output event\n");
447 ret = -1;
448 goto put_len_type;
449 }
450
451 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
452 if (ret) {
453 pr_err("failed to set field value for raw_len\n");
454 goto put_len_field;
455 }
456 ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
457 if (ret) {
458 pr_err("failed to set payload to raw_len\n");
459 goto put_len_field;
460 }
461
462 seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
463 seq_field = bt_ctf_field_create(seq_type);
464 if (!seq_field) {
465 pr_err("failed to create 'raw_data' for bpf output event\n");
466 ret = -1;
467 goto put_seq_type;
468 }
469
470 ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
471 if (ret) {
472 pr_err("failed to set length of 'raw_data'\n");
473 goto put_seq_field;
474 }
475
476 for (i = 0; i < nr_elements; i++) {
477 struct bt_ctf_field *elem_field =
478 bt_ctf_field_sequence_get_field(seq_field, i);
479
480 ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
481 ((u32 *)(sample->raw_data))[i]);
482
483 bt_ctf_field_put(elem_field);
484 if (ret) {
485 pr_err("failed to set raw_data[%d]\n", i);
486 goto put_seq_field;
487 }
488 }
489
490 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
491 if (ret)
492 pr_err("failed to set payload for raw_data\n");
493
494 put_seq_field:
495 bt_ctf_field_put(seq_field);
496 put_seq_type:
497 bt_ctf_field_type_put(seq_type);
498 put_len_field:
499 bt_ctf_field_put(len_field);
500 put_len_type:
501 bt_ctf_field_type_put(len_type);
502 return ret;
503 }
504
505 static int add_generic_values(struct ctf_writer *cw,
506 struct bt_ctf_event *event,
507 struct perf_evsel *evsel,
508 struct perf_sample *sample)
509 {
510 u64 type = evsel->attr.sample_type;
511 int ret;
512
513 /*
514 * missing:
515 * PERF_SAMPLE_TIME - not needed as we have it in
516 * ctf event header
517 * PERF_SAMPLE_READ - TODO
518 * PERF_SAMPLE_CALLCHAIN - TODO
519 * PERF_SAMPLE_RAW - tracepoint fields are handled separately
520 * PERF_SAMPLE_BRANCH_STACK - TODO
521 * PERF_SAMPLE_REGS_USER - TODO
522 * PERF_SAMPLE_STACK_USER - TODO
523 */
524
525 if (type & PERF_SAMPLE_IP) {
526 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
527 if (ret)
528 return -1;
529 }
530
531 if (type & PERF_SAMPLE_TID) {
532 ret = value_set_s32(cw, event, "perf_tid", sample->tid);
533 if (ret)
534 return -1;
535
536 ret = value_set_s32(cw, event, "perf_pid", sample->pid);
537 if (ret)
538 return -1;
539 }
540
541 if ((type & PERF_SAMPLE_ID) ||
542 (type & PERF_SAMPLE_IDENTIFIER)) {
543 ret = value_set_u64(cw, event, "perf_id", sample->id);
544 if (ret)
545 return -1;
546 }
547
548 if (type & PERF_SAMPLE_STREAM_ID) {
549 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
550 if (ret)
551 return -1;
552 }
553
554 if (type & PERF_SAMPLE_PERIOD) {
555 ret = value_set_u64(cw, event, "perf_period", sample->period);
556 if (ret)
557 return -1;
558 }
559
560 if (type & PERF_SAMPLE_WEIGHT) {
561 ret = value_set_u64(cw, event, "perf_weight", sample->weight);
562 if (ret)
563 return -1;
564 }
565
566 if (type & PERF_SAMPLE_DATA_SRC) {
567 ret = value_set_u64(cw, event, "perf_data_src",
568 sample->data_src);
569 if (ret)
570 return -1;
571 }
572
573 if (type & PERF_SAMPLE_TRANSACTION) {
574 ret = value_set_u64(cw, event, "perf_transaction",
575 sample->transaction);
576 if (ret)
577 return -1;
578 }
579
580 return 0;
581 }
582
583 static int ctf_stream__flush(struct ctf_stream *cs)
584 {
585 int err = 0;
586
587 if (cs) {
588 err = bt_ctf_stream_flush(cs->stream);
589 if (err)
590 pr_err("CTF stream %d flush failed\n", cs->cpu);
591
592 pr("Flush stream for cpu %d (%u samples)\n",
593 cs->cpu, cs->count);
594
595 cs->count = 0;
596 }
597
598 return err;
599 }
600
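/*
 * Allocate the per-CPU stream and record the CPU number in the 'cpu_id'
 * field of the stream's packet context.
 */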
601 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
602 {
603 struct ctf_stream *cs;
604 struct bt_ctf_field *pkt_ctx = NULL;
605 struct bt_ctf_field *cpu_field = NULL;
606 struct bt_ctf_stream *stream = NULL;
607 int ret;
608
609 cs = zalloc(sizeof(*cs));
610 if (!cs) {
611 pr_err("Failed to allocate ctf stream\n");
612 return NULL;
613 }
614
615 stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
616 if (!stream) {
617 pr_err("Failed to create CTF stream\n");
618 goto out;
619 }
620
621 pkt_ctx = bt_ctf_stream_get_packet_context(stream);
622 if (!pkt_ctx) {
623 pr_err("Failed to obtain packet context\n");
624 goto out;
625 }
626
627 cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
628 bt_ctf_field_put(pkt_ctx);
629 if (!cpu_field) {
630 pr_err("Failed to obtain cpu field\n");
631 goto out;
632 }
633
634 ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
635 if (ret) {
636 pr_err("Failed to update CPU number\n");
637 goto out;
638 }
639
640 bt_ctf_field_put(cpu_field);
641
642 cs->cpu = cpu;
643 cs->stream = stream;
644 return cs;
645
646 out:
647 if (cpu_field)
648 bt_ctf_field_put(cpu_field);
649 if (stream)
650 bt_ctf_stream_put(stream);
651
652 free(cs);
653 return NULL;
654 }
655
656 static void ctf_stream__delete(struct ctf_stream *cs)
657 {
658 if (cs) {
659 bt_ctf_stream_put(cs->stream);
660 free(cs);
661 }
662 }
663
664 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
665 {
666 struct ctf_stream *cs = cw->stream[cpu];
667
668 if (!cs) {
669 cs = ctf_stream__create(cw, cpu);
670 cw->stream[cpu] = cs;
671 }
672
673 return cs;
674 }
675
676 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
677 struct perf_evsel *evsel)
678 {
679 int cpu = 0;
680
681 if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
682 cpu = sample->cpu;
683
684 if (cpu >= cw->stream_cnt) {
685 pr_err("Event was recorded for CPU %d, limit is at %d.\n",
686 cpu, cw->stream_cnt);
687 cpu = 0;
688 }
689
690 return cpu;
691 }
692
693 #define STREAM_FLUSH_COUNT 100000
694
695 /*
696 * Currently we have no other way to determine the
697 * time for the stream flush other than to keep track
698 * of the number of events and check it against a
699 * threshold.
700 */
701 static bool is_flush_needed(struct ctf_stream *cs)
702 {
703 return cs->count >= STREAM_FLUSH_COUNT;
704 }
705
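/*
 * Convert one perf sample into a CTF event: set the clock from the sample
 * time, fill the generic perf_* fields, add tracepoint or BPF output
 * payload where applicable, and append the event to the per-CPU stream,
 * flushing it first when the event count threshold is reached.
 */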
706 static int process_sample_event(struct perf_tool *tool,
707 union perf_event *_event,
708 struct perf_sample *sample,
709 struct perf_evsel *evsel,
710 struct machine *machine __maybe_unused)
711 {
712 struct convert *c = container_of(tool, struct convert, tool);
713 struct evsel_priv *priv = evsel->priv;
714 struct ctf_writer *cw = &c->writer;
715 struct ctf_stream *cs;
716 struct bt_ctf_event_class *event_class;
717 struct bt_ctf_event *event;
718 int ret;
719
720 if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
721 return 0;
722
723 event_class = priv->event_class;
724
725 /* update stats */
726 c->events_count++;
727 c->events_size += _event->header.size;
728
729 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
730
731 event = bt_ctf_event_create(event_class);
732 if (!event) {
733 pr_err("Failed to create an CTF event\n");
734 return -1;
735 }
736
737 bt_ctf_clock_set_time(cw->clock, sample->time);
738
739 ret = add_generic_values(cw, event, evsel, sample);
740 if (ret)
741 return -1;
742
743 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
744 ret = add_tracepoint_values(cw, event_class, event,
745 evsel, sample);
746 if (ret)
747 return -1;
748 }
749
750 if (perf_evsel__is_bpf_output(evsel)) {
751 ret = add_bpf_output_values(event_class, event, sample);
752 if (ret)
753 return -1;
754 }
755
756 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
757 if (cs) {
758 if (is_flush_needed(cs))
759 ctf_stream__flush(cs);
760
761 cs->count++;
762 bt_ctf_stream_append_event(cs->stream, event);
763 }
764
765 bt_ctf_event_put(event);
766 return cs ? 0 : -1;
767 }
768
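/*
 * Handlers for the COMM/FORK/EXIT side-band events, generated below and
 * wired up only when conversion of non-sample events is requested
 * (opts->all).  EXIT events reuse the fork_event payload layout, hence the
 * 'fork' accessors in the exit handler.
 */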
769 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \
770 do { \
771 ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
772 if (ret) \
773 return -1; \
774 } while(0)
775
776 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) \
777 static int process_##_name##_event(struct perf_tool *tool, \
778 union perf_event *_event, \
779 struct perf_sample *sample, \
780 struct machine *machine) \
781 { \
782 struct convert *c = container_of(tool, struct convert, tool);\
783 struct ctf_writer *cw = &c->writer; \
784 struct bt_ctf_event_class *event_class = cw->_name##_class;\
785 struct bt_ctf_event *event; \
786 struct ctf_stream *cs; \
787 int ret; \
788 \
789 c->non_sample_count++; \
790 c->events_size += _event->header.size; \
791 event = bt_ctf_event_create(event_class); \
792 if (!event) { \
793 pr_err("Failed to create an CTF event\n"); \
794 return -1; \
795 } \
796 \
797 bt_ctf_clock_set_time(cw->clock, sample->time); \
798 body \
799 cs = ctf_stream(cw, 0); \
800 if (cs) { \
801 if (is_flush_needed(cs)) \
802 ctf_stream__flush(cs); \
803 \
804 cs->count++; \
805 bt_ctf_stream_append_event(cs->stream, event); \
806 } \
807 bt_ctf_event_put(event); \
808 \
809 return perf_event__process_##_name(tool, _event, sample, machine);\
810 }
811
812 __FUNC_PROCESS_NON_SAMPLE(comm,
813 __NON_SAMPLE_SET_FIELD(comm, u32, pid);
814 __NON_SAMPLE_SET_FIELD(comm, u32, tid);
815 __NON_SAMPLE_SET_FIELD(comm, string, comm);
816 )
817 __FUNC_PROCESS_NON_SAMPLE(fork,
818 __NON_SAMPLE_SET_FIELD(fork, u32, pid);
819 __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
820 __NON_SAMPLE_SET_FIELD(fork, u32, tid);
821 __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
822 __NON_SAMPLE_SET_FIELD(fork, u64, time);
823 )
824
825 __FUNC_PROCESS_NON_SAMPLE(exit,
826 __NON_SAMPLE_SET_FIELD(fork, u32, pid);
827 __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
828 __NON_SAMPLE_SET_FIELD(fork, u32, tid);
829 __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
830 __NON_SAMPLE_SET_FIELD(fork, u64, time);
831 )
832 #undef __NON_SAMPLE_SET_FIELD
833 #undef __FUNC_PROCESS_NON_SAMPLE
834
835 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
836 static char *change_name(char *name, char *orig_name, int dup)
837 {
838 char *new_name = NULL;
839 size_t len;
840
841 if (!name)
842 name = orig_name;
843
844 if (dup >= 10)
845 goto out;
846 /*
847 * Add '_' prefix to a potential keyword. According to
848 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
849 * further CTF spec updates may require us to use '$'.
850 */
851 if (dup < 0)
852 len = strlen(name) + sizeof("_");
853 else
854 len = strlen(orig_name) + sizeof("_dupl_X");
855
856 new_name = malloc(len);
857 if (!new_name)
858 goto out;
859
860 if (dup < 0)
861 snprintf(new_name, len, "_%s", name);
862 else
863 snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
864
865 out:
866 if (name != orig_name)
867 free(name);
868 return new_name;
869 }
870
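/*
 * Add a tracepoint field to the event class under a valid, unique CTF
 * identifier: keywords get a '_' prefix and clashing names get a _dupl_N
 * suffix; the chosen name is cached in field->alias.
 */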
871 static int event_class_add_field(struct bt_ctf_event_class *event_class,
872 struct bt_ctf_field_type *type,
873 struct format_field *field)
874 {
875 struct bt_ctf_field_type *t = NULL;
876 char *name;
877 int dup = 1;
878 int ret;
879
880 /* alias was already assigned */
881 if (field->alias != field->name)
882 return bt_ctf_event_class_add_field(event_class, type,
883 (char *)field->alias);
884
885 name = field->name;
886
887 /* If 'name' is a keyword, add prefix. */
888 if (bt_ctf_validate_identifier(name))
889 name = change_name(name, field->name, -1);
890
891 if (!name) {
892 pr_err("Failed to fix invalid identifier.");
893 return -1;
894 }
895 while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
896 bt_ctf_field_type_put(t);
897 name = change_name(name, field->name, dup++);
898 if (!name) {
899 pr_err("Failed to create dup name for '%s'\n", field->name);
900 return -1;
901 }
902 }
903
904 ret = bt_ctf_event_class_add_field(event_class, type, name);
905 if (!ret)
906 field->alias = name;
907
908 return ret;
909 }
910
911 static int add_tracepoint_fields_types(struct ctf_writer *cw,
912 struct format_field *fields,
913 struct bt_ctf_event_class *event_class)
914 {
915 struct format_field *field;
916 int ret;
917
918 for (field = fields; field; field = field->next) {
919 struct bt_ctf_field_type *type;
920 unsigned long flags = field->flags;
921
922 pr2(" field '%s'\n", field->name);
923
924 type = get_tracepoint_field_type(cw, field);
925 if (!type)
926 return -1;
927
928 /*
929 * A string is an array of chars. For this we use the string
930 * type and don't care that it is an array. What we don't
931 * support is an array of strings.
932 */
933 if (flags & FIELD_IS_STRING)
934 flags &= ~FIELD_IS_ARRAY;
935
936 if (flags & FIELD_IS_ARRAY)
937 type = bt_ctf_field_type_array_create(type, field->arraylen);
938
939 ret = event_class_add_field(event_class, type, field);
940
941 if (flags & FIELD_IS_ARRAY)
942 bt_ctf_field_type_put(type);
943
944 if (ret) {
945 pr_err("Failed to add field '%s': %d\n",
946 field->name, ret);
947 return -1;
948 }
949 }
950
951 return 0;
952 }
953
954 static int add_tracepoint_types(struct ctf_writer *cw,
955 struct perf_evsel *evsel,
956 struct bt_ctf_event_class *class)
957 {
958 struct format_field *common_fields = evsel->tp_format->format.common_fields;
959 struct format_field *fields = evsel->tp_format->format.fields;
960 int ret;
961
962 ret = add_tracepoint_fields_types(cw, common_fields, class);
963 if (!ret)
964 ret = add_tracepoint_fields_types(cw, fields, class);
965
966 return ret;
967 }
968
969 static int add_bpf_output_types(struct ctf_writer *cw,
970 struct bt_ctf_event_class *class)
971 {
972 struct bt_ctf_field_type *len_type = cw->data.u32;
973 struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
974 struct bt_ctf_field_type *seq_type;
975 int ret;
976
977 ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
978 if (ret)
979 return ret;
980
981 seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
982 if (!seq_type)
983 return -1;
984
985 return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
986 }
987
988 static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
989 struct bt_ctf_event_class *event_class)
990 {
991 u64 type = evsel->attr.sample_type;
992
993 /*
994 * missing:
995 * PERF_SAMPLE_TIME - not needed as we have it in
996 * ctf event header
997 * PERF_SAMPLE_READ - TODO
998 * PERF_SAMPLE_CALLCHAIN - TODO
999 * PERF_SAMPLE_RAW - tracepoint fields and BPF output
1000 * are handled separately
1001 * PERF_SAMPLE_BRANCH_STACK - TODO
1002 * PERF_SAMPLE_REGS_USER - TODO
1003 * PERF_SAMPLE_STACK_USER - TODO
1004 */
1005
1006 #define ADD_FIELD(cl, t, n) \
1007 do { \
1008 pr2(" field '%s'\n", n); \
1009 if (bt_ctf_event_class_add_field(cl, t, n)) { \
1010 pr_err("Failed to add field '%s';\n", n); \
1011 return -1; \
1012 } \
1013 } while (0)
1014
1015 if (type & PERF_SAMPLE_IP)
1016 ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1017
1018 if (type & PERF_SAMPLE_TID) {
1019 ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1020 ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1021 }
1022
1023 if ((type & PERF_SAMPLE_ID) ||
1024 (type & PERF_SAMPLE_IDENTIFIER))
1025 ADD_FIELD(event_class, cw->data.u64, "perf_id");
1026
1027 if (type & PERF_SAMPLE_STREAM_ID)
1028 ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1029
1030 if (type & PERF_SAMPLE_PERIOD)
1031 ADD_FIELD(event_class, cw->data.u64, "perf_period");
1032
1033 if (type & PERF_SAMPLE_WEIGHT)
1034 ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1035
1036 if (type & PERF_SAMPLE_DATA_SRC)
1037 ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1038
1039 if (type & PERF_SAMPLE_TRANSACTION)
1040 ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1041
1042 #undef ADD_FIELD
1043 return 0;
1044 }
1045
1046 static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
1047 {
1048 struct bt_ctf_event_class *event_class;
1049 struct evsel_priv *priv;
1050 const char *name = perf_evsel__name(evsel);
1051 int ret;
1052
1053 pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);
1054
1055 event_class = bt_ctf_event_class_create(name);
1056 if (!event_class)
1057 return -1;
1058
1059 ret = add_generic_types(cw, evsel, event_class);
1060 if (ret)
1061 goto err;
1062
1063 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
1064 ret = add_tracepoint_types(cw, evsel, event_class);
1065 if (ret)
1066 goto err;
1067 }
1068
1069 if (perf_evsel__is_bpf_output(evsel)) {
1070 ret = add_bpf_output_types(cw, event_class);
1071 if (ret)
1072 goto err;
1073 }
1074
1075 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1076 if (ret) {
1077 pr("Failed to add event class into stream.\n");
1078 goto err;
1079 }
1080
1081 priv = malloc(sizeof(*priv));
1082 if (!priv)
1083 goto err;
1084
1085 priv->event_class = event_class;
1086 evsel->priv = priv;
1087 return 0;
1088
1089 err:
1090 bt_ctf_event_class_put(event_class);
1091 pr_err("Failed to add event '%s'.\n", name);
1092 return -1;
1093 }
1094
1095 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1096 {
1097 struct perf_evlist *evlist = session->evlist;
1098 struct perf_evsel *evsel;
1099 int ret;
1100
1101 evlist__for_each_entry(evlist, evsel) {
1102 ret = add_event(cw, evsel);
1103 if (ret)
1104 return ret;
1105 }
1106 return 0;
1107 }
1108
1109 #define __NON_SAMPLE_ADD_FIELD(t, n) \
1110 do { \
1111 pr2(" field '%s'\n", #n); \
1112 if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1113 pr_err("Failed to add field '%s';\n", #n);\
1114 return -1; \
1115 } \
1116 } while(0)
1117
1118 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \
1119 static int add_##_name##_event(struct ctf_writer *cw) \
1120 { \
1121 struct bt_ctf_event_class *event_class; \
1122 int ret; \
1123 \
1124 pr("Adding "#_name" event\n"); \
1125 event_class = bt_ctf_event_class_create("perf_" #_name);\
1126 if (!event_class) \
1127 return -1; \
1128 body \
1129 \
1130 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1131 if (ret) { \
1132 pr("Failed to add event class '"#_name"' into stream.\n");\
1133 return ret; \
1134 } \
1135 \
1136 cw->_name##_class = event_class; \
1137 bt_ctf_event_class_put(event_class); \
1138 return 0; \
1139 }
1140
1141 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1142 __NON_SAMPLE_ADD_FIELD(u32, pid);
1143 __NON_SAMPLE_ADD_FIELD(u32, tid);
1144 __NON_SAMPLE_ADD_FIELD(string, comm);
1145 )
1146
1147 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1148 __NON_SAMPLE_ADD_FIELD(u32, pid);
1149 __NON_SAMPLE_ADD_FIELD(u32, ppid);
1150 __NON_SAMPLE_ADD_FIELD(u32, tid);
1151 __NON_SAMPLE_ADD_FIELD(u32, ptid);
1152 __NON_SAMPLE_ADD_FIELD(u64, time);
1153 )
1154
1155 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1156 __NON_SAMPLE_ADD_FIELD(u32, pid);
1157 __NON_SAMPLE_ADD_FIELD(u32, ppid);
1158 __NON_SAMPLE_ADD_FIELD(u32, tid);
1159 __NON_SAMPLE_ADD_FIELD(u32, ptid);
1160 __NON_SAMPLE_ADD_FIELD(u64, time);
1161 )
1162
1163 #undef __NON_SAMPLE_ADD_FIELD
1164 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1165
1166 static int setup_non_sample_events(struct ctf_writer *cw,
1167 struct perf_session *session __maybe_unused)
1168 {
1169 int ret;
1170
1171 ret = add_comm_event(cw);
1172 if (ret)
1173 return ret;
1174 ret = add_exit_event(cw);
1175 if (ret)
1176 return ret;
1177 ret = add_fork_event(cw);
1178 if (ret)
1179 return ret;
1180 return 0;
1181 }
1182
1183 static void cleanup_events(struct perf_session *session)
1184 {
1185 struct perf_evlist *evlist = session->evlist;
1186 struct perf_evsel *evsel;
1187
1188 evlist__for_each_entry(evlist, evsel) {
1189 struct evsel_priv *priv;
1190
1191 priv = evsel->priv;
1192 bt_ctf_event_class_put(priv->event_class);
1193 zfree(&evsel->priv);
1194 }
1195
1196 perf_evlist__delete(evlist);
1197 session->evlist = NULL;
1198 }
1199
1200 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1201 {
1202 struct ctf_stream **stream;
1203 struct perf_header *ph = &session->header;
1204 int ncpus;
1205
1206 /*
1207 * Try to get the number of cpus used in the data file,
1208 * if not present, fall back to MAX_CPUS.
1209 */
1210 ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1211
1212 stream = zalloc(sizeof(*stream) * ncpus);
1213 if (!stream) {
1214 pr_err("Failed to allocate streams.\n");
1215 return -ENOMEM;
1216 }
1217
1218 cw->stream = stream;
1219 cw->stream_cnt = ncpus;
1220 return 0;
1221 }
1222
1223 static void free_streams(struct ctf_writer *cw)
1224 {
1225 int cpu;
1226
1227 for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1228 ctf_stream__delete(cw->stream[cpu]);
1229
1230 free(cw->stream);
1231 }
1232
1233 static int ctf_writer__setup_env(struct ctf_writer *cw,
1234 struct perf_session *session)
1235 {
1236 struct perf_header *header = &session->header;
1237 struct bt_ctf_writer *writer = cw->writer;
1238
1239 #define ADD(__n, __v) \
1240 do { \
1241 if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
1242 return -1; \
1243 } while (0)
1244
1245 ADD("host", header->env.hostname);
1246 ADD("sysname", "Linux");
1247 ADD("release", header->env.os_release);
1248 ADD("version", header->env.version);
1249 ADD("machine", header->env.arch);
1250 ADD("domain", "kernel");
1251 ADD("tracer_name", "perf");
1252
1253 #undef ADD
1254 return 0;
1255 }
1256
1257 static int ctf_writer__setup_clock(struct ctf_writer *cw)
1258 {
1259 struct bt_ctf_clock *clock = cw->clock;
1260
1261 bt_ctf_clock_set_description(clock, "perf clock");
1262
1263 #define SET(__n, __v) \
1264 do { \
1265 if (bt_ctf_clock_set_##__n(clock, __v)) \
1266 return -1; \
1267 } while (0)
1268
1269 SET(frequency, 1000000000);
1270 SET(offset_s, 0);
1271 SET(offset, 0);
1272 SET(precision, 10);
1273 SET(is_absolute, 0);
1274
1275 #undef SET
1276 return 0;
1277 }
1278
1279 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1280 {
1281 struct bt_ctf_field_type *type;
1282
1283 type = bt_ctf_field_type_integer_create(size);
1284 if (!type)
1285 return NULL;
1286
1287 if (sign &&
1288 bt_ctf_field_type_integer_set_signed(type, 1))
1289 goto err;
1290
1291 if (hex &&
1292 bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1293 goto err;
1294
1295 #if __BYTE_ORDER == __BIG_ENDIAN
1296 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1297 #else
1298 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1299 #endif
1300
1301 pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1302 size, sign ? "un" : "", hex ? "hex" : "");
1303 return type;
1304
1305 err:
1306 bt_ctf_field_type_put(type);
1307 return NULL;
1308 }
1309
1310 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1311 {
1312 unsigned int i;
1313
1314 for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1315 bt_ctf_field_type_put(cw->data.array[i]);
1316 }
1317
1318 static int ctf_writer__init_data(struct ctf_writer *cw)
1319 {
1320 #define CREATE_INT_TYPE(type, size, sign, hex) \
1321 do { \
1322 (type) = create_int_type(size, sign, hex); \
1323 if (!(type)) \
1324 goto err; \
1325 } while (0)
1326
1327 CREATE_INT_TYPE(cw->data.s64, 64, true, false);
1328 CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1329 CREATE_INT_TYPE(cw->data.s32, 32, true, false);
1330 CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1331 CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1332 CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1333
1334 cw->data.string = bt_ctf_field_type_string_create();
1335 if (cw->data.string)
1336 return 0;
1337
1338 err:
1339 ctf_writer__cleanup_data(cw);
1340 pr_err("Failed to create data types.\n");
1341 return -1;
1342 }
1343
1344 static void ctf_writer__cleanup(struct ctf_writer *cw)
1345 {
1346 ctf_writer__cleanup_data(cw);
1347
1348 bt_ctf_clock_put(cw->clock);
1349 free_streams(cw);
1350 bt_ctf_stream_class_put(cw->stream_class);
1351 bt_ctf_writer_put(cw->writer);
1352
1353 /* and NULL all the pointers */
1354 memset(cw, 0, sizeof(*cw));
1355 }
1356
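/*
 * Set up the writer in order: output directory, clock, stream class, the
 * shared integer/string field types, the 'cpu_id' packet context field and
 * finally attach the clock to the writer.
 */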
1357 static int ctf_writer__init(struct ctf_writer *cw, const char *path)
1358 {
1359 struct bt_ctf_writer *writer;
1360 struct bt_ctf_stream_class *stream_class;
1361 struct bt_ctf_clock *clock;
1362 struct bt_ctf_field_type *pkt_ctx_type;
1363 int ret;
1364
1365 /* CTF writer */
1366 writer = bt_ctf_writer_create(path);
1367 if (!writer)
1368 goto err;
1369
1370 cw->writer = writer;
1371
1372 /* CTF clock */
1373 clock = bt_ctf_clock_create("perf_clock");
1374 if (!clock) {
1375 pr("Failed to create CTF clock.\n");
1376 goto err_cleanup;
1377 }
1378
1379 cw->clock = clock;
1380
1381 if (ctf_writer__setup_clock(cw)) {
1382 pr("Failed to setup CTF clock.\n");
1383 goto err_cleanup;
1384 }
1385
1386 /* CTF stream class */
1387 stream_class = bt_ctf_stream_class_create("perf_stream");
1388 if (!stream_class) {
1389 pr("Failed to create CTF stream class.\n");
1390 goto err_cleanup;
1391 }
1392
1393 cw->stream_class = stream_class;
1394
1395 /* CTF clock stream setup */
1396 if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1397 pr("Failed to assign CTF clock to stream class.\n");
1398 goto err_cleanup;
1399 }
1400
1401 if (ctf_writer__init_data(cw))
1402 goto err_cleanup;
1403
1404 /* Add cpu_id for packet context */
1405 pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1406 if (!pkt_ctx_type)
1407 goto err_cleanup;
1408
1409 ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1410 bt_ctf_field_type_put(pkt_ctx_type);
1411 if (ret)
1412 goto err_cleanup;
1413
1414 /* CTF clock writer setup */
1415 if (bt_ctf_writer_add_clock(writer, clock)) {
1416 pr("Failed to assign CTF clock to writer.\n");
1417 goto err_cleanup;
1418 }
1419
1420 return 0;
1421
1422 err_cleanup:
1423 ctf_writer__cleanup(cw);
1424 err:
1425 pr_err("Failed to setup CTF writer.\n");
1426 return -1;
1427 }
1428
1429 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1430 {
1431 int cpu, ret = 0;
1432
1433 for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1434 ret = ctf_stream__flush(cw->stream[cpu]);
1435
1436 return ret;
1437 }
1438
1439 static int convert__config(const char *var, const char *value, void *cb)
1440 {
1441 struct convert *c = cb;
1442
1443 if (!strcmp(var, "convert.queue-size")) {
1444 c->queue_size = perf_config_u64(var, value);
1445 return 0;
1446 }
1447
1448 return 0;
1449 }
1450
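/*
 * Entry point of the conversion, reached through 'perf data convert' when
 * perf is built with libbabeltrace support.  A typical round trip might
 * look like this (paths and events are only an example):
 *
 *   $ perf record -e sched:sched_switch -a -- sleep 1
 *   $ perf data convert --to-ctf ./ctf-data
 *   $ babeltrace ./ctf-data
 */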
1451 int bt_convert__perf2ctf(const char *input, const char *path,
1452 struct perf_data_convert_opts *opts)
1453 {
1454 struct perf_session *session;
1455 struct perf_data_file file = {
1456 .path = input,
1457 .mode = PERF_DATA_MODE_READ,
1458 .force = opts->force,
1459 };
1460 struct convert c = {
1461 .tool = {
1462 .sample = process_sample_event,
1463 .mmap = perf_event__process_mmap,
1464 .mmap2 = perf_event__process_mmap2,
1465 .comm = perf_event__process_comm,
1466 .exit = perf_event__process_exit,
1467 .fork = perf_event__process_fork,
1468 .lost = perf_event__process_lost,
1469 .tracing_data = perf_event__process_tracing_data,
1470 .build_id = perf_event__process_build_id,
1471 .ordered_events = true,
1472 .ordering_requires_timestamps = true,
1473 },
1474 };
1475 struct ctf_writer *cw = &c.writer;
1476 int err = -1;
1477
1478 if (opts->all) {
1479 c.tool.comm = process_comm_event;
1480 c.tool.exit = process_exit_event;
1481 c.tool.fork = process_fork_event;
1482 }
1483
1484 perf_config(convert__config, &c);
1485
1486 /* CTF writer */
1487 if (ctf_writer__init(cw, path))
1488 return -1;
1489
1490 /* perf.data session */
1491 session = perf_session__new(&file, 0, &c.tool);
1492 if (!session)
1493 goto free_writer;
1494
1495 if (c.queue_size) {
1496 ordered_events__set_alloc_size(&session->ordered_events,
1497 c.queue_size);
1498 }
1499
1500 /* CTF writer env/clock setup */
1501 if (ctf_writer__setup_env(cw, session))
1502 goto free_session;
1503
1504 /* CTF events setup */
1505 if (setup_events(cw, session))
1506 goto free_session;
1507
1508 if (opts->all && setup_non_sample_events(cw, session))
1509 goto free_session;
1510
1511 if (setup_streams(cw, session))
1512 goto free_session;
1513
1514 err = perf_session__process_events(session);
1515 if (!err)
1516 err = ctf_writer__flush_streams(cw);
1517 else
1518 pr_err("Error during conversion.\n");
1519
1520 fprintf(stderr,
1521 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1522 file.path, path);
1523
1524 fprintf(stderr,
1525 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1526 (double) c.events_size / 1024.0 / 1024.0,
1527 c.events_count);
1528
1529 if (!c.non_sample_count)
1530 fprintf(stderr, ") ]\n");
1531 else
1532 fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1533
1534 cleanup_events(session);
1535 perf_session__delete(session);
1536 ctf_writer__cleanup(cw);
1537
1538 return err;
1539
1540 free_session:
1541 perf_session__delete(session);
1542 free_writer:
1543 ctf_writer__cleanup(cw);
1544 pr_err("Error during conversion setup.\n");
1545 return err;
1546 }