// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "util.h"
#include "arch/common.h"

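/*
 * PERF_RECORD_COMPRESSED payloads are decompressed into anonymous mmap'd
 * buffers chained on session->decomp; the not-yet-consumed tail of the
 * previous buffer (decomp_last->size - decomp_last->head) is copied to the
 * front of the new buffer before the new payload is decompressed after it.
 */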
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct compressed_event);
	src_size = event->pack.header.size - sizeof(struct compressed_event);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}
133
7b56cce2 134void perf_session__set_id_hdr_size(struct perf_session *session)
9c90a61c 135{
7b56cce2
ACM
136 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
137
7b56cce2 138 machines__set_id_hdr_size(&session->machines, id_hdr_size);
9c90a61c
ACM
139}
140
316c7136 141int perf_session__create_kernel_maps(struct perf_session *session)
a1645ce1 142{
316c7136 143 int ret = machine__create_kernel_maps(&session->machines.host);
a1645ce1 144
a1645ce1 145 if (ret >= 0)
316c7136 146 ret = machines__create_guest_kernel_maps(&session->machines);
a1645ce1
ZY
147 return ret;
148}
149
316c7136 150static void perf_session__destroy_kernel_maps(struct perf_session *session)
076c6e45 151{
316c7136 152 machines__destroy_kernel_maps(&session->machines);
076c6e45
ACM
153}
154
cfe1c414
AH
155static bool perf_session__has_comm_exec(struct perf_session *session)
156{
32dcd021 157 struct evsel *evsel;
cfe1c414 158
e5cadb93 159 evlist__for_each_entry(session->evlist, evsel) {
1fc632ce 160 if (evsel->core.attr.comm_exec)
cfe1c414
AH
161 return true;
162 }
163
164 return false;
165}
166
167static void perf_session__set_comm_exec(struct perf_session *session)
168{
169 bool comm_exec = perf_session__has_comm_exec(session);
170
171 machines__set_comm_exec(&session->machines, comm_exec);
172}
173
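/*
 * Delivery callback for the ordered_events queue: once an event is flushed
 * in timestamp order, hand it back to the regular perf_session delivery path.
 */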
d10eb1eb 174static int ordered_events__deliver_event(struct ordered_events *oe,
9870d780 175 struct ordered_event *event)
d10eb1eb 176{
9870d780
ACM
177 struct perf_session *session = container_of(oe, struct perf_session,
178 ordered_events);
9870d780 179
93d10af2 180 return perf_session__deliver_event(session, event->event,
c446870d 181 session->tool, event->file_offset);
d10eb1eb
ACM
182}
183
8ceb41d7 184struct perf_session *perf_session__new(struct perf_data *data,
f5fc1412 185 bool repipe, struct perf_tool *tool)
94c744b6 186{
316c7136 187 struct perf_session *session = zalloc(sizeof(*session));
efad1415 188
316c7136 189 if (!session)
94c744b6
ACM
190 goto out;
191
316c7136 192 session->repipe = repipe;
9870d780 193 session->tool = tool;
99fa2984 194 INIT_LIST_HEAD(&session->auxtrace_index);
316c7136 195 machines__init(&session->machines);
a4a6668a
JO
196 ordered_events__init(&session->ordered_events,
197 ordered_events__deliver_event, NULL);
94c744b6 198
e4378f0c 199 perf_env__init(&session->header.env);
8ceb41d7
JO
200 if (data) {
201 if (perf_data__open(data))
64abebf7 202 goto out_delete;
6a4d98d7 203
8ceb41d7 204 session->data = data;
6a4d98d7 205
8ceb41d7 206 if (perf_data__is_read(data)) {
316c7136 207 if (perf_session__open(session) < 0)
befa09b6 208 goto out_delete;
6a4d98d7 209
0973ad97
DCC
210 /*
211 * set session attributes that are present in perf.data
212 * but not in pipe-mode.
213 */
8ceb41d7 214 if (!data->is_pipe) {
0973ad97
DCC
215 perf_session__set_id_hdr_size(session);
216 perf_session__set_comm_exec(session);
217 }
93115d32
TR
218
219 perf_evlist__init_trace_event_sample_raw(session->evlist);
ec65def1
JO
220
221 /* Open the directory data. */
222 if (data->is_dir && perf_data__open_dir(data))
223 goto out_delete;
6a4d98d7 224 }
4cde998d
ACM
225 } else {
226 session->machines.host.env = &perf_env;
6a4d98d7
JO
227 }
228
ec1891af
AH
229 session->machines.host.single_address_space =
230 perf_env__single_address_space(session->machines.host.env);
231
8ceb41d7 232 if (!data || perf_data__is_write(data)) {
64abebf7
ACM
233 /*
234 * In O_RDONLY mode this will be performed when reading the
8115d60c 235 * kernel MMAP event, in perf_event__process_mmap().
64abebf7 236 */
316c7136 237 if (perf_session__create_kernel_maps(session) < 0)
a5c2a4c9 238 pr_warning("Cannot read kernel map\n");
64abebf7 239 }
d549c769 240
0973ad97
DCC
241 /*
242 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
243 * processed, so perf_evlist__sample_id_all is not meaningful here.
244 */
8ceb41d7 245 if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
0a8cb85c 246 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
21ef97f0 247 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
0a8cb85c 248 tool->ordered_events = false;
d10eb1eb 249 }
21ef97f0 250
316c7136 251 return session;
6a4d98d7 252
6a4d98d7 253 out_delete:
316c7136 254 perf_session__delete(session);
6a4d98d7 255 out:
4aa65636 256 return NULL;
94c744b6
ACM
257}
258
b424eba2
ACM
259static void perf_session__delete_threads(struct perf_session *session)
260{
876650e6 261 machine__delete_threads(&session->machines.host);
b424eba2
ACM
262}
263
static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;
	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}
a328626b 294
89f1688a 295static int process_event_synth_tracing_data_stub(struct perf_session *session
47c3d109
AH
296 __maybe_unused,
297 union perf_event *event
89f1688a 298 __maybe_unused)
d20deb64
ACM
299{
300 dump_printf(": unhandled!\n");
301 return 0;
302}
303
47c3d109
AH
304static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
305 union perf_event *event __maybe_unused,
63503dba 306 struct evlist **pevlist
1d037ca1 307 __maybe_unused)
10d0f086
ACM
308{
309 dump_printf(": unhandled!\n");
310 return 0;
311}
312
ffe77725
JO
313static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
314 union perf_event *event __maybe_unused,
63503dba 315 struct evlist **pevlist
ffe77725
JO
316 __maybe_unused)
317{
2d2aea6a
JO
318 if (dump_trace)
319 perf_event__fprintf_event_update(event, stdout);
320
ffe77725
JO
321 dump_printf(": unhandled!\n");
322 return 0;
323}
324
1d037ca1
IT
325static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
326 union perf_event *event __maybe_unused,
327 struct perf_sample *sample __maybe_unused,
32dcd021 328 struct evsel *evsel __maybe_unused,
1d037ca1 329 struct machine *machine __maybe_unused)
9e69c210
ACM
330{
331 dump_printf(": unhandled!\n");
332 return 0;
333}
334
1d037ca1
IT
335static int process_event_stub(struct perf_tool *tool __maybe_unused,
336 union perf_event *event __maybe_unused,
337 struct perf_sample *sample __maybe_unused,
338 struct machine *machine __maybe_unused)
06aae590
ACM
339{
340 dump_printf(": unhandled!\n");
341 return 0;
342}
343
1d037ca1
IT
344static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
345 union perf_event *event __maybe_unused,
d704ebda 346 struct ordered_events *oe __maybe_unused)
743eb868
ACM
347{
348 dump_printf(": unhandled!\n");
349 return 0;
350}
351
45694aa7 352static int process_finished_round(struct perf_tool *tool,
d20deb64 353 union perf_event *event,
d704ebda 354 struct ordered_events *oe);
d6b17beb 355
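/* Read and discard 'n' bytes, for input such as a pipe that cannot seek. */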
356static int skipn(int fd, off_t n)
357{
358 char buf[4096];
359 ssize_t ret;
360
361 while (n > 0) {
362 ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
363 if (ret <= 0)
364 return ret;
365 n -= ret;
366 }
367
368 return 0;
369}
370
7336555a
JO
371static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
372 union perf_event *event)
a16ac023
AH
373{
374 dump_printf(": unhandled!\n");
8ceb41d7
JO
375 if (perf_data__is_pipe(session->data))
376 skipn(perf_data__fd(session->data), event->auxtrace.size);
a16ac023
AH
377 return event->auxtrace.size;
378}
379
89f1688a
JO
380static int process_event_op2_stub(struct perf_session *session __maybe_unused,
381 union perf_event *event __maybe_unused)
e9bf54d2
AH
382{
383 dump_printf(": unhandled!\n");
384 return 0;
385}
386
5f3339d2
JO
387
388static
89f1688a
JO
389int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
390 union perf_event *event __maybe_unused)
5f3339d2 391{
2d2aea6a
JO
392 if (dump_trace)
393 perf_event__fprintf_thread_map(event, stdout);
394
5f3339d2
JO
395 dump_printf(": unhandled!\n");
396 return 0;
397}
398
6640b6c2 399static
89f1688a
JO
400int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
401 union perf_event *event __maybe_unused)
6640b6c2 402{
2d2aea6a
JO
403 if (dump_trace)
404 perf_event__fprintf_cpu_map(event, stdout);
405
6640b6c2
JO
406 dump_printf(": unhandled!\n");
407 return 0;
408}
409
374fb9e3 410static
89f1688a
JO
411int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
412 union perf_event *event __maybe_unused)
374fb9e3 413{
2d2aea6a
JO
414 if (dump_trace)
415 perf_event__fprintf_stat_config(event, stdout);
416
374fb9e3
JO
417 dump_printf(": unhandled!\n");
418 return 0;
419}
420
89f1688a
JO
421static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
422 union perf_event *event)
d80518c9 423{
2d2aea6a
JO
424 if (dump_trace)
425 perf_event__fprintf_stat(event, stdout);
426
d80518c9
JO
427 dump_printf(": unhandled!\n");
428 return 0;
429}
430
89f1688a
JO
431static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
432 union perf_event *event)
2d8f0f18 433{
2d2aea6a
JO
434 if (dump_trace)
435 perf_event__fprintf_stat_round(event, stdout);
436
2d8f0f18
JO
437 dump_printf(": unhandled!\n");
438 return 0;
439}
440
61a7773c
AB
441static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
442 union perf_event *event __maybe_unused,
443 u64 file_offset __maybe_unused)
444{
445 dump_printf(": unhandled!\n");
446 return 0;
447}
448
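/*
 * Fill every callback the tool did not set with a default or stub handler,
 * so the event dispatch code below can call them unconditionally.
 */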
9c501402 449void perf_tool__fill_defaults(struct perf_tool *tool)
06aae590 450{
45694aa7
ACM
451 if (tool->sample == NULL)
452 tool->sample = process_event_sample_stub;
453 if (tool->mmap == NULL)
454 tool->mmap = process_event_stub;
6adb0b0a
DA
455 if (tool->mmap2 == NULL)
456 tool->mmap2 = process_event_stub;
45694aa7
ACM
457 if (tool->comm == NULL)
458 tool->comm = process_event_stub;
7f0cd236
NK
459 if (tool->namespaces == NULL)
460 tool->namespaces = process_event_stub;
45694aa7
ACM
461 if (tool->fork == NULL)
462 tool->fork = process_event_stub;
463 if (tool->exit == NULL)
464 tool->exit = process_event_stub;
465 if (tool->lost == NULL)
466 tool->lost = perf_event__process_lost;
c4937a91
KL
467 if (tool->lost_samples == NULL)
468 tool->lost_samples = perf_event__process_lost_samples;
4a96f7a0
AH
469 if (tool->aux == NULL)
470 tool->aux = perf_event__process_aux;
0ad21f68
AH
471 if (tool->itrace_start == NULL)
472 tool->itrace_start = perf_event__process_itrace_start;
0286039f
AH
473 if (tool->context_switch == NULL)
474 tool->context_switch = perf_event__process_switch;
9aa0bfa3
SL
475 if (tool->ksymbol == NULL)
476 tool->ksymbol = perf_event__process_ksymbol;
3f604b5f
ACM
477 if (tool->bpf == NULL)
478 tool->bpf = perf_event__process_bpf;
45694aa7
ACM
479 if (tool->read == NULL)
480 tool->read = process_event_sample_stub;
481 if (tool->throttle == NULL)
482 tool->throttle = process_event_stub;
483 if (tool->unthrottle == NULL)
484 tool->unthrottle = process_event_stub;
485 if (tool->attr == NULL)
486 tool->attr = process_event_synth_attr_stub;
ffe77725
JO
487 if (tool->event_update == NULL)
488 tool->event_update = process_event_synth_event_update_stub;
45694aa7
ACM
489 if (tool->tracing_data == NULL)
490 tool->tracing_data = process_event_synth_tracing_data_stub;
491 if (tool->build_id == NULL)
5fb0ac16 492 tool->build_id = process_event_op2_stub;
45694aa7 493 if (tool->finished_round == NULL) {
0a8cb85c 494 if (tool->ordered_events)
45694aa7 495 tool->finished_round = process_finished_round;
d6b17beb 496 else
45694aa7 497 tool->finished_round = process_finished_round_stub;
d6b17beb 498 }
3c659eed 499 if (tool->id_index == NULL)
5fb0ac16 500 tool->id_index = process_event_op2_stub;
a16ac023 501 if (tool->auxtrace_info == NULL)
5fb0ac16 502 tool->auxtrace_info = process_event_op2_stub;
a16ac023
AH
503 if (tool->auxtrace == NULL)
504 tool->auxtrace = process_event_auxtrace_stub;
e9bf54d2 505 if (tool->auxtrace_error == NULL)
5fb0ac16 506 tool->auxtrace_error = process_event_op2_stub;
5f3339d2
JO
507 if (tool->thread_map == NULL)
508 tool->thread_map = process_event_thread_map_stub;
6640b6c2
JO
509 if (tool->cpu_map == NULL)
510 tool->cpu_map = process_event_cpu_map_stub;
374fb9e3
JO
511 if (tool->stat_config == NULL)
512 tool->stat_config = process_event_stat_config_stub;
d80518c9
JO
513 if (tool->stat == NULL)
514 tool->stat = process_stat_stub;
2d8f0f18
JO
515 if (tool->stat_round == NULL)
516 tool->stat_round = process_stat_round_stub;
46bc29b9
AH
517 if (tool->time_conv == NULL)
518 tool->time_conv = process_event_op2_stub;
e9def1b2
DCC
519 if (tool->feature == NULL)
520 tool->feature = process_event_op2_stub;
61a7773c 521 if (tool->compressed == NULL)
cb62c6f1 522 tool->compressed = perf_session__process_compressed_event;
06aae590 523}
48000a1a 524
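/*
 * Byte-swap the trailing sample_id_all fields of a non-sample event,
 * from 'data' up to the end of the event.
 */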
525static void swap_sample_id_all(union perf_event *event, void *data)
526{
527 void *end = (void *) event + event->header.size;
528 int size = end - data;
529
530 BUG_ON(size % sizeof(u64));
531 mem_bswap_64(data, size);
532}
533
534static void perf_event__all64_swap(union perf_event *event,
1d037ca1 535 bool sample_id_all __maybe_unused)
ba21594c 536{
8115d60c
ACM
537 struct perf_event_header *hdr = &event->header;
538 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
ba21594c
ACM
539}
540
268fb20f 541static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
ba21594c 542{
8115d60c
ACM
543 event->comm.pid = bswap_32(event->comm.pid);
544 event->comm.tid = bswap_32(event->comm.tid);
268fb20f
JO
545
546 if (sample_id_all) {
547 void *data = &event->comm.comm;
548
9ac3e487 549 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
268fb20f
JO
550 swap_sample_id_all(event, data);
551 }
ba21594c
ACM
552}
553
268fb20f
JO
554static void perf_event__mmap_swap(union perf_event *event,
555 bool sample_id_all)
ba21594c 556{
8115d60c
ACM
557 event->mmap.pid = bswap_32(event->mmap.pid);
558 event->mmap.tid = bswap_32(event->mmap.tid);
559 event->mmap.start = bswap_64(event->mmap.start);
560 event->mmap.len = bswap_64(event->mmap.len);
561 event->mmap.pgoff = bswap_64(event->mmap.pgoff);
268fb20f
JO
562
563 if (sample_id_all) {
564 void *data = &event->mmap.filename;
565
9ac3e487 566 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
268fb20f
JO
567 swap_sample_id_all(event, data);
568 }
ba21594c
ACM
569}
570
5c5e854b
SE
571static void perf_event__mmap2_swap(union perf_event *event,
572 bool sample_id_all)
573{
574 event->mmap2.pid = bswap_32(event->mmap2.pid);
575 event->mmap2.tid = bswap_32(event->mmap2.tid);
576 event->mmap2.start = bswap_64(event->mmap2.start);
577 event->mmap2.len = bswap_64(event->mmap2.len);
578 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
579 event->mmap2.maj = bswap_32(event->mmap2.maj);
580 event->mmap2.min = bswap_32(event->mmap2.min);
581 event->mmap2.ino = bswap_64(event->mmap2.ino);
582
583 if (sample_id_all) {
584 void *data = &event->mmap2.filename;
585
586 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
587 swap_sample_id_all(event, data);
588 }
589}
268fb20f 590static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
ba21594c 591{
8115d60c
ACM
592 event->fork.pid = bswap_32(event->fork.pid);
593 event->fork.tid = bswap_32(event->fork.tid);
594 event->fork.ppid = bswap_32(event->fork.ppid);
595 event->fork.ptid = bswap_32(event->fork.ptid);
596 event->fork.time = bswap_64(event->fork.time);
268fb20f
JO
597
598 if (sample_id_all)
599 swap_sample_id_all(event, &event->fork + 1);
ba21594c
ACM
600}
601
268fb20f 602static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
ba21594c 603{
8115d60c
ACM
604 event->read.pid = bswap_32(event->read.pid);
605 event->read.tid = bswap_32(event->read.tid);
606 event->read.value = bswap_64(event->read.value);
607 event->read.time_enabled = bswap_64(event->read.time_enabled);
608 event->read.time_running = bswap_64(event->read.time_running);
609 event->read.id = bswap_64(event->read.id);
268fb20f
JO
610
611 if (sample_id_all)
612 swap_sample_id_all(event, &event->read + 1);
ba21594c
ACM
613}
614
4a96f7a0
AH
615static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
616{
617 event->aux.aux_offset = bswap_64(event->aux.aux_offset);
618 event->aux.aux_size = bswap_64(event->aux.aux_size);
619 event->aux.flags = bswap_64(event->aux.flags);
620
621 if (sample_id_all)
622 swap_sample_id_all(event, &event->aux + 1);
623}
624
0ad21f68
AH
625static void perf_event__itrace_start_swap(union perf_event *event,
626 bool sample_id_all)
627{
628 event->itrace_start.pid = bswap_32(event->itrace_start.pid);
629 event->itrace_start.tid = bswap_32(event->itrace_start.tid);
630
631 if (sample_id_all)
632 swap_sample_id_all(event, &event->itrace_start + 1);
633}
634
0286039f
AH
635static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
636{
637 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
638 event->context_switch.next_prev_pid =
639 bswap_32(event->context_switch.next_prev_pid);
640 event->context_switch.next_prev_tid =
641 bswap_32(event->context_switch.next_prev_tid);
642 }
643
644 if (sample_id_all)
645 swap_sample_id_all(event, &event->context_switch + 1);
646}
647
dd96c46b
JO
648static void perf_event__throttle_swap(union perf_event *event,
649 bool sample_id_all)
650{
651 event->throttle.time = bswap_64(event->throttle.time);
652 event->throttle.id = bswap_64(event->throttle.id);
653 event->throttle.stream_id = bswap_64(event->throttle.stream_id);
654
655 if (sample_id_all)
656 swap_sample_id_all(event, &event->throttle + 1);
657}
658
acd244b8
NK
659static void perf_event__namespaces_swap(union perf_event *event,
660 bool sample_id_all)
661{
662 u64 i;
663
664 event->namespaces.pid = bswap_32(event->namespaces.pid);
665 event->namespaces.tid = bswap_32(event->namespaces.tid);
666 event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);
667
668 for (i = 0; i < event->namespaces.nr_namespaces; i++) {
669 struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
670
671 ns->dev = bswap_64(ns->dev);
672 ns->ino = bswap_64(ns->ino);
673 }
674
675 if (sample_id_all)
676 swap_sample_id_all(event, &event->namespaces.link_info[i]);
677}
678
e108c66e
JO
679static u8 revbyte(u8 b)
680{
681 int rev = (b >> 4) | ((b & 0xf) << 4);
682 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
683 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
684 return (u8) rev;
685}
686
/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this
 * seems to work for now.
 */
701static void swap_bitfield(u8 *p, unsigned len)
702{
703 unsigned i;
704
705 for (i = 0; i < len; i++) {
706 *p = revbyte(*p);
707 p++;
708 }
709}
710
eda3913b
DA
711/* exported for swapping attributes in file header */
712void perf_event__attr_swap(struct perf_event_attr *attr)
713{
714 attr->type = bswap_32(attr->type);
715 attr->size = bswap_32(attr->size);
b30b6172
WN
716
717#define bswap_safe(f, n) \
718 (attr->size > (offsetof(struct perf_event_attr, f) + \
719 sizeof(attr->f) * (n)))
720#define bswap_field(f, sz) \
721do { \
722 if (bswap_safe(f, 0)) \
723 attr->f = bswap_##sz(attr->f); \
724} while(0)
792d48b4 725#define bswap_field_16(f) bswap_field(f, 16)
b30b6172
WN
726#define bswap_field_32(f) bswap_field(f, 32)
727#define bswap_field_64(f) bswap_field(f, 64)
728
729 bswap_field_64(config);
730 bswap_field_64(sample_period);
731 bswap_field_64(sample_type);
732 bswap_field_64(read_format);
733 bswap_field_32(wakeup_events);
734 bswap_field_32(bp_type);
735 bswap_field_64(bp_addr);
736 bswap_field_64(bp_len);
737 bswap_field_64(branch_sample_type);
738 bswap_field_64(sample_regs_user);
739 bswap_field_32(sample_stack_user);
740 bswap_field_32(aux_watermark);
792d48b4 741 bswap_field_16(sample_max_stack);
b30b6172
WN
742
	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on a bitfield.
	 */
747 if (bswap_safe(read_format, 1))
748 swap_bitfield((u8 *) (&attr->read_format + 1),
749 sizeof(u64));
750#undef bswap_field_64
751#undef bswap_field_32
752#undef bswap_field
753#undef bswap_safe
eda3913b
DA
754}
755
268fb20f 756static void perf_event__hdr_attr_swap(union perf_event *event,
1d037ca1 757 bool sample_id_all __maybe_unused)
2c46dbb5
TZ
758{
759 size_t size;
760
eda3913b 761 perf_event__attr_swap(&event->attr.attr);
2c46dbb5 762
8115d60c
ACM
763 size = event->header.size;
764 size -= (void *)&event->attr.id - (void *)event;
765 mem_bswap_64(event->attr.id, size);
2c46dbb5
TZ
766}
767
ffe77725
JO
768static void perf_event__event_update_swap(union perf_event *event,
769 bool sample_id_all __maybe_unused)
770{
771 event->event_update.type = bswap_64(event->event_update.type);
772 event->event_update.id = bswap_64(event->event_update.id);
773}
774
268fb20f 775static void perf_event__event_type_swap(union perf_event *event,
1d037ca1 776 bool sample_id_all __maybe_unused)
cd19a035 777{
8115d60c
ACM
778 event->event_type.event_type.event_id =
779 bswap_64(event->event_type.event_type.event_id);
cd19a035
TZ
780}
781
268fb20f 782static void perf_event__tracing_data_swap(union perf_event *event,
1d037ca1 783 bool sample_id_all __maybe_unused)
9215545e 784{
8115d60c 785 event->tracing_data.size = bswap_32(event->tracing_data.size);
9215545e
TZ
786}
787
a16ac023
AH
788static void perf_event__auxtrace_info_swap(union perf_event *event,
789 bool sample_id_all __maybe_unused)
790{
791 size_t size;
792
793 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
794
795 size = event->header.size;
796 size -= (void *)&event->auxtrace_info.priv - (void *)event;
797 mem_bswap_64(event->auxtrace_info.priv, size);
798}
799
800static void perf_event__auxtrace_swap(union perf_event *event,
801 bool sample_id_all __maybe_unused)
802{
803 event->auxtrace.size = bswap_64(event->auxtrace.size);
804 event->auxtrace.offset = bswap_64(event->auxtrace.offset);
805 event->auxtrace.reference = bswap_64(event->auxtrace.reference);
806 event->auxtrace.idx = bswap_32(event->auxtrace.idx);
807 event->auxtrace.tid = bswap_32(event->auxtrace.tid);
808 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
809}
810
e9bf54d2
AH
811static void perf_event__auxtrace_error_swap(union perf_event *event,
812 bool sample_id_all __maybe_unused)
813{
814 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
815 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
816 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
817 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
818 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
16bd4321 819 event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
e9bf54d2 820 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
16bd4321
AH
821 if (event->auxtrace_error.fmt)
822 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
e9bf54d2
AH
823}
824
5f3339d2
JO
825static void perf_event__thread_map_swap(union perf_event *event,
826 bool sample_id_all __maybe_unused)
827{
828 unsigned i;
829
830 event->thread_map.nr = bswap_64(event->thread_map.nr);
831
832 for (i = 0; i < event->thread_map.nr; i++)
833 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
834}
835
6640b6c2
JO
836static void perf_event__cpu_map_swap(union perf_event *event,
837 bool sample_id_all __maybe_unused)
838{
839 struct cpu_map_data *data = &event->cpu_map.data;
840 struct cpu_map_entries *cpus;
841 struct cpu_map_mask *mask;
842 unsigned i;
843
844 data->type = bswap_64(data->type);
845
846 switch (data->type) {
847 case PERF_CPU_MAP__CPUS:
848 cpus = (struct cpu_map_entries *)data->data;
849
850 cpus->nr = bswap_16(cpus->nr);
851
852 for (i = 0; i < cpus->nr; i++)
853 cpus->cpu[i] = bswap_16(cpus->cpu[i]);
854 break;
855 case PERF_CPU_MAP__MASK:
856 mask = (struct cpu_map_mask *) data->data;
857
858 mask->nr = bswap_16(mask->nr);
859 mask->long_size = bswap_16(mask->long_size);
860
861 switch (mask->long_size) {
862 case 4: mem_bswap_32(&mask->mask, mask->nr); break;
863 case 8: mem_bswap_64(&mask->mask, mask->nr); break;
864 default:
865 pr_err("cpu_map swap: unsupported long size\n");
866 }
867 default:
868 break;
869 }
870}
871
374fb9e3
JO
872static void perf_event__stat_config_swap(union perf_event *event,
873 bool sample_id_all __maybe_unused)
874{
875 u64 size;
876
877 size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
878 size += 1; /* nr item itself */
879 mem_bswap_64(&event->stat_config.nr, size);
880}
881
d80518c9
JO
882static void perf_event__stat_swap(union perf_event *event,
883 bool sample_id_all __maybe_unused)
884{
885 event->stat.id = bswap_64(event->stat.id);
886 event->stat.thread = bswap_32(event->stat.thread);
887 event->stat.cpu = bswap_32(event->stat.cpu);
888 event->stat.val = bswap_64(event->stat.val);
889 event->stat.ena = bswap_64(event->stat.ena);
890 event->stat.run = bswap_64(event->stat.run);
891}
892
2d8f0f18
JO
893static void perf_event__stat_round_swap(union perf_event *event,
894 bool sample_id_all __maybe_unused)
895{
896 event->stat_round.type = bswap_64(event->stat_round.type);
897 event->stat_round.time = bswap_64(event->stat_round.time);
898}
899
268fb20f
JO
900typedef void (*perf_event__swap_op)(union perf_event *event,
901 bool sample_id_all);
ba21594c 902
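/* Per record-type byte-swap handlers, used when reading cross-endian data files. */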
903static perf_event__swap_op perf_event__swap_ops[] = {
904 [PERF_RECORD_MMAP] = perf_event__mmap_swap,
5c5e854b 905 [PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
8115d60c
ACM
906 [PERF_RECORD_COMM] = perf_event__comm_swap,
907 [PERF_RECORD_FORK] = perf_event__task_swap,
908 [PERF_RECORD_EXIT] = perf_event__task_swap,
909 [PERF_RECORD_LOST] = perf_event__all64_swap,
910 [PERF_RECORD_READ] = perf_event__read_swap,
dd96c46b
JO
911 [PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
912 [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
8115d60c 913 [PERF_RECORD_SAMPLE] = perf_event__all64_swap,
4a96f7a0 914 [PERF_RECORD_AUX] = perf_event__aux_swap,
0ad21f68 915 [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
c4937a91 916 [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
0286039f
AH
917 [PERF_RECORD_SWITCH] = perf_event__switch_swap,
918 [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
acd244b8 919 [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
eda3913b 920 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
8115d60c
ACM
921 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
922 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
923 [PERF_RECORD_HEADER_BUILD_ID] = NULL,
3c659eed 924 [PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
a16ac023
AH
925 [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
926 [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
e9bf54d2 927 [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
5f3339d2 928 [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
6640b6c2 929 [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
374fb9e3 930 [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
d80518c9 931 [PERF_RECORD_STAT] = perf_event__stat_swap,
2d8f0f18 932 [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
ffe77725 933 [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
46bc29b9 934 [PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
8115d60c 935 [PERF_RECORD_HEADER_MAX] = NULL,
ba21594c
ACM
936};
937
/*
 * When perf record finishes a pass over all the buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 * ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 * ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 * Flush all events below timestamp 4
 *
 * ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 * Flush all events below timestamp 7
 * etc...
 */
b7b61cbe 977static int process_finished_round(struct perf_tool *tool __maybe_unused,
1d037ca1 978 union perf_event *event __maybe_unused,
d704ebda 979 struct ordered_events *oe)
d6b17beb 980{
5531e162
AH
981 if (dump_trace)
982 fprintf(stdout, "\n");
b7b61cbe 983 return ordered_events__flush(oe, OE_FLUSH__ROUND);
d6b17beb
FW
984}
985
b7b61cbe 986int perf_session__queue_event(struct perf_session *s, union perf_event *event,
dc83e139 987 u64 timestamp, u64 file_offset)
c61e52ee 988{
dc83e139 989 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
640c03ce 990}
c61e52ee 991
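/*
 * Print the LBR-provided user call stack that extends a kernel FP call
 * chain; see the comment below for how the from/to register pairs are used.
 */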
384b6055 992static void callchain__lbr_callstack_printf(struct perf_sample *sample)
640c03ce 993{
384b6055
KL
994 struct ip_callchain *callchain = sample->callchain;
995 struct branch_stack *lbr_stack = sample->branch_stack;
996 u64 kernel_callchain_nr = callchain->nr;
640c03ce 997 unsigned int i;
c61e52ee 998
384b6055
KL
999 for (i = 0; i < kernel_callchain_nr; i++) {
1000 if (callchain->ips[i] == PERF_CONTEXT_USER)
1001 break;
1002 }
1003
1004 if ((i != kernel_callchain_nr) && lbr_stack->nr) {
1005 u64 total_nr;
		/*
		 * The LBR callstack can only get the user call chain;
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are register pairs: the caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, given the call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
1022 total_nr = i + 1 + lbr_stack->nr + 1;
1023 kernel_callchain_nr = i + 1;
1024
1025 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
1026
1027 for (i = 0; i < kernel_callchain_nr; i++)
1028 printf("..... %2d: %016" PRIx64 "\n",
1029 i, callchain->ips[i]);
1030
1031 printf("..... %2d: %016" PRIx64 "\n",
1032 (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
1033 for (i = 0; i < lbr_stack->nr; i++)
1034 printf("..... %2d: %016" PRIx64 "\n",
1035 (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
1036 }
1037}
1038
32dcd021 1039static void callchain__printf(struct evsel *evsel,
384b6055
KL
1040 struct perf_sample *sample)
1041{
1042 unsigned int i;
1043 struct ip_callchain *callchain = sample->callchain;
1044
acf2abbd 1045 if (perf_evsel__has_branch_callstack(evsel))
384b6055
KL
1046 callchain__lbr_callstack_printf(sample);
1047
1048 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
640c03ce 1049
384b6055 1050 for (i = 0; i < callchain->nr; i++)
9486aa38 1051 printf("..... %2d: %016" PRIx64 "\n",
384b6055 1052 i, callchain->ips[i]);
c61e52ee
FW
1053}
1054
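/* Dump the branch stack either as from->to branch records or as a plain call stack. */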
d2720c3d 1055static void branch_stack__printf(struct perf_sample *sample, bool callstack)
b5387528
RAV
1056{
1057 uint64_t i;
1058
d2720c3d
AB
1059 printf("%s: nr:%" PRIu64 "\n",
1060 !callstack ? "... branch stack" : "... branch callstack",
1061 sample->branch_stack->nr);
b5387528 1062
0e332f03
AK
1063 for (i = 0; i < sample->branch_stack->nr; i++) {
1064 struct branch_entry *e = &sample->branch_stack->entries[i];
1065
d2720c3d
AB
1066 if (!callstack) {
1067 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
1068 i, e->from, e->to,
1069 (unsigned short)e->flags.cycles,
1070 e->flags.mispred ? "M" : " ",
1071 e->flags.predicted ? "P" : " ",
1072 e->flags.abort ? "A" : " ",
1073 e->flags.in_tx ? "T" : " ",
1074 (unsigned)e->flags.reserved);
1075 } else {
1076 printf("..... %2"PRIu64": %016" PRIx64 "\n",
1077 i, i > 0 ? e->from : e->to);
1078 }
0e332f03 1079 }
b5387528
RAV
1080}
1081
0f6a3015
JO
1082static void regs_dump__printf(u64 mask, u64 *regs)
1083{
1084 unsigned rid, i = 0;
1085
1086 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
1087 u64 val = regs[i++];
1088
1089 printf(".... %-5s 0x%" PRIx64 "\n",
1090 perf_reg_name(rid), val);
1091 }
1092}
1093
6a21c0b5
SE
1094static const char *regs_abi[] = {
1095 [PERF_SAMPLE_REGS_ABI_NONE] = "none",
1096 [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
1097 [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
1098};
1099
1100static inline const char *regs_dump_abi(struct regs_dump *d)
1101{
1102 if (d->abi > PERF_SAMPLE_REGS_ABI_64)
1103 return "unknown";
1104
1105 return regs_abi[d->abi];
1106}
1107
1108static void regs__printf(const char *type, struct regs_dump *regs)
1109{
1110 u64 mask = regs->mask;
1111
1112 printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
1113 type,
1114 mask,
1115 regs_dump_abi(regs));
1116
1117 regs_dump__printf(mask, regs->regs);
1118}
1119
352ea45a 1120static void regs_user__printf(struct perf_sample *sample)
0f6a3015
JO
1121{
1122 struct regs_dump *user_regs = &sample->user_regs;
1123
6a21c0b5
SE
1124 if (user_regs->regs)
1125 regs__printf("user", user_regs);
1126}
1127
1128static void regs_intr__printf(struct perf_sample *sample)
1129{
1130 struct regs_dump *intr_regs = &sample->intr_regs;
1131
1132 if (intr_regs->regs)
1133 regs__printf("intr", intr_regs);
0f6a3015
JO
1134}
1135
1136static void stack_user__printf(struct stack_dump *dump)
1137{
1138 printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1139 dump->size, dump->offset);
1140}
1141
63503dba 1142static void perf_evlist__print_tstamp(struct evlist *evlist,
8115d60c 1143 union perf_event *event,
8d50e5b4 1144 struct perf_sample *sample)
9c90a61c 1145{
9fa8727a 1146 u64 sample_type = __perf_evlist__combined_sample_type(evlist);
7f3be652 1147
9c90a61c 1148 if (event->header.type != PERF_RECORD_SAMPLE &&
9fa8727a 1149 !perf_evlist__sample_id_all(evlist)) {
9c90a61c
ACM
1150 fputs("-1 -1 ", stdout);
1151 return;
1152 }
1153
7f3be652 1154 if ((sample_type & PERF_SAMPLE_CPU))
9c90a61c
ACM
1155 printf("%u ", sample->cpu);
1156
7f3be652 1157 if (sample_type & PERF_SAMPLE_TIME)
9486aa38 1158 printf("%" PRIu64 " ", sample->time);
9c90a61c
ACM
1159}
1160
9ede473c
JO
1161static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1162{
1163 printf("... sample_read:\n");
1164
1165 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1166 printf("...... time enabled %016" PRIx64 "\n",
1167 sample->read.time_enabled);
1168
1169 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1170 printf("...... time running %016" PRIx64 "\n",
1171 sample->read.time_running);
1172
1173 if (read_format & PERF_FORMAT_GROUP) {
1174 u64 i;
1175
1176 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1177
1178 for (i = 0; i < sample->read.group.nr; i++) {
1179 struct sample_read_value *value;
1180
1181 value = &sample->read.group.values[i];
1182 printf("..... id %016" PRIx64
1183 ", value %016" PRIx64 "\n",
1184 value->id, value->value);
1185 }
1186 } else
1187 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
1188 sample->read.one.id, sample->read.one.value);
1189}
1190
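/* Raw event dump, printed only when dump_trace is set (e.g. perf report -D). */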
63503dba 1191static void dump_event(struct evlist *evlist, union perf_event *event,
8d50e5b4 1192 u64 file_offset, struct perf_sample *sample)
9aefcab0
TG
1193{
1194 if (!dump_trace)
1195 return;
1196
9486aa38
ACM
1197 printf("\n%#" PRIx64 " [%#x]: event: %d\n",
1198 file_offset, event->header.size, event->header.type);
9aefcab0
TG
1199
1200 trace_event(event);
93115d32
TR
1201 if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
1202 evlist->trace_event_sample_raw(evlist, event, sample);
9aefcab0
TG
1203
1204 if (sample)
9fa8727a 1205 perf_evlist__print_tstamp(evlist, event, sample);
9aefcab0 1206
9486aa38 1207 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
8115d60c 1208 event->header.size, perf_event__name(event->header.type));
9aefcab0
TG
1209}
1210
32dcd021 1211static void dump_sample(struct evsel *evsel, union perf_event *event,
8d50e5b4 1212 struct perf_sample *sample)
9aefcab0 1213{
7f3be652
ACM
1214 u64 sample_type;
1215
ddbc24b7
ACM
1216 if (!dump_trace)
1217 return;
1218
0ea590ae 1219 printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
9486aa38 1220 event->header.misc, sample->pid, sample->tid, sample->ip,
7cec0922 1221 sample->period, sample->addr);
9aefcab0 1222
1fc632ce 1223 sample_type = evsel->core.attr.sample_type;
7f3be652 1224
27de9b2b 1225 if (evsel__has_callchain(evsel))
384b6055 1226 callchain__printf(evsel, sample);
b5387528 1227
d2720c3d
AB
1228 if (sample_type & PERF_SAMPLE_BRANCH_STACK)
1229 branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel));
0f6a3015
JO
1230
1231 if (sample_type & PERF_SAMPLE_REGS_USER)
352ea45a 1232 regs_user__printf(sample);
0f6a3015 1233
6a21c0b5
SE
1234 if (sample_type & PERF_SAMPLE_REGS_INTR)
1235 regs_intr__printf(sample);
1236
0f6a3015
JO
1237 if (sample_type & PERF_SAMPLE_STACK_USER)
1238 stack_user__printf(&sample->user_stack);
05484298
AK
1239
1240 if (sample_type & PERF_SAMPLE_WEIGHT)
1241 printf("... weight: %" PRIu64 "\n", sample->weight);
98a3b32c
SE
1242
1243 if (sample_type & PERF_SAMPLE_DATA_SRC)
1244 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
9ede473c 1245
8780fb25
KL
1246 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1247 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1248
475eeab9
AK
1249 if (sample_type & PERF_SAMPLE_TRANSACTION)
1250 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1251
9ede473c 1252 if (sample_type & PERF_SAMPLE_READ)
1fc632ce 1253 sample_read__printf(sample, evsel->core.attr.read_format);
9aefcab0
TG
1254}
1255
32dcd021 1256static void dump_read(struct evsel *evsel, union perf_event *event)
dac7f6b7 1257{
69d81f09 1258 struct perf_record_read *read_event = &event->read;
dac7f6b7
JO
1259 u64 read_format;
1260
1261 if (!dump_trace)
1262 return;
1263
213a6c1d 1264 printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
fc50e0ba 1265 perf_evsel__name(evsel),
dac7f6b7
JO
1266 event->read.value);
1267
f3c8d907
LY
1268 if (!evsel)
1269 return;
1270
1fc632ce 1271 read_format = evsel->core.attr.read_format;
dac7f6b7
JO
1272
1273 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
213a6c1d 1274 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
dac7f6b7
JO
1275
1276 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
213a6c1d 1277 printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
dac7f6b7
JO
1278
1279 if (read_format & PERF_FORMAT_ID)
213a6c1d 1280 printf("... id : %" PRI_lu64 "\n", read_event->id);
dac7f6b7
JO
1281}
1282
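/*
 * Pick the machine an event belongs to: the guest machine looked up by pid
 * when the sample cpumode marks a guest event, the host machine otherwise.
 */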
54245fdc 1283static struct machine *machines__find_for_cpumode(struct machines *machines,
ef89325f
AH
1284 union perf_event *event,
1285 struct perf_sample *sample)
743eb868 1286{
ad85ace0 1287 struct machine *machine;
743eb868 1288
7c0f4a41 1289 if (perf_guest &&
473398a2
ACM
1290 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1291 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
7fb0a5ee
ND
1292 u32 pid;
1293
5c5e854b
SE
1294 if (event->header.type == PERF_RECORD_MMAP
1295 || event->header.type == PERF_RECORD_MMAP2)
7fb0a5ee
ND
1296 pid = event->mmap.pid;
1297 else
ef89325f 1298 pid = sample->pid;
7fb0a5ee 1299
54245fdc 1300 machine = machines__find(machines, pid);
ad85ace0 1301 if (!machine)
3caeaa56 1302 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
ad85ace0 1303 return machine;
7fb0a5ee 1304 }
743eb868 1305
54245fdc 1306 return &machines->host;
743eb868
ACM
1307}
1308
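/*
 * Deliver one value of a PERF_SAMPLE_READ sample: look up the evsel by the
 * value's id, turn the running counter value into a period delta since the
 * previous read, then call the tool's sample handler.
 */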
63503dba 1309static int deliver_sample_value(struct evlist *evlist,
e4caec0d
JO
1310 struct perf_tool *tool,
1311 union perf_event *event,
1312 struct perf_sample *sample,
1313 struct sample_read_value *v,
1314 struct machine *machine)
1315{
313e53b0 1316 struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
e4caec0d 1317
e4caec0d
JO
1318 if (sid) {
1319 sample->id = v->id;
1320 sample->period = v->value - sid->period;
1321 sid->period = v->value;
1322 }
1323
1324 if (!sid || sid->evsel == NULL) {
313e53b0 1325 ++evlist->stats.nr_unknown_id;
e4caec0d
JO
1326 return 0;
1327 }
1328
	/*
	 * There's no reason to deliver a sample
	 * with a zero period, bail out.
	 */
1333 if (!sample->period)
1334 return 0;
1335
e4caec0d
JO
1336 return tool->sample(tool, event, sample, sid->evsel, machine);
1337}
1338
63503dba 1339static int deliver_sample_group(struct evlist *evlist,
e4caec0d
JO
1340 struct perf_tool *tool,
1341 union perf_event *event,
1342 struct perf_sample *sample,
1343 struct machine *machine)
1344{
1345 int ret = -EINVAL;
1346 u64 i;
1347
1348 for (i = 0; i < sample->read.group.nr; i++) {
313e53b0 1349 ret = deliver_sample_value(evlist, tool, event, sample,
e4caec0d
JO
1350 &sample->read.group.values[i],
1351 machine);
1352 if (ret)
1353 break;
1354 }
1355
1356 return ret;
1357}
1358
1359static int
63503dba 1360 perf_evlist__deliver_sample(struct evlist *evlist,
e4caec0d
JO
1361 struct perf_tool *tool,
1362 union perf_event *event,
1363 struct perf_sample *sample,
32dcd021 1364 struct evsel *evsel,
e4caec0d
JO
1365 struct machine *machine)
1366{
1367 /* We know evsel != NULL. */
1fc632ce
JO
1368 u64 sample_type = evsel->core.attr.sample_type;
1369 u64 read_format = evsel->core.attr.read_format;
e4caec0d 1370
d94386f2 1371 /* Standard sample delivery. */
e4caec0d
JO
1372 if (!(sample_type & PERF_SAMPLE_READ))
1373 return tool->sample(tool, event, sample, evsel, machine);
1374
1375 /* For PERF_SAMPLE_READ we have either single or group mode. */
1376 if (read_format & PERF_FORMAT_GROUP)
313e53b0 1377 return deliver_sample_group(evlist, tool, event, sample,
e4caec0d
JO
1378 machine);
1379 else
313e53b0 1380 return deliver_sample_value(evlist, tool, event, sample,
e4caec0d
JO
1381 &sample->read.one, machine);
1382}
1383
d10eb1eb 1384static int machines__deliver_event(struct machines *machines,
63503dba 1385 struct evlist *evlist,
d10eb1eb
ACM
1386 union perf_event *event,
1387 struct perf_sample *sample,
1388 struct perf_tool *tool, u64 file_offset)
cbf41645 1389{
32dcd021 1390 struct evsel *evsel;
743eb868 1391 struct machine *machine;
9e69c210 1392
9fa8727a 1393 dump_event(evlist, event, file_offset, sample);
532e7269 1394
313e53b0 1395 evsel = perf_evlist__id2evsel(evlist, sample->id);
7b27509f 1396
fa713a4e 1397 machine = machines__find_for_cpumode(machines, event, sample);
743eb868 1398
cbf41645
TG
1399 switch (event->header.type) {
1400 case PERF_RECORD_SAMPLE:
9e69c210 1401 if (evsel == NULL) {
313e53b0 1402 ++evlist->stats.nr_unknown_id;
6782206b 1403 return 0;
9e69c210 1404 }
1b29ac59 1405 dump_sample(evsel, event, sample);
0c095715 1406 if (machine == NULL) {
313e53b0 1407 ++evlist->stats.nr_unprocessable_samples;
6782206b 1408 return 0;
0c095715 1409 }
313e53b0 1410 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
cbf41645 1411 case PERF_RECORD_MMAP:
45694aa7 1412 return tool->mmap(tool, event, sample, machine);
5c5e854b 1413 case PERF_RECORD_MMAP2:
930e6fcd
KL
1414 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1415 ++evlist->stats.nr_proc_map_timeout;
5c5e854b 1416 return tool->mmap2(tool, event, sample, machine);
cbf41645 1417 case PERF_RECORD_COMM:
45694aa7 1418 return tool->comm(tool, event, sample, machine);
f3b3614a
HB
1419 case PERF_RECORD_NAMESPACES:
1420 return tool->namespaces(tool, event, sample, machine);
cbf41645 1421 case PERF_RECORD_FORK:
45694aa7 1422 return tool->fork(tool, event, sample, machine);
cbf41645 1423 case PERF_RECORD_EXIT:
45694aa7 1424 return tool->exit(tool, event, sample, machine);
cbf41645 1425 case PERF_RECORD_LOST:
45694aa7 1426 if (tool->lost == perf_event__process_lost)
313e53b0 1427 evlist->stats.total_lost += event->lost.lost;
45694aa7 1428 return tool->lost(tool, event, sample, machine);
c4937a91
KL
1429 case PERF_RECORD_LOST_SAMPLES:
1430 if (tool->lost_samples == perf_event__process_lost_samples)
1431 evlist->stats.total_lost_samples += event->lost_samples.lost;
1432 return tool->lost_samples(tool, event, sample, machine);
cbf41645 1433 case PERF_RECORD_READ:
dac7f6b7 1434 dump_read(evsel, event);
45694aa7 1435 return tool->read(tool, event, sample, evsel, machine);
cbf41645 1436 case PERF_RECORD_THROTTLE:
45694aa7 1437 return tool->throttle(tool, event, sample, machine);
cbf41645 1438 case PERF_RECORD_UNTHROTTLE:
45694aa7 1439 return tool->unthrottle(tool, event, sample, machine);
4a96f7a0 1440 case PERF_RECORD_AUX:
05a1f47e
AS
1441 if (tool->aux == perf_event__process_aux) {
1442 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1443 evlist->stats.total_aux_lost += 1;
1444 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1445 evlist->stats.total_aux_partial += 1;
1446 }
4a96f7a0 1447 return tool->aux(tool, event, sample, machine);
0ad21f68
AH
1448 case PERF_RECORD_ITRACE_START:
1449 return tool->itrace_start(tool, event, sample, machine);
0286039f
AH
1450 case PERF_RECORD_SWITCH:
1451 case PERF_RECORD_SWITCH_CPU_WIDE:
1452 return tool->context_switch(tool, event, sample, machine);
9aa0bfa3
SL
1453 case PERF_RECORD_KSYMBOL:
1454 return tool->ksymbol(tool, event, sample, machine);
45178a92 1455 case PERF_RECORD_BPF_EVENT:
3f604b5f 1456 return tool->bpf(tool, event, sample, machine);
cbf41645 1457 default:
313e53b0 1458 ++evlist->stats.nr_unknown_events;
cbf41645
TG
1459 return -1;
1460 }
1461}
1462
c446870d
AH
1463static int perf_session__deliver_event(struct perf_session *session,
1464 union perf_event *event,
c446870d
AH
1465 struct perf_tool *tool,
1466 u64 file_offset)
1467{
93d10af2 1468 struct perf_sample sample;
c446870d
AH
1469 int ret;
1470
93d10af2
JO
1471 ret = perf_evlist__parse_sample(session->evlist, event, &sample);
1472 if (ret) {
1473 pr_err("Can't parse sample, err = %d\n", ret);
1474 return ret;
1475 }
1476
1477 ret = auxtrace__process_event(session, event, &sample, tool);
c446870d
AH
1478 if (ret < 0)
1479 return ret;
1480 if (ret > 0)
1481 return 0;
1482
1483 return machines__deliver_event(&session->machines, session->evlist,
93d10af2 1484 event, &sample, tool, file_offset);
c446870d
AH
1485}
1486
d5652d86
AH
1487static s64 perf_session__process_user_event(struct perf_session *session,
1488 union perf_event *event,
d5652d86 1489 u64 file_offset)
06aae590 1490{
d704ebda 1491 struct ordered_events *oe = &session->ordered_events;
9870d780 1492 struct perf_tool *tool = session->tool;
f250b09c 1493 struct perf_sample sample = { .time = 0, };
8ceb41d7 1494 int fd = perf_data__fd(session->data);
10d0f086
ACM
1495 int err;
1496
61a7773c
AB
1497 if (event->header.type != PERF_RECORD_COMPRESSED ||
1498 tool->compressed == perf_session__process_compressed_event_stub)
1499 dump_event(session->evlist, event, file_offset, &sample);
06aae590 1500
cbf41645 1501 /* These events are processed right away */
06aae590 1502 switch (event->header.type) {
2c46dbb5 1503 case PERF_RECORD_HEADER_ATTR:
47c3d109 1504 err = tool->attr(tool, event, &session->evlist);
cfe1c414 1505 if (err == 0) {
7b56cce2 1506 perf_session__set_id_hdr_size(session);
cfe1c414
AH
1507 perf_session__set_comm_exec(session);
1508 }
10d0f086 1509 return err;
ffe77725
JO
1510 case PERF_RECORD_EVENT_UPDATE:
1511 return tool->event_update(tool, event, &session->evlist);
f67697bd
JO
1512 case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
1517 return 0;
9215545e
TZ
1518 case PERF_RECORD_HEADER_TRACING_DATA:
1519 /* setup for reading amidst mmap */
cc9784bd 1520 lseek(fd, file_offset, SEEK_SET);
89f1688a 1521 return tool->tracing_data(session, event);
c7929e47 1522 case PERF_RECORD_HEADER_BUILD_ID:
89f1688a 1523 return tool->build_id(session, event);
d6b17beb 1524 case PERF_RECORD_FINISHED_ROUND:
d704ebda 1525 return tool->finished_round(tool, event, oe);
3c659eed 1526 case PERF_RECORD_ID_INDEX:
89f1688a 1527 return tool->id_index(session, event);
a16ac023 1528 case PERF_RECORD_AUXTRACE_INFO:
89f1688a 1529 return tool->auxtrace_info(session, event);
a16ac023
AH
1530 case PERF_RECORD_AUXTRACE:
1531 /* setup for reading amidst mmap */
1532 lseek(fd, file_offset + event->header.size, SEEK_SET);
7336555a 1533 return tool->auxtrace(session, event);
e9bf54d2 1534 case PERF_RECORD_AUXTRACE_ERROR:
85ed4729 1535 perf_session__auxtrace_error_inc(session, event);
89f1688a 1536 return tool->auxtrace_error(session, event);
5f3339d2 1537 case PERF_RECORD_THREAD_MAP:
89f1688a 1538 return tool->thread_map(session, event);
6640b6c2 1539 case PERF_RECORD_CPU_MAP:
89f1688a 1540 return tool->cpu_map(session, event);
374fb9e3 1541 case PERF_RECORD_STAT_CONFIG:
89f1688a 1542 return tool->stat_config(session, event);
d80518c9 1543 case PERF_RECORD_STAT:
89f1688a 1544 return tool->stat(session, event);
2d8f0f18 1545 case PERF_RECORD_STAT_ROUND:
89f1688a 1546 return tool->stat_round(session, event);
46bc29b9
AH
1547 case PERF_RECORD_TIME_CONV:
1548 session->time_conv = event->time_conv;
89f1688a 1549 return tool->time_conv(session, event);
e9def1b2 1550 case PERF_RECORD_HEADER_FEATURE:
89f1688a 1551 return tool->feature(session, event);
61a7773c
AB
1552 case PERF_RECORD_COMPRESSED:
1553 err = tool->compressed(session, event, file_offset);
1554 if (err)
1555 dump_event(session->evlist, event, file_offset, &sample);
1556 return err;
06aae590 1557 default:
ba74f064 1558 return -EINVAL;
06aae590 1559 }
ba74f064
TG
1560}
1561
a293829d
AH
1562int perf_session__deliver_synth_event(struct perf_session *session,
1563 union perf_event *event,
b7b61cbe 1564 struct perf_sample *sample)
a293829d 1565{
63503dba 1566 struct evlist *evlist = session->evlist;
9870d780 1567 struct perf_tool *tool = session->tool;
fa713a4e
ACM
1568
1569 events_stats__inc(&evlist->stats, event->header.type);
a293829d
AH
1570
1571 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
b7b61cbe 1572 return perf_session__process_user_event(session, event, 0);
a293829d 1573
fa713a4e 1574 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
a293829d
AH
1575}
1576
268fb20f
JO
1577static void event_swap(union perf_event *event, bool sample_id_all)
1578{
1579 perf_event__swap_op swap;
1580
1581 swap = perf_event__swap_ops[event->header.type];
1582 if (swap)
1583 swap(event, sample_id_all);
1584}
1585
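/*
 * Read the event at 'file_offset' without disturbing normal processing:
 * use the single mmap of the file when available, otherwise seek and read
 * into the caller-supplied buffer.
 */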
1586int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1587 void *buf, size_t buf_sz,
1588 union perf_event **event_ptr,
1589 struct perf_sample *sample)
1590{
1591 union perf_event *event;
1592 size_t hdr_sz, rest;
1593 int fd;
1594
1595 if (session->one_mmap && !session->header.needs_swap) {
1596 event = file_offset - session->one_mmap_offset +
1597 session->one_mmap_addr;
1598 goto out_parse_sample;
1599 }
1600
8ceb41d7 1601 if (perf_data__is_pipe(session->data))
5a52f33a
AH
1602 return -1;
1603
8ceb41d7 1604 fd = perf_data__fd(session->data);
5a52f33a
AH
1605 hdr_sz = sizeof(struct perf_event_header);
1606
1607 if (buf_sz < hdr_sz)
1608 return -1;
1609
1610 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
554e92ed 1611 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
5a52f33a
AH
1612 return -1;
1613
1614 event = (union perf_event *)buf;
1615
1616 if (session->header.needs_swap)
1617 perf_event_header__bswap(&event->header);
1618
554e92ed 1619 if (event->header.size < hdr_sz || event->header.size > buf_sz)
5a52f33a
AH
1620 return -1;
1621
1622 rest = event->header.size - hdr_sz;
1623
554e92ed 1624 if (readn(fd, buf, rest) != (ssize_t)rest)
5a52f33a
AH
1625 return -1;
1626
1627 if (session->header.needs_swap)
1628 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1629
1630out_parse_sample:
1631
1632 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1633 perf_evlist__parse_sample(session->evlist, event, sample))
1634 return -1;
1635
1636 *event_ptr = event;
1637
1638 return 0;
1639}
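/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * perf_session__peek_event() lets a caller read one event at an arbitrary
 * file offset without disturbing the normal processing position. A caller
 * might use it roughly like this; the buffer size constant and the error
 * value are assumptions for the example:
 *
 *	char buf[PERF_SAMPLE_MAX_SIZE];
 *	union perf_event *event;
 *	struct perf_sample sample;
 *
 *	if (perf_session__peek_event(session, file_offset, buf, sizeof(buf),
 *				     &event, &sample))
 *		return -EINVAL;
 *
 * On success, event points either into the single mmap of the data file or
 * into buf, with the header byte-swapped if the file needs swapping.
 */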
1640
d5652d86 1641static s64 perf_session__process_event(struct perf_session *session,
b7b61cbe 1642 union perf_event *event, u64 file_offset)
ba74f064 1643{
63503dba 1644 struct evlist *evlist = session->evlist;
9870d780 1645 struct perf_tool *tool = session->tool;
ba74f064
TG
1646 int ret;
1647
268fb20f 1648 if (session->header.needs_swap)
313e53b0 1649 event_swap(event, perf_evlist__sample_id_all(evlist));
ba74f064
TG
1650
1651 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1652 return -EINVAL;
1653
313e53b0 1654 events_stats__inc(&evlist->stats, event->header.type);
ba74f064
TG
1655
1656 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
b7b61cbe 1657 return perf_session__process_user_event(session, event, file_offset);
cbf41645 1658
0a8cb85c 1659 if (tool->ordered_events) {
631e8f0a 1660 u64 timestamp = -1ULL;
93d10af2
JO
1661
1662 ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
631e8f0a 1663 if (ret && ret != -1)
93d10af2
JO
1664 return ret;
1665
1666 ret = perf_session__queue_event(session, event, timestamp, file_offset);
cbf41645
TG
1667 if (ret != -ETIME)
1668 return ret;
1669 }
1670
93d10af2 1671 return perf_session__deliver_event(session, event, tool, file_offset);
06aae590
ACM
1672}
1673
316c7136 1674void perf_event_header__bswap(struct perf_event_header *hdr)
ba21594c 1675{
316c7136
ACM
1676 hdr->type = bswap_32(hdr->type);
1677 hdr->misc = bswap_16(hdr->misc);
1678 hdr->size = bswap_16(hdr->size);
ba21594c
ACM
1679}
1680
b424eba2
ACM
1681struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1682{
1fcb8768 1683 return machine__findnew_thread(&session->machines.host, -1, pid);
b424eba2
ACM
1684}
1685
b25756df
AH
1686/*
1687 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1688 * A single thread is created here to represent it, but in reality there is a
1689 * separate idle task per cpu, so there should be one 'struct thread' per cpu,
1690 * not just one. That causes problems for some tools, requiring workarounds. For
1691 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
1692 */
9d8b172f 1693int perf_session__register_idle_thread(struct perf_session *session)
06aae590 1694{
1fcb8768 1695 struct thread *thread;
9d8b172f 1696 int err = 0;
06aae590 1697
1fcb8768 1698 thread = machine__findnew_thread(&session->machines.host, 0, 0);
162f0bef 1699 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
06aae590 1700 pr_err("problem inserting idle task.\n");
9d8b172f 1701 err = -1;
06aae590
ACM
1702 }
1703
f3b3614a
HB
1704 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1705 pr_err("problem inserting idle task.\n");
1706 err = -1;
1707 }
1708
9d8b172f
MH
1709 /* machine__findnew_thread() got the thread, so put it */
1710 thread__put(thread);
1711 return err;
06aae590
ACM
1712}
1713
f06149c0
WN
1714static void
1715perf_session__warn_order(const struct perf_session *session)
1716{
1717 const struct ordered_events *oe = &session->ordered_events;
32dcd021 1718 struct evsel *evsel;
f06149c0
WN
1719 bool should_warn = true;
1720
1721 evlist__for_each_entry(session->evlist, evsel) {
1fc632ce 1722 if (evsel->core.attr.write_backward)
f06149c0
WN
1723 should_warn = false;
1724 }
1725
1726 if (!should_warn)
1727 return;
1728 if (oe->nr_unordered_events != 0)
1729 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1730}
1731
9870d780 1732static void perf_session__warn_about_errors(const struct perf_session *session)
11095994 1733{
9870d780 1734 const struct events_stats *stats = &session->evlist->stats;
9870d780
ACM
1735
1736 if (session->tool->lost == perf_event__process_lost &&
ccda068f 1737 stats->nr_events[PERF_RECORD_LOST] != 0) {
7b27509f
ACM
1738 ui__warning("Processed %d events and lost %d chunks!\n\n"
1739 "Check IO/CPU overload!\n\n",
ccda068f
ACM
1740 stats->nr_events[0],
1741 stats->nr_events[PERF_RECORD_LOST]);
11095994
ACM
1742 }
1743
c4937a91
KL
1744 if (session->tool->lost_samples == perf_event__process_lost_samples) {
1745 double drop_rate;
1746
1747 drop_rate = (double)stats->total_lost_samples /
1748 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1749 if (drop_rate > 0.05) {
41a43dac 1750 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
c4937a91
KL
1751 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1752 drop_rate * 100.0);
1753 }
1754 }
1755
a38f48e3
AH
1756 if (session->tool->aux == perf_event__process_aux &&
1757 stats->total_aux_lost != 0) {
1758 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1759 stats->total_aux_lost,
1760 stats->nr_events[PERF_RECORD_AUX]);
1761 }
1762
05a1f47e
AS
1763 if (session->tool->aux == perf_event__process_aux &&
1764 stats->total_aux_partial != 0) {
1765 bool vmm_exclusive = false;
1766
1767 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1768 &vmm_exclusive);
1769
1770 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1771 "Are you running a KVM guest in the background?%s\n\n",
1772 stats->total_aux_partial,
1773 stats->nr_events[PERF_RECORD_AUX],
1774 vmm_exclusive ?
1775 "\nReloading kvm_intel module with vmm_exclusive=0\n"
1776 "will reduce the gaps to only guest's timeslices." :
1777 "");
1778 }
1779
ccda068f 1780 if (stats->nr_unknown_events != 0) {
11095994
ACM
1781 ui__warning("Found %u unknown events!\n\n"
1782 "Is this an older tool processing a perf.data "
1783 "file generated by a more recent tool?\n\n"
1784 "If that is not the case, consider "
1785 "reporting to linux-kernel@vger.kernel.org.\n\n",
ccda068f 1786 stats->nr_unknown_events);
11095994
ACM
1787 }
1788
ccda068f 1789 if (stats->nr_unknown_id != 0) {
9e69c210 1790 ui__warning("%u samples with id not present in the header\n",
ccda068f 1791 stats->nr_unknown_id);
9e69c210
ACM
1792 }
1793
ccda068f 1794 if (stats->nr_invalid_chains != 0) {
75be989a
ACM
1795 ui__warning("Found invalid callchains!\n\n"
1796 "%u out of %u events were discarded for this reason.\n\n"
1797 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
ccda068f
ACM
1798 stats->nr_invalid_chains,
1799 stats->nr_events[PERF_RECORD_SAMPLE]);
75be989a 1800 }
0c095715 1801
ccda068f 1802 if (stats->nr_unprocessable_samples != 0) {
0c095715
JR
1803 ui__warning("%u unprocessable samples recorded.\n"
1804 "Do you have a KVM guest running and not using 'perf kvm'?\n",
ccda068f 1805 stats->nr_unprocessable_samples);
0c095715 1806 }
f61ff6c0 1807
f06149c0 1808 perf_session__warn_order(session);
85ed4729
AH
1809
1810 events_stats__auxtrace_error_warn(stats);
930e6fcd
KL
1811
1812 if (stats->nr_proc_map_timeout != 0) {
1813 ui__warning("%d map information files for pre-existing threads were\n"
1814 "not processed, if there are samples for addresses they\n"
1815 "will not be resolved, you may find out which are these\n"
1816 "threads by running with -v and redirecting the output\n"
9d9cad76
KL
1817 "to a file.\n"
1818 "The time limit to process proc map is too short?\n"
1819 "Increase it by --proc-map-timeout\n",
930e6fcd
KL
1820 stats->nr_proc_map_timeout);
1821 }
11095994
ACM
1822}
1823
a5499b37
AH
1824static int perf_session__flush_thread_stack(struct thread *thread,
1825 void *p __maybe_unused)
1826{
1827 return thread_stack__flush(thread);
1828}
1829
1830static int perf_session__flush_thread_stacks(struct perf_session *session)
1831{
1832 return machines__for_each_thread(&session->machines,
1833 perf_session__flush_thread_stack,
1834 NULL);
1835}
1836
8dc58101
TZ
1837volatile int session_done;
1838
cb62c6f1
AB
1839static int __perf_session__process_decomp_events(struct perf_session *session);
1840
b7b61cbe 1841static int __perf_session__process_pipe_events(struct perf_session *session)
8dc58101 1842{
fa713a4e 1843 struct ordered_events *oe = &session->ordered_events;
9870d780 1844 struct perf_tool *tool = session->tool;
8ceb41d7 1845 int fd = perf_data__fd(session->data);
444d2866
SE
1846 union perf_event *event;
1847 uint32_t size, cur_size = 0;
1848 void *buf = NULL;
d5652d86 1849 s64 skip = 0;
8dc58101 1850 u64 head;
727ebd54 1851 ssize_t err;
8dc58101
TZ
1852 void *p;
1853
45694aa7 1854 perf_tool__fill_defaults(tool);
8dc58101
TZ
1855
1856 head = 0;
444d2866
SE
1857 cur_size = sizeof(union perf_event);
1858
1859 buf = malloc(cur_size);
1860 if (!buf)
1861 return -errno;
1e0d4f02 1862 ordered_events__set_copy_on_queue(oe, true);
8dc58101 1863more:
444d2866 1864 event = buf;
cc9784bd 1865 err = readn(fd, event, sizeof(struct perf_event_header));
8dc58101
TZ
1866 if (err <= 0) {
1867 if (err == 0)
1868 goto done;
1869
1870 pr_err("failed to read event header\n");
1871 goto out_err;
1872 }
1873
316c7136 1874 if (session->header.needs_swap)
444d2866 1875 perf_event_header__bswap(&event->header);
8dc58101 1876
444d2866 1877 size = event->header.size;
27389d78
AH
1878 if (size < sizeof(struct perf_event_header)) {
1879 pr_err("bad event header size\n");
1880 goto out_err;
1881 }
8dc58101 1882
444d2866
SE
1883 if (size > cur_size) {
1884 void *new = realloc(buf, size);
1885 if (!new) {
1886 pr_err("failed to allocate memory to read event\n");
1887 goto out_err;
1888 }
1889 buf = new;
1890 cur_size = size;
1891 event = buf;
1892 }
1893 p = event;
8dc58101
TZ
1894 p += sizeof(struct perf_event_header);
1895
794e43b5 1896 if (size - sizeof(struct perf_event_header)) {
cc9784bd 1897 err = readn(fd, p, size - sizeof(struct perf_event_header));
794e43b5
TZ
1898 if (err <= 0) {
1899 if (err == 0) {
1900 pr_err("unexpected end of event stream\n");
1901 goto done;
1902 }
8dc58101 1903
794e43b5
TZ
1904 pr_err("failed to read event data\n");
1905 goto out_err;
1906 }
8dc58101
TZ
1907 }
1908
b7b61cbe 1909 if ((skip = perf_session__process_event(session, event, head)) < 0) {
9389a460 1910 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
444d2866 1911 head, event->header.size, event->header.type);
9389a460
JO
1912 err = -EINVAL;
1913 goto out_err;
8dc58101
TZ
1914 }
1915
1916 head += size;
1917
8dc58101
TZ
1918 if (skip > 0)
1919 head += skip;
1920
cb62c6f1
AB
1921 err = __perf_session__process_decomp_events(session);
1922 if (err)
1923 goto out_err;
1924
8dc58101
TZ
1925 if (!session_done())
1926 goto more;
1927done:
8c16b649 1928 /* do the final flush for ordered samples */
b7b61cbe 1929 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
c446870d
AH
1930 if (err)
1931 goto out_err;
1932 err = auxtrace__flush_events(session, tool);
a5499b37
AH
1933 if (err)
1934 goto out_err;
1935 err = perf_session__flush_thread_stacks(session);
8dc58101 1936out_err:
444d2866 1937 free(buf);
075ca1eb
JO
1938 if (!tool->no_warn)
1939 perf_session__warn_about_errors(session);
adc56ed1 1940 ordered_events__free(&session->ordered_events);
c446870d 1941 auxtrace__free_events(session);
8dc58101
TZ
1942 return err;
1943}
1944
998bedc8
FW
1945static union perf_event *
1946fetch_mmaped_event(struct perf_session *session,
1947 u64 head, size_t mmap_size, char *buf)
1948{
1949 union perf_event *event;
1950
1951 /*
1952 * Ensure we have enough space remaining to read
1953 * the size of the event in the header.
1954 */
1955 if (head + sizeof(event->header) > mmap_size)
1956 return NULL;
1957
1958 event = (union perf_event *)(buf + head);
1959
1960 if (session->header.needs_swap)
1961 perf_event_header__bswap(&event->header);
1962
27389d78
AH
1963 if (head + event->header.size > mmap_size) {
1964 /* We're not fetching the event so swap back again */
1965 if (session->header.needs_swap)
1966 perf_event_header__bswap(&event->header);
57fc032a
ACM
1967 pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx: fuzzed perf.data?\n",
1968 __func__, head, event->header.size, mmap_size);
1969 return ERR_PTR(-EINVAL);
27389d78 1970 }
998bedc8
FW
1971
1972 return event;
1973}
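/*
 * Note (editor's addition): fetch_mmaped_event() has three outcomes that a
 * caller must tell apart, exactly as reader__process_events() below does:
 *
 *	event = fetch_mmaped_event(session, head, mmap_size, buf);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);	- corrupt/fuzzed header, abort
 *	if (!event)
 *		goto remap;		- event crosses the end of the window
 *
 * Otherwise a complete event is available, with its header already
 * byte-swapped when the data file needs swapping.
 */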
1974
cb62c6f1
AB
1975static int __perf_session__process_decomp_events(struct perf_session *session)
1976{
1977 s64 skip;
1978 u64 size, file_pos = 0;
1979 struct decomp *decomp = session->decomp_last;
1980
1981 if (!decomp)
1982 return 0;
1983
1984 while (decomp->head < decomp->size && !session_done()) {
1985 union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
1986
57fc032a
ACM
1987 if (IS_ERR(event))
1988 return PTR_ERR(event);
1989
cb62c6f1
AB
1990 if (!event)
1991 break;
1992
1993 size = event->header.size;
1994
1995 if (size < sizeof(struct perf_event_header) ||
1996 (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1997 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1998 decomp->file_pos + decomp->head, event->header.size, event->header.type);
1999 return -EINVAL;
2000 }
2001
2002 if (skip)
2003 size += skip;
2004
2005 decomp->head += size;
2006 }
2007
2008 return 0;
2009}
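/*
 * Note (editor's addition): only the newest decompressed buffer
 * (session->decomp_last) is drained here. decomp->head advances past each
 * delivered event, and fetch_mmaped_event() returning NULL for an event that
 * is only partially present at the end of the buffer simply ends the loop,
 * leaving the remainder to be carried into the next compressed chunk.
 */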
2010
35d48ddf
DM
2011/*
2012 * On 64bit we can mmap the data file in one go. No need for tiny mmap
2013 * slices. On 32bit we use 32MB.
2014 */
2015#if BITS_PER_LONG == 64
2016#define MMAP_SIZE ULLONG_MAX
2017#define NUM_MMAPS 1
2018#else
2019#define MMAP_SIZE (32 * 1024 * 1024ULL)
2020#define NUM_MMAPS 128
2021#endif
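/*
 * Note (editor's addition): NUM_MMAPS must stay a power of two, because
 * reader__process_events() below cycles through the ring of mappings with
 *
 *	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
 *
 * which only wraps around correctly for power-of-two array sizes.
 */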
2022
e51f8061
JO
2023struct reader;
2024
2025typedef s64 (*reader_cb_t)(struct perf_session *session,
2026 union perf_event *event,
2027 u64 file_offset);
2028
82715eb1 2029struct reader {
e51f8061
JO
2030 int fd;
2031 u64 data_size;
2032 u64 data_offset;
2033 reader_cb_t process;
82715eb1
JO
2034};
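/*
 * Note (editor's addition): 'process' is the per-event callback invoked by
 * reader__process_events(); __perf_session__process_events() below wires it
 * to process_simple(), which just forwards to perf_session__process_event().
 */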
2035
3c7b67b2
JO
2036static int
2037reader__process_events(struct reader *rd, struct perf_session *session,
2038 struct ui_progress *prog)
06aae590 2039{
3c7b67b2 2040 u64 data_size = rd->data_size;
d5652d86 2041 u64 head, page_offset, file_offset, file_pos, size;
3c7b67b2 2042 int err = 0, mmap_prot, mmap_flags, map_idx = 0;
0c1fe6b2 2043 size_t mmap_size;
35d48ddf 2044 char *buf, *mmaps[NUM_MMAPS];
8115d60c 2045 union perf_event *event;
d5652d86 2046 s64 skip;
0331ee0c 2047
3c7b67b2 2048 page_offset = page_size * (rd->data_offset / page_size);
0331ee0c 2049 file_offset = page_offset;
3c7b67b2 2050 head = rd->data_offset - page_offset;
06aae590 2051
3c7b67b2 2052 ui_progress__init_size(prog, data_size, "Processing events...");
381c02f6 2053
3c7b67b2 2054 data_size += rd->data_offset;
55b44629 2055
35d48ddf 2056 mmap_size = MMAP_SIZE;
4f5a473d
JO
2057 if (mmap_size > data_size) {
2058 mmap_size = data_size;
919d86d3
AH
2059 session->one_mmap = true;
2060 }
55b44629 2061
fe174207
TG
2062 memset(mmaps, 0, sizeof(mmaps));
2063
ba21594c
ACM
2064 mmap_prot = PROT_READ;
2065 mmap_flags = MAP_SHARED;
2066
0331ee0c 2067 if (session->header.needs_swap) {
ba21594c
ACM
2068 mmap_prot |= PROT_WRITE;
2069 mmap_flags = MAP_PRIVATE;
2070 }
06aae590 2071remap:
3c7b67b2 2072 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
55b44629 2073 file_offset);
06aae590
ACM
2074 if (buf == MAP_FAILED) {
2075 pr_err("failed to mmap file\n");
2076 err = -errno;
3c7b67b2 2077 goto out;
06aae590 2078 }
fe174207
TG
2079 mmaps[map_idx] = buf;
2080 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
d6513281 2081 file_pos = file_offset + head;
919d86d3
AH
2082 if (session->one_mmap) {
2083 session->one_mmap_addr = buf;
2084 session->one_mmap_offset = file_offset;
2085 }
06aae590
ACM
2086
2087more:
998bedc8 2088 event = fetch_mmaped_event(session, head, mmap_size, buf);
57fc032a
ACM
2089 if (IS_ERR(event))
2090 return PTR_ERR(event);
2091
998bedc8 2092 if (!event) {
fe174207
TG
2093 if (mmaps[map_idx]) {
2094 munmap(mmaps[map_idx], mmap_size);
2095 mmaps[map_idx] = NULL;
2096 }
06aae590 2097
0331ee0c
TG
2098 page_offset = page_size * (head / page_size);
2099 file_offset += page_offset;
2100 head -= page_offset;
06aae590
ACM
2101 goto remap;
2102 }
2103
2104 size = event->header.size;
2105
167e418f
TR
2106 skip = -EINVAL;
2107
27389d78 2108 if (size < sizeof(struct perf_event_header) ||
e51f8061 2109 (skip = rd->process(session, event, file_pos)) < 0) {
167e418f 2110 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
9389a460 2111 file_offset + head, event->header.size,
167e418f
TR
2112 event->header.type, strerror(-skip));
2113 err = skip;
3c7b67b2 2114 goto out;
06aae590
ACM
2115 }
2116
6f917c70
AH
2117 if (skip)
2118 size += skip;
2119
06aae590 2120 head += size;
d6513281 2121 file_pos += size;
06aae590 2122
cb62c6f1
AB
2123 err = __perf_session__process_decomp_events(session);
2124 if (err)
2125 goto out;
2126
3c7b67b2 2127 ui_progress__update(prog, size);
55b44629 2128
33e940a2 2129 if (session_done())
8c16b649 2130 goto out;
33e940a2 2131
4f5a473d 2132 if (file_pos < data_size)
06aae590 2133 goto more;
d6513281 2134
8c16b649 2135out:
3c7b67b2
JO
2136 return err;
2137}
2138
e51f8061
JO
2139static s64 process_simple(struct perf_session *session,
2140 union perf_event *event,
2141 u64 file_offset)
2142{
2143 return perf_session__process_event(session, event, file_offset);
2144}
2145
3c7b67b2
JO
2146static int __perf_session__process_events(struct perf_session *session)
2147{
2148 struct reader rd = {
2149 .fd = perf_data__fd(session->data),
2150 .data_size = session->header.data_size,
2151 .data_offset = session->header.data_offset,
e51f8061 2152 .process = process_simple,
3c7b67b2
JO
2153 };
2154 struct ordered_events *oe = &session->ordered_events;
2155 struct perf_tool *tool = session->tool;
2156 struct ui_progress prog;
2157 int err;
2158
2159 perf_tool__fill_defaults(tool);
2160
2161 if (rd.data_size == 0)
2162 return -1;
2163
2164 ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2165
2166 err = reader__process_events(&rd, session, &prog);
2167 if (err)
2168 goto out_err;
c61e52ee 2169 /* do the final flush for ordered samples */
b7b61cbe 2170 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
c446870d
AH
2171 if (err)
2172 goto out_err;
2173 err = auxtrace__flush_events(session, tool);
a5499b37
AH
2174 if (err)
2175 goto out_err;
2176 err = perf_session__flush_thread_stacks(session);
06aae590 2177out_err:
a5580f3e 2178 ui_progress__finish();
075ca1eb
JO
2179 if (!tool->no_warn)
2180 perf_session__warn_about_errors(session);
b26dc730
WN
2181 /*
2182 * We may be switching perf.data output, so make
2183 * ordered_events reusable.
2184 */
2185 ordered_events__reinit(&session->ordered_events);
c446870d 2186 auxtrace__free_events(session);
919d86d3 2187 session->one_mmap = false;
06aae590
ACM
2188 return err;
2189}
27295592 2190
b7b61cbe 2191int perf_session__process_events(struct perf_session *session)
6122e4e4 2192{
9d8b172f 2193 if (perf_session__register_idle_thread(session) < 0)
6122e4e4
ACM
2194 return -ENOMEM;
2195
7ba4da10
JO
2196 if (perf_data__is_pipe(session->data))
2197 return __perf_session__process_pipe_events(session);
88ca895d 2198
7ba4da10 2199 return __perf_session__process_events(session);
6122e4e4
ACM
2200}
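/*
 * Illustrative sketch (editor's addition): a perf built-in typically drives
 * the code above roughly as follows. The callback names and the session
 * setup details are simplified assumptions; see builtin-report.c for a real
 * caller:
 *
 *	struct perf_tool tool = {
 *		.sample		= process_sample_event,
 *		.mmap2		= perf_event__process_mmap2,
 *		.comm		= perf_event__process_comm,
 *		.ordered_events	= true,
 *	};
 *	struct perf_session *session = perf_session__new(data, false, &tool);
 *
 *	err = perf_session__process_events(session);
 *	perf_session__delete(session);
 */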
2201
7f3be652 2202bool perf_session__has_traces(struct perf_session *session, const char *msg)
27295592 2203{
32dcd021 2204 struct evsel *evsel;
93ea01c2 2205
e5cadb93 2206 evlist__for_each_entry(session->evlist, evsel) {
1fc632ce 2207 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
93ea01c2 2208 return true;
27295592
ACM
2209 }
2210
93ea01c2
DA
2211 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2212 return false;
27295592 2213}
56b03f3c 2214
3183f8ca 2215int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
56b03f3c
ACM
2216{
2217 char *bracket;
a1645ce1 2218 struct ref_reloc_sym *ref;
3183f8ca 2219 struct kmap *kmap;
a1645ce1
ZY
2220
2221 ref = zalloc(sizeof(struct ref_reloc_sym));
2222 if (ref == NULL)
2223 return -ENOMEM;
56b03f3c 2224
a1645ce1
ZY
2225 ref->name = strdup(symbol_name);
2226 if (ref->name == NULL) {
2227 free(ref);
56b03f3c 2228 return -ENOMEM;
a1645ce1 2229 }
56b03f3c 2230
a1645ce1 2231 bracket = strchr(ref->name, ']');
56b03f3c
ACM
2232 if (bracket)
2233 *bracket = '\0';
2234
a1645ce1 2235 ref->addr = addr;
9de89fe7 2236
3183f8ca
ACM
2237 kmap = map__kmap(map);
2238 if (kmap)
a1645ce1 2239 kmap->ref_reloc_sym = ref;
9de89fe7 2240
56b03f3c
ACM
2241 return 0;
2242}
1f626bc3 2243
316c7136 2244size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
1f626bc3 2245{
316c7136 2246 return machines__fprintf_dsos(&session->machines, fp);
1f626bc3 2247}
f869097e 2248
316c7136 2249size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
417c2ff6 2250 bool (skip)(struct dso *dso, int parm), int parm)
f869097e 2251{
316c7136 2252 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
f869097e 2253}
e248de33
ACM
2254
2255size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2256{
c446870d
AH
2257 size_t ret;
2258 const char *msg = "";
2259
2260 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2261 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2262
fe692ac8 2263 ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
e248de33 2264
75be989a 2265 ret += events_stats__fprintf(&session->evlist->stats, fp);
e248de33
ACM
2266 return ret;
2267}
c0230b2b 2268
b424eba2
ACM
2269size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2270{
2271 /*
2272 * FIXME: Here we have to actually print all the machines in this
2273 * session, not just the host...
2274 */
876650e6 2275 return machine__fprintf(&session->machines.host, fp);
b424eba2
ACM
2276}
2277
32dcd021 2278struct evsel *perf_session__find_first_evtype(struct perf_session *session,
9cbdb702
DA
2279 unsigned int type)
2280{
32dcd021 2281 struct evsel *pos;
9cbdb702 2282
e5cadb93 2283 evlist__for_each_entry(session->evlist, pos) {
1fc632ce 2284 if (pos->core.attr.type == type)
9cbdb702
DA
2285 return pos;
2286 }
2287 return NULL;
2288}
2289
5d67be97
AB
2290int perf_session__cpu_bitmap(struct perf_session *session,
2291 const char *cpu_list, unsigned long *cpu_bitmap)
2292{
8bac41cb 2293 int i, err = -1;
f854839b 2294 struct perf_cpu_map *map;
5d67be97
AB
2295
2296 for (i = 0; i < PERF_TYPE_MAX; ++i) {
32dcd021 2297 struct evsel *evsel;
5d67be97
AB
2298
2299 evsel = perf_session__find_first_evtype(session, i);
2300 if (!evsel)
2301 continue;
2302
1fc632ce 2303 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
5d67be97 2304 pr_err("File does not contain CPU events. "
30795467 2305 "Remove -C option to proceed.\n");
5d67be97
AB
2306 return -1;
2307 }
2308 }
2309
9c3516d1 2310 map = perf_cpu_map__new(cpu_list);
47fbe53b
DA
2311 if (map == NULL) {
2312 pr_err("Invalid cpu_list\n");
2313 return -1;
2314 }
5d67be97
AB
2315
2316 for (i = 0; i < map->nr; i++) {
2317 int cpu = map->map[i];
2318
2319 if (cpu >= MAX_NR_CPUS) {
2320 pr_err("Requested CPU %d too large. "
2321 "Consider raising MAX_NR_CPUS\n", cpu);
8bac41cb 2322 goto out_delete_map;
5d67be97
AB
2323 }
2324
2325 set_bit(cpu, cpu_bitmap);
2326 }
2327
8bac41cb
SF
2328 err = 0;
2329
2330out_delete_map:
38f01d8d 2331 perf_cpu_map__put(map);
8bac41cb 2332 return err;
5d67be97 2333}
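/*
 * Illustrative sketch (editor's addition): callers such as builtin-script
 * pass a bitmap sized for MAX_NR_CPUS together with the user's -C list,
 * roughly like this (variable names are assumptions for the example):
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (cpu_list &&
 *	    perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap) < 0)
 *		return -1;
 *
 * Samples are then filtered with test_bit(sample->cpu, cpu_bitmap).
 */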
fbe96f29
SE
2334
2335void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2336 bool full)
2337{
fbe96f29
SE
2338 if (session == NULL || fp == NULL)
2339 return;
2340
fbe96f29 2341 fprintf(fp, "# ========\n");
fbe96f29
SE
2342 perf_header__fprintf_info(session, fp, full);
2343 fprintf(fp, "# ========\n#\n");
2344}
da378962
ACM
2345
2346
2347int __perf_session__set_tracepoints_handlers(struct perf_session *session,
32dcd021 2348 const struct evsel_str_handler *assocs,
da378962
ACM
2349 size_t nr_assocs)
2350{
32dcd021 2351 struct evsel *evsel;
da378962
ACM
2352 size_t i;
2353 int err;
2354
2355 for (i = 0; i < nr_assocs; i++) {
ccf53eac
ACM
2356 /*
2357 * Adding a handler for an event not in the session,
2358 * just ignore it.
2359 */
2360 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
da378962 2361 if (evsel == NULL)
ccf53eac 2362 continue;
da378962
ACM
2363
2364 err = -EEXIST;
744a9719 2365 if (evsel->handler != NULL)
ccf53eac 2366 goto out;
744a9719 2367 evsel->handler = assocs[i].handler;
da378962
ACM
2368 }
2369
2370 err = 0;
2371out:
2372 return err;
da378962 2373}
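/*
 * Illustrative sketch (editor's addition): tools normally call this through
 * the perf_session__set_tracepoints_handlers() wrapper (see session.h), with
 * a table pairing tracepoint names and handlers; the handler name below is
 * an assumption for the example:
 *
 *	const struct evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch_event, },
 *	};
 *
 *	if (perf_session__set_tracepoints_handlers(session, handlers))
 *		return -1;
 *
 * As the loop above shows, handlers for tracepoints that are not present in
 * the session's evlist are silently skipped.
 */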
3c659eed 2374
89f1688a
JO
2375int perf_event__process_id_index(struct perf_session *session,
2376 union perf_event *event)
3c659eed 2377{
63503dba 2378 struct evlist *evlist = session->evlist;
3c659eed
AH
2379 struct id_index_event *ie = &event->id_index;
2380 size_t i, nr, max_nr;
2381
2382 max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2383 sizeof(struct id_index_entry);
2384 nr = ie->nr;
2385 if (nr > max_nr)
2386 return -EINVAL;
2387
2388 if (dump_trace)
2389 fprintf(stdout, " nr: %zu\n", nr);
2390
2391 for (i = 0; i < nr; i++) {
2392 struct id_index_entry *e = &ie->entries[i];
2393 struct perf_sample_id *sid;
2394
2395 if (dump_trace) {
2396 fprintf(stdout, " ... id: %"PRIu64, e->id);
2397 fprintf(stdout, " idx: %"PRIu64, e->idx);
2398 fprintf(stdout, " cpu: %"PRId64, e->cpu);
2399 fprintf(stdout, " tid: %"PRId64"\n", e->tid);
2400 }
2401
2402 sid = perf_evlist__id2sid(evlist, e->id);
2403 if (!sid)
2404 return -ENOENT;
2405 sid->idx = e->idx;
2406 sid->cpu = e->cpu;
2407 sid->tid = e->tid;
2408 }
2409 return 0;
2410}
2411
2412int perf_event__synthesize_id_index(struct perf_tool *tool,
2413 perf_event__handler_t process,
63503dba 2414 struct evlist *evlist,
3c659eed
AH
2415 struct machine *machine)
2416{
2417 union perf_event *ev;
32dcd021 2418 struct evsel *evsel;
3c659eed
AH
2419 size_t nr = 0, i = 0, sz, max_nr, n;
2420 int err;
2421
2422 pr_debug2("Synthesizing id index\n");
2423
2424 max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2425 sizeof(struct id_index_entry);
2426
e5cadb93 2427 evlist__for_each_entry(evlist, evsel)
3c659eed
AH
2428 nr += evsel->ids;
2429
2430 n = nr > max_nr ? max_nr : nr;
2431 sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2432 ev = zalloc(sz);
2433 if (!ev)
2434 return -ENOMEM;
2435
2436 ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2437 ev->id_index.header.size = sz;
2438 ev->id_index.nr = n;
2439
e5cadb93 2440 evlist__for_each_entry(evlist, evsel) {
3c659eed
AH
2441 u32 j;
2442
2443 for (j = 0; j < evsel->ids; j++) {
2444 struct id_index_entry *e;
2445 struct perf_sample_id *sid;
2446
2447 if (i >= n) {
2448 err = process(tool, ev, NULL, machine);
2449 if (err)
2450 goto out_err;
2451 nr -= n;
2452 i = 0;
2453 }
2454
2455 e = &ev->id_index.entries[i++];
2456
2457 e->id = evsel->id[j];
2458
2459 sid = perf_evlist__id2sid(evlist, e->id);
2460 if (!sid) {
2461 free(ev);
2462 return -ENOENT;
2463 }
2464
2465 e->idx = sid->idx;
2466 e->cpu = sid->cpu;
2467 e->tid = sid->tid;
2468 }
2469 }
2470
2471 sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2472 ev->id_index.header.size = sz;
2473 ev->id_index.nr = nr;
2474
2475 err = process(tool, ev, NULL, machine);
2476out_err:
2477 free(ev);
2478
2479 return err;
2480}