/* tools/perf/util/event.c */
#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"

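/*
 * Synthesize a PERF_RECORD_COMM event for an already-running task by parsing
 * the Name: and Tgid: lines of /proc/<pid>/status.  With 'full' set, one
 * event is emitted per thread listed under /proc/<pid>/task.  Returns the
 * tgid on success, 0 when the task exited underneath us, or -1 on a
 * malformed status file.
 */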
static pid_t event__synthesize_comm(pid_t pid, int full,
				    event__handler_t process,
				    struct perf_session *session)
{
	event_t ev;
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&ev.comm, 0, sizeof(ev.comm));
	while (!ev.comm.comm[0] || !ev.comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL)
			goto out_failure;

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(ev.comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = ev.comm.pid = atoi(tgids);
		}
	}

	ev.comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);

	if (!full) {
		ev.comm.tid = pid;

		process(&ev, session);
		goto out_fclose;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		ev.comm.tid = pid;

		process(&ev, session);
	}
	closedir(tasks);

out_fclose:
	fclose(fp);
	return tgid;

out_failure:
	pr_warning("couldn't get COMM and tgid, malformed %s\n", filename);
	return -1;
}

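/*
 * Walk /proc/<pid>/maps and emit a PERF_RECORD_MMAP event for every
 * executable mapping, so that samples from this already-running task can
 * later be resolved to the DSO they hit.
 */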
static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
					 event__handler_t process,
					 struct perf_session *session)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		event_t ev = {
			.header = {
				.type = PERF_RECORD_MMAP,
				/*
				 * Just like the kernel, see __perf_event_mmap
				 * in kernel/perf_event.c
				 */
				.misc = PERF_RECORD_MISC_USER,
			},
		};
		int n;
		size_t size;
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &ev.mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &ev.mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			u64 vm_pgoff;
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &vm_pgoff);
			/*
			 * The offset field in /proc/<pid>/maps is already in
			 * bytes, so use it as the pgoff directly.
			 */
			if (n >= 0)
				ev.mmap.pgoff = vm_pgoff;
			else
				ev.mmap.pgoff = 0;

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(ev.mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			ev.mmap.len -= ev.mmap.start;
			ev.mmap.header.size = (sizeof(ev.mmap) -
					       (sizeof(ev.mmap.filename) - size));
			ev.mmap.pid = tgid;
			ev.mmap.tid = pid;

			process(&ev, session);
		}
	}

	fclose(fp);
	return 0;
}

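/*
 * Emit a PERF_RECORD_MMAP event for every module map already present in
 * kerninfo's MAP__FUNCTION tree, tagged as host or guest kernel via the
 * header misc field.
 */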
int event__synthesize_modules(event__handler_t process,
			      struct perf_session *session,
			      struct kernel_info *kerninfo)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &kerninfo->kmaps;
	u16 misc;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (is_host_kernel(kerninfo))
		misc = PERF_RECORD_MISC_KERNEL;
	else
		misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		event_t ev;
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		memset(&ev, 0, sizeof(ev));
		ev.mmap.header.misc = misc;
		ev.mmap.header.type = PERF_RECORD_MMAP;
		ev.mmap.header.size = (sizeof(ev.mmap) -
				       (sizeof(ev.mmap.filename) - size));
		ev.mmap.start = pos->start;
		ev.mmap.len = pos->end - pos->start;
		ev.mmap.pid = kerninfo->pid;

		memcpy(ev.mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(&ev, session);
	}

	return 0;
}

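/*
 * Synthesize the COMM and MMAP events for a single thread group: first the
 * comm events (one per thread), then the mmap events for its address space.
 */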
int event__synthesize_thread(pid_t pid, event__handler_t process,
			     struct perf_session *session)
{
	pid_t tgid = event__synthesize_comm(pid, 1, process, session);
	if (tgid == -1)
		return -1;
	return event__synthesize_mmap_events(pid, tgid, process, session);
}

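/*
 * Synthesize COMM and MMAP events for every numerical entry in /proc, i.e.
 * for all tasks alive at this point.
 */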
void event__synthesize_threads(event__handler_t process,
			       struct perf_session *session)
{
	DIR *proc;
	struct dirent dirent, *next;

	proc = opendir("/proc");
	if (proc == NULL)
		return;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		event__synthesize_thread(pid, process, session);
	}

	closedir(proc);
}

struct process_symbol_args {
	const char *name;
	u64 start;
};

static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where
	 * "_text" is an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

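/*
 * Synthesize the MMAP event for the kernel itself: look up symbol_name in
 * kallsyms, record its address in the event's pgoff and its name in the
 * event's filename (appended to the kernel mmap name), and take start/length
 * from the vmlinux function map.
 */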
int event__synthesize_kernel_mmap(event__handler_t process,
				  struct perf_session *session,
				  struct kernel_info *kerninfo,
				  const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;

	event_t ev = {
		.header = {
			.type = PERF_RECORD_MMAP,
		},
	};
	/*
	 * We should get this from /sys/kernel/sections/.text, but until that
	 * is available use this, and once it is, keep this around as a
	 * fallback for older kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };

	mmap_name = kern_mmap_name(kerninfo, name_buff);
	if (is_host_kernel(kerninfo)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		ev.header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (is_default_guest(kerninfo))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", kerninfo->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
		return -ENOENT;

	map = kerninfo->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	ev.mmap.header.size = (sizeof(ev.mmap) -
			       (sizeof(ev.mmap.filename) - size));
	ev.mmap.pgoff = args.start;
	ev.mmap.start = map->start;
	ev.mmap.len = map->end - ev.mmap.start;
	ev.mmap.pid = kerninfo->pid;

	return process(&ev, session);
}

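/*
 * Column-width bookkeeping: widen the comm/thread report columns whenever a
 * newly seen comm is longer than anything recorded so far.
 */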
static void thread__comm_adjust(struct thread *self)
{
	char *comm = self->comm;

	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.comm_list ||
	     strlist__has_entry(symbol_conf.comm_list, comm))) {
		unsigned int slen = strlen(comm);

		if (slen > comms__col_width) {
			comms__col_width = slen;
			threads__col_width = slen + 6;
		}
	}
}

static int thread__set_comm_adjust(struct thread *self, const char *comm)
{
	int ret = thread__set_comm(self, comm);

	if (ret)
		return ret;

	thread__comm_adjust(self);

	return 0;
}

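/* Handle PERF_RECORD_COMM: find or create the thread and set its comm. */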
int event__process_comm(event_t *self, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->comm.pid);

	dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid);

	if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int event__process_lost(event_t *self, struct perf_session *session)
{
	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
	session->events_stats.lost += self->lost.lost;
	return 0;
}

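/*
 * Set the kernel function map boundaries from the synthesized kernel MMAP
 * event, guarding against zero-sized events found in old perf.data files.
 */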
static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
{
	maps[MAP__FUNCTION]->start = self->mmap.start;
	maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0UL;
}

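/*
 * Handle MMAP events carrying kernel cpumode: module mappings (paths or
 * [name] entries) become new module maps, while the kernel mapping itself
 * creates the kernel maps and records the ref reloc symbol taken from the
 * event's pgoff.
 */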
static int event__process_kernel_mmap(event_t *self,
				      struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct kernel_info *kerninfo;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	kerninfo = kerninfo__findnew(&session->kerninfo_root, self->mmap.pid);
	if (!kerninfo) {
		pr_err("Can't find id %d's kerninfo\n", self->mmap.pid);
		goto out_problem;
	}

	kern_mmap_name(kerninfo, kmmap_prefix);
	if (is_host_kernel(kerninfo))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(self->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (self->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && self->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (self->mmap.filename[0] == '/') {
			name = strrchr(self->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, self->mmap.filename);

		map = map_groups__new_module(&kerninfo->kmaps,
					     self->mmap.start,
					     self->mmap.filename,
					     kerninfo);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->end = map->start + self->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (self->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&kerninfo->dsos__kernel,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__map_groups__create_kernel_maps(&kerninfo->kmaps,
				kerninfo->vmlinux_maps, kernel) < 0)
			goto out_problem;

		event_set_kernel_mmap_len(kerninfo->vmlinux_maps, self);
		perf_session__set_kallsyms_ref_reloc_sym(kerninfo->vmlinux_maps,
							 symbol_name,
							 self->mmap.pgoff);
		if (is_default_guest(kerninfo)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel,
				  kerninfo->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

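/*
 * Handle PERF_RECORD_MMAP: kernel and guest-kernel events are routed to
 * event__process_kernel_mmap(), user space events become a new map inserted
 * into the owning thread.
 */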
int event__process_mmap(event_t *self, struct perf_session *session)
{
	struct kernel_info *kerninfo;
	struct thread *thread;
	struct map *map;
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
		    self->mmap.pid, self->mmap.tid, self->mmap.start,
		    self->mmap.len, self->mmap.pgoff, self->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = event__process_kernel_mmap(self, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = perf_session__findnew(session, self->mmap.pid);
	kerninfo = kerninfo__findhost(&session->kerninfo_root);
	map = map__new(&kerninfo->dsos__user, self->mmap.start,
		       self->mmap.len, self->mmap.pgoff,
		       self->mmap.pid, self->mmap.filename,
		       MAP__FUNCTION, session->cwd, session->cwdlen);

	if (thread == NULL || map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

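/*
 * Handle PERF_RECORD_FORK/PERF_RECORD_EXIT: on fork, link the child thread
 * to its parent; thread clones sharing the parent's PID and exit events need
 * no further work.
 */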
int event__process_task(event_t *self, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->fork.pid);
	struct thread *parent = perf_session__findnew(session, self->fork.ppid);

	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
		    self->fork.ppid, self->fork.ptid);
	/*
	 * A thread clone will have the same PID for both parent and child.
	 */
	if (thread == parent)
		return 0;

	if (self->header.type == PERF_RECORD_EXIT)
		return 0;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

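/*
 * Resolve an address to a map: pick the host or guest map group from the
 * sample's cpumode, fall back to the kernel maps for negative addresses
 * (vsyscall/vdso), and mark the sample filtered when its origin (host or
 * guest) is not of interest.
 */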
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct kernel_info *kerninfo = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		kerninfo = kerninfo__findhost(&session->kerninfo_root);
		mg = &kerninfo->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		kerninfo = kerninfo__findhost(&session->kerninfo_root);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		kerninfo = kerninfo__find(&session->kerninfo_root, pid);
		if (!kerninfo) {
			al->map = NULL;
			return;
		}
		mg = &kerninfo->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support it later.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now let's use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    kerninfo &&
		    mg != &kerninfo->kmaps) {
			mg = &kerninfo->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

static void dso__calc_col_width(struct dso *self)
{
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		unsigned int slen = strlen(self->name);
		if (slen > dsos__col_width)
			dsos__col_width = slen;
	}

	self->slen_calculated = 1;
}

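/*
 * Resolve a sample to its thread, map and symbol, applying the comm, dso and
 * symbol filter lists; filtered samples are flagged in al->filtered rather
 * than dropped.
 */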
int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, symbol_filter_t filter)
{
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, self->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}

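/*
 * Decode the variable part of a sample record into struct sample_data,
 * consuming the fields in the order dictated by the event's sample_type bits.
 */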
int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
{
	u64 *array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}