tools/perf/builtin-kvm.c
1#include "builtin.h"
2#include "perf.h"
3
4#include "util/evsel.h"
5#include "util/evlist.h"
6#include "util/util.h"
7#include "util/cache.h"
8#include "util/symbol.h"
9#include "util/thread.h"
10#include "util/header.h"
11#include "util/session.h"
12#include "util/intlist.h"
13#include "util/parse-options.h"
14#include "util/trace-event.h"
15#include "util/debug.h"
16#include <api/fs/debugfs.h>
17#include "util/tool.h"
18#include "util/stat.h"
19#include "util/top.h"
20#include "util/data.h"
21
22#include <sys/prctl.h>
23#ifdef HAVE_TIMERFD_SUPPORT
24#include <sys/timerfd.h>
25#endif
26
27#include <termios.h>
28#include <semaphore.h>
29#include <pthread.h>
30#include <math.h>
31
32#ifdef HAVE_KVM_STAT_SUPPORT
33#include <asm/kvm_perf.h>
34
35struct event_key {
36 #define INVALID_KEY (~0ULL)
37 u64 key;
38 int info;
39};
40
41struct kvm_event_stats {
42 u64 time;
43 struct stats stats;
44};
45
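/*
 * One kvm_event per key (exit reason, MMIO gpa, or io port): it lives on a
 * kvm_events_cache hash chain, accumulates aggregate stats in 'total' plus
 * an on-demand per-vcpu array, and is linked into the result rb-tree for output.
 */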
46struct kvm_event {
47 struct list_head hash_entry;
48 struct rb_node rb;
49
50 struct event_key key;
51
52 struct kvm_event_stats total;
53
54 #define DEFAULT_VCPU_NUM 8
55 int max_vcpu;
56 struct kvm_event_stats *vcpu;
57};
58
59typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
60
61struct kvm_event_key {
62 const char *name;
63 key_cmp_fun key;
64};
65
66
67struct perf_kvm_stat;
68
69struct kvm_events_ops {
70 bool (*is_begin_event)(struct perf_evsel *evsel,
71 struct perf_sample *sample,
72 struct event_key *key);
73 bool (*is_end_event)(struct perf_evsel *evsel,
74 struct perf_sample *sample, struct event_key *key);
75 void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
76 char *decode);
77 const char *name;
78};
79
80struct exit_reasons_table {
81 unsigned long exit_code;
82 const char *reason;
83};
84
85#define EVENTS_BITS 12
86#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS)
87
88struct perf_kvm_stat {
89 struct perf_tool tool;
90 struct record_opts opts;
91 struct perf_evlist *evlist;
92 struct perf_session *session;
93
94 const char *file_name;
95 const char *report_event;
96 const char *sort_key;
97 int trace_vcpu;
98
99 struct exit_reasons_table *exit_reasons;
100 const char *exit_reasons_isa;
101
102 struct kvm_events_ops *events_ops;
103 key_cmp_fun compare;
104 struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
105
106 u64 total_time;
107 u64 total_count;
108 u64 lost_events;
109 u64 duration;
110
111 const char *pid_str;
112 struct intlist *pid_list;
113
114 struct rb_root result;
115
116 int timerfd;
117 unsigned int display_time;
118 bool live;
119};
120
121
122static void exit_event_get_key(struct perf_evsel *evsel,
123 struct perf_sample *sample,
124 struct event_key *key)
125{
126 key->info = 0;
127 key->key = perf_evsel__intval(evsel, sample, KVM_EXIT_REASON);
128}
129
130static bool kvm_exit_event(struct perf_evsel *evsel)
131{
132 return !strcmp(evsel->name, KVM_EXIT_TRACE);
133}
134
135static bool exit_event_begin(struct perf_evsel *evsel,
136 struct perf_sample *sample, struct event_key *key)
137{
138 if (kvm_exit_event(evsel)) {
139 exit_event_get_key(evsel, sample, key);
140 return true;
141 }
142
143 return false;
144}
145
146static bool kvm_entry_event(struct perf_evsel *evsel)
147{
148 return !strcmp(evsel->name, KVM_ENTRY_TRACE);
149}
150
151static bool exit_event_end(struct perf_evsel *evsel,
152 struct perf_sample *sample __maybe_unused,
153 struct event_key *key __maybe_unused)
154{
155 return kvm_entry_event(evsel);
156}
157
158#define define_exit_reasons_table(name, symbols) \
159 static struct exit_reasons_table name[] = { \
160 symbols, { -1, NULL } \
161 }
162
163define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
164define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
165
166static const char *get_exit_reason(struct perf_kvm_stat *kvm,
167 struct exit_reasons_table *tbl,
168 u64 exit_code)
169{
170 while (tbl->reason != NULL) {
171 if (tbl->exit_code == exit_code)
172 return tbl->reason;
173 tbl++;
174 }
175
176 pr_err("unknown kvm exit code:%lld on %s\n",
177 (unsigned long long)exit_code, kvm->exit_reasons_isa);
178 return "UNKNOWN";
179}
180
181static void exit_event_decode_key(struct perf_kvm_stat *kvm,
182 struct event_key *key,
183 char *decode)
184{
185 const char *exit_reason = get_exit_reason(kvm, kvm->exit_reasons,
186 key->key);
187
188 scnprintf(decode, DECODE_STR_LEN, "%s", exit_reason);
189}
190
191static struct kvm_events_ops exit_events = {
192 .is_begin_event = exit_event_begin,
193 .is_end_event = exit_event_end,
194 .decode_key = exit_event_decode_key,
195 .name = "VM-EXIT"
196};
197
198/*
199 * For the mmio events, we treat:
200 * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
201 * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
202 */
203static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
204 struct event_key *key)
205{
206 key->key = perf_evsel__intval(evsel, sample, "gpa");
207 key->info = perf_evsel__intval(evsel, sample, "type");
208}
209
210#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
211#define KVM_TRACE_MMIO_READ 1
212#define KVM_TRACE_MMIO_WRITE 2
213
214static bool mmio_event_begin(struct perf_evsel *evsel,
215 struct perf_sample *sample, struct event_key *key)
216{
217 /* MMIO read begin event in kernel. */
218 if (kvm_exit_event(evsel))
219 return true;
220
221 /* MMIO write begin event in kernel. */
222 if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
223 perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
224 mmio_event_get_key(evsel, sample, key);
225 return true;
226 }
227
228 return false;
229}
230
231static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
232 struct event_key *key)
233{
234 /* MMIO write end event in kernel. */
235 if (kvm_entry_event(evsel))
236 return true;
237
238 /* MMIO read end event in kernel.*/
239 if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
240 perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
241 mmio_event_get_key(evsel, sample, key);
242 return true;
243 }
244
245 return false;
246}
247
248static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
249 struct event_key *key,
250 char *decode)
251{
252 scnprintf(decode, DECODE_STR_LEN, "%#lx:%s", (unsigned long)key->key,
253 key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
254}
255
256static struct kvm_events_ops mmio_events = {
257 .is_begin_event = mmio_event_begin,
258 .is_end_event = mmio_event_end,
259 .decode_key = mmio_event_decode_key,
260 .name = "MMIO Access"
261};
262
263 /* The time of emulation pio access is from kvm_pio to kvm_entry. */
264static void ioport_event_get_key(struct perf_evsel *evsel,
265 struct perf_sample *sample,
266 struct event_key *key)
bcf6edcd 267{
268 key->key = perf_evsel__intval(evsel, sample, "port");
269 key->info = perf_evsel__intval(evsel, sample, "rw");
270}
271
272static bool ioport_event_begin(struct perf_evsel *evsel,
273 struct perf_sample *sample,
274 struct event_key *key)
bcf6edcd 275{
276 if (!strcmp(evsel->name, "kvm:kvm_pio")) {
277 ioport_event_get_key(evsel, sample, key);
278 return true;
279 }
280
281 return false;
282}
283
284static bool ioport_event_end(struct perf_evsel *evsel,
285 struct perf_sample *sample __maybe_unused,
286 struct event_key *key __maybe_unused)
287{
288 return kvm_entry_event(evsel);
289}
290
291static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
292 struct event_key *key,
293 char *decode)
294{
295 scnprintf(decode, DECODE_STR_LEN, "%#llx:%s", (unsigned long long)key->key,
296 key->info ? "POUT" : "PIN");
297}
298
299static struct kvm_events_ops ioport_events = {
300 .is_begin_event = ioport_event_begin,
301 .is_end_event = ioport_event_end,
302 .decode_key = ioport_event_decode_key,
303 .name = "IO Port Access"
304};
305
306static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
307{
308 bool ret = true;
309
310 if (!strcmp(kvm->report_event, "vmexit"))
311 kvm->events_ops = &exit_events;
312 else if (!strcmp(kvm->report_event, "mmio"))
313 kvm->events_ops = &mmio_events;
314 else if (!strcmp(kvm->report_event, "ioport"))
315 kvm->events_ops = &ioport_events;
316 else {
317 pr_err("Unknown report event:%s\n", kvm->report_event);
318 ret = false;
319 }
320
321 return ret;
322}
323
324struct vcpu_event_record {
325 int vcpu_id;
326 u64 start_time;
327 struct kvm_event *last_event;
328};
329
330
331static void init_kvm_event_record(struct perf_kvm_stat *kvm)
332{
333 unsigned int i;
334
335 for (i = 0; i < EVENTS_CACHE_SIZE; i++)
336 INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
337}
338
339#ifdef HAVE_TIMERFD_SUPPORT
340static void clear_events_cache_stats(struct list_head *kvm_events_cache)
341{
342 struct list_head *head;
343 struct kvm_event *event;
344 unsigned int i;
345 int j;
346
347 for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
348 head = &kvm_events_cache[i];
349 list_for_each_entry(event, head, hash_entry) {
350 /* reset stats for event */
351 event->total.time = 0;
352 init_stats(&event->total.stats);
353
354 for (j = 0; j < event->max_vcpu; ++j) {
355 event->vcpu[j].time = 0;
356 init_stats(&event->vcpu[j].stats);
357 }
358 }
359 }
360}
361#endif
362
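/* Events are hashed into kvm_events_cache by masking the key down to the low EVENTS_BITS bits. */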
363static int kvm_events_hash_fn(u64 key)
364{
365 return key & (EVENTS_CACHE_SIZE - 1);
366}
367
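/* Grow the per-vcpu stats array in DEFAULT_VCPU_NUM steps so vcpu_id always has a slot; newly added slots are zeroed. */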
368static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
369{
370 int old_max_vcpu = event->max_vcpu;
371 void *prev;
372
373 if (vcpu_id < event->max_vcpu)
374 return true;
375
376 while (event->max_vcpu <= vcpu_id)
377 event->max_vcpu += DEFAULT_VCPU_NUM;
378
379 prev = event->vcpu;
380 event->vcpu = realloc(event->vcpu,
381 event->max_vcpu * sizeof(*event->vcpu));
382 if (!event->vcpu) {
383 free(prev);
384 pr_err("Not enough memory\n");
385 return false;
386 }
387
388 memset(event->vcpu + old_max_vcpu, 0,
389 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
390 return true;
391}
392
393static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
394{
395 struct kvm_event *event;
396
397 event = zalloc(sizeof(*event));
398 if (!event) {
399 pr_err("Not enough memory\n");
400 return NULL;
401 }
402
403 event->key = *key;
404 init_stats(&event->total.stats);
405 return event;
406}
407
408static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
409 struct event_key *key)
410{
411 struct kvm_event *event;
412 struct list_head *head;
413
414 BUG_ON(key->key == INVALID_KEY);
415
416 head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
417 list_for_each_entry(event, head, hash_entry) {
418 if (event->key.key == key->key && event->key.info == key->info)
419 return event;
420 }
421
422 event = kvm_alloc_init_event(key);
423 if (!event)
424 return NULL;
425
426 list_add(&event->hash_entry, head);
427 return event;
428}
429
430static bool handle_begin_event(struct perf_kvm_stat *kvm,
431 struct vcpu_event_record *vcpu_record,
432 struct event_key *key, u64 timestamp)
433{
434 struct kvm_event *event = NULL;
435
436 if (key->key != INVALID_KEY)
437 event = find_create_kvm_event(kvm, key);
438
439 vcpu_record->last_event = event;
440 vcpu_record->start_time = timestamp;
441 return true;
442}
443
444static void
445kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
446{
447 kvm_stats->time += time_diff;
448 update_stats(&kvm_stats->stats, time_diff);
449}
450
451static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
452{
453 struct kvm_event_stats *kvm_stats = &event->total;
454
455 if (vcpu_id != -1)
456 kvm_stats = &event->vcpu[vcpu_id];
457
458 return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
459 avg_stats(&kvm_stats->stats));
460}
461
462static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
463 u64 time_diff)
464{
465 if (vcpu_id == -1) {
466 kvm_update_event_stats(&event->total, time_diff);
467 return true;
468 }
469
470 if (!kvm_event_expand(event, vcpu_id))
471 return false;
472
473 kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
474 return true;
475}
476
477static bool handle_end_event(struct perf_kvm_stat *kvm,
478 struct vcpu_event_record *vcpu_record,
479 struct event_key *key,
480 struct perf_sample *sample)
481{
482 struct kvm_event *event;
483 u64 time_begin, time_diff;
484 int vcpu;
485
486 if (kvm->trace_vcpu == -1)
487 vcpu = -1;
488 else
489 vcpu = vcpu_record->vcpu_id;
490
491 event = vcpu_record->last_event;
492 time_begin = vcpu_record->start_time;
493
494 /* The begin event is not caught. */
495 if (!time_begin)
496 return true;
497
498 /*
499 * In some case, the 'begin event' only records the start timestamp,
500 * the actual event is recognized in the 'end event' (e.g. mmio-event).
501 */
502
503 /* Both begin and end events did not get the key. */
504 if (!event && key->key == INVALID_KEY)
505 return true;
506
507 if (!event)
508 event = find_create_kvm_event(kvm, key);
509
510 if (!event)
511 return false;
512
513 vcpu_record->last_event = NULL;
514 vcpu_record->start_time = 0;
515
516 /* seems to happen once in a while during live mode */
517 if (sample->time < time_begin) {
518 pr_debug("End time before begin time; skipping event.\n");
519 return true;
520 }
521
522 time_diff = sample->time - time_begin;
523
524 if (kvm->duration && time_diff > kvm->duration) {
525 char decode[DECODE_STR_LEN];
526
527 kvm->events_ops->decode_key(kvm, &event->key, decode);
528 if (strcmp(decode, "HLT")) {
529 pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
530 sample->time, sample->pid, vcpu_record->vcpu_id,
531 decode, time_diff/1000);
532 }
533 }
534
535 return update_kvm_event(event, vcpu, time_diff);
536}
537
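/* Per-thread vcpu bookkeeping: lazily allocated in thread->priv the first time a kvm_entry sample is seen for the thread. */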
538static
539struct vcpu_event_record *per_vcpu_record(struct thread *thread,
540 struct perf_evsel *evsel,
541 struct perf_sample *sample)
542{
543 /* Only kvm_entry records vcpu id. */
544 if (!thread->priv && kvm_entry_event(evsel)) {
545 struct vcpu_event_record *vcpu_record;
546
547 vcpu_record = zalloc(sizeof(*vcpu_record));
548 if (!vcpu_record) {
549 pr_err("%s: Not enough memory\n", __func__);
550 return NULL;
551 }
552
553 vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, VCPU_ID);
554 thread->priv = vcpu_record;
555 }
556
557 return thread->priv;
558}
559
560static bool handle_kvm_event(struct perf_kvm_stat *kvm,
561 struct thread *thread,
562 struct perf_evsel *evsel,
563 struct perf_sample *sample)
564{
565 struct vcpu_event_record *vcpu_record;
566 struct event_key key = {.key = INVALID_KEY};
567
568 vcpu_record = per_vcpu_record(thread, evsel, sample);
569 if (!vcpu_record)
570 return true;
571
572 /* only process events for vcpus user cares about */
573 if ((kvm->trace_vcpu != -1) &&
574 (kvm->trace_vcpu != vcpu_record->vcpu_id))
575 return true;
576
577 if (kvm->events_ops->is_begin_event(evsel, sample, &key))
578 return handle_begin_event(kvm, vcpu_record, &key, sample->time);
579
580 if (kvm->events_ops->is_end_event(evsel, sample, &key))
581 return handle_end_event(kvm, vcpu_record, &key, sample);
582
583 return true;
584}
585
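/* Stat accessors and comparators: vcpu == -1 selects the aggregate totals, otherwise the per-vcpu bucket (0 if that vcpu was never seen). */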
586#define GET_EVENT_KEY(func, field) \
587static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
588{ \
589 if (vcpu == -1) \
590 return event->total.field; \
591 \
592 if (vcpu >= event->max_vcpu) \
593 return 0; \
594 \
595 return event->vcpu[vcpu].field; \
596}
597
598#define COMPARE_EVENT_KEY(func, field) \
599GET_EVENT_KEY(func, field) \
600static int compare_kvm_event_ ## func(struct kvm_event *one, \
601 struct kvm_event *two, int vcpu)\
602{ \
603 return get_event_ ##func(one, vcpu) > \
604 get_event_ ##func(two, vcpu); \
605}
606
607GET_EVENT_KEY(time, time);
608COMPARE_EVENT_KEY(count, stats.n);
609COMPARE_EVENT_KEY(mean, stats.mean);
62d04dbf
DA
610GET_EVENT_KEY(max, stats.max);
611GET_EVENT_KEY(min, stats.min);
bcf6edcd
XG
612
613#define DEF_SORT_NAME_KEY(name, compare_key) \
614 { #name, compare_kvm_event_ ## compare_key }
615
616static struct kvm_event_key keys[] = {
617 DEF_SORT_NAME_KEY(sample, count),
618 DEF_SORT_NAME_KEY(time, mean),
619 { NULL, NULL }
620};
621
3786063a 622static bool select_key(struct perf_kvm_stat *kvm)
bcf6edcd
XG
623{
624 int i;
625
626 for (i = 0; keys[i].name; i++) {
de332ac4
DA
627 if (!strcmp(keys[i].name, kvm->sort_key)) {
628 kvm->compare = keys[i].key;
bcf6edcd
XG
629 return true;
630 }
631 }
632
de332ac4 633 pr_err("Unknown compare key:%s\n", kvm->sort_key);
bcf6edcd
XG
634 return false;
635}
636
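/* Ordered insert into the result rb-tree using the selected sort comparator, so iteration from rb_first() walks events from largest to smallest. */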
637static void insert_to_result(struct rb_root *result, struct kvm_event *event,
638 key_cmp_fun bigger, int vcpu)
639{
640 struct rb_node **rb = &result->rb_node;
641 struct rb_node *parent = NULL;
642 struct kvm_event *p;
643
644 while (*rb) {
645 p = container_of(*rb, struct kvm_event, rb);
646 parent = *rb;
647
648 if (bigger(event, p, vcpu))
649 rb = &(*rb)->rb_left;
650 else
651 rb = &(*rb)->rb_right;
652 }
653
654 rb_link_node(&event->rb, parent, rb);
de332ac4 655 rb_insert_color(&event->rb, result);
bcf6edcd
XG
656}
657
3786063a
XG
658static void
659update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event)
bcf6edcd 660{
de332ac4
DA
661 int vcpu = kvm->trace_vcpu;
662
663 kvm->total_count += get_event_count(event, vcpu);
664 kvm->total_time += get_event_time(event, vcpu);
bcf6edcd
XG
665}
666
667static bool event_is_valid(struct kvm_event *event, int vcpu)
668{
669 return !!get_event_count(event, vcpu);
670}
671
3786063a 672static void sort_result(struct perf_kvm_stat *kvm)
bcf6edcd
XG
673{
674 unsigned int i;
de332ac4 675 int vcpu = kvm->trace_vcpu;
bcf6edcd
XG
676 struct kvm_event *event;
677
355afe81
DA
678 for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
679 list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
bcf6edcd 680 if (event_is_valid(event, vcpu)) {
de332ac4
DA
681 update_total_count(kvm, event);
682 insert_to_result(&kvm->result, event,
683 kvm->compare, vcpu);
bcf6edcd 684 }
355afe81
DA
685 }
686 }
bcf6edcd
XG
687}
688
689/* returns left most element of result, and erase it */
690static struct kvm_event *pop_from_result(struct rb_root *result)
691{
692 struct rb_node *node = rb_first(result);
693
694 if (!node)
695 return NULL;
696
de332ac4 697 rb_erase(node, result);
bcf6edcd
XG
698 return container_of(node, struct kvm_event, rb);
699}
700
701static void print_vcpu_info(struct perf_kvm_stat *kvm)
702{
703 int vcpu = kvm->trace_vcpu;
704
bcf6edcd
XG
705 pr_info("Analyze events for ");
706
1afe1d14
DA
707 if (kvm->live) {
708 if (kvm->opts.target.system_wide)
709 pr_info("all VMs, ");
710 else if (kvm->opts.target.pid)
711 pr_info("pid(s) %s, ", kvm->opts.target.pid);
712 else
713 pr_info("dazed and confused on what is monitored, ");
714 }
715
bcf6edcd
XG
716 if (vcpu == -1)
717 pr_info("all VCPUs:\n\n");
718 else
719 pr_info("VCPU %d:\n\n", vcpu);
720}
721
1afe1d14
DA
722static void show_timeofday(void)
723{
724 char date[64];
725 struct timeval tv;
726 struct tm ltime;
727
728 gettimeofday(&tv, NULL);
729 if (localtime_r(&tv.tv_sec, &ltime)) {
730 strftime(date, sizeof(date), "%H:%M:%S", &ltime);
731 pr_info("%s.%06ld", date, tv.tv_usec);
732 } else
733 pr_info("00:00:00.000000");
734
735 return;
736}
737
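/* Emit one row per event, largest first: samples, sample%, time%, min/max and mean time with relative stddev, plus totals and any lost events. */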
738static void print_result(struct perf_kvm_stat *kvm)
739{
740 char decode[DECODE_STR_LEN];
741 struct kvm_event *event;
742 int vcpu = kvm->trace_vcpu;
743
744 if (kvm->live) {
745 puts(CONSOLE_CLEAR);
746 show_timeofday();
747 }
748
749 pr_info("\n\n");
750 print_vcpu_info(kvm);
751 pr_info("%*s ", DECODE_STR_LEN, kvm->events_ops->name);
752 pr_info("%10s ", "Samples");
753 pr_info("%9s ", "Samples%");
754
755 pr_info("%9s ", "Time%");
62d04dbf
DA
756 pr_info("%10s ", "Min Time");
757 pr_info("%10s ", "Max Time");
bcf6edcd
XG
758 pr_info("%16s ", "Avg time");
759 pr_info("\n\n");
760
de332ac4 761 while ((event = pop_from_result(&kvm->result))) {
62d04dbf 762 u64 ecount, etime, max, min;
bcf6edcd
XG
763
764 ecount = get_event_count(event, vcpu);
765 etime = get_event_time(event, vcpu);
62d04dbf
DA
766 max = get_event_max(event, vcpu);
767 min = get_event_min(event, vcpu);
bcf6edcd 768
de332ac4 769 kvm->events_ops->decode_key(kvm, &event->key, decode);
44b38021 770 pr_info("%*s ", DECODE_STR_LEN, decode);
bcf6edcd 771 pr_info("%10llu ", (unsigned long long)ecount);
de332ac4
DA
772 pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
773 pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
62d04dbf
DA
774 pr_info("%8" PRIu64 "us ", min / 1000);
775 pr_info("%8" PRIu64 "us ", max / 1000);
bcf6edcd
XG
776 pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
777 kvm_event_rel_stddev(vcpu, event));
778 pr_info("\n");
779 }
780
e4f7637f
DA
781 pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
782 kvm->total_count, kvm->total_time / 1e3);
1afe1d14
DA
783
784 if (kvm->lost_events)
785 pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
786}
787
87419c9a 788#ifdef HAVE_TIMERFD_SUPPORT
1afe1d14
DA
789static int process_lost_event(struct perf_tool *tool,
790 union perf_event *event __maybe_unused,
791 struct perf_sample *sample __maybe_unused,
792 struct machine *machine __maybe_unused)
793{
794 struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool);
795
796 kvm->lost_events++;
797 return 0;
798}
799#endif
800
801static bool skip_sample(struct perf_kvm_stat *kvm,
802 struct perf_sample *sample)
803{
804 if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL)
805 return true;
806
807 return false;
808}
809
de332ac4 810static int process_sample_event(struct perf_tool *tool,
bcf6edcd
XG
811 union perf_event *event,
812 struct perf_sample *sample,
813 struct perf_evsel *evsel,
814 struct machine *machine)
815{
2e73f00f 816 struct thread *thread;
3786063a
XG
817 struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
818 tool);
bcf6edcd 819
2e73f00f
DA
820 if (skip_sample(kvm, sample))
821 return 0;
822
314add6b 823 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
bcf6edcd
XG
824 if (thread == NULL) {
825 pr_debug("problem processing %d event, skipping it.\n",
826 event->header.type);
827 return -1;
828 }
829
de332ac4 830 if (!handle_kvm_event(kvm, thread, evsel, sample))
bcf6edcd
XG
831 return -1;
832
833 return 0;
834}
835
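/* Choose the exit-reason table from the cpuid string: VMX reasons for Intel, SVM reasons for AMD. */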
836static int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
837{
838 if (strstr(cpuid, "Intel")) {
839 kvm->exit_reasons = vmx_exit_reasons;
840 kvm->exit_reasons_isa = "VMX";
841 } else if (strstr(cpuid, "AMD")) {
842 kvm->exit_reasons = svm_exit_reasons;
843 kvm->exit_reasons_isa = "SVM";
844 } else
845 return -ENOTSUP;
846
847 return 0;
848}
849
850static int cpu_isa_config(struct perf_kvm_stat *kvm)
851{
852 char buf[64], *cpuid;
853 int err;
854
855 if (kvm->live) {
856 err = get_cpuid(buf, sizeof(buf));
857 if (err != 0) {
65c647a6 858 pr_err("Failed to look up CPU type\n");
1afe1d14
DA
859 return err;
860 }
861 cpuid = buf;
862 } else
863 cpuid = kvm->session->header.env.cpuid;
bcf6edcd 864
65c647a6
AY
865 if (!cpuid) {
866 pr_err("Failed to look up CPU type\n");
867 return -EINVAL;
1afe1d14
DA
868 }
869
65c647a6
AY
870 err = cpu_isa_init(kvm, cpuid);
871 if (err == -ENOTSUP)
872 pr_err("CPU %s is not supported.\n", cpuid);
1afe1d14 873
65c647a6 874 return err;
1afe1d14
DA
875}
876
877static bool verify_vcpu(int vcpu)
878{
879 if (vcpu != -1 && vcpu < 0) {
880 pr_err("Invalid vcpu:%d.\n", vcpu);
881 return false;
882 }
883
884 return true;
885}
886
87419c9a 887#ifdef HAVE_TIMERFD_SUPPORT
1afe1d14
DA
888/* keeping the max events to a modest level to keep
889 * the processing of samples per mmap smooth.
890 */
891#define PERF_KVM__MAX_EVENTS_PER_MMAP 25
892
893static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
894 u64 *mmap_time)
895{
896 union perf_event *event;
897 struct perf_sample sample;
898 s64 n = 0;
899 int err;
900
901 *mmap_time = ULLONG_MAX;
902 while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
903 err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
904 if (err) {
8e50d384 905 perf_evlist__mmap_consume(kvm->evlist, idx);
1afe1d14
DA
906 pr_err("Failed to parse sample\n");
907 return -1;
908 }
909
910 err = perf_session_queue_event(kvm->session, event, &sample, 0);
8e50d384
ZZ
911 /*
912 * FIXME: Here we can't consume the event, as perf_session_queue_event will
913 * point to it, and it'll get possibly overwritten by the kernel.
914 */
915 perf_evlist__mmap_consume(kvm->evlist, idx);
916
1afe1d14
DA
917 if (err) {
918 pr_err("Failed to enqueue sample: %d\n", err);
919 return -1;
920 }
921
922 /* save time stamp of our first sample for this mmap */
923 if (n == 0)
924 *mmap_time = sample.time;
925
926 /* limit events per mmap handled all at once */
927 n++;
928 if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
929 break;
930 }
931
932 return n;
933}
934
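/* Drain every mmap ring once, then flush queued samples up to the smallest timestamp seen so ordered processing stays correct across rings. */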
935static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
936{
937 int i, err, throttled = 0;
938 s64 n, ntotal = 0;
939 u64 flush_time = ULLONG_MAX, mmap_time;
940
941 for (i = 0; i < kvm->evlist->nr_mmaps; i++) {
942 n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
943 if (n < 0)
944 return -1;
945
946 /* flush time is going to be the minimum of all the individual
947 * mmap times. Essentially, we flush all the samples queued up
948 * from the last pass under our minimal start time -- that leaves
949 * a very small race for samples to come in with a lower timestamp.
950 * The ioctl to return the perf_clock timestamp should close the
951 * race entirely.
952 */
953 if (mmap_time < flush_time)
954 flush_time = mmap_time;
955
956 ntotal += n;
957 if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
958 throttled = 1;
959 }
960
961 /* flush queue after each round in which we processed events */
962 if (ntotal) {
963 kvm->session->ordered_samples.next_flush = flush_time;
964 err = kvm->tool.finished_round(&kvm->tool, NULL, kvm->session);
965 if (err) {
966 if (kvm->lost_events)
967 pr_info("\nLost events: %" PRIu64 "\n\n",
968 kvm->lost_events);
969 return err;
970 }
971 }
972
973 return throttled;
974}
975
976static volatile int done;
977
978static void sig_handler(int sig __maybe_unused)
979{
980 done = 1;
981}
982
983static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
984{
985 struct itimerspec new_value;
986 int rc = -1;
987
988 kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
989 if (kvm->timerfd < 0) {
990 pr_err("timerfd_create failed\n");
991 goto out;
992 }
993
994 new_value.it_value.tv_sec = kvm->display_time;
995 new_value.it_value.tv_nsec = 0;
996 new_value.it_interval.tv_sec = kvm->display_time;
997 new_value.it_interval.tv_nsec = 0;
998
999 if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
1000 pr_err("timerfd_settime failed: %d\n", errno);
1001 close(kvm->timerfd);
1002 goto out;
1003 }
1004
1005 rc = 0;
1006out:
1007 return rc;
1008}
1009
1010static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
1011{
1012 uint64_t c;
1013 int rc;
1014
1015 rc = read(kvm->timerfd, &c, sizeof(uint64_t));
1016 if (rc < 0) {
1017 if (errno == EAGAIN)
1018 return 0;
1019
1020 pr_err("Failed to read timer fd: %d\n", errno);
1021 return -1;
1022 }
1023
1024 if (rc != sizeof(uint64_t)) {
1025 pr_err("Error reading timer fd - invalid size returned\n");
1026 return -1;
1027 }
1028
1029 if (c != 1)
1030 pr_debug("Missed timer beats: %" PRIu64 "\n", c-1);
1031
1032 /* update display */
1033 sort_result(kvm);
1034 print_result(kvm);
1035
1036 /* reset counts */
1037 clear_events_cache_stats(kvm->kvm_events_cache);
1038 kvm->total_count = 0;
1039 kvm->total_time = 0;
1040 kvm->lost_events = 0;
1041
1042 return 0;
1043}
1044
1045static int fd_set_nonblock(int fd)
1046{
1047 long arg = 0;
1048
1049 arg = fcntl(fd, F_GETFL);
1050 if (arg < 0) {
1051 pr_err("Failed to get current flags for fd %d\n", fd);
1052 return -1;
1053 }
1054
1055 if (fcntl(fd, F_SETFL, arg | O_NONBLOCK) < 0) {
1056 pr_err("Failed to set non-block option on fd %d\n", fd);
1057 return -1;
1058 }
1059
1060 return 0;
1061}
1062
1063static
1064int perf_kvm__handle_stdin(struct termios *tc_now, struct termios *tc_save)
1065{
1066 int c;
1067
1068 tcsetattr(0, TCSANOW, tc_now);
1069 c = getc(stdin);
1070 tcsetattr(0, TCSAFLUSH, tc_save);
1071
1072 if (c == 'q')
1073 return 1;
1074
1075 return 0;
1076}
1077
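/* Live mode main loop: poll the event mmaps, the display timerfd and stdin; the display timer triggers sort/print/reset, 'q' or a signal stops the loop. */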
1078static int kvm_events_live_report(struct perf_kvm_stat *kvm)
1079{
1080 struct pollfd *pollfds = NULL;
1081 int nr_fds, nr_stdin, ret, err = -EINVAL;
1082 struct termios tc, save;
1083
1084 /* live flag must be set first */
1085 kvm->live = true;
1086
1087 ret = cpu_isa_config(kvm);
1088 if (ret < 0)
1089 return ret;
1090
1091 if (!verify_vcpu(kvm->trace_vcpu) ||
1092 !select_key(kvm) ||
1093 !register_kvm_events_ops(kvm)) {
1094 goto out;
1095 }
1096
1097 init_kvm_event_record(kvm);
1098
1099 tcgetattr(0, &save);
1100 tc = save;
1101 tc.c_lflag &= ~(ICANON | ECHO);
1102 tc.c_cc[VMIN] = 0;
1103 tc.c_cc[VTIME] = 0;
1104
1105 signal(SIGINT, sig_handler);
1106 signal(SIGTERM, sig_handler);
1107
1108 /* copy pollfds -- need to add timerfd and stdin */
1109 nr_fds = kvm->evlist->nr_fds;
1110 pollfds = zalloc(sizeof(struct pollfd) * (nr_fds + 2));
1111 if (!pollfds) {
1112 err = -ENOMEM;
1113 goto out;
1114 }
1115 memcpy(pollfds, kvm->evlist->pollfd,
1116 sizeof(struct pollfd) * kvm->evlist->nr_fds);
1117
1118 /* add timer fd */
1119 if (perf_kvm__timerfd_create(kvm) < 0) {
1120 err = -1;
1121 goto out;
1122 }
1123
1124 pollfds[nr_fds].fd = kvm->timerfd;
1125 pollfds[nr_fds].events = POLLIN;
1126 nr_fds++;
1127
1128 pollfds[nr_fds].fd = fileno(stdin);
1129 pollfds[nr_fds].events = POLLIN;
1130 nr_stdin = nr_fds;
1131 nr_fds++;
1132 if (fd_set_nonblock(fileno(stdin)) != 0)
1133 goto out;
1134
1135 /* everything is good - enable the events and process */
1136 perf_evlist__enable(kvm->evlist);
1137
1138 while (!done) {
1139 int rc;
1140
1141 rc = perf_kvm__mmap_read(kvm);
1142 if (rc < 0)
1143 break;
1144
1145 err = perf_kvm__handle_timerfd(kvm);
1146 if (err)
1147 goto out;
1148
1149 if (pollfds[nr_stdin].revents & POLLIN)
1150 done = perf_kvm__handle_stdin(&tc, &save);
1151
1152 if (!rc && !done)
1153 err = poll(pollfds, nr_fds, 100);
1154 }
1155
1156 perf_evlist__disable(kvm->evlist);
1157
1158 if (err == 0) {
1159 sort_result(kvm);
1160 print_result(kvm);
1161 }
1162
1163out:
1164 if (kvm->timerfd >= 0)
1165 close(kvm->timerfd);
1166
f5385650 1167 free(pollfds);
1afe1d14
DA
1168 return err;
1169}
1170
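/* Open and mmap the tracepoints for live mode, keeping samples minimal: only TID/TIME/CPU/RAW are requested and the sample period is fixed at 1. */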
1171static int kvm_live_open_events(struct perf_kvm_stat *kvm)
1172{
1173 int err, rc = -1;
1174 struct perf_evsel *pos;
1175 struct perf_evlist *evlist = kvm->evlist;
1176
1177 perf_evlist__config(evlist, &kvm->opts);
1178
1179 /*
1180 * Note: exclude_{guest,host} do not apply here.
1181 * This command processes KVM tracepoints from host only
1182 */
0050f7aa 1183 evlist__for_each(evlist, pos) {
1afe1d14
DA
1184 struct perf_event_attr *attr = &pos->attr;
1185
1186 /* make sure these *are* set */
e71aa283
AH
1187 perf_evsel__set_sample_bit(pos, TID);
1188 perf_evsel__set_sample_bit(pos, TIME);
1189 perf_evsel__set_sample_bit(pos, CPU);
1190 perf_evsel__set_sample_bit(pos, RAW);
1afe1d14 1191 /* make sure these are *not*; want as small a sample as possible */
e71aa283
AH
1192 perf_evsel__reset_sample_bit(pos, PERIOD);
1193 perf_evsel__reset_sample_bit(pos, IP);
1194 perf_evsel__reset_sample_bit(pos, CALLCHAIN);
1195 perf_evsel__reset_sample_bit(pos, ADDR);
1196 perf_evsel__reset_sample_bit(pos, READ);
1afe1d14
DA
1197 attr->mmap = 0;
1198 attr->comm = 0;
1199 attr->task = 0;
1200
1201 attr->sample_period = 1;
1202
1203 attr->watermark = 0;
1204 attr->wakeup_events = 1000;
1205
1206 /* will enable all once we are ready */
1207 attr->disabled = 1;
1208 }
1209
1210 err = perf_evlist__open(evlist);
1211 if (err < 0) {
1212 printf("Couldn't create the events: %s\n", strerror(errno));
1213 goto out;
bcf6edcd
XG
1214 }
1215
1afe1d14
DA
1216 if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) {
1217 ui__error("Failed to mmap the events: %s\n", strerror(errno));
1218 perf_evlist__close(evlist);
1219 goto out;
1220 }
1221
1222 rc = 0;
1223
1224out:
1225 return rc;
1226}
1227#endif
1228
1229static int read_events(struct perf_kvm_stat *kvm)
1230{
1231 int ret;
1232
de332ac4
DA
1233 struct perf_tool eops = {
1234 .sample = process_sample_event,
1235 .comm = perf_event__process_comm,
1236 .ordered_samples = true,
1237 };
1238 struct perf_data_file file = {
1239 .path = kvm->file_name,
1240 .mode = PERF_DATA_MODE_READ,
1241 };
de332ac4
DA
1242
1243 kvm->tool = eops;
1244 kvm->session = perf_session__new(&file, false, &kvm->tool);
1245 if (!kvm->session) {
1246 pr_err("Initializing perf session failed\n");
1247 return -EINVAL;
1248 }
1249
de332ac4 1250 if (!perf_session__has_traces(kvm->session, "kvm record"))
bcf6edcd
XG
1251 return -EINVAL;
1252
1253 /*
1254 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
1255 * traced in the old kernel.
1256 */
1afe1d14 1257 ret = cpu_isa_config(kvm);
bcf6edcd
XG
1258 if (ret < 0)
1259 return ret;
1260
de332ac4 1261 return perf_session__process_events(kvm->session, &kvm->tool);
bcf6edcd
XG
1262}
1263
2e73f00f
DA
1264static int parse_target_str(struct perf_kvm_stat *kvm)
1265{
1266 if (kvm->pid_str) {
1267 kvm->pid_list = intlist__new(kvm->pid_str);
1268 if (kvm->pid_list == NULL) {
1269 pr_err("Error parsing process id string\n");
1270 return -EINVAL;
1271 }
1272 }
1273
1274 return 0;
1275}
1276
3786063a 1277static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
bcf6edcd
XG
1278{
1279 int ret = -EINVAL;
de332ac4 1280 int vcpu = kvm->trace_vcpu;
bcf6edcd 1281
2e73f00f
DA
1282 if (parse_target_str(kvm) != 0)
1283 goto exit;
1284
bcf6edcd
XG
1285 if (!verify_vcpu(vcpu))
1286 goto exit;
1287
de332ac4 1288 if (!select_key(kvm))
bcf6edcd
XG
1289 goto exit;
1290
de332ac4 1291 if (!register_kvm_events_ops(kvm))
bcf6edcd
XG
1292 goto exit;
1293
de332ac4 1294 init_kvm_event_record(kvm);
bcf6edcd
XG
1295 setup_pager();
1296
de332ac4 1297 ret = read_events(kvm);
bcf6edcd
XG
1298 if (ret)
1299 goto exit;
1300
de332ac4
DA
1301 sort_result(kvm);
1302 print_result(kvm);
1303
bcf6edcd
XG
1304exit:
1305 return ret;
1306}
1307
8fdd84c4
DA
1308static const char * const kvm_events_tp[] = {
1309 "kvm:kvm_entry",
1310 "kvm:kvm_exit",
1311 "kvm:kvm_mmio",
1312 "kvm:kvm_pio",
bcf6edcd
XG
1313};
1314
1315#define STRDUP_FAIL_EXIT(s) \
1316 ({ char *_p; \
1317 _p = strdup(s); \
1318 if (!_p) \
1319 return -ENOMEM; \
1320 _p; \
1321 })
1322
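/* Build a 'perf record' command line: the fixed record_args, one '-e' per kvm tracepoint, '-o <file>', then the user's own arguments. */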
1323static int
1324kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
bcf6edcd
XG
1325{
1326 unsigned int rec_argc, i, j;
1327 const char **rec_argv;
8fdd84c4
DA
1328 const char * const record_args[] = {
1329 "record",
1330 "-R",
8fdd84c4
DA
1331 "-m", "1024",
1332 "-c", "1",
1333 };
bcf6edcd 1334
8fdd84c4
DA
1335 rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
1336 2 * ARRAY_SIZE(kvm_events_tp);
bcf6edcd
XG
1337 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1338
1339 if (rec_argv == NULL)
1340 return -ENOMEM;
1341
1342 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1343 rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
1344
8fdd84c4
DA
1345 for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
1346 rec_argv[i++] = "-e";
1347 rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
1348 }
1349
bcf6edcd 1350 rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
de332ac4 1351 rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);
bcf6edcd
XG
1352
1353 for (j = 1; j < (unsigned int)argc; j++, i++)
1354 rec_argv[i] = argv[j];
1355
1356 return cmd_record(i, rec_argv, NULL);
1357}
1358
3786063a
XG
1359static int
1360kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
de332ac4
DA
1361{
1362 const struct option kvm_events_report_options[] = {
1363 OPT_STRING(0, "event", &kvm->report_event, "report event",
1364 "event for reporting: vmexit, mmio, ioport"),
1365 OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
1366 "vcpu id to report"),
1367 OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
1368 "key for sorting: sample(sort by samples number)"
1369 " time (sort by avg time)"),
2e73f00f
DA
1370 OPT_STRING('p', "pid", &kvm->pid_str, "pid",
1371 "analyze events only for given process id(s)"),
de332ac4
DA
1372 OPT_END()
1373 };
bcf6edcd 1374
de332ac4
DA
1375 const char * const kvm_events_report_usage[] = {
1376 "perf kvm stat report [<options>]",
1377 NULL
1378 };
bcf6edcd 1379
bcf6edcd
XG
1380 symbol__init();
1381
1382 if (argc) {
1383 argc = parse_options(argc, argv,
1384 kvm_events_report_options,
1385 kvm_events_report_usage, 0);
1386 if (argc)
1387 usage_with_options(kvm_events_report_usage,
1388 kvm_events_report_options);
1389 }
1390
de332ac4 1391 return kvm_events_report_vcpu(kvm);
bcf6edcd
XG
1392}
1393
87419c9a 1394#ifdef HAVE_TIMERFD_SUPPORT
1afe1d14
DA
1395static struct perf_evlist *kvm_live_event_list(void)
1396{
1397 struct perf_evlist *evlist;
1398 char *tp, *name, *sys;
1399 unsigned int j;
1400 int err = -1;
1401
1402 evlist = perf_evlist__new();
1403 if (evlist == NULL)
1404 return NULL;
1405
1406 for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
1407
1408 tp = strdup(kvm_events_tp[j]);
1409 if (tp == NULL)
1410 goto out;
1411
1412 /* split tracepoint into subsystem and name */
1413 sys = tp;
1414 name = strchr(tp, ':');
1415 if (name == NULL) {
1416 pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
1417 kvm_events_tp[j]);
1418 free(tp);
1419 goto out;
1420 }
1421 *name = '\0';
1422 name++;
1423
1424 if (perf_evlist__add_newtp(evlist, sys, name, NULL)) {
1425 pr_err("Failed to add %s tracepoint to the list\n", kvm_events_tp[j]);
1426 free(tp);
1427 goto out;
1428 }
1429
1430 free(tp);
1431 }
1432
1433 err = 0;
1434
1435out:
1436 if (err) {
1437 perf_evlist__delete(evlist);
1438 evlist = NULL;
1439 }
1440
1441 return evlist;
1442}
1443
1444static int kvm_events_live(struct perf_kvm_stat *kvm,
1445 int argc, const char **argv)
1446{
1447 char errbuf[BUFSIZ];
1448 int err;
1449
1450 const struct option live_options[] = {
1451 OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
1452 "record events on existing process id"),
994a1f78
JO
1453 OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages",
1454 "number of mmap data pages",
1455 perf_evlist__parse_mmap_pages),
1afe1d14
DA
1456 OPT_INCR('v', "verbose", &verbose,
1457 "be more verbose (show counter open errors, etc)"),
1458 OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
1459 "system-wide collection from all CPUs"),
1460 OPT_UINTEGER('d', "display", &kvm->display_time,
1461 "time in seconds between display updates"),
1462 OPT_STRING(0, "event", &kvm->report_event, "report event",
1463 "event for reporting: vmexit, mmio, ioport"),
1464 OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
1465 "vcpu id to report"),
1466 OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
1467 "key for sorting: sample(sort by samples number)"
1468 " time (sort by avg time)"),
70f7b4a7
DA
1469 OPT_U64(0, "duration", &kvm->duration,
1470 "show events other than HALT that take longer than duration usecs"),
1afe1d14
DA
1471 OPT_END()
1472 };
1473 const char * const live_usage[] = {
1474 "perf kvm stat live [<options>]",
1475 NULL
1476 };
f5fc1412
JO
1477 struct perf_data_file file = {
1478 .mode = PERF_DATA_MODE_WRITE,
1479 };
1afe1d14
DA
1480
1481
1482 /* event handling */
1483 kvm->tool.sample = process_sample_event;
1484 kvm->tool.comm = perf_event__process_comm;
1485 kvm->tool.exit = perf_event__process_exit;
1486 kvm->tool.fork = perf_event__process_fork;
1487 kvm->tool.lost = process_lost_event;
1488 kvm->tool.ordered_samples = true;
1489 perf_tool__fill_defaults(&kvm->tool);
1490
1491 /* set defaults */
1492 kvm->display_time = 1;
1493 kvm->opts.user_interval = 1;
1494 kvm->opts.mmap_pages = 512;
1495 kvm->opts.target.uses_mmap = false;
1496 kvm->opts.target.uid_str = NULL;
1497 kvm->opts.target.uid = UINT_MAX;
1498
1499 symbol__init();
1500 disable_buildid_cache();
1501
1502 use_browser = 0;
1503 setup_browser(false);
1504
1505 if (argc) {
1506 argc = parse_options(argc, argv, live_options,
1507 live_usage, 0);
1508 if (argc)
1509 usage_with_options(live_usage, live_options);
1510 }
1511
70f7b4a7
DA
1512 kvm->duration *= NSEC_PER_USEC; /* convert usec to nsec */
1513
1afe1d14
DA
1514 /*
1515 * target related setups
1516 */
1517 err = target__validate(&kvm->opts.target);
1518 if (err) {
1519 target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
1520 ui__warning("%s", errbuf);
1521 }
1522
602ad878 1523 if (target__none(&kvm->opts.target))
1afe1d14
DA
1524 kvm->opts.target.system_wide = true;
1525
1526
1527 /*
1528 * generate the event list
1529 */
1530 kvm->evlist = kvm_live_event_list();
1531 if (kvm->evlist == NULL) {
1532 err = -1;
1533 goto out;
1534 }
1535
1536 symbol_conf.nr_events = kvm->evlist->nr_entries;
1537
1538 if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
1539 usage_with_options(live_usage, live_options);
1540
1541 /*
1542 * perf session
1543 */
f5fc1412 1544 kvm->session = perf_session__new(&file, false, &kvm->tool);
1afe1d14
DA
1545 if (kvm->session == NULL) {
1546 err = -ENOMEM;
1547 goto out;
1548 }
1549 kvm->session->evlist = kvm->evlist;
1550 perf_session__set_id_hdr_size(kvm->session);
a33fbd56
ACM
1551 machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
1552 kvm->evlist->threads, false);
1afe1d14
DA
1553 err = kvm_live_open_events(kvm);
1554 if (err)
1555 goto out;
1556
1557 err = kvm_events_live_report(kvm);
1558
1559out:
1560 exit_browser(0);
1561
1562 if (kvm->session)
1563 perf_session__delete(kvm->session);
1564 kvm->session = NULL;
1565 if (kvm->evlist)
1566 perf_evlist__delete(kvm->evlist);
1567
1568 return err;
1569}
1570#endif
1571
1572static void print_kvm_stat_usage(void)
1573{
1574 printf("Usage: perf kvm stat <command>\n\n");
1575
1576 printf("# Available commands:\n");
1577 printf("\trecord: record kvm events\n");
1578 printf("\treport: report statistical data of kvm events\n");
1afe1d14 1579 printf("\tlive: live reporting of statistical data of kvm events\n");
bcf6edcd
XG
1580
1581 printf("\nOtherwise, it is the alias of 'perf stat':\n");
1582}
1583
1584static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
1585{
1586 struct perf_kvm_stat kvm = {
1587 .file_name = file_name,
1588
1589 .trace_vcpu = -1,
1590 .report_event = "vmexit",
1591 .sort_key = "sample",
1592
3786063a
XG
1593 };
1594
bcf6edcd
XG
1595 if (argc == 1) {
1596 print_kvm_stat_usage();
1597 goto perf_stat;
1598 }
1599
1600 if (!strncmp(argv[1], "rec", 3))
3786063a 1601 return kvm_events_record(&kvm, argc - 1, argv + 1);
bcf6edcd
XG
1602
1603 if (!strncmp(argv[1], "rep", 3))
1604 return kvm_events_report(&kvm, argc - 1 , argv + 1);
1605
1606#ifdef HAVE_TIMERFD_SUPPORT
1607 if (!strncmp(argv[1], "live", 4))
1608 return kvm_events_live(&kvm, argc - 1 , argv + 1);
87419c9a 1609#endif
1afe1d14 1610
bcf6edcd
XG
1611perf_stat:
1612 return cmd_stat(argc, argv, NULL);
1613}
1614#endif /* HAVE_KVM_STAT_SUPPORT */
1615
1616static int __cmd_record(const char *file_name, int argc, const char **argv)
1617{
1618 int rec_argc, i = 0, j;
1619 const char **rec_argv;
1620
1621 rec_argc = argc + 2;
1622 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1623 rec_argv[i++] = strdup("record");
1624 rec_argv[i++] = strdup("-o");
3786063a 1625 rec_argv[i++] = strdup(file_name);
a1645ce1
ZY
1626 for (j = 1; j < argc; j++, i++)
1627 rec_argv[i] = argv[j];
1628
1629 BUG_ON(i != rec_argc);
1630
1631 return cmd_record(i, rec_argv, NULL);
1632}
1633
3786063a 1634static int __cmd_report(const char *file_name, int argc, const char **argv)
a1645ce1
ZY
1635{
1636 int rec_argc, i = 0, j;
1637 const char **rec_argv;
1638
1639 rec_argc = argc + 2;
1640 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1641 rec_argv[i++] = strdup("report");
1642 rec_argv[i++] = strdup("-i");
3786063a 1643 rec_argv[i++] = strdup(file_name);
a1645ce1
ZY
1644 for (j = 1; j < argc; j++, i++)
1645 rec_argv[i] = argv[j];
1646
1647 BUG_ON(i != rec_argc);
1648
1649 return cmd_report(i, rec_argv, NULL);
1650}
1651
3786063a
XG
1652static int
1653__cmd_buildid_list(const char *file_name, int argc, const char **argv)
a1645ce1
ZY
1654{
1655 int rec_argc, i = 0, j;
1656 const char **rec_argv;
1657
1658 rec_argc = argc + 2;
1659 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1660 rec_argv[i++] = strdup("buildid-list");
1661 rec_argv[i++] = strdup("-i");
3786063a 1662 rec_argv[i++] = strdup(file_name);
a1645ce1
ZY
1663 for (j = 1; j < argc; j++, i++)
1664 rec_argv[i] = argv[j];
1665
1666 BUG_ON(i != rec_argc);
1667
1668 return cmd_buildid_list(i, rec_argv, NULL);
1669}
1670
1671int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
1672{
1673 const char *file_name = NULL;
1674 const struct option kvm_options[] = {
1675 OPT_STRING('i', "input", &file_name, "file",
1676 "Input file name"),
1677 OPT_STRING('o', "output", &file_name, "file",
1678 "Output file name"),
1679 OPT_BOOLEAN(0, "guest", &perf_guest,
1680 "Collect guest os data"),
1681 OPT_BOOLEAN(0, "host", &perf_host,
1682 "Collect host os data"),
1683 OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
1684 "guest mount directory under which every guest os"
1685 " instance has a subdir"),
1686 OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
1687 "file", "file saving guest os vmlinux"),
1688 OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
1689 "file", "file saving guest os /proc/kallsyms"),
1690 OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
1691 "file", "file saving guest os /proc/modules"),
100b9073
DY
1692 OPT_INCR('v', "verbose", &verbose,
1693 "be more verbose (show counter open errors, etc)"),
de332ac4
DA
1694 OPT_END()
1695 };
1696
09a71b97
RR
1697 const char *const kvm_subcommands[] = { "top", "record", "report", "diff",
1698 "buildid-list", "stat", NULL };
1699 const char *kvm_usage[] = { NULL, NULL };
de332ac4 1700
1aed2671
JR
1701 perf_host = 0;
1702 perf_guest = 1;
a1645ce1 1703
09a71b97
RR
1704 argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage,
1705 PARSE_OPT_STOP_AT_NON_OPTION);
a1645ce1
ZY
1706 if (!argc)
1707 usage_with_options(kvm_usage, kvm_options);
1708
1709 if (!perf_host)
1710 perf_guest = 1;
1711
1712 if (!file_name) {
1713 file_name = get_filename_for_perf_kvm();
1714
1715 if (!file_name) {
1716 pr_err("Failed to allocate memory for filename\n");
1717 return -ENOMEM;
1718 }
a1645ce1
ZY
1719 }
1720
1721 if (!strncmp(argv[0], "rec", 3))
1722 return __cmd_record(file_name, argc, argv);
1723 else if (!strncmp(argv[0], "rep", 3))
1724 return __cmd_report(file_name, argc, argv);
1725 else if (!strncmp(argv[0], "diff", 4))
1726 return cmd_diff(argc, argv, NULL);
1727 else if (!strncmp(argv[0], "top", 3))
1728 return cmd_top(argc, argv, NULL);
1729 else if (!strncmp(argv[0], "buildid-list", 12))
1730 return __cmd_buildid_list(file_name, argc, argv);
1731#ifdef HAVE_KVM_STAT_SUPPORT
1732 else if (!strncmp(argv[0], "stat", 4))
1733 return kvm_cmd_stat(file_name, argc, argv);
1734#endif
1735 else
1736 usage_with_options(kvm_usage, kvm_options);
1737
1738 return 0;
1739}