/* tools/perf/tests/switch-tracking.c */
// SPDX-License-Identifier: GPL-2.0
#include <sys/time.h>
#include <sys/prctl.h>
#include <errno.h>
#include <limits.h>
#include <time.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>

#include "debug.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "record.h"
#include "tests.h"

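/*
 * spin_sleep() keeps the test workload busy on the CPU for ~50ms (so the
 * cycles/cpu-clock events have something to sample) and then sleeps for
 * ~50ms (so the task is scheduled out and sched_switch events occur).
 */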
static int spin_sleep(void)
{
	struct timeval start, now, diff, maxtime;
	struct timespec ts;
	int err, i;

	maxtime.tv_sec = 0;
	maxtime.tv_usec = 50000;

	err = gettimeofday(&start, NULL);
	if (err)
		return err;

	/* Spin for 50ms */
	while (1) {
		for (i = 0; i < 1000; i++)
			barrier();

		err = gettimeofday(&now, NULL);
		if (err)
			return err;

		timersub(&now, &start, &diff);
		if (timercmp(&diff, &maxtime, > /* For checkpatch */))
			break;
	}

	ts.tv_nsec = 50 * 1000 * 1000;
	ts.tv_sec = 0;

	/* Sleep for 50ms */
	err = nanosleep(&ts, NULL);
	/* nanosleep() reports errors via errno; an interrupted sleep is not a failure */
	if (err && errno == EINTR)
		err = 0;

	return err;
}

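/*
 * Bookkeeping for what is seen while replaying the recorded events:
 * comm_seen[] marks which of the four "Test COMM n" events arrived, the
 * cycles_* flags note whether cycles samples were seen before/between/after
 * those markers, and tids[] remembers, per CPU, the tid that the last
 * sched_switch on that CPU switched to.
 */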
struct switch_tracking {
	struct evsel *switch_evsel;
	struct evsel *cycles_evsel;
	pid_t *tids;
	int nr_tids;
	int comm_seen[4];
	int cycles_before_comm_1;
	int cycles_between_comm_2_and_comm_3;
	int cycles_after_comm_4;
};

static int check_comm(struct switch_tracking *switch_tracking,
		      union perf_event *event, const char *comm, int nr)
{
	if (event->header.type == PERF_RECORD_COMM &&
	    (pid_t)event->comm.pid == getpid() &&
	    (pid_t)event->comm.tid == getpid() &&
	    strcmp(event->comm.comm, comm) == 0) {
		if (switch_tracking->comm_seen[nr]) {
			pr_debug("Duplicate comm event\n");
			return -1;
		}
		switch_tracking->comm_seen[nr] = 1;
		pr_debug3("comm event: %s nr: %d\n", event->comm.comm, nr);
		return 1;
	}
	return 0;
}

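/*
 * check_cpu() lazily sizes the per-CPU tids[] array so that it covers the
 * highest CPU number seen so far; newly added slots are set to -1, meaning
 * "no sched_switch seen on this CPU yet".
 */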
static int check_cpu(struct switch_tracking *switch_tracking, int cpu)
{
	int i, nr = cpu + 1;

	if (cpu < 0)
		return -1;

	if (!switch_tracking->tids) {
		switch_tracking->tids = calloc(nr, sizeof(pid_t));
		if (!switch_tracking->tids)
			return -1;
		for (i = 0; i < nr; i++)
			switch_tracking->tids[i] = -1;
		switch_tracking->nr_tids = nr;
		return 0;
	}

	if (cpu >= switch_tracking->nr_tids) {
		void *addr;

		addr = realloc(switch_tracking->tids, nr * sizeof(pid_t));
		if (!addr)
			return -1;
		switch_tracking->tids = addr;
		for (i = switch_tracking->nr_tids; i < nr; i++)
			switch_tracking->tids[i] = -1;
		switch_tracking->nr_tids = nr;
		return 0;
	}

	return 0;
}

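/*
 * For each sample, decide which evsel it came from.  sched_switch samples
 * are cross-checked per CPU: the prev_pid of a switch must match the
 * next_pid recorded by the previous switch on the same CPU, otherwise a
 * sched_switch event was lost.  Cycles samples are only noted relative to
 * the "Test COMM n" markers, which is what the enable/disable checks at the
 * end of the test rely on.
 */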
static int process_sample_event(struct evlist *evlist,
				union perf_event *event,
				struct switch_tracking *switch_tracking)
{
	struct perf_sample sample;
	struct evsel *evsel;
	pid_t next_tid, prev_tid;
	int cpu, err;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	evsel = perf_evlist__id2evsel(evlist, sample.id);
	if (evsel == switch_tracking->switch_evsel) {
		next_tid = perf_evsel__intval(evsel, &sample, "next_pid");
		prev_tid = perf_evsel__intval(evsel, &sample, "prev_pid");
		cpu = sample.cpu;
		pr_debug3("sched_switch: cpu: %d prev_tid %d next_tid %d\n",
			  cpu, prev_tid, next_tid);
		err = check_cpu(switch_tracking, cpu);
		if (err)
			return err;
		/*
		 * Check for no missing sched_switch events i.e. that the
		 * evsel->system_wide flag has worked.
		 */
		if (switch_tracking->tids[cpu] != -1 &&
		    switch_tracking->tids[cpu] != prev_tid) {
			pr_debug("Missing sched_switch events\n");
			return -1;
		}
		switch_tracking->tids[cpu] = next_tid;
	}

	if (evsel == switch_tracking->cycles_evsel) {
		pr_debug3("cycles event\n");
		if (!switch_tracking->comm_seen[0])
			switch_tracking->cycles_before_comm_1 = 1;
		if (switch_tracking->comm_seen[1] &&
		    !switch_tracking->comm_seen[2])
			switch_tracking->cycles_between_comm_2_and_comm_3 = 1;
		if (switch_tracking->comm_seen[3])
			switch_tracking->cycles_after_comm_4 = 1;
	}

	return 0;
}

static int process_event(struct evlist *evlist, union perf_event *event,
			 struct switch_tracking *switch_tracking)
{
	if (event->header.type == PERF_RECORD_SAMPLE)
		return process_sample_event(evlist, event, switch_tracking);

	if (event->header.type == PERF_RECORD_COMM) {
		int err, done = 0;

		err = check_comm(switch_tracking, event, "Test COMM 1", 0);
		if (err < 0)
			return -1;
		done += err;
		err = check_comm(switch_tracking, event, "Test COMM 2", 1);
		if (err < 0)
			return -1;
		done += err;
		err = check_comm(switch_tracking, event, "Test COMM 3", 2);
		if (err < 0)
			return -1;
		done += err;
		err = check_comm(switch_tracking, event, "Test COMM 4", 3);
		if (err < 0)
			return -1;
		done += err;
		if (done != 1) {
			pr_debug("Unexpected comm event\n");
			return -1;
		}
	}

	return 0;
}

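/*
 * Events are read from multiple mmap ring buffers, so they are not globally
 * ordered.  Each event is wrapped in an event_node carrying its timestamp,
 * collected into a list, then copied into an array and sorted by time before
 * being processed, so that the ordering checks above are meaningful.
 */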
struct event_node {
	struct list_head list;
	union perf_event *event;
	u64 event_time;
};

static int add_event(struct evlist *evlist, struct list_head *events,
		     union perf_event *event)
{
	struct perf_sample sample;
	struct event_node *node;

	node = malloc(sizeof(struct event_node));
	if (!node) {
		pr_debug("malloc failed\n");
		return -1;
	}
	node->event = event;
	list_add(&node->list, events);

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	if (!sample.time) {
		pr_debug("event with no time\n");
		return -1;
	}

	node->event_time = sample.time;

	return 0;
}

static void free_event_nodes(struct list_head *events)
{
	struct event_node *node;

	while (!list_empty(events)) {
		node = list_entry(events->next, struct event_node, list);
		list_del_init(&node->list);
		free(node);
	}
}

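/*
 * qsort() comparator ordering event_nodes by timestamp.  The s64 difference
 * is truncated to int by the return, which is harmless here because all
 * timestamps come from one short test run and so are close together.
 */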
static int compar(const void *a, const void *b)
{
	const struct event_node *nodea = a;
	const struct event_node *nodeb = b;
	s64 cmp = nodea->event_time - nodeb->event_time;

	return cmp;
}

static int process_events(struct evlist *evlist,
			  struct switch_tracking *switch_tracking)
{
	union perf_event *event;
	unsigned pos, cnt = 0;
	LIST_HEAD(events);
	struct event_node *events_array, *node;
	struct perf_mmap *md;
	int i, ret;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
			cnt += 1;
			ret = add_event(evlist, &events, event);
			perf_mmap__consume(md);
			if (ret < 0)
				goto out_free_nodes;
		}
		perf_mmap__read_done(md);
	}

	events_array = calloc(cnt, sizeof(struct event_node));
	if (!events_array) {
		pr_debug("calloc failed\n");
		ret = -1;
		goto out_free_nodes;
	}

	pos = 0;
	list_for_each_entry(node, &events, list)
		events_array[pos++] = *node;

	qsort(events_array, cnt, sizeof(struct event_node), compar);

	for (pos = 0; pos < cnt; pos++) {
		ret = process_event(evlist, events_array[pos].event,
				    switch_tracking);
		if (ret < 0)
			goto out_free;
	}

	ret = 0;
out_free:
	pr_debug("%u events recorded\n", cnt);
	free(events_array);
out_free_nodes:
	free_event_nodes(&events);
	return ret;
}

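/*
 * Note: this test is normally run via the perf test harness (for example
 * something like "perf test -v sched_switch"; the exact test name and number
 * depend on the perf version) rather than by calling this function directly.
 */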
/**
 * test__switch_tracking - test using sched_switch and tracking events.
 *
 * This function implements a test that checks that sched_switch events and
 * tracking events can be recorded for a workload (current process) using the
 * evsel->system_wide and evsel->tracking flags (respectively) with other events
 * sometimes enabled or disabled.
 */
int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	const char *sched_switch = "sched:sched_switch";
	struct switch_tracking switch_tracking = { .tids = NULL, };
	struct record_opts opts = {
		.mmap_pages = UINT_MAX,
		.user_freq = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq = 4000,
		.target = {
			.uses_mmap = true,
		},
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel, *cpu_clocks_evsel, *cycles_evsel;
	struct evsel *switch_evsel, *tracking_evsel;
	const char *comm;
	int err = -1;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (!threads) {
		pr_debug("thread_map__new failed!\n");
		goto out_err;
	}

	cpus = perf_cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("perf_cpu_map__new failed!\n");
		goto out_err;
	}

	evlist = evlist__new();
	if (!evlist) {
		pr_debug("evlist__new failed!\n");
		goto out_err;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);

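	/*
	 * Four events are set up below: cpu-clock:u (disabled early on),
	 * cycles:u (toggled off and back on during the run), the
	 * sched:sched_switch tracepoint (recorded system-wide), and a
	 * dummy:u event that carries the tracking (mmap/comm/task)
	 * side-band information.
	 */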
	/* First event */
	err = parse_events(evlist, "cpu-clock:u", NULL);
	if (err) {
		pr_debug("Failed to parse event cpu-clock:u\n");
		goto out_err;
	}

	cpu_clocks_evsel = perf_evlist__last(evlist);

	/* Second event */
	err = parse_events(evlist, "cycles:u", NULL);
	if (err) {
		pr_debug("Failed to parse event cycles:u\n");
		goto out_err;
	}

	cycles_evsel = perf_evlist__last(evlist);

	/* Third event */
	if (!perf_evlist__can_select_event(evlist, sched_switch)) {
		pr_debug("No sched_switch\n");
		err = 0;
		goto out;
	}

	err = parse_events(evlist, sched_switch, NULL);
	if (err) {
		pr_debug("Failed to parse event %s\n", sched_switch);
		goto out_err;
	}

	switch_evsel = perf_evlist__last(evlist);

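	/*
	 * The sched_switch event needs CPU and TIME in its samples so that
	 * the per-CPU ordering check in process_sample_event() can work.
	 * system_wide records switches on all CPUs (not just this thread),
	 * no_aux_samples keeps AUX-area related sample configuration from
	 * being added to it, and immediate enables it at open time rather
	 * than waiting for the evlist to be enabled.
	 */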
	perf_evsel__set_sample_bit(switch_evsel, CPU);
	perf_evsel__set_sample_bit(switch_evsel, TIME);

	switch_evsel->system_wide = true;
	switch_evsel->no_aux_samples = true;
	switch_evsel->immediate = true;

	/* Test moving an event to the front */
	if (cycles_evsel == perf_evlist__first(evlist)) {
		pr_debug("cycles event already at front\n");
		goto out_err;
	}
	perf_evlist__to_front(evlist, cycles_evsel);
	if (cycles_evsel != perf_evlist__first(evlist)) {
		pr_debug("Failed to move cycles event to front\n");
		goto out_err;
	}

	perf_evsel__set_sample_bit(cycles_evsel, CPU);
	perf_evsel__set_sample_bit(cycles_evsel, TIME);

	/* Fourth event */
	err = parse_events(evlist, "dummy:u", NULL);
	if (err) {
		pr_debug("Failed to parse event dummy:u\n");
		goto out_err;
	}

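	/*
	 * The dummy event does no counting; it exists only to carry the
	 * tracking (mmap/comm/task) records.  freq = 0 and sample_period = 1
	 * keep it from being configured as a frequency-based sampling event,
	 * and TIME is needed so its records can be merged into the
	 * time-sorted event stream.
	 */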
	tracking_evsel = perf_evlist__last(evlist);

	perf_evlist__set_tracking_event(evlist, tracking_evsel);

	tracking_evsel->core.attr.freq = 0;
	tracking_evsel->core.attr.sample_period = 1;

	perf_evsel__set_sample_bit(tracking_evsel, TIME);

	/* Config events */
	perf_evlist__config(evlist, &opts, NULL);

	/* Check moved event is still at the front */
	if (cycles_evsel != perf_evlist__first(evlist)) {
		pr_debug("Front event no longer at front\n");
		goto out_err;
	}

	/* Check tracking event is tracking */
	if (!tracking_evsel->core.attr.mmap || !tracking_evsel->core.attr.comm) {
		pr_debug("Tracking event not tracking\n");
		goto out_err;
	}

	/* Check non-tracking events are not tracking */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel) {
			if (evsel->core.attr.mmap || evsel->core.attr.comm) {
				pr_debug("Non-tracking event is tracking\n");
				goto out_err;
			}
		}
	}

	if (evlist__open(evlist) < 0) {
		pr_debug("Not supported\n");
		err = 0;
		goto out;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX);
	if (err) {
		pr_debug("perf_evlist__mmap failed!\n");
		goto out_err;
	}

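	/*
	 * Workload and enable/disable sequence:
	 *   enable all events, disable cpu-clock, spin+sleep,
	 *   rename to "Test COMM 1", disable cycles, rename to "Test COMM 2",
	 *   spin+sleep, rename to "Test COMM 3", re-enable cycles,
	 *   rename to "Test COMM 4", spin+sleep, then disable everything.
	 * The checks after process_events() verify that cycles samples appear
	 * before COMM 1, disappear between COMM 2 and COMM 3, and reappear
	 * after COMM 4.
	 */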
	evlist__enable(evlist);

	err = evsel__disable(cpu_clocks_evsel);
	if (err) {
		pr_debug("evsel__disable failed!\n");
		goto out_err;
	}

	err = spin_sleep();
	if (err) {
		pr_debug("spin_sleep failed!\n");
		goto out_err;
	}

	comm = "Test COMM 1";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = evsel__disable(cycles_evsel);
	if (err) {
		pr_debug("evsel__disable failed!\n");
		goto out_err;
	}

	comm = "Test COMM 2";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = spin_sleep();
	if (err) {
		pr_debug("spin_sleep failed!\n");
		goto out_err;
	}

	comm = "Test COMM 3";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = evsel__enable(cycles_evsel);
	if (err) {
		pr_debug("evsel__enable failed!\n");
		goto out_err;
	}

	comm = "Test COMM 4";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = spin_sleep();
	if (err) {
		pr_debug("spin_sleep failed!\n");
		goto out_err;
	}

	evlist__disable(evlist);

	switch_tracking.switch_evsel = switch_evsel;
	switch_tracking.cycles_evsel = cycles_evsel;

	err = process_events(evlist, &switch_tracking);

	zfree(&switch_tracking.tids);

	if (err)
		goto out_err;

	/* Check all 4 comm events were seen i.e. that evsel->tracking works */
	if (!switch_tracking.comm_seen[0] || !switch_tracking.comm_seen[1] ||
	    !switch_tracking.comm_seen[2] || !switch_tracking.comm_seen[3]) {
		pr_debug("Missing comm events\n");
		goto out_err;
	}

	/* Check cycles event got enabled */
	if (!switch_tracking.cycles_before_comm_1) {
		pr_debug("Missing cycles events\n");
		goto out_err;
	}

	/* Check cycles event got disabled */
	if (switch_tracking.cycles_between_comm_2_and_comm_3) {
		pr_debug("cycles events even though event was disabled\n");
		goto out_err;
	}

	/* Check cycles event got enabled again */
	if (!switch_tracking.cycles_after_comm_4) {
		pr_debug("Missing cycles events\n");
		goto out_err;
	}
out:
	if (evlist) {
		evlist__disable(evlist);
		evlist__delete(evlist);
	} else {
		perf_cpu_map__put(cpus);
		perf_thread_map__put(threads);
	}

	return err;

out_err:
	err = -1;
	goto out;
}