#include "perf.h"
#include "util/debug.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-events.h"
#include "tests/tests.h"
#include "tests/hists_common.h"

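/*
 * Fake sample fixture: cpu/pid/ip are the input fields, while thread/map/sym
 * are filled in by add_hist_entries() once the sample has been resolved
 * against the fake machine.
 */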
struct sample {
	u32 cpu;
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .cpu = 3, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .cpu = 0, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .cpu = 1, .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};

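/*
 * Resolve each entry of fake_samples[] against @machine and add it to @hists
 * with a period of 100, so the ten samples total a period of 1000 and each
 * contributes 10% overhead.
 */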
static int add_hist_entries(struct hists *hists, struct machine *machine)
{
	struct addr_location al;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	struct perf_sample sample = { .period = 100, };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		const union perf_event event = {
			.header = {
				.misc = PERF_RECORD_MISC_USER,
			},
		};
		struct hist_entry_iter iter = {
			.ops = &hist_iter_normal,
			.hide_unresolved = false,
		};

		sample.cpu = fake_samples[i].cpu;
		sample.pid = fake_samples[i].pid;
		sample.tid = fake_samples[i].pid;
		sample.ip = fake_samples[i].ip;

		if (perf_event__preprocess_sample(&event, machine, &al,
						  &sample) < 0)
			goto out;

		if (hist_entry_iter__add(&iter, &al, evsel, &sample,
					 PERF_MAX_STACK_DEPTH, NULL) < 0) {
			addr_location__put(&al);
			goto out;
		}

		fake_samples[i].thread = al.thread;
		fake_samples[i].map = al.map;
		fake_samples[i].sym = al.sym;
	}

	return TEST_OK;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return TEST_FAIL;
}

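/*
 * Remove every hist entry from both the output tree (hists->entries) and the
 * input/collapsed tree, so each test case starts from an empty hists.
 */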
static void del_hist_entries(struct hists *hists)
{
	struct hist_entry *he;
	struct rb_root *root_in;
	struct rb_root *root_out;
	struct rb_node *node;

	if (sort__need_collapse)
		root_in = &hists->entries_collapsed;
	else
		root_in = hists->entries_in;

	root_out = &hists->entries;

	while (!RB_EMPTY_ROOT(root_out)) {
		node = rb_first(root_out);

		he = rb_entry(node, struct hist_entry, rb_node);
		rb_erase(node, root_out);
		rb_erase(&he->rb_node_in, root_in);
		hist_entry__delete(he);
	}
}

typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);

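/* Shorthand accessors for the hist_entry fields checked in the assertions below. */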
#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (he->ms.map->dso->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (he->thread->tid)

/* default sort keys (no field) */
static int test1(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = NULL;
	sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */

	setup_sorting();

	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%     bash  bash           [.] main
	 *   10.00%     bash  bash           [.] xmalloc
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%     perf  [kernel]       [k] schedule
	 *   10.00%     perf  libc           [.] free
	 *   10.00%     perf  libc           [.] malloc
	 *   10.00%     perf  perf           [.] cmd_record
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	hists__output_resort(hists, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "main") && he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "xmalloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "schedule") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "free") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "malloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "cmd_record") && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* mixed fields and sort keys */
static int test2(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = "overhead,cpu";
	sort_order = "pid";

	setup_sorting();

	/*
	 * expected output:
	 *
	 * Overhead  CPU  Command:  Pid
	 * ========  ===  =============
	 *   30.00%    1  perf   :  100
	 *   10.00%    0  perf   :  100
	 *   10.00%    2  perf   :  100
	 *   20.00%    2  perf   :  200
	 *   10.00%    0  bash   :  300
	 *   10.00%    1  bash   :  300
	 *   10.00%    3  bash   :  300
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	hists__output_resort(hists, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

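	/* Only the first two of the expected output entries are verified here. */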
	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 0 && PID(he) == 100 && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* fields only (no sort key) */
static int test3(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = "comm,overhead,dso";
	sort_order = NULL;

	setup_sorting();

	/*
	 * expected output:
	 *
	 * Command  Overhead  Shared Object
	 * =======  ========  =============
	 *    bash    20.00%  bash
	 *    bash    10.00%  [kernel]
	 *    perf    30.00%  perf
	 *    perf    20.00%  [kernel]
	 *    perf    20.00%  libc
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	hists__output_resort(hists, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
			he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			he->stat.period == 300);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			he->stat.period == 200);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* handle duplicate 'dso' field */
static int test4(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = "dso,sym,comm,overhead,dso";
	sort_order = "sym";

	setup_sorting();

	/*
	 * expected output:
	 *
	 * Shared Object          Symbol  Command  Overhead
	 * =============  ==============  =======  ========
	 *          perf  [.] cmd_record     perf    10.00%
	 *          libc  [.] free           perf    10.00%
	 *          bash  [.] main           bash    10.00%
	 *          perf  [.] main           perf    20.00%
	 *          libc  [.] malloc         perf    10.00%
	 *      [kernel]  [k] page_fault     bash    10.00%
	 *      [kernel]  [k] page_fault     perf    10.00%
	 *      [kernel]  [k] schedule       perf    10.00%
	 *          bash  [.] xmalloc        bash    10.00%
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	hists__output_resort(hists, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "libc") && !strcmp(SYM(he), "free") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "bash") && !strcmp(SYM(he), "main") &&
			!strcmp(COMM(he), "bash") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "main") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 200);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "libc") && !strcmp(SYM(he), "malloc") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
			!strcmp(COMM(he), "bash") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "schedule") &&
			!strcmp(COMM(he), "perf") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			!strcmp(DSO(he), "bash") && !strcmp(SYM(he), "xmalloc") &&
			!strcmp(COMM(he), "bash") && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* full sort keys w/o overhead field */
static int test5(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;

	field_order = "cpu,pid,comm,dso,sym";
	sort_order = "dso,pid";

	setup_sorting();

	/*
	 * expected output:
	 *
	 * CPU  Command:  Pid  Command  Shared Object          Symbol
	 * ===  =============  =======  =============  ==============
	 *   0     perf:  100     perf       [kernel]  [k] schedule
	 *   2     perf:  200     perf       [kernel]  [k] page_fault
	 *   1     bash:  300     bash       [kernel]  [k] page_fault
	 *   0     bash:  300     bash           bash  [.] xmalloc
	 *   3     bash:  300     bash           bash  [.] main
	 *   1     perf:  100     perf           libc  [.] malloc
	 *   2     perf:  100     perf           libc  [.] free
	 *   1     perf:  100     perf           perf  [.] cmd_record
	 *   1     perf:  100     perf           perf  [.] main
	 *   2     perf:  200     perf           perf  [.] main
	 */
	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	hists__collapse_resort(hists, NULL);
	hists__output_resort(hists, NULL);

	if (verbose > 2) {
		pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
		print_hists_out(hists);
	}

	root = &hists->entries;
	node = rb_first(root);
	he = rb_entry(node, struct hist_entry, rb_node);

	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 0 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "schedule") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 2 && PID(he) == 200 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 300 &&
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
			!strcmp(SYM(he), "page_fault") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 0 && PID(he) == 300 &&
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "xmalloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 3 && PID(he) == 300 &&
			!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "malloc") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 2 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
			!strcmp(SYM(he), "free") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "cmd_record") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 1 && PID(he) == 100 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

	node = rb_next(node);
	he = rb_entry(node, struct hist_entry, rb_node);
	TEST_ASSERT_VAL("Invalid hist entry",
			CPU(he) == 2 && PID(he) == 200 &&
			!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
			!strcmp(SYM(he), "main") && he->stat.period == 100);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

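/*
 * Test entry point: set up a fake machine (threads, DSOs, maps and symbols),
 * parse a cpu-clock event and run each test case against its hists.
 */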
int test__hists_output(void)
{
	int err = TEST_FAIL;
	struct machines machines;
	struct machine *machine;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new();
	size_t i;
	test_fn_t testcases[] = {
		test1,
		test2,
		test3,
		test4,
		test5,
	};

	TEST_ASSERT_VAL("No memory", evlist);

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	evsel = perf_evlist__first(evlist);

	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
		err = testcases[i](evsel, machine);
		if (err < 0)
			break;
	}

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	machines__exit(&machines);

	return err;
}