/* SPDX-License-Identifier: LGPL-2.1+ */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#ifndef FUSE_USE_VERSION
#define FUSE_USE_VERSION 26
#endif

#define _FILE_OFFSET_BITS 64

#define __STDC_FORMAT_MACROS
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fuse.h>
#include <inttypes.h>
#include <libgen.h>
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <wait.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <sys/epoll.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/vfs.h>

#include "bindings.h"
#include "config.h"
#include "cgroup_fuse.h"
#include "cpuset_parse.h"
#include "cgroups/cgroup.h"
#include "cgroups/cgroup_utils.h"
#include "memory_utils.h"
#include "proc_loadavg.h"
#include "utils.h"

/* Data for CPU view */
struct cg_proc_stat {
	char *cg;
	struct cpuacct_usage *usage;	// Real usage as read from the host's /proc/stat
	struct cpuacct_usage *view;	// Usage stats reported to the container
	int cpu_count;
	pthread_mutex_t lock;		// For node manipulation
	struct cg_proc_stat *next;
};

struct cg_proc_stat_head {
	struct cg_proc_stat *next;
	time_t lastcheck;

	/*
	 * For access to the list. Reading can be parallel, pruning is exclusive.
	 */
	pthread_rwlock_t lock;
};

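/* Hash table of per-cgroup stat nodes, bucketed by a hash of the cgroup path. */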
#define CPUVIEW_HASH_SIZE 100
static struct cg_proc_stat_head *proc_stat_history[CPUVIEW_HASH_SIZE];

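/*
 * Reset a node's counters: take over the given host-side usage values and
 * zero the per-CPU view. Used when a cgroup's counters went backwards,
 * i.e. the cgroup was most likely recreated.
 */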
static void reset_proc_stat_node(struct cg_proc_stat *node,
				 struct cpuacct_usage *usage, int cpu_count)
{
	lxcfs_debug("Resetting stat node for %s\n", node->cg);
	memcpy(node->usage, usage, sizeof(struct cpuacct_usage) * cpu_count);

	for (int i = 0; i < cpu_count; i++) {
		node->view[i].user = 0;
		node->view[i].system = 0;
		node->view[i].idle = 0;
	}

	node->cpu_count = cpu_count;
}

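/*
 * Grow a node's usage and view arrays to cpu_count entries, carrying the
 * existing values over and zero-initializing the new tail. Returns false
 * on allocation failure, leaving the node untouched.
 */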
static bool expand_proc_stat_node(struct cg_proc_stat *node, int cpu_count)
{
	__do_free struct cpuacct_usage *new_usage = NULL, *new_view = NULL;

	/* Allocate new memory */
	new_usage = malloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!new_usage)
		return false;

	new_view = malloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!new_view)
		return false;

	/* Copy existing data & initialize new elements */
	for (int i = 0; i < cpu_count; i++) {
		if (i < node->cpu_count) {
			new_usage[i].user = node->usage[i].user;
			new_usage[i].system = node->usage[i].system;
			new_usage[i].idle = node->usage[i].idle;

			new_view[i].user = node->view[i].user;
			new_view[i].system = node->view[i].system;
			new_view[i].idle = node->view[i].idle;
		} else {
			new_usage[i].user = 0;
			new_usage[i].system = 0;
			new_usage[i].idle = 0;

			new_view[i].user = 0;
			new_view[i].system = 0;
			new_view[i].idle = 0;
		}
	}

	free(node->usage);
	node->usage = move_ptr(new_usage);

	free(node->view);
	node->view = move_ptr(new_view);
	node->cpu_count = cpu_count;

	return true;
}

static void free_proc_stat_node(struct cg_proc_stat *node)
{
	pthread_mutex_destroy(&node->lock);
	free_disarm(node->cg);
	free_disarm(node->usage);
	free_disarm(node->view);
	free_disarm(node);
}

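/*
 * Link new_node into its hash bucket. If a node for the same cgroup is
 * already present, new_node is freed and the existing node is returned,
 * so callers must continue with the return value, not with new_node.
 */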
static struct cg_proc_stat *add_proc_stat_node(struct cg_proc_stat *new_node)
{
	int hash = calc_hash(new_node->cg) % CPUVIEW_HASH_SIZE;
	struct cg_proc_stat_head *head = proc_stat_history[hash];
	struct cg_proc_stat *node, *rv = new_node;

	pthread_rwlock_wrlock(&head->lock);

	if (!head->next) {
		head->next = new_node;
		goto out;
	}

	node = head->next;

	for (;;) {
		if (strcmp(node->cg, new_node->cg) == 0) {
			/* The node is already present, return it */
			free_proc_stat_node(new_node);
			rv = node;
			goto out;
		}

		if (node->next) {
			node = node->next;
			continue;
		}

		node->next = new_node;
		goto out;
	}

out:
	pthread_rwlock_unlock(&head->lock);
	return rv;
}

static struct cg_proc_stat *new_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
{
	struct cg_proc_stat *node;
	int i;

	node = malloc(sizeof(struct cg_proc_stat));
	if (!node)
		goto err;

	node->cg = NULL;
	node->usage = NULL;
	node->view = NULL;

	node->cg = strdup(cg);
	if (!node->cg)
		goto err;

	node->usage = malloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!node->usage)
		goto err;

	memcpy(node->usage, usage, sizeof(struct cpuacct_usage) * cpu_count);

	node->view = malloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!node->view)
		goto err;

	node->cpu_count = cpu_count;
	node->next = NULL;

	if (pthread_mutex_init(&node->lock, NULL) != 0)
		log_error(goto err, "Failed to initialize node lock");

	for (i = 0; i < cpu_count; i++) {
		node->view[i].user = 0;
		node->view[i].system = 0;
		node->view[i].idle = 0;
	}

	return node;

err:
	if (node) {
		free(node->cg);
		free(node->usage);
		free(node->view);
		free(node);
	}

	return NULL;
}

static bool cgfs_param_exist(const char *controller, const char *cgroup,
			     const char *file)
{
	__do_free char *path = NULL;
	int cfd;

	cfd = get_cgroup_fd(controller);
	if (cfd < 0)
		return false;

	path = must_make_path(dot_or_empty(cgroup), cgroup, file, NULL);
	return (faccessat(cfd, path, F_OK, 0) == 0);
}

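/*
 * Walk one bucket's list and drop the nodes whose cgroup no longer exists
 * (detected by probing for its cpu.shares file). Returns the new list head.
 */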
static struct cg_proc_stat *prune_proc_stat_list(struct cg_proc_stat *node)
{
	struct cg_proc_stat *first = NULL;

	for (struct cg_proc_stat *prev = NULL; node; ) {
		if (!cgfs_param_exist("cpu", node->cg, "cpu.shares")) {
			struct cg_proc_stat *tmp = node;

			lxcfs_debug("Removing stat node for %s\n", node->cg);

			if (prev)
				prev->next = node->next;
			else
				first = node->next;

			node = node->next;
			free_proc_stat_node(tmp);
		} else {
			if (!first)
				first = node;
			prev = node;
			node = node->next;
		}
	}

	return first;
}

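/* Minimum number of seconds between prune passes over the history. */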
#define PROC_STAT_PRUNE_INTERVAL 10
static void prune_proc_stat_history(void)
{
	time_t now = time(NULL);

	for (int i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		pthread_rwlock_wrlock(&proc_stat_history[i]->lock);

		if ((proc_stat_history[i]->lastcheck + PROC_STAT_PRUNE_INTERVAL) > now) {
			pthread_rwlock_unlock(&proc_stat_history[i]->lock);
			return;
		}

		if (proc_stat_history[i]->next) {
			proc_stat_history[i]->next = prune_proc_stat_list(proc_stat_history[i]->next);
			proc_stat_history[i]->lastcheck = now;
		}

		pthread_rwlock_unlock(&proc_stat_history[i]->lock);
	}
}

static struct cg_proc_stat *find_proc_stat_node(struct cg_proc_stat_head *head,
						const char *cg)
{
	struct cg_proc_stat *node;

	pthread_rwlock_rdlock(&head->lock);

	if (!head->next) {
		pthread_rwlock_unlock(&head->lock);
		return NULL;
	}

	node = head->next;

	do {
		if (strcmp(cg, node->cg) == 0)
			goto out;
	} while ((node = node->next));

	node = NULL;

out:
	pthread_rwlock_unlock(&head->lock);
	prune_proc_stat_history();
	return node;
}

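/*
 * Look up the stat node for cg, creating and registering a new one if none
 * exists yet. On success the node is returned with node->lock held; the
 * caller is responsible for unlocking it.
 */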
static struct cg_proc_stat *find_or_create_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
{
	int hash = calc_hash(cg) % CPUVIEW_HASH_SIZE;
	struct cg_proc_stat_head *head = proc_stat_history[hash];
	struct cg_proc_stat *node;

	node = find_proc_stat_node(head, cg);
	if (!node) {
		node = new_proc_stat_node(usage, cpu_count, cg);
		if (!node)
			return NULL;

		node = add_proc_stat_node(node);
		lxcfs_debug("New stat node (%d) for %s\n", cpu_count, cg);
	}

	pthread_mutex_lock(&node->lock);

	/*
	 * If additional CPUs on the host have been enabled, the CPU usage
	 * counter arrays have to be expanded.
	 */
	if (node->cpu_count < cpu_count) {
		lxcfs_debug("Expanding stat node %d->%d for %s\n",
			    node->cpu_count, cpu_count, cg);

		if (!expand_proc_stat_node(node, cpu_count)) {
			pthread_mutex_unlock(&node->lock);
			return log_debug(NULL, "Unable to expand stat node %d->%d for %s", node->cpu_count, cpu_count, cg);
		}
	}

	return node;
}

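/*
 * Move surplus time onto *counter: take as much as still fits below
 * threshold (and no more than the CPU's remaining idle time), add it to
 * *counter and subtract it from both usage->idle and *surplus.
 */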
static void add_cpu_usage(uint64_t *surplus, struct cpuacct_usage *usage,
			  uint64_t *counter, uint64_t threshold)
{
	uint64_t free_space, to_add;

	free_space = threshold - usage->user - usage->system;

	if (free_space > usage->idle)
		free_space = usage->idle;

	to_add = free_space > *surplus ? *surplus : free_space;

	*counter += to_add;
	usage->idle -= to_add;
	*surplus -= to_add;
}

static uint64_t diff_cpu_usage(struct cpuacct_usage *older,
			       struct cpuacct_usage *newer,
			       struct cpuacct_usage *diff, int cpu_count)
{
	uint64_t sum = 0;

	for (int i = 0; i < cpu_count; i++) {
		if (!newer[i].online)
			continue;

		/*
		 * When cpuset is changed on the fly, the CPUs might get
		 * reordered. We could either reset all counters, or check
		 * that the subtractions below will return expected results.
		 */
		if (newer[i].user > older[i].user)
			diff[i].user = newer[i].user - older[i].user;
		else
			diff[i].user = 0;

		if (newer[i].system > older[i].system)
			diff[i].system = newer[i].system - older[i].system;
		else
			diff[i].system = 0;

		if (newer[i].idle > older[i].idle)
			diff[i].idle = newer[i].idle - older[i].idle;
		else
			diff[i].idle = 0;

		sum += diff[i].user;
		sum += diff[i].system;
		sum += diff[i].idle;
	}

	return sum;
}

/*
 * Read a cgroup CPU quota parameter, either `cpu.cfs_quota_us` or
 * `cpu.cfs_period_us`, depending on `param`. The parameter value is
 * returned through `value`.
 */
static bool read_cpu_cfs_param(const char *cg, const char *param, int64_t *value)
{
	__do_free char *str = NULL;
	char file[11 + 6 + 1]; /* cpu.cfs__us + quota/period + \0 */

	snprintf(file, sizeof(file), "cpu.cfs_%s_us", param);

	if (!cgroup_ops->get(cgroup_ops, "cpu", cg, file, &str))
		return false;

	if (sscanf(str, "%" PRId64, value) != 1)
		return false;

	return true;
}

/*
 * Return the exact number of visible CPUs based on CPU quotas.
 * If there is no quota set, zero is returned.
 */
static double exact_cpu_count(const char *cg)
{
	double rv;
	int nprocs;
	int64_t cfs_quota, cfs_period;

	if (!read_cpu_cfs_param(cg, "quota", &cfs_quota))
		return 0;

	if (!read_cpu_cfs_param(cg, "period", &cfs_period))
		return 0;

	if (cfs_quota <= 0 || cfs_period <= 0)
		return 0;

	rv = (double)cfs_quota / (double)cfs_period;

	nprocs = get_nprocs();

	if (rv > nprocs)
		rv = nprocs;

	return rv;
}

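/*
 * Example: cfs_quota = 150000 with cfs_period = 100000 allows 1.5 CPUs;
 * exact_cpu_count() above reports 1.5, while max_cpu_count() below rounds
 * this up to 2 visible CPUs.
 */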
/*
 * Return the maximum number of visible CPUs based on CPU quotas.
 * If there is no quota set, zero is returned.
 */
int max_cpu_count(const char *cg)
{
	__do_free char *cpuset = NULL;
	int rv, nprocs;
	int64_t cfs_quota, cfs_period;
	int nr_cpus_in_cpuset = 0;

	if (!read_cpu_cfs_param(cg, "quota", &cfs_quota))
		return 0;

	if (!read_cpu_cfs_param(cg, "period", &cfs_period))
		return 0;

	cpuset = get_cpuset(cg);
	if (cpuset)
		nr_cpus_in_cpuset = cpu_number_in_cpuset(cpuset);

	if (cfs_quota <= 0 || cfs_period <= 0) {
		if (nr_cpus_in_cpuset > 0)
			return nr_cpus_in_cpuset;

		return 0;
	}

	rv = cfs_quota / cfs_period;

	/*
	 * In case quota/period does not yield a whole number, add one CPU for
	 * the remainder.
	 */
	if ((cfs_quota % cfs_period) > 0)
		rv += 1;

	nprocs = get_nprocs();
	if (rv > nprocs)
		rv = nprocs;

	/* Use the lower of the quota-based and the cpuset-based CPU counts. */
	if (nr_cpus_in_cpuset > 0 && nr_cpus_in_cpuset < rv)
		rv = nr_cpus_in_cpuset;

	return rv;
}

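/*
 * Render a container-scoped /proc/stat from the host's /proc/stat (`f`) and
 * the cgroup's per-CPU counters (`cg_cpu_usage`):
 *
 * 1. Compute per-CPU idle time as host total minus cgroup usage.
 * 2. Diff the counters against the values cached in the cgroup's stat node
 *    and fold time spent on CPUs beyond `max_cpus` back into the visible
 *    ones.
 * 3. Emit an aggregate "cpu" line plus one "cpuN" line per visible CPU in
 *    the usual /proc/stat column order, e.g.
 *
 *        cpu 119 0 45 1431 0 0 0 0 0 0
 *        cpu0 119 0 45 1431 0 0 0 0 0 0
 *
 *    and pass the remaining host lines (intr, ctxt, ...) through unchanged.
 *
 * Returns the number of bytes written to `buf`, or 0 on error.
 */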
int cpuview_proc_stat(const char *cg, const char *cpuset,
		      struct cpuacct_usage *cg_cpu_usage, int cg_cpu_usage_size,
		      FILE *f, char *buf, size_t buf_size)
{
	__do_free char *line = NULL;
	__do_free struct cpuacct_usage *diff = NULL;
	size_t linelen = 0, total_len = 0;
	int curcpu = -1; /* cpu numbering starts at 0 */
	int physcpu, i;
	int max_cpus = max_cpu_count(cg), cpu_cnt = 0;
	uint64_t user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0,
		 softirq = 0, steal = 0, guest = 0, guest_nice = 0;
	uint64_t user_sum = 0, system_sum = 0, idle_sum = 0;
	uint64_t user_surplus = 0, system_surplus = 0;
	ssize_t l;
	uint64_t total_sum, threshold;
	struct cg_proc_stat *stat_node;
	int nprocs = get_nprocs_conf();

	if (cg_cpu_usage_size < nprocs)
		nprocs = cg_cpu_usage_size;

	/* Read all CPU stats and stop when we've encountered other lines */
	while (getline(&line, &linelen, f) != -1) {
		int ret;
		char cpu_char[10]; /* That's a lot of cores */
		uint64_t all_used, cg_used;

		if (strlen(line) == 0)
			continue;

		/* not a ^cpuN line containing a number N */
		if (sscanf(line, "cpu%9[^ ]", cpu_char) != 1)
			break;

		if (sscanf(cpu_char, "%d", &physcpu) != 1)
			continue;

		if (physcpu >= cg_cpu_usage_size)
			continue;

		curcpu++;
		cpu_cnt++;

		if (!cpu_in_cpuset(physcpu, cpuset)) {
			for (i = curcpu; i <= physcpu; i++)
				cg_cpu_usage[i].online = false;
			continue;
		}

		if (curcpu < physcpu) {
			/* Some CPUs may be disabled */
			for (i = curcpu; i < physcpu; i++)
				cg_cpu_usage[i].online = false;

			curcpu = physcpu;
		}

		cg_cpu_usage[curcpu].online = true;

		ret = sscanf(line, "%*s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64,
			     &user,
			     &nice,
			     &system,
			     &idle,
			     &iowait,
			     &irq,
			     &softirq,
			     &steal,
			     &guest,
			     &guest_nice);
		if (ret != 10)
			continue;

		all_used = user + nice + system + iowait + irq + softirq + steal + guest + guest_nice;
		cg_used = cg_cpu_usage[curcpu].user + cg_cpu_usage[curcpu].system;

		if (all_used >= cg_used) {
			cg_cpu_usage[curcpu].idle = idle + (all_used - cg_used);
		} else {
			lxcfs_error("cpu%d from %s has unexpected cpu time: %" PRIu64 " in /proc/stat, %" PRIu64 " in cpuacct.usage_all; unable to determine idle time",
				    curcpu, cg, all_used, cg_used);
			cg_cpu_usage[curcpu].idle = idle;
		}
	}

	/* Cannot use more CPUs than is available due to cpuset */
	if (max_cpus > cpu_cnt)
		max_cpus = cpu_cnt;

	stat_node = find_or_create_proc_stat_node(cg_cpu_usage, nprocs, cg);
	if (!stat_node)
		return log_error(0, "Failed to find/create stat node for %s", cg);

	diff = malloc(sizeof(struct cpuacct_usage) * nprocs);
	if (!diff) {
		pthread_mutex_unlock(&stat_node->lock);
		return 0;
	}

	/*
	 * If the new values are LOWER than values stored in memory, it means
	 * the cgroup has been reset/recreated and we should reset too.
	 */
	for (curcpu = 0; curcpu < nprocs; curcpu++) {
		if (!cg_cpu_usage[curcpu].online)
			continue;

		if (cg_cpu_usage[curcpu].user < stat_node->usage[curcpu].user)
			reset_proc_stat_node(stat_node, cg_cpu_usage, nprocs);

		break;
	}

	total_sum = diff_cpu_usage(stat_node->usage, cg_cpu_usage, diff, nprocs);

	for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
		stat_node->usage[curcpu].online = cg_cpu_usage[curcpu].online;

		if (!stat_node->usage[curcpu].online)
			continue;

		i++;

		stat_node->usage[curcpu].user += diff[curcpu].user;
		stat_node->usage[curcpu].system += diff[curcpu].system;
		stat_node->usage[curcpu].idle += diff[curcpu].idle;

		if (max_cpus > 0 && i >= max_cpus) {
			user_surplus += diff[curcpu].user;
			system_surplus += diff[curcpu].system;
		}
	}

	/* Calculate usage counters of visible CPUs */
	if (max_cpus > 0) {
		uint64_t diff_user = 0;
		uint64_t diff_system = 0;
		uint64_t diff_idle = 0;
		uint64_t max_diff_idle = 0;
		uint64_t max_diff_idle_index = 0;
		double exact_cpus;

		/* threshold = maximum usage per cpu, including idle */
		threshold = total_sum / cpu_cnt * max_cpus;

		for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
			if (!stat_node->usage[curcpu].online)
				continue;

			i++;

			if (i == max_cpus)
				break;

			if (diff[curcpu].user + diff[curcpu].system >= threshold)
				continue;

			/* Add user */
			add_cpu_usage(&user_surplus, &diff[curcpu],
				      &diff[curcpu].user, threshold);

			if (diff[curcpu].user + diff[curcpu].system >= threshold)
				continue;

			/* If there is still room, add system */
			add_cpu_usage(&system_surplus, &diff[curcpu],
				      &diff[curcpu].system, threshold);
		}

		if (user_surplus > 0)
			lxcfs_debug("leftover user: %lu for %s\n", user_surplus, cg);
		if (system_surplus > 0)
			lxcfs_debug("leftover system: %lu for %s\n", system_surplus, cg);

		for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
			if (!stat_node->usage[curcpu].online)
				continue;

			i++;

			if (i == max_cpus)
				break;

			stat_node->view[curcpu].user += diff[curcpu].user;
			stat_node->view[curcpu].system += diff[curcpu].system;
			stat_node->view[curcpu].idle += diff[curcpu].idle;

			user_sum += stat_node->view[curcpu].user;
			system_sum += stat_node->view[curcpu].system;
			idle_sum += stat_node->view[curcpu].idle;

			diff_user += diff[curcpu].user;
			diff_system += diff[curcpu].system;
			diff_idle += diff[curcpu].idle;
			if (diff[curcpu].idle > max_diff_idle) {
				max_diff_idle = diff[curcpu].idle;
				max_diff_idle_index = curcpu;
			}

			lxcfs_v("curcpu: %d, diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", curcpu, diff[curcpu].user, diff[curcpu].system, diff[curcpu].idle);
		}
		lxcfs_v("total. diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", diff_user, diff_system, diff_idle);

		/* Revise the cpu usage view to support the partial cpu case. */
		exact_cpus = exact_cpu_count(cg);
		if (exact_cpus < (double)max_cpus) {
			unsigned long delta = (unsigned long)((double)(diff_user + diff_system + diff_idle) * (1 - exact_cpus / (double)max_cpus));

			lxcfs_v("revising cpu usage view to match the exact cpu count [%f]\n", exact_cpus);
			lxcfs_v("delta: %lu\n", delta);
			lxcfs_v("idle_sum before: %lu\n", idle_sum);
			idle_sum = idle_sum > delta ? idle_sum - delta : 0;
			lxcfs_v("idle_sum after: %lu\n", idle_sum);

			curcpu = max_diff_idle_index;
			lxcfs_v("curcpu: %d, idle before: %lu\n", curcpu, stat_node->view[curcpu].idle);
			stat_node->view[curcpu].idle = stat_node->view[curcpu].idle > delta ? stat_node->view[curcpu].idle - delta : 0;
			lxcfs_v("curcpu: %d, idle after: %lu\n", curcpu, stat_node->view[curcpu].idle);
		}
	} else {
		for (curcpu = 0; curcpu < nprocs; curcpu++) {
			if (!stat_node->usage[curcpu].online)
				continue;

			stat_node->view[curcpu].user = stat_node->usage[curcpu].user;
			stat_node->view[curcpu].system = stat_node->usage[curcpu].system;
			stat_node->view[curcpu].idle = stat_node->usage[curcpu].idle;

			user_sum += stat_node->view[curcpu].user;
			system_sum += stat_node->view[curcpu].system;
			idle_sum += stat_node->view[curcpu].idle;
		}
	}

	/* Render the file */
	/* cpu-all */
	l = snprintf(buf, buf_size,
		     "cpu %" PRIu64 " 0 %" PRIu64 " %" PRIu64 " 0 0 0 0 0 0\n",
		     user_sum, system_sum, idle_sum);
	lxcfs_v("cpu-all: %s\n", buf);
	if (l < 0) {
		total_len = log_error(0, "Failed to write cache");
		goto out;
	}
	if (l >= buf_size) {
		total_len = log_error(0, "Write to cache was truncated");
		goto out;
	}

	buf += l;
	buf_size -= l;
	total_len += l;

	/* Render visible CPUs */
	for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
		if (!stat_node->usage[curcpu].online)
			continue;

		i++;

		if (max_cpus > 0 && i == max_cpus)
			break;

		l = snprintf(buf, buf_size, "cpu%d %" PRIu64 " 0 %" PRIu64 " %" PRIu64 " 0 0 0 0 0 0\n",
			     i,
			     stat_node->view[curcpu].user,
			     stat_node->view[curcpu].system,
			     stat_node->view[curcpu].idle);
		lxcfs_v("cpu: %s\n", buf);
		if (l < 0) {
			total_len = log_error(0, "Failed to write cache");
			goto out;
		}
		if (l >= buf_size) {
			total_len = log_error(0, "Write to cache was truncated");
			goto out;
		}

		buf += l;
		buf_size -= l;
		total_len += l;
	}

	/* Pass the rest of /proc/stat, start with the last line read */
	l = snprintf(buf, buf_size, "%s", line);
	if (l < 0) {
		total_len = log_error(0, "Failed to write cache");
		goto out;
	}
	if (l >= buf_size) {
		total_len = log_error(0, "Write to cache was truncated");
		goto out;
	}

	buf += l;
	buf_size -= l;
	total_len += l;

	/* Pass the rest of the host's /proc/stat */
	while (getline(&line, &linelen, f) != -1) {
		l = snprintf(buf, buf_size, "%s", line);
		if (l < 0) {
			total_len = log_error(0, "Failed to write cache");
			goto out;
		}
		if (l >= buf_size) {
			total_len = log_error(0, "Write to cache was truncated");
			goto out;
		}

		buf += l;
		buf_size -= l;
		total_len += l;
	}

out:
	/* The stat node was returned locked by find_or_create_proc_stat_node(). */
	pthread_mutex_unlock(&stat_node->lock);

	return total_len;
}

/*
 * Check whether this is a "^processor" line in /proc/cpuinfo.
 */
static inline bool is_processor_line(const char *line)
{
	int cpu;
	return sscanf(line, "processor : %d", &cpu) == 1;
}

static inline bool cpuline_in_cpuset(const char *line, const char *cpuset)
{
	int cpu;

	if (sscanf(line, "processor : %d", &cpu) == 1)
		return cpu_in_cpuset(cpu, cpuset);

	return false;
}

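/*
 * Render a container-scoped /proc/cpuinfo: only the processors in the
 * task's cpuset are shown (renumbered from 0, and capped at max_cpu_count()
 * when CPU views are enabled). On s390x the per-CPU lines use a different
 * format and the "# processors" header has to be rewritten as well.
 */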
int proc_cpuinfo_read(char *buf, size_t size, off_t offset,
		      struct fuse_file_info *fi)
{
	__do_free char *cg = NULL, *cpuset = NULL, *line = NULL;
	__do_free void *fopen_cache = NULL;
	__do_fclose FILE *f = NULL;
	struct fuse_context *fc = fuse_get_context();
	struct lxcfs_opts *opts = (struct lxcfs_opts *)fc->private_data;
	struct file_info *d = INTTYPE_TO_PTR(fi->fh);
	size_t linelen = 0, total_len = 0;
	bool am_printing = false, firstline = true, is_s390x = false;
	int curcpu = -1, cpu, max_cpus = 0;
	bool use_view;
	char *cache = d->buf;
	size_t cache_size = d->buflen;

	if (offset) {
		int left;

		if (offset > d->size)
			return -EINVAL;

		if (!d->cached)
			return 0;

		left = d->size - offset;
		total_len = left > size ? size : left;
		memcpy(buf, cache + offset, total_len);

		return total_len;
	}

	pid_t initpid = lookup_initpid_in_store(fc->pid);
	if (initpid <= 1 || is_shared_pidns(initpid))
		initpid = fc->pid;

	cg = get_pid_cgroup(initpid, "cpuset");
	if (!cg)
		return read_file_fuse("proc/cpuinfo", buf, size, d);
	prune_init_slice(cg);

	cpuset = get_cpuset(cg);
	if (!cpuset)
		return 0;

	use_view = cgroup_ops->can_use_cpuview(cgroup_ops) && opts && opts->use_cfs;
	if (use_view)
		max_cpus = max_cpu_count(cg);

	f = fopen_cached("/proc/cpuinfo", "re", &fopen_cache);
	if (!f)
		return 0;

	while (getline(&line, &linelen, f) != -1) {
		ssize_t l;

		if (firstline) {
			firstline = false;
			if (strstr(line, "IBM/S390") != NULL) {
				is_s390x = true;
				am_printing = true;
				continue;
			}
		}

		if (strncmp(line, "# processors", 12) == 0)
			continue;

		if (is_processor_line(line)) {
			if (use_view && max_cpus > 0 && (curcpu + 1) == max_cpus)
				break;

			am_printing = cpuline_in_cpuset(line, cpuset);
			if (am_printing) {
				curcpu++;
				l = snprintf(cache, cache_size, "processor : %d\n", curcpu);
				if (l < 0)
					return log_error(0, "Failed to write cache");
				if (l >= cache_size)
					return log_error(0, "Write to cache was truncated");

				cache += l;
				cache_size -= l;
				total_len += l;
			}
			continue;
		} else if (is_s390x && sscanf(line, "processor %d:", &cpu) == 1) {
			char *p;

			if (use_view && max_cpus > 0 && (curcpu + 1) == max_cpus)
				break;

			if (!cpu_in_cpuset(cpu, cpuset))
				continue;

			curcpu++;
			p = strchr(line, ':');
			if (!p || !*p)
				return 0;
			p++;

			l = snprintf(cache, cache_size, "processor %d:%s", curcpu, p);
			if (l < 0)
				return log_error(0, "Failed to write cache");
			if (l >= cache_size)
				return log_error(0, "Write to cache was truncated");

			cache += l;
			cache_size -= l;
			total_len += l;
			continue;
		}

		if (am_printing) {
			l = snprintf(cache, cache_size, "%s", line);
			if (l < 0)
				return log_error(0, "Failed to write cache");
			if (l >= cache_size)
				return log_error(0, "Write to cache was truncated");

			cache += l;
			cache_size -= l;
			total_len += l;
		}
	}

	if (is_s390x) {
		__do_free char *origcache = d->buf;
		ssize_t l;

		d->buf = malloc(d->buflen);
		if (!d->buf) {
			d->buf = move_ptr(origcache);
			return 0;
		}

		cache = d->buf;
		cache_size = d->buflen;
		total_len = 0;

		l = snprintf(cache, cache_size, "vendor_id : IBM/S390\n");
		if (l < 0 || l >= cache_size)
			return 0;

		cache_size -= l;
		cache += l;
		total_len += l;

		l = snprintf(cache, cache_size, "# processors : %d\n", curcpu + 1);
		if (l < 0 || l >= cache_size)
			return 0;

		cache_size -= l;
		cache += l;
		total_len += l;

		l = snprintf(cache, cache_size, "%s", origcache);
		if (l < 0 || l >= cache_size)
			return 0;
		total_len += l;
	}

	d->cached = 1;
	d->size = total_len;
	if (total_len > size)
		total_len = size;

	/* read from off 0 */
	memcpy(buf, d->buf, total_len);

	return total_len;
}

/*
 * Returns 0 on success.
 * It is the caller's responsibility to free `return_usage`, unless this
 * function returns an error.
 */
int read_cpuacct_usage_all(char *cg, char *cpuset,
			   struct cpuacct_usage **return_usage, int *size)
{
	__do_free char *usage_str = NULL;
	__do_free struct cpuacct_usage *cpu_usage = NULL;
	int cpucount = get_nprocs_conf();
	int i = 0, j = 0, read_pos = 0, read_cnt = 0;
	int ret;
	int cg_cpu;
	uint64_t cg_user, cg_system;
	int64_t ticks_per_sec;

	ticks_per_sec = sysconf(_SC_CLK_TCK);
	if (ticks_per_sec < 0 && errno == EINVAL) {
		lxcfs_v("%s\n", "read_cpuacct_usage_all failed to determine number of clock ticks in a second");
		return -1;
	}

	cpu_usage = malloc(sizeof(struct cpuacct_usage) * cpucount);
	if (!cpu_usage)
		return -ENOMEM;
	memset(cpu_usage, 0, sizeof(struct cpuacct_usage) * cpucount);

	if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_all", &usage_str)) {
		char *data = NULL;
		size_t sz = 0, asz = 0;

		/* Read cpuacct.usage_percpu instead. */
		lxcfs_v("failed to read cpuacct.usage_all. reading cpuacct.usage_percpu instead\n%s", "");
		if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_percpu", &usage_str))
			return -1;
		lxcfs_v("usage_str: %s\n", usage_str);

		/* Convert cpuacct.usage_percpu into cpuacct.usage_all. */
		lxcfs_v("converting cpuacct.usage_percpu into cpuacct.usage_all\n%s", "");

		must_strcat(&data, &sz, &asz, "cpu user system\n");

		while (sscanf(usage_str + read_pos, "%" PRIu64 " %n", &cg_user, &read_cnt) > 0) {
			lxcfs_debug("i: %d, cg_user: %" PRIu64 ", read_pos: %d, read_cnt: %d\n", i, cg_user, read_pos, read_cnt);
			must_strcat(&data, &sz, &asz, "%d %" PRIu64 " 0\n", i, cg_user);
			i++;
			read_pos += read_cnt;
		}

		free(usage_str);
		usage_str = data;

		lxcfs_v("usage_str: %s\n", usage_str);
	}

	if (sscanf(usage_str, "cpu user system\n%n", &read_cnt) != 0)
		return log_error(-1, "read_cpuacct_usage_all reading first line from %s/cpuacct.usage_all failed", cg);

	read_pos += read_cnt;

	for (i = 0, j = 0; i < cpucount; i++) {
		ret = sscanf(usage_str + read_pos,
			     "%d %" PRIu64 " %" PRIu64 "\n%n", &cg_cpu,
			     &cg_user, &cg_system, &read_cnt);
		if (ret == EOF)
			break;

		if (ret != 3)
			return log_error(-1, "read_cpuacct_usage_all reading from %s/cpuacct.usage_all failed", cg);

		read_pos += read_cnt;

		/* Convert the time from nanoseconds to USER_HZ */
		cpu_usage[j].user = cg_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
		cpu_usage[j].system = cg_system / 1000.0 / 1000 / 1000 * ticks_per_sec;
		j++;
	}

	*return_usage = move_ptr(cpu_usage);
	*size = cpucount;
	return 0;
}

static bool cpuview_init_head(struct cg_proc_stat_head **head)
{
	*head = malloc(sizeof(struct cg_proc_stat_head));
	if (!(*head))
		return log_error(false, "%s", strerror(errno));

	(*head)->lastcheck = time(NULL);
	(*head)->next = NULL;

	if (pthread_rwlock_init(&(*head)->lock, NULL) != 0) {
		free_disarm(*head);
		return log_error(false, "Failed to initialize list lock");
	}

	return true;
}

bool init_cpuview(void)
{
	int i;

	for (i = 0; i < CPUVIEW_HASH_SIZE; i++)
		proc_stat_history[i] = NULL;

	for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		if (!cpuview_init_head(&proc_stat_history[i]))
			goto err;
	}

	return true;

err:
	for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		if (proc_stat_history[i])
			free_disarm(proc_stat_history[i]);
	}

	return false;
}

static void cpuview_free_head(struct cg_proc_stat_head *head)
{
	struct cg_proc_stat *node, *tmp;

	if (head->next) {
		node = head->next;

		for (;;) {
			tmp = node;
			node = node->next;
			free_proc_stat_node(tmp);

			if (!node)
				break;
		}
	}

	pthread_rwlock_destroy(&head->lock);
	free_disarm(head);
}

void free_cpuview(void)
{
	for (int i = 0; i < CPUVIEW_HASH_SIZE; i++)
		if (proc_stat_history[i])
			cpuview_free_head(proc_stat_history[i]);
}