/* SPDX-License-Identifier: LGPL-2.1+ */

#define __STDC_FORMAT_MACROS

#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <linux/magic.h>
#include <linux/sched.h>
#include <sys/epoll.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>

#include "proc_cpuview.h"

#include "cgroup_fuse.h"
#include "cpuset_parse.h"
#include "cgroups/cgroup.h"
#include "cgroups/cgroup_utils.h"
#include "memory_utils.h"
#include "proc_loadavg.h"
/* Data for CPU view */
struct cg_proc_stat {
	char *cg;
	struct cpuacct_usage *usage;	/* Real usage as read from the host's /proc/stat. */
	struct cpuacct_usage *view;	/* Usage stats reported to the container. */
	int cpu_count;
	pthread_mutex_t lock;		/* For node manipulation. */
	struct cg_proc_stat *next;
};

struct cg_proc_stat_head {
	struct cg_proc_stat *next;
	time_t lastcheck;

	/*
	 * For access to the list. Reading can be parallel, pruning is exclusive.
	 */
	pthread_rwlock_t lock;
};

#define CPUVIEW_HASH_SIZE 100
static struct cg_proc_stat_head *proc_stat_history[CPUVIEW_HASH_SIZE];
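
/*
 * Illustrative note on the layout above: stat nodes live in a fixed-size
 * hash table of singly-linked lists, bucketed by cgroup path, e.g.:
 *
 *	int hash = calc_hash(cg) % CPUVIEW_HASH_SIZE;
 *	struct cg_proc_stat_head *head = proc_stat_history[hash];
 *
 * The bucket rwlock serializes list manipulation while allowing parallel
 * lookups; each node additionally carries its own mutex guarding updates
 * to its usage/view counters.
 */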
static void reset_proc_stat_node(struct cg_proc_stat *node,
				 struct cpuacct_usage *usage, int cpu_count)
{
	lxcfs_debug("Resetting stat node for %s\n", node->cg);
	memcpy(node->usage, usage, sizeof(struct cpuacct_usage) * cpu_count);

	for (int i = 0; i < cpu_count; i++) {
		node->view[i].user = 0;
		node->view[i].system = 0;
		node->view[i].idle = 0;
	}

	node->cpu_count = cpu_count;
}
static bool expand_proc_stat_node(struct cg_proc_stat *node, int cpu_count)
{
	__do_free struct cpuacct_usage *new_usage = NULL, *new_view = NULL;

	/* Allocate new memory */
	new_usage = zalloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!new_usage)
		return false;

	new_view = zalloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!new_view)
		return false;

	/* Copy existing data & initialize new elements */
	for (int i = 0; i < cpu_count; i++) {
		if (i < node->cpu_count) {
			new_usage[i].user = node->usage[i].user;
			new_usage[i].system = node->usage[i].system;
			new_usage[i].idle = node->usage[i].idle;

			new_view[i].user = node->view[i].user;
			new_view[i].system = node->view[i].system;
			new_view[i].idle = node->view[i].idle;
		}
	}

	free(node->usage);
	node->usage = move_ptr(new_usage);

	free(node->view);
	node->view = move_ptr(new_view);
	node->cpu_count = cpu_count;

	return true;
}
static void free_proc_stat_node(struct cg_proc_stat *node)
{
	if (node) {
		/*
		 * We're abusing the usage pointer to indicate that
		 * pthread_mutex_init() was successful. Don't judge me.
		 */
		if (node->usage)
			pthread_mutex_destroy(&node->lock);
		free_disarm(node->cg);
		free_disarm(node->usage);
		free_disarm(node->view);
		free_disarm(node);
	}
}

define_cleanup_function(struct cg_proc_stat *, free_proc_stat_node);
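
/*
 * A note on the cleanup helpers used throughout this file (declared in
 * memory_utils.h; this is a summary, not upstream documentation):
 * __do_free marks a pointer to be freed when it goes out of scope and
 * call_cleaner(f) does the same with a custom cleanup function f, both
 * built on __attribute__((cleanup)). define_cleanup_function() above
 * generates the wrapper that makes free_proc_stat_node() usable that way,
 * and move_ptr() transfers ownership out so the automatic cleanup is
 * skipped.
 */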
static struct cg_proc_stat *add_proc_stat_node(struct cg_proc_stat *new_node)
{
	call_cleaner(free_proc_stat_node) struct cg_proc_stat *new = new_node;
	struct cg_proc_stat *rv = new_node;
	int hash = calc_hash(new->cg) % CPUVIEW_HASH_SIZE;
	struct cg_proc_stat_head *head = proc_stat_history[hash];
	struct cg_proc_stat *cur;

	pthread_rwlock_wrlock(&head->lock);

	if (!head->next) {
		head->next = move_ptr(new);
		goto out_rwlock_unlock;
	}

	cur = head->next;

	for (;;) {
		/*
		 * The node to be added is already present in the list, so
		 * free the newly allocated one and return the one we found.
		 */
		if (strcmp(cur->cg, new->cg) == 0) {
			rv = cur;
			goto out_rwlock_unlock;
		}

		/* Keep walking. */
		if (cur->next) {
			cur = cur->next;
			continue;
		}

		/* Add new node to end of list. */
		cur->next = move_ptr(new);
		goto out_rwlock_unlock;
	}

out_rwlock_unlock:
	pthread_rwlock_unlock(&head->lock);
	return rv;
}
static struct cg_proc_stat *new_proc_stat_node(struct cpuacct_usage *usage,
					       int cpu_count, const char *cg)
{
	call_cleaner(free_proc_stat_node) struct cg_proc_stat *node = NULL;
	__do_free struct cpuacct_usage *new_usage = NULL;

	node = zalloc(sizeof(struct cg_proc_stat));
	if (!node)
		return NULL;

	node->cg = strdup(cg);
	if (!node->cg)
		return NULL;

	new_usage = memdup(usage, sizeof(struct cpuacct_usage) * cpu_count);
	if (!new_usage)
		return NULL;

	node->view = zalloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!node->view)
		return NULL;

	node->cpu_count = cpu_count;

	if (pthread_mutex_init(&node->lock, NULL))
		return NULL;

	/*
	 * We're abusing the usage pointer to indicate that
	 * pthread_mutex_init() was successful. Don't judge me.
	 */
	node->usage = move_ptr(new_usage);

	return move_ptr(node);
}
static bool cgroup_supports(const char *controller, const char *cgroup,
			    const char *file)
{
	__do_free char *path = NULL;
	int cfd;

	cfd = get_cgroup_fd(controller);
	if (cfd < 0)
		return false;

	path = must_make_path_relative(cgroup, file, NULL);
	return faccessat(cfd, path, F_OK, 0) == 0;
}
static struct cg_proc_stat *prune_proc_stat_list(struct cg_proc_stat *node)
{
	struct cg_proc_stat *first = NULL;

	for (struct cg_proc_stat *prev = NULL; node; ) {
		if (!cgroup_supports("cpu", node->cg, "cpu.shares")) {
			call_cleaner(free_proc_stat_node) struct cg_proc_stat *cur = node;

			if (prev)
				prev->next = node->next;
			else
				first = node->next;

			node = node->next;
			lxcfs_debug("Removing stat node for %s\n", cur->cg);
		} else {
			if (!first)
				first = node;

			prev = node;
			node = node->next;
		}
	}

	return first;
}
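
/*
 * Pruning rationale: if a cgroup no longer exposes "cpu.shares" it has
 * most likely been removed, so its cached stat node is stale. The walk in
 * prune_proc_stat_list() above unlinks and frees such nodes and returns
 * the new list head; prune_proc_stat_history() below drives it at most
 * once per PROC_STAT_PRUNE_INTERVAL seconds per bucket.
 */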
#define PROC_STAT_PRUNE_INTERVAL 10
static void prune_proc_stat_history(void)
{
	time_t now = time(NULL);

	for (int i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		pthread_rwlock_wrlock(&proc_stat_history[i]->lock);

		if ((proc_stat_history[i]->lastcheck + PROC_STAT_PRUNE_INTERVAL) > now) {
			pthread_rwlock_unlock(&proc_stat_history[i]->lock);
			return;
		}

		if (proc_stat_history[i]->next) {
			proc_stat_history[i]->next = prune_proc_stat_list(proc_stat_history[i]->next);
			proc_stat_history[i]->lastcheck = now;
		}

		pthread_rwlock_unlock(&proc_stat_history[i]->lock);
	}
}
static struct cg_proc_stat *find_proc_stat_node(struct cg_proc_stat_head *head,
						const char *cg)
{
	struct cg_proc_stat *node;

	pthread_rwlock_rdlock(&head->lock);

	if (!head->next) {
		pthread_rwlock_unlock(&head->lock);
		return NULL;
	}

	node = head->next;

	do {
		if (strcmp(cg, node->cg) == 0)
			goto out;
	} while ((node = node->next));

	node = NULL;

out:
	pthread_rwlock_unlock(&head->lock);
	prune_proc_stat_history();
	return node;
}
static struct cg_proc_stat *find_or_create_proc_stat_node(struct cpuacct_usage *usage,
							   int cpu_count, const char *cg)
{
	int hash = calc_hash(cg) % CPUVIEW_HASH_SIZE;
	struct cg_proc_stat_head *head = proc_stat_history[hash];
	struct cg_proc_stat *node;

	node = find_proc_stat_node(head, cg);
	if (!node) {
		node = new_proc_stat_node(usage, cpu_count, cg);
		if (!node)
			return NULL;

		node = add_proc_stat_node(node);
		lxcfs_debug("New stat node (%d) for %s\n", cpu_count, cg);
	}

	pthread_mutex_lock(&node->lock);

	/*
	 * If additional CPUs on the host have been enabled, CPU usage counter
	 * arrays have to be expanded.
	 */
	if (node->cpu_count < cpu_count) {
		lxcfs_debug("Expanding stat node %d->%d for %s\n",
			    node->cpu_count, cpu_count, cg);

		if (!expand_proc_stat_node(node, cpu_count)) {
			pthread_mutex_unlock(&node->lock);
			return log_debug(NULL, "Unable to expand stat node %d->%d for %s", node->cpu_count, cpu_count, cg);
		}
	}

	return node;
}
static void add_cpu_usage(uint64_t *surplus, struct cpuacct_usage *usage,
			  uint64_t *counter, uint64_t threshold)
{
	uint64_t free_space, to_add;

	free_space = threshold - usage->user - usage->system;

	if (free_space > usage->idle)
		free_space = usage->idle;

	if (free_space > *surplus)
		to_add = *surplus;
	else
		to_add = free_space;

	*counter += to_add;
	usage->idle -= to_add;
	*surplus -= to_add;
}
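
/*
 * Worked example for add_cpu_usage() (illustrative numbers): with
 * threshold = 100 and a CPU at user = 30, system = 20, idle = 60,
 * free_space = 100 - 30 - 20 = 50 (already below idle, so no cap applies).
 * A surplus of 40 fits entirely: the counter grows by 40, idle drops to
 * 20, and the surplus is consumed down to 0.
 */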
static uint64_t diff_cpu_usage(struct cpuacct_usage *older,
			       struct cpuacct_usage *newer,
			       struct cpuacct_usage *diff, int cpu_count)
{
	uint64_t sum = 0;

	for (int i = 0; i < cpu_count; i++) {
		if (!newer[i].online)
			continue;

		/*
		 * When cpuset is changed on the fly, the CPUs might get
		 * reordered. We could either reset all counters, or check
		 * that the subtractions below will return expected results.
		 */
		if (newer[i].user > older[i].user)
			diff[i].user = newer[i].user - older[i].user;
		else
			diff[i].user = 0;

		if (newer[i].system > older[i].system)
			diff[i].system = newer[i].system - older[i].system;
		else
			diff[i].system = 0;

		if (newer[i].idle > older[i].idle)
			diff[i].idle = newer[i].idle - older[i].idle;
		else
			diff[i].idle = 0;

		sum += diff[i].user;
		sum += diff[i].system;
		sum += diff[i].idle;
	}

	return sum;
}
/*
 * Read cgroup CPU quota parameters from `cpu.cfs_quota_us` or
 * `cpu.cfs_period_us`, depending on `param`. The parameter value is
 * returned through `value`.
 */
static bool read_cpu_cfs_param(const char *cg, const char *param, int64_t *value)
{
	__do_free char *str = NULL;
	char file[STRLITERALLEN("cpu.cfs_period_us") + 1];
	bool first = true;
	int ret;

	if (pure_unified_layout(cgroup_ops)) {
		first = !strcmp(param, "quota");
		ret = snprintf(file, sizeof(file), "cpu.max");
	} else {
		ret = snprintf(file, sizeof(file), "cpu.cfs_%s_us", param);
	}
	if (ret < 0 || (size_t)ret >= sizeof(file))
		return false;

	if (!cgroup_ops->get(cgroup_ops, "cpu", cg, file, &str))
		return false;

	return sscanf(str, first ? "%" PRId64 : "%*d %" PRId64, value) == 1;
}
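
/*
 * Format reference for the parse above (typical kernel contents): on the
 * unified (cgroup2) layout, "cpu.max" holds both values on a single line,
 * e.g. "200000 100000" (quota then period) or "max 100000" when no quota
 * is set, hence quota is read as the first field and period via "%*d". On
 * the legacy layout, cpu.cfs_quota_us and cpu.cfs_period_us each hold a
 * single integer.
 */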
/*
 * Return the exact number of visible CPUs based on CPU quotas.
 * If there is no quota set, zero is returned.
 */
static double exact_cpu_count(const char *cg)
{
	double rv;
	int nprocs;
	int64_t cfs_quota, cfs_period;

	if (!read_cpu_cfs_param(cg, "quota", &cfs_quota))
		return 0;

	if (!read_cpu_cfs_param(cg, "period", &cfs_period))
		return 0;

	if (cfs_quota <= 0 || cfs_period <= 0)
		return 0;

	rv = (double)cfs_quota / (double)cfs_period;

	nprocs = get_nprocs();
	if (rv > nprocs)
		rv = nprocs;

	return rv;
}
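
/*
 * Example: cfs_quota = 150000 and cfs_period = 100000 yield exactly 1.5
 * visible CPUs; the result is clamped to the host's online CPU count.
 */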
/*
 * Return the maximum number of visible CPUs based on CPU quotas.
 * If there is no quota set, zero is returned.
 */
int max_cpu_count(const char *cg)
{
	__do_free char *cpuset = NULL;
	int rv, nprocs;
	int64_t cfs_quota, cfs_period;
	int nr_cpus_in_cpuset = 0;

	if (!read_cpu_cfs_param(cg, "quota", &cfs_quota))
		return 0;

	if (!read_cpu_cfs_param(cg, "period", &cfs_period))
		return 0;

	cpuset = get_cpuset(cg);
	if (cpuset)
		nr_cpus_in_cpuset = cpu_number_in_cpuset(cpuset);

	if (cfs_quota <= 0 || cfs_period <= 0) {
		if (nr_cpus_in_cpuset > 0)
			return nr_cpus_in_cpuset;

		return 0;
	}

	rv = cfs_quota / cfs_period;

	/*
	 * In case quota/period does not yield a whole number, add one CPU for
	 * the remainder.
	 */
	if ((cfs_quota % cfs_period) > 0)
		rv += 1;

	nprocs = get_nprocs();
	if (rv > nprocs)
		rv = nprocs;

	/* Use min value in cpu quota and cpuset. */
	if (nr_cpus_in_cpuset > 0 && nr_cpus_in_cpuset < rv)
		rv = nr_cpus_in_cpuset;

	return rv;
}
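
/*
 * Example: cfs_quota = 150000, cfs_period = 100000 gives 150000 / 100000 = 1
 * plus one CPU for the 50000 remainder, i.e. 2. A cpuset of 4 CPUs leaves
 * that unchanged, while a single-CPU cpuset would cap the result at 1.
 */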
int cpuview_proc_stat(const char *cg, const char *cpuset,
		      struct cpuacct_usage *cg_cpu_usage, int cg_cpu_usage_size,
		      FILE *f, char *buf, size_t buf_size)
{
	__do_free char *line = NULL;
	__do_free struct cpuacct_usage *diff = NULL;
	size_t linelen = 0, total_len = 0;
	int curcpu = -1; /* cpu numbering starts at 0 */
	int physcpu = 0, i;
	int cpu_cnt = 0;
	uint64_t user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0,
		 softirq = 0, steal = 0, guest = 0, guest_nice = 0;
	uint64_t user_sum = 0, system_sum = 0, idle_sum = 0;
	uint64_t user_surplus = 0, system_surplus = 0;
	int nprocs, max_cpus;
	ssize_t l;
	uint64_t total_sum, threshold;
	struct cg_proc_stat *stat_node;

	nprocs = get_nprocs_conf();
	if (cg_cpu_usage_size < nprocs)
		nprocs = cg_cpu_usage_size;
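
	/*
	 * Input sketch (illustrative values): the loop below consumes the
	 * per-CPU lines of the host's /proc/stat, whose columns are user,
	 * nice, system, idle, iowait, irq, softirq, steal, guest and
	 * guest_nice in USER_HZ, e.g.:
	 *
	 *	cpu0 152342 0 27041 1434411 1161 0 284 0 0 0
	 */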
	/* Read all CPU stats and stop when we've encountered other lines */
	while (getline(&line, &linelen, f) != -1) {
		int ret;
		char cpu_char[10]; /* That's a lot of cores */
		uint64_t all_used, cg_used;

		if (strlen(line) == 0)
			continue;

		/* not a ^cpuN line containing a number N */
		if (sscanf(line, "cpu%9[^ ]", cpu_char) != 1)
			break;

		if (sscanf(cpu_char, "%d", &physcpu) != 1)
			continue;

		if (physcpu >= cg_cpu_usage_size)
			continue;

		curcpu++;
		cpu_cnt++;

		if (!cpu_in_cpuset(physcpu, cpuset)) {
			for (i = curcpu; i <= physcpu; i++)
				cg_cpu_usage[i].online = false;
			continue;
		}

		if (curcpu < physcpu) {
			/* Some CPUs may be disabled */
			for (i = curcpu; i < physcpu; i++)
				cg_cpu_usage[i].online = false;

			curcpu = physcpu;
		}

		cg_cpu_usage[curcpu].online = true;

		ret = sscanf(line,
			     "%*s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64
			     " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64
			     " %" PRIu64 " %" PRIu64,
			     &user, &nice, &system, &idle, &iowait, &irq,
			     &softirq, &steal, &guest, &guest_nice);
		if (ret != 10)
			continue;

		all_used = user + nice + system + iowait + irq + softirq + steal + guest + guest_nice;
		cg_used = cg_cpu_usage[curcpu].user + cg_cpu_usage[curcpu].system;

		if (all_used >= cg_used) {
			cg_cpu_usage[curcpu].idle = idle + (all_used - cg_used);
		} else {
			lxcfs_error("cpu%d from %s has unexpected cpu time: %" PRIu64 " in /proc/stat, %" PRIu64 " in cpuacct.usage_all; unable to determine idle time",
				    curcpu, cg, all_used, cg_used);
			cg_cpu_usage[curcpu].idle = idle;
		}
	}
	/* Cannot use more CPUs than is available in cpuset. */
	max_cpus = max_cpu_count(cg);
	if (max_cpus > cpu_cnt || !max_cpus)
		max_cpus = cpu_cnt;

	/* takes lock pthread_mutex_lock(&node->lock) */
	stat_node = find_or_create_proc_stat_node(cg_cpu_usage, nprocs, cg);
	if (!stat_node)
		return log_error(0, "Failed to find/create stat node for %s", cg);

	diff = zalloc(sizeof(struct cpuacct_usage) * nprocs);
	if (!diff) {
		total_len = 0;
		goto out_pthread_mutex_unlock;
	}

	/*
	 * If the new values are LOWER than values stored in memory, it means
	 * the cgroup has been reset/recreated and we should reset too.
	 */
	for (curcpu = 0; curcpu < nprocs; curcpu++) {
		if (!cg_cpu_usage[curcpu].online)
			continue;

		if (cg_cpu_usage[curcpu].user < stat_node->usage[curcpu].user)
			reset_proc_stat_node(stat_node, cg_cpu_usage, nprocs);

		break;
	}

	total_sum = diff_cpu_usage(stat_node->usage, cg_cpu_usage, diff, nprocs);
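
	/*
	 * Redistribution scheme: the loop below folds this interval's deltas
	 * into the cumulative per-CPU counters and collects the deltas of
	 * CPUs beyond max_cpus as user/system surplus. The visible-CPU pass
	 * further down hands that surplus back, never pushing a single CPU
	 * past `threshold`.
	 */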
	for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
		stat_node->usage[curcpu].online = cg_cpu_usage[curcpu].online;

		if (!stat_node->usage[curcpu].online)
			continue;

		i++;

		stat_node->usage[curcpu].user += diff[curcpu].user;
		stat_node->usage[curcpu].system += diff[curcpu].system;
		stat_node->usage[curcpu].idle += diff[curcpu].idle;

		if (max_cpus > 0 && i >= max_cpus) {
			user_surplus += diff[curcpu].user;
			system_surplus += diff[curcpu].system;
		}
	}
	/* Calculate usage counters of visible CPUs */
	if (max_cpus > 0) {
		uint64_t diff_user = 0;
		uint64_t diff_system = 0;
		uint64_t diff_idle = 0;
		uint64_t max_diff_idle = 0;
		uint64_t max_diff_idle_index = 0;
		double exact_cpus;

		/* threshold = maximum usage per cpu, including idle */
		threshold = total_sum / cpu_cnt * max_cpus;

		for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
			if (!stat_node->usage[curcpu].online)
				continue;

			i++;

			if (i == max_cpus)
				break;

			if (diff[curcpu].user + diff[curcpu].system >= threshold)
				continue;

			/* Add user */
			add_cpu_usage(&user_surplus, &diff[curcpu],
				      &diff[curcpu].user, threshold);

			if (diff[curcpu].user + diff[curcpu].system >= threshold)
				continue;

			/* If there is still room, add system */
			add_cpu_usage(&system_surplus, &diff[curcpu],
				      &diff[curcpu].system, threshold);
		}

		if (user_surplus > 0)
			lxcfs_debug("leftover user: %lu for %s\n", user_surplus, cg);
		if (system_surplus > 0)
			lxcfs_debug("leftover system: %lu for %s\n", system_surplus, cg);

		for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
			if (!stat_node->usage[curcpu].online)
				continue;

			i++;

			if (i == max_cpus)
				break;

			stat_node->view[curcpu].user += diff[curcpu].user;
			stat_node->view[curcpu].system += diff[curcpu].system;
			stat_node->view[curcpu].idle += diff[curcpu].idle;

			user_sum += stat_node->view[curcpu].user;
			system_sum += stat_node->view[curcpu].system;
			idle_sum += stat_node->view[curcpu].idle;

			diff_user += diff[curcpu].user;
			diff_system += diff[curcpu].system;
			diff_idle += diff[curcpu].idle;
			if (diff[curcpu].idle > max_diff_idle) {
				max_diff_idle = diff[curcpu].idle;
				max_diff_idle_index = curcpu;
			}

			lxcfs_v("curcpu: %d, diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", curcpu, diff[curcpu].user, diff[curcpu].system, diff[curcpu].idle);
		}
		lxcfs_v("total. diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", diff_user, diff_system, diff_idle);
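
		/*
		 * Example for the revision below (illustrative numbers):
		 * with exact_cpus = 1.5 and max_cpus = 2, delta strips
		 * 1 - 1.5/2 = 25% of this interval's total delta from the
		 * reported idle time, so the view converges on 1.5 CPUs
		 * worth of capacity instead of 2.
		 */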
		/* Revise the cpu usage view to support the partial-cpu case. */
		exact_cpus = exact_cpu_count(cg);
		if (exact_cpus < (double)max_cpus) {
			uint64_t delta = (uint64_t)((double)(diff_user + diff_system + diff_idle) * (1 - exact_cpus / (double)max_cpus));

			lxcfs_v("revising cpu usage view to match the exact cpu count [%f]\n", exact_cpus);
			lxcfs_v("delta: %lu\n", delta);
			lxcfs_v("idle_sum before: %lu\n", idle_sum);
			if (idle_sum > delta)
				idle_sum = idle_sum - delta;
			else
				idle_sum = 0;
			lxcfs_v("idle_sum after: %lu\n", idle_sum);

			curcpu = max_diff_idle_index;
			lxcfs_v("curcpu: %d, idle before: %lu\n", curcpu, stat_node->view[curcpu].idle);
			if (stat_node->view[curcpu].idle > delta)
				stat_node->view[curcpu].idle = stat_node->view[curcpu].idle - delta;
			else
				stat_node->view[curcpu].idle = 0;
			lxcfs_v("curcpu: %d, idle after: %lu\n", curcpu, stat_node->view[curcpu].idle);
		}
	} else {
		for (curcpu = 0; curcpu < nprocs; curcpu++) {
			if (!stat_node->usage[curcpu].online)
				continue;

			stat_node->view[curcpu].user = stat_node->usage[curcpu].user;
			stat_node->view[curcpu].system = stat_node->usage[curcpu].system;
			stat_node->view[curcpu].idle = stat_node->usage[curcpu].idle;

			user_sum += stat_node->view[curcpu].user;
			system_sum += stat_node->view[curcpu].system;
			idle_sum += stat_node->view[curcpu].idle;
		}
	}
	/* Render the file */
	l = snprintf(buf, buf_size,
		     "cpu  %" PRIu64 " 0 %" PRIu64 " %" PRIu64 " 0 0 0 0 0 0\n",
		     user_sum, system_sum, idle_sum);
	lxcfs_v("cpu-all: %s\n", buf);
	if (l < 0) {
		lxcfs_error("Failed to write cache");
		total_len = 0;
		goto out_pthread_mutex_unlock;
	}
	if ((size_t)l >= buf_size) {
		lxcfs_error("Write to cache was truncated");
		total_len = 0;
		goto out_pthread_mutex_unlock;
	}

	buf += l;
	buf_size -= l;
	total_len += l;

	/* Render visible CPUs */
	for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
		if (!stat_node->usage[curcpu].online)
			continue;

		i++;

		if (max_cpus > 0 && i == max_cpus)
			break;

		l = snprintf(buf, buf_size, "cpu%d %" PRIu64 " 0 %" PRIu64 " %" PRIu64 " 0 0 0 0 0 0\n",
			     i,
			     stat_node->view[curcpu].user,
			     stat_node->view[curcpu].system,
			     stat_node->view[curcpu].idle);
		lxcfs_v("cpu: %s\n", buf);
		if (l < 0) {
			lxcfs_error("Failed to write cache");
			total_len = 0;
			goto out_pthread_mutex_unlock;
		}
		if ((size_t)l >= buf_size) {
			lxcfs_error("Write to cache was truncated");
			total_len = 0;
			goto out_pthread_mutex_unlock;
		}

		buf += l;
		buf_size -= l;
		total_len += l;
	}

	/* Pass the rest of /proc/stat, start with the last line read */
	l = snprintf(buf, buf_size, "%s", line);
	if (l < 0) {
		lxcfs_error("Failed to write cache");
		total_len = 0;
		goto out_pthread_mutex_unlock;
	}
	if ((size_t)l >= buf_size) {
		lxcfs_error("Write to cache was truncated");
		total_len = 0;
		goto out_pthread_mutex_unlock;
	}

	buf += l;
	buf_size -= l;
	total_len += l;

	/* Pass the rest of the host's /proc/stat */
	while (getline(&line, &linelen, f) != -1) {
		l = snprintf(buf, buf_size, "%s", line);
		if (l < 0) {
			lxcfs_error("Failed to write cache");
			total_len = 0;
			goto out_pthread_mutex_unlock;
		}
		if ((size_t)l >= buf_size) {
			lxcfs_error("Write to cache was truncated");
			total_len = 0;
			goto out_pthread_mutex_unlock;
		}

		buf += l;
		buf_size -= l;
		total_len += l;
	}

out_pthread_mutex_unlock:
	if (stat_node)
		pthread_mutex_unlock(&stat_node->lock);

	return total_len;
}
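
/*
 * Output sketch for cpuview_proc_stat() (illustrative values): the rendered
 * buffer starts with the aggregate line followed by one line per visible
 * CPU, with only the user, system and idle fields populated:
 *
 *	cpu  3485 0 1092 98705 0 0 0 0 0 0
 *	cpu0 1839 0 551 49353 0 0 0 0 0 0
 *	cpu1 1646 0 541 49352 0 0 0 0 0 0
 */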
/*
 * Check whether this is a '^processor' line in /proc/cpuinfo.
 */
static inline bool is_processor_line(const char *line)
{
	int cpu;

	return sscanf(line, "processor : %d", &cpu) == 1;
}

static inline bool cpuline_in_cpuset(const char *line, const char *cpuset)
{
	int cpu;

	if (sscanf(line, "processor : %d", &cpu) == 1)
		return cpu_in_cpuset(cpu, cpuset);

	return false;
}
int proc_cpuinfo_read(char *buf, size_t size, off_t offset,
		      struct fuse_file_info *fi)
{
	__do_free char *cg = NULL, *cpuset = NULL, *line = NULL;
	__do_free void *fopen_cache = NULL;
	__do_fclose FILE *f = NULL;
	struct fuse_context *fc = fuse_get_context();
	struct lxcfs_opts *opts = (struct lxcfs_opts *)fc->private_data;
	struct file_info *d = INTTYPE_TO_PTR(fi->fh);
	size_t linelen = 0, total_len = 0;
	bool am_printing = false, firstline = true, is_s390x = false;
	int curcpu = -1, cpu, max_cpus = 0;
	bool use_view;
	ssize_t l;
	char *cache = d->buf;
	size_t cache_size = d->buflen;

	if (offset) {
		size_t left;

		if (offset > d->size)
			return -EINVAL;

		if (!d->cached)
			return 0;

		left = d->size - offset;
		total_len = left > size ? size : left;
		memcpy(buf, cache + offset, total_len);

		return total_len;
	}

	pid_t initpid = lookup_initpid_in_store(fc->pid);
	if (initpid <= 1 || is_shared_pidns(initpid))
		initpid = fc->pid;

	cg = get_pid_cgroup(initpid, "cpuset");
	if (!cg)
		return read_file_fuse("proc/cpuinfo", buf, size, d);
	prune_init_slice(cg);

	cpuset = get_cpuset(cg);
	if (!cpuset)
		return 0;

	if (cgroup_ops->can_use_cpuview(cgroup_ops) && opts && opts->use_cfs)
		use_view = true;
	else
		use_view = false;
	if (use_view)
		max_cpus = max_cpu_count(cg);

	f = fopen_cached("/proc/cpuinfo", "re", &fopen_cache);
	if (!f)
		return 0;

	while (getline(&line, &linelen, f) != -1) {
		if (firstline) {
			firstline = false;
			if (strstr(line, "IBM/S390") != NULL) {
				is_s390x = true;
				am_printing = true;
				continue;
			}
		}

		if (strncmp(line, "# processors:", 12) == 0)
			continue;

		if (is_processor_line(line)) {
			if (use_view && max_cpus > 0 && (curcpu + 1) == max_cpus)
				break;

			am_printing = cpuline_in_cpuset(line, cpuset);
			if (am_printing) {
				curcpu++;
				l = snprintf(cache, cache_size, "processor : %d\n", curcpu);
				if (l < 0)
					return log_error(0, "Failed to write cache");
				if ((size_t)l >= cache_size)
					return log_error(0, "Write to cache was truncated");

				cache += l;
				cache_size -= l;
				total_len += l;
			}
			continue;
		} else if (is_s390x && sscanf(line, "processor %d:", &cpu) == 1) {
			char *p;

			if (use_view && max_cpus > 0 && (curcpu + 1) == max_cpus)
				break;

			if (!cpu_in_cpuset(cpu, cpuset))
				continue;

			curcpu++;
			p = strchr(line, ':');
			if (!p || !*p)
				return 0;
			p++;

			l = snprintf(cache, cache_size, "processor %d:%s", curcpu, p);
			if (l < 0)
				return log_error(0, "Failed to write cache");
			if ((size_t)l >= cache_size)
				return log_error(0, "Write to cache was truncated");

			cache += l;
			cache_size -= l;
			total_len += l;
			continue;
		}

		if (am_printing) {
			l = snprintf(cache, cache_size, "%s", line);
			if (l < 0)
				return log_error(0, "Failed to write cache");
			if ((size_t)l >= cache_size)
				return log_error(0, "Write to cache was truncated");

			cache += l;
			cache_size -= l;
			total_len += l;
		}
	}

	if (is_s390x) {
		__do_free char *origcache = d->buf;

		d->buf = malloc(d->buflen);
		if (!d->buf) {
			d->buf = move_ptr(origcache);
			return 0;
		}

		cache = d->buf;
		cache_size = d->buflen;
		total_len = 0;

		l = snprintf(cache, cache_size, "vendor_id : IBM/S390\n");
		if (l < 0 || (size_t)l >= cache_size)
			return 0;

		cache_size -= l;
		cache += l;
		total_len += l;

		l = snprintf(cache, cache_size, "# processors : %d\n", curcpu + 1);
		if (l < 0 || (size_t)l >= cache_size)
			return 0;

		cache_size -= l;
		cache += l;
		total_len += l;

		l = snprintf(cache, cache_size, "%s", origcache);
		if (l < 0 || (size_t)l >= cache_size)
			return 0;
		total_len += l;
	}

	d->cached = 1;
	d->size = total_len;
	if (total_len > size)
		total_len = size;

	/* read from off 0 */
	memcpy(buf, d->buf, total_len);

	return total_len;
}
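
/*
 * Format reference (typical kernel contents): cpuacct.usage_all starts
 * with the header "cpu user system" followed by one line of nanosecond
 * counters per CPU, e.g.:
 *
 *	cpu user system
 *	0 123456789 987654321
 *	1 13579246 86420135
 *
 * The fallback file cpuacct.usage_percpu is a single line of per-CPU
 * totals in nanoseconds.
 */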
/*
 * Returns 0 on success.
 * It is the caller's responsibility to free `return_usage`, unless this
 * function returns an error.
 */
int read_cpuacct_usage_all(char *cg, char *cpuset,
			   struct cpuacct_usage **return_usage, int *size)
{
	__do_free char *usage_str = NULL;
	__do_free struct cpuacct_usage *cpu_usage = NULL;
	int i = 0, j = 0, read_pos = 0, read_cnt = 0;
	int cpucount;
	int ret;
	int cg_cpu;
	uint64_t cg_user, cg_system;
	int64_t ticks_per_sec;

	ticks_per_sec = sysconf(_SC_CLK_TCK);
	if (ticks_per_sec < 0 && errno == EINVAL) {
		lxcfs_debug("%m - Failed to determine number of ticks per second");
		return -1;
	}

	cpucount = get_nprocs_conf();
	cpu_usage = malloc(sizeof(struct cpuacct_usage) * cpucount);
	if (!cpu_usage)
		return -ENOMEM;

	memset(cpu_usage, 0, sizeof(struct cpuacct_usage) * cpucount);
	if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_all", &usage_str)) {
		char *sep = " \t\n";
		char *tok;

		/* Read cpuacct.usage_percpu instead. */
		lxcfs_debug("Falling back to cpuacct.usage_percpu");
		if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_percpu", &usage_str))
			return -1;

		lxc_iterate_parts(tok, usage_str, sep) {
			uint64_t percpu_user;

			if (i >= cpucount)
				break;

			tok = trim_whitespace_in_place(tok);
			ret = safe_uint64(tok, &percpu_user, 10);
			if (ret)
				return -1;

			/* Convert the time from nanoseconds to USER_HZ */
			cpu_usage[i].user = percpu_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
			cpu_usage[i].system = cpu_usage[i].user;
			i++;
			lxcfs_debug("cpu%d with time %s", i, tok);
		}
	} else {
		if (sscanf(usage_str, "cpu user system\n%n", &read_cnt) != 0)
			return log_error(-1, "read_cpuacct_usage_all reading first line from %s/cpuacct.usage_all failed", cg);

		read_pos += read_cnt;

		for (i = 0, j = 0; i < cpucount; i++) {
			ret = sscanf(usage_str + read_pos,
				     "%d %" PRIu64 " %" PRIu64 "\n%n", &cg_cpu,
				     &cg_user, &cg_system, &read_cnt);
			if (ret == EOF)
				break;

			if (ret != 3)
				return log_error(-EINVAL, "Failed to parse cpuacct.usage_all line %s from cgroup %s",
						 usage_str + read_pos, cg);

			read_pos += read_cnt;

			/* Convert the time from nanoseconds to USER_HZ */
			cpu_usage[j].user = cg_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
			cpu_usage[j].system = cg_system / 1000.0 / 1000 / 1000 * ticks_per_sec;
			j++;
		}
	}

	*size = cpucount;
	*return_usage = move_ptr(cpu_usage);
	return 0;
}
static bool cpuview_init_head(struct cg_proc_stat_head **head)
{
	__do_free struct cg_proc_stat_head *h = NULL;

	h = zalloc(sizeof(struct cg_proc_stat_head));
	if (!h)
		return false;

	if (pthread_rwlock_init(&h->lock, NULL))
		return false;

	h->lastcheck = time(NULL);

	*head = move_ptr(h);
	return true;
}

bool init_cpuview(void)
{
	int i;

	for (i = 0; i < CPUVIEW_HASH_SIZE; i++)
		proc_stat_history[i] = NULL;

	for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		if (!cpuview_init_head(&proc_stat_history[i]))
			goto err;
	}

	return true;

err:
	for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		if (proc_stat_history[i])
			free_disarm(proc_stat_history[i]);
	}

	return false;
}

static void cpuview_free_head(struct cg_proc_stat_head *head)
{
	struct cg_proc_stat *node;

	if (head->next) {
		node = head->next;

		for (;;) {
			struct cg_proc_stat *cur = node;

			node = node->next;
			free_proc_stat_node(cur);
			if (!node)
				break;
		}
	}

	pthread_rwlock_destroy(&head->lock);
	free_disarm(head);
}

void free_cpuview(void)
{
	for (int i = 0; i < CPUVIEW_HASH_SIZE; i++)
		if (proc_stat_history[i])
			cpuview_free_head(proc_stat_history[i]);
}