/* SPDX-License-Identifier: LGPL-2.1+ */

#ifndef FUSE_USE_VERSION
#define FUSE_USE_VERSION 26
#endif

#define _FILE_OFFSET_BITS 64

#define __STDC_FORMAT_MACROS

#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <sys/epoll.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>

#include "cgroup_fuse.h"
#include "cpuset_parse.h"
#include "cgroups/cgroup.h"
#include "cgroups/cgroup_utils.h"
#include "memory_utils.h"
#include "proc_loadavg.h"
/* Data for CPU view */
struct cg_proc_stat {
	char *cg;
	struct cpuacct_usage *usage;	/* Real usage as read from the host's /proc/stat. */
	struct cpuacct_usage *view;	/* Usage stats reported to the container. */
	int cpu_count;
	pthread_mutex_t lock;		/* For node manipulation. */
	struct cg_proc_stat *next;
};

struct cg_proc_stat_head {
	struct cg_proc_stat *next;
	time_t lastcheck;

	/*
	 * For access to the list. Reading can be parallel, pruning is exclusive.
	 */
	pthread_rwlock_t lock;
};
#define CPUVIEW_HASH_SIZE 100
static struct cg_proc_stat_head *proc_stat_history[CPUVIEW_HASH_SIZE];
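
/*
 * Summary added for orientation (not part of the upstream file): every
 * cgroup that reads a CPU view gets one struct cg_proc_stat node. Nodes
 * are spread over CPUVIEW_HASH_SIZE buckets keyed by
 * calc_hash(cg) % CPUVIEW_HASH_SIZE, each bucket being a singly linked
 * list protected by the head's rwlock, while per-node counter updates are
 * serialized by the node's own mutex.
 */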
static void reset_proc_stat_node(struct cg_proc_stat *node,
				 struct cpuacct_usage *usage, int cpu_count)
{
	lxcfs_debug("Resetting stat node for %s\n", node->cg);
	memcpy(node->usage, usage, sizeof(struct cpuacct_usage) * cpu_count);

	for (int i = 0; i < cpu_count; i++) {
		node->view[i].user = 0;
		node->view[i].system = 0;
		node->view[i].idle = 0;
	}

	node->cpu_count = cpu_count;
}
static bool expand_proc_stat_node(struct cg_proc_stat *node, int cpu_count)
{
	__do_free struct cpuacct_usage *new_usage = NULL, *new_view = NULL;

	/* Allocate new memory */
	new_usage = zalloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!new_usage)
		return false;

	new_view = zalloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!new_view)
		return false;

	/* Copy existing data & initialize new elements */
	for (int i = 0; i < cpu_count; i++) {
		if (i < node->cpu_count) {
			new_usage[i].user = node->usage[i].user;
			new_usage[i].system = node->usage[i].system;
			new_usage[i].idle = node->usage[i].idle;

			new_view[i].user = node->view[i].user;
			new_view[i].system = node->view[i].system;
			new_view[i].idle = node->view[i].idle;
		}
	}

	free(node->usage);
	node->usage = move_ptr(new_usage);

	free(node->view);
	node->view = move_ptr(new_view);
	node->cpu_count = cpu_count;

	return true;
}
static void free_proc_stat_node(struct cg_proc_stat *node)
{
	if (node) {
		/*
		 * We're abusing the usage pointer to indicate that
		 * pthread_mutex_init() was successful. Don't judge me.
		 */
		if (node->usage)
			pthread_mutex_destroy(&node->lock);
		free_disarm(node->cg);
		free_disarm(node->usage);
		free_disarm(node->view);
		free_disarm(node);
	}
}

define_cleanup_function(struct cg_proc_stat *, free_proc_stat_node);
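
/*
 * Note added for orientation (not part of the upstream file): __do_free,
 * call_cleaner() and define_cleanup_function() come from memory_utils.h and
 * wrap __attribute__((cleanup)); a variable annotated this way is released
 * automatically when it goes out of scope, and move_ptr() hands ownership
 * out by returning the pointer while nulling the local, so the cleanup
 * handler then sees NULL and does nothing.
 */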
static struct cg_proc_stat *add_proc_stat_node(struct cg_proc_stat *new_node)
{
	call_cleaner(free_proc_stat_node) struct cg_proc_stat *new = new_node;
	struct cg_proc_stat *rv = new_node;
	int hash = calc_hash(new->cg) % CPUVIEW_HASH_SIZE;
	struct cg_proc_stat_head *head = proc_stat_history[hash];
	struct cg_proc_stat *cur;

	pthread_rwlock_wrlock(&head->lock);

	if (!head->next) {
		head->next = move_ptr(new);
		goto out_rwlock_unlock;
	}

	cur = head->next;

	for (;;) {
		/*
		 * The node to be added is already present in the list, so
		 * free the newly allocated one and return the one we found.
		 */
		if (strcmp(cur->cg, new->cg) == 0) {
			rv = cur;
			goto out_rwlock_unlock;
		}

		/* Keep walking. */
		if (cur->next) {
			cur = cur->next;
			continue;
		}

		/* Add new node to end of list. */
		cur->next = move_ptr(new);
		goto out_rwlock_unlock;
	}

out_rwlock_unlock:
	pthread_rwlock_unlock(&head->lock);
	return rv;
}
static struct cg_proc_stat *new_proc_stat_node(struct cpuacct_usage *usage,
					       int cpu_count, const char *cg)
{
	call_cleaner(free_proc_stat_node) struct cg_proc_stat *node = NULL;
	__do_free struct cpuacct_usage *new_usage = NULL;

	node = zalloc(sizeof(struct cg_proc_stat));
	if (!node)
		return NULL;

	node->cg = strdup(cg);
	if (!node->cg)
		return NULL;

	new_usage = memdup(usage, sizeof(struct cpuacct_usage) * cpu_count);
	if (!new_usage)
		return NULL;

	node->view = zalloc(sizeof(struct cpuacct_usage) * cpu_count);
	if (!node->view)
		return NULL;

	node->cpu_count = cpu_count;

	if (pthread_mutex_init(&node->lock, NULL))
		return NULL;
	/*
	 * We're abusing the usage pointer to indicate that
	 * pthread_mutex_init() was successful. Don't judge me.
	 */
	node->usage = move_ptr(new_usage);

	return move_ptr(node);
}
static bool cgroup_supports(const char *controller, const char *cgroup,
			    const char *file)
{
	__do_free char *path = NULL;
	int cfd;

	cfd = get_cgroup_fd(controller);
	if (cfd < 0)
		return false;

	path = must_make_path_relative(cgroup, file, NULL);
	return faccessat(cfd, path, F_OK, 0) == 0;
}
static struct cg_proc_stat *prune_proc_stat_list(struct cg_proc_stat *node)
{
	struct cg_proc_stat *first = NULL;

	for (struct cg_proc_stat *prev = NULL; node; ) {
		if (!cgroup_supports("cpu", node->cg, "cpu.shares")) {
			call_cleaner(free_proc_stat_node) struct cg_proc_stat *cur = node;

			if (prev)
				prev->next = node->next;
			else
				first = node->next;

			node = node->next;
			lxcfs_debug("Removing stat node for %s\n", cur->cg);
		} else {
			if (!first)
				first = node;
			prev = node;
			node = node->next;
		}
	}

	return first;
}
#define PROC_STAT_PRUNE_INTERVAL 10
static void prune_proc_stat_history(void)
{
	time_t now = time(NULL);

	for (int i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		pthread_rwlock_wrlock(&proc_stat_history[i]->lock);

		if ((proc_stat_history[i]->lastcheck + PROC_STAT_PRUNE_INTERVAL) > now) {
			pthread_rwlock_unlock(&proc_stat_history[i]->lock);
			return;
		}

		if (proc_stat_history[i]->next) {
			proc_stat_history[i]->next = prune_proc_stat_list(proc_stat_history[i]->next);
			proc_stat_history[i]->lastcheck = now;
		}

		pthread_rwlock_unlock(&proc_stat_history[i]->lock);
	}
}
static struct cg_proc_stat *find_proc_stat_node(struct cg_proc_stat_head *head,
						const char *cg)
{
	struct cg_proc_stat *node;

	pthread_rwlock_rdlock(&head->lock);

	if (!head->next) {
		pthread_rwlock_unlock(&head->lock);
		return NULL;
	}

	node = head->next;

	do {
		if (strcmp(cg, node->cg) == 0)
			goto out;
	} while ((node = node->next));

	node = NULL;

out:
	pthread_rwlock_unlock(&head->lock);
	prune_proc_stat_history();
	return node;
}
static struct cg_proc_stat *find_or_create_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
{
	int hash = calc_hash(cg) % CPUVIEW_HASH_SIZE;
	struct cg_proc_stat_head *head = proc_stat_history[hash];
	struct cg_proc_stat *node;

	node = find_proc_stat_node(head, cg);
	if (!node) {
		node = new_proc_stat_node(usage, cpu_count, cg);
		if (!node)
			return NULL;

		node = add_proc_stat_node(node);
		lxcfs_debug("New stat node (%d) for %s\n", cpu_count, cg);
	}

	pthread_mutex_lock(&node->lock);

	/*
	 * If additional CPUs on the host have been enabled, CPU usage counter
	 * arrays have to be expanded.
	 */
	if (node->cpu_count < cpu_count) {
		lxcfs_debug("Expanding stat node %d->%d for %s\n",
			    node->cpu_count, cpu_count, cg);

		if (!expand_proc_stat_node(node, cpu_count)) {
			pthread_mutex_unlock(&node->lock);
			return log_debug(NULL, "Unable to expand stat node %d->%d for %s", node->cpu_count, cpu_count, cg);
		}
	}

	return node;
}
static void add_cpu_usage(uint64_t *surplus, struct cpuacct_usage *usage,
			  uint64_t *counter, uint64_t threshold)
{
	uint64_t free_space, to_add;

	free_space = threshold - usage->user - usage->system;

	if (free_space > usage->idle)
		free_space = usage->idle;

	if (free_space > *surplus)
		free_space = *surplus;

	to_add = free_space;

	*counter += to_add;
	usage->idle -= to_add;
	*surplus -= to_add;
}
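
/*
 * Worked example added for orientation (not part of the upstream file):
 * with threshold = 100, a CPU that already shows user = 40 and system = 20
 * has free_space = 100 - 40 - 20 = 40 ticks. That is further capped by the
 * CPU's idle time and by the remaining *surplus, so with idle = 50 and
 * *surplus = 70 we get to_add = 40: the target counter grows by 40, idle
 * drops to 10 and the surplus left for the other visible CPUs drops to 30.
 */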
static uint64_t diff_cpu_usage(struct cpuacct_usage *older,
			       struct cpuacct_usage *newer,
			       struct cpuacct_usage *diff, int cpu_count)
{
	uint64_t sum = 0;

	for (int i = 0; i < cpu_count; i++) {
		if (!newer[i].online)
			continue;

		/*
		 * When cpuset is changed on the fly, the CPUs might get
		 * reordered. We could either reset all counters, or check
		 * that the subtractions below will return expected results.
		 */
		if (newer[i].user > older[i].user)
			diff[i].user = newer[i].user - older[i].user;
		else
			diff[i].user = 0;

		if (newer[i].system > older[i].system)
			diff[i].system = newer[i].system - older[i].system;
		else
			diff[i].system = 0;

		if (newer[i].idle > older[i].idle)
			diff[i].idle = newer[i].idle - older[i].idle;
		else
			diff[i].idle = 0;

		sum += diff[i].user;
		sum += diff[i].system;
		sum += diff[i].idle;
	}

	return sum;
}
/*
 * Read cgroup CPU quota parameters from `cpu.cfs_quota_us` or
 * `cpu.cfs_period_us`, depending on `param`. The parameter value is
 * returned through `value`.
 */
static bool read_cpu_cfs_param(const char *cg, const char *param, int64_t *value)
{
	__do_free char *str = NULL;
	char file[11 + 6 + 1]; /* cpu.cfs__us + quota/period + \0 */
	bool first = true;

	if (!pure_unified_layout(cgroup_ops)) {
		snprintf(file, sizeof(file), "cpu.cfs_%s_us", param);
	} else {
		strcpy(file, "cpu.max");
		first = !strcmp(param, "quota");
	}

	if (!cgroup_ops->get(cgroup_ops, "cpu", cg, file, &str))
		return false;

	if (sscanf(str, first ? "%" PRId64 : "%*d %" PRId64, value) != 1)
		return false;

	return true;
}
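
/*
 * Example added for orientation (not part of the upstream file): on the
 * unified (cgroup2) hierarchy both values live in a single "cpu.max" file
 * of the form "<quota> <period>", e.g. "200000 100000", so the scanner
 * above reads either the first or the second field depending on `param`.
 * An unlimited quota is written as the literal "max", which fails the
 * numeric conversion and makes this helper return false. On the legacy
 * hierarchy the values come from the separate cpu.cfs_quota_us and
 * cpu.cfs_period_us files.
 */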
/*
 * Return the exact number of visible CPUs based on CPU quotas.
 * If there is no quota set, zero is returned.
 */
static double exact_cpu_count(const char *cg)
{
	double rv;
	int nprocs;
	int64_t cfs_quota, cfs_period;

	read_cpu_cfs_param(cg, "quota", &cfs_quota);
	read_cpu_cfs_param(cg, "period", &cfs_period);

	if (cfs_quota <= 0 || cfs_period <= 0)
		return 0;

	rv = (double)cfs_quota / (double)cfs_period;

	nprocs = get_nprocs();
	if (rv > nprocs)
		rv = nprocs;

	return rv;
}
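
/*
 * Worked example added for orientation (not part of the upstream file):
 * cfs_quota = 150000 with cfs_period = 100000 yields 150000 / 100000 = 1.5
 * "exact" CPUs, clamped to the number of online host CPUs reported by
 * get_nprocs().
 */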
/*
 * Return the maximum number of visible CPUs based on CPU quotas.
 * If there is no quota set, zero is returned.
 */
int max_cpu_count(const char *cg)
{
	__do_free char *cpuset = NULL;
	int rv, nprocs;
	int64_t cfs_quota, cfs_period;
	int nr_cpus_in_cpuset = 0;

	read_cpu_cfs_param(cg, "quota", &cfs_quota);
	read_cpu_cfs_param(cg, "period", &cfs_period);

	cpuset = get_cpuset(cg);
	if (cpuset)
		nr_cpus_in_cpuset = cpu_number_in_cpuset(cpuset);

	if (cfs_quota <= 0 || cfs_period <= 0) {
		if (nr_cpus_in_cpuset > 0)
			return nr_cpus_in_cpuset;

		return 0;
	}

	rv = cfs_quota / cfs_period;

	/*
	 * In case quota/period does not yield a whole number, add one CPU for
	 * the remainder.
	 */
	if ((cfs_quota % cfs_period) > 0)
		rv += 1;

	nprocs = get_nprocs();
	if (rv > nprocs)
		rv = nprocs;

	/* Use the minimum of the CPU quota and the cpuset. */
	if (nr_cpus_in_cpuset > 0 && nr_cpus_in_cpuset < rv)
		rv = nr_cpus_in_cpuset;

	return rv;
}
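
/*
 * Worked example added for orientation (not part of the upstream file):
 * cfs_quota = 250000 with cfs_period = 100000 gives 250000 / 100000 = 2
 * with a remainder, so one extra CPU is added and up to 3 CPUs are shown;
 * the result is then capped by the number of online host CPUs and by the
 * number of CPUs in the cgroup's cpuset, whichever is smaller.
 */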
int cpuview_proc_stat(const char *cg, const char *cpuset,
		      struct cpuacct_usage *cg_cpu_usage, int cg_cpu_usage_size,
		      FILE *f, char *buf, size_t buf_size)
{
	__do_free char *line = NULL;
	__do_free struct cpuacct_usage *diff = NULL;
	size_t linelen = 0, total_len = 0;
	int curcpu = -1; /* cpu numbering starts at 0 */
	int physcpu, i;
	int cpu_cnt = 0;
	uint64_t user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0,
		 softirq = 0, steal = 0, guest = 0, guest_nice = 0;
	uint64_t user_sum = 0, system_sum = 0, idle_sum = 0;
	uint64_t user_surplus = 0, system_surplus = 0;
	int nprocs, max_cpus;
	ssize_t l;
	uint64_t total_sum, threshold;
	struct cg_proc_stat *stat_node;

	nprocs = get_nprocs_conf();
	if (cg_cpu_usage_size < nprocs)
		nprocs = cg_cpu_usage_size;
	/* Read all CPU stats and stop when we've encountered other lines */
	while (getline(&line, &linelen, f) != -1) {
		int ret;
		char cpu_char[10]; /* That's a lot of cores */
		uint64_t all_used, cg_used;

		if (strlen(line) == 0)
			continue;

		/* not a ^cpuN line containing a number N */
		if (sscanf(line, "cpu%9[^ ]", cpu_char) != 1)
			break;

		if (sscanf(cpu_char, "%d", &physcpu) != 1)
			continue;

		if (physcpu >= cg_cpu_usage_size)
			continue;

		curcpu++;
		cpu_cnt++;

		if (!cpu_in_cpuset(physcpu, cpuset)) {
			for (i = curcpu; i <= physcpu; i++)
				cg_cpu_usage[i].online = false;
			continue;
		}

		if (curcpu < physcpu) {
			/* Some CPUs may be disabled */
			for (i = curcpu; i < physcpu; i++)
				cg_cpu_usage[i].online = false;
		}

		curcpu = physcpu;
		cg_cpu_usage[curcpu].online = true;

		ret = sscanf(line, "%*s %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64 "lu",
			     &user,
			     &nice,
			     &system,
			     &idle,
			     &iowait,
			     &irq,
			     &softirq,
			     &steal,
			     &guest,
			     &guest_nice);
		if (ret != 10)
			continue;

		all_used = user + nice + system + iowait + irq + softirq + steal + guest + guest_nice;
		cg_used = cg_cpu_usage[curcpu].user + cg_cpu_usage[curcpu].system;

		if (all_used >= cg_used) {
			cg_cpu_usage[curcpu].idle = idle + (all_used - cg_used);
		} else {
			lxcfs_error("cpu%d from %s has unexpected cpu time: %" PRIu64 " in /proc/stat, %" PRIu64 " in cpuacct.usage_all; unable to determine idle time",
				    curcpu, cg, all_used, cg_used);
			cg_cpu_usage[curcpu].idle = idle;
		}
	}
	/* Cannot use more CPUs than is available in cpuset. */
	max_cpus = max_cpu_count(cg);
	if (max_cpus > cpu_cnt || !max_cpus)
		max_cpus = cpu_cnt;

	stat_node = find_or_create_proc_stat_node(cg_cpu_usage, nprocs, cg);
	if (!stat_node)
		return log_error(0, "Failed to find/create stat node for %s", cg);

	diff = malloc(sizeof(struct cpuacct_usage) * nprocs);
	if (!diff) {
		pthread_mutex_unlock(&stat_node->lock);
		return 0;
	}

	/*
	 * If the new values are LOWER than values stored in memory, it means
	 * the cgroup has been reset/recreated and we should reset too.
	 */
	for (curcpu = 0; curcpu < nprocs; curcpu++) {
		if (!cg_cpu_usage[curcpu].online)
			continue;

		if (cg_cpu_usage[curcpu].user < stat_node->usage[curcpu].user)
			reset_proc_stat_node(stat_node, cg_cpu_usage, nprocs);

		break;
	}

	total_sum = diff_cpu_usage(stat_node->usage, cg_cpu_usage, diff, nprocs);
	for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
		stat_node->usage[curcpu].online = cg_cpu_usage[curcpu].online;

		if (!stat_node->usage[curcpu].online)
			continue;

		i++;

		stat_node->usage[curcpu].user += diff[curcpu].user;
		stat_node->usage[curcpu].system += diff[curcpu].system;
		stat_node->usage[curcpu].idle += diff[curcpu].idle;

		if (max_cpus > 0 && i >= max_cpus) {
			user_surplus += diff[curcpu].user;
			system_surplus += diff[curcpu].system;
		}
	}
	/* Calculate usage counters of visible CPUs */
	if (max_cpus > 0) {
		uint64_t diff_user = 0;
		uint64_t diff_system = 0;
		uint64_t diff_idle = 0;
		uint64_t max_diff_idle = 0;
		uint64_t max_diff_idle_index = 0;
		double exact_cpus;

		/* threshold = maximum usage per cpu, including idle */
		threshold = total_sum / cpu_cnt * max_cpus;

		for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
			if (!stat_node->usage[curcpu].online)
				continue;

			i++;

			if (i == max_cpus)
				break;

			if (diff[curcpu].user + diff[curcpu].system >= threshold)
				continue;

			/* Add user */
			add_cpu_usage(&user_surplus, &diff[curcpu],
				      &diff[curcpu].user, threshold);

			if (diff[curcpu].user + diff[curcpu].system >= threshold)
				continue;

			/* If there is still room, add system */
			add_cpu_usage(&system_surplus, &diff[curcpu],
				      &diff[curcpu].system, threshold);
		}

		if (user_surplus > 0)
			lxcfs_debug("leftover user: %lu for %s\n", user_surplus, cg);
		if (system_surplus > 0)
			lxcfs_debug("leftover system: %lu for %s\n", system_surplus, cg);

		for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
			if (!stat_node->usage[curcpu].online)
				continue;

			i++;

			if (i == max_cpus)
				break;

			stat_node->view[curcpu].user += diff[curcpu].user;
			stat_node->view[curcpu].system += diff[curcpu].system;
			stat_node->view[curcpu].idle += diff[curcpu].idle;

			user_sum += stat_node->view[curcpu].user;
			system_sum += stat_node->view[curcpu].system;
			idle_sum += stat_node->view[curcpu].idle;

			diff_user += diff[curcpu].user;
			diff_system += diff[curcpu].system;
			diff_idle += diff[curcpu].idle;
			if (diff[curcpu].idle > max_diff_idle) {
				max_diff_idle = diff[curcpu].idle;
				max_diff_idle_index = curcpu;
			}

			lxcfs_v("curcpu: %d, diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", curcpu, diff[curcpu].user, diff[curcpu].system, diff[curcpu].idle);
		}
		lxcfs_v("total. diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", diff_user, diff_system, diff_idle);

		/* Revise the CPU usage view to support the partial-CPU case. */
		exact_cpus = exact_cpu_count(cg);
		if (exact_cpus < (double)max_cpus) {
			uint64_t delta = (uint64_t)((double)(diff_user + diff_system + diff_idle) * (1 - exact_cpus / (double)max_cpus));

			lxcfs_v("revising cpu usage view to match the exact cpu count [%f]\n", exact_cpus);
			lxcfs_v("delta: %lu\n", delta);
			lxcfs_v("idle_sum before: %lu\n", idle_sum);
			idle_sum = idle_sum > delta ? idle_sum - delta : 0;
			lxcfs_v("idle_sum after: %lu\n", idle_sum);

			curcpu = max_diff_idle_index;
			lxcfs_v("curcpu: %d, idle before: %lu\n", curcpu, stat_node->view[curcpu].idle);
			stat_node->view[curcpu].idle = stat_node->view[curcpu].idle > delta ? stat_node->view[curcpu].idle - delta : 0;
			lxcfs_v("curcpu: %d, idle after: %lu\n", curcpu, stat_node->view[curcpu].idle);
		}
	} else {
		for (curcpu = 0; curcpu < nprocs; curcpu++) {
			if (!stat_node->usage[curcpu].online)
				continue;

			stat_node->view[curcpu].user = stat_node->usage[curcpu].user;
			stat_node->view[curcpu].system = stat_node->usage[curcpu].system;
			stat_node->view[curcpu].idle = stat_node->usage[curcpu].idle;

			user_sum += stat_node->view[curcpu].user;
			system_sum += stat_node->view[curcpu].system;
			idle_sum += stat_node->view[curcpu].idle;
		}
	}
	/* Render the file */
	l = snprintf(buf, buf_size,
		     "cpu  %" PRIu64 " 0 %" PRIu64 " %" PRIu64 " 0 0 0 0 0 0\n",
		     user_sum, system_sum, idle_sum);
	lxcfs_v("cpu-all: %s\n", buf);
	if (l < 0)
		return log_error(0, "Failed to write cache");
	if (l >= buf_size)
		return log_error(0, "Write to cache was truncated");

	buf += l;
	buf_size -= l;
	total_len += l;

	/* Render visible CPUs */
	for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
		if (!stat_node->usage[curcpu].online)
			continue;

		i++;

		if (max_cpus > 0 && i == max_cpus)
			break;

		l = snprintf(buf, buf_size, "cpu%d %" PRIu64 " 0 %" PRIu64 " %" PRIu64 " 0 0 0 0 0 0\n",
			     i,
			     stat_node->view[curcpu].user,
			     stat_node->view[curcpu].system,
			     stat_node->view[curcpu].idle);
		lxcfs_v("cpu: %s\n", buf);
		if (l < 0)
			return log_error(0, "Failed to write cache");
		if (l >= buf_size)
			return log_error(0, "Write to cache was truncated");

		buf += l;
		buf_size -= l;
		total_len += l;
	}

	/* Pass the rest of /proc/stat, start with the last line read */
	l = snprintf(buf, buf_size, "%s", line);
	if (l < 0)
		return log_error(0, "Failed to write cache");
	if (l >= buf_size)
		return log_error(0, "Write to cache was truncated");

	buf += l;
	buf_size -= l;
	total_len += l;

	/* Pass the rest of the host's /proc/stat */
	while (getline(&line, &linelen, f) != -1) {
		l = snprintf(buf, buf_size, "%s", line);
		if (l < 0)
			return log_error(0, "Failed to write cache");
		if (l >= buf_size)
			return log_error(0, "Write to cache was truncated");

		buf += l;
		buf_size -= l;
		total_len += l;
	}

	pthread_mutex_unlock(&stat_node->lock);
	return total_len;
}
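
/*
 * Example added for orientation (not part of the upstream file): for a
 * cgroup limited to two CPUs the rendered view starts roughly like
 *
 *	cpu  1234 0 567 89012 0 0 0 0 0 0
 *	cpu0 617 0 284 44506 0 0 0 0 0 0
 *	cpu1 617 0 283 44506 0 0 0 0 0 0
 *
 * i.e. only user, system and idle are accounted per visible CPU, the other
 * /proc/stat fields are emitted as zeros, and the remainder of the host's
 * /proc/stat (intr, ctxt, btime, ...) is passed through untouched.
 */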
/*
 * Check whether this is a '^processor' line in /proc/cpuinfo.
 */
static inline bool is_processor_line(const char *line)
{
	int cpu;

	return sscanf(line, "processor : %d", &cpu) == 1;
}

static inline bool cpuline_in_cpuset(const char *line, const char *cpuset)
{
	int cpu;

	if (sscanf(line, "processor : %d", &cpu) == 1)
		return cpu_in_cpuset(cpu, cpuset);

	return false;
}
int proc_cpuinfo_read(char *buf, size_t size, off_t offset,
		      struct fuse_file_info *fi)
{
	__do_free char *cg = NULL, *cpuset = NULL, *line = NULL;
	__do_free void *fopen_cache = NULL;
	__do_fclose FILE *f = NULL;
	struct fuse_context *fc = fuse_get_context();
	struct lxcfs_opts *opts = (struct lxcfs_opts *)fc->private_data;
	struct file_info *d = INTTYPE_TO_PTR(fi->fh);
	size_t linelen = 0, total_len = 0;
	bool am_printing = false, firstline = true, is_s390x = false;
	int curcpu = -1, cpu, max_cpus = 0;
	bool use_view;
	char *cache = d->buf;
	size_t cache_size = d->buflen;

	if (offset) {
		size_t left;

		if (offset > d->size)
			return -EINVAL;

		if (!d->cached)
			return 0;

		left = d->size - offset;
		total_len = left > size ? size : left;
		memcpy(buf, cache + offset, total_len);

		return total_len;
	}

	pid_t initpid = lookup_initpid_in_store(fc->pid);
	if (initpid <= 1 || is_shared_pidns(initpid))
		initpid = fc->pid;

	cg = get_pid_cgroup(initpid, "cpuset");
	if (!cg)
		return read_file_fuse("proc/cpuinfo", buf, size, d);
	prune_init_slice(cg);

	cpuset = get_cpuset(cg);
	if (!cpuset)
		return 0;

	if (cgroup_ops->can_use_cpuview(cgroup_ops) && opts && opts->use_cfs)
		use_view = true;
	else
		use_view = false;
	if (use_view)
		max_cpus = max_cpu_count(cg);

	f = fopen_cached("/proc/cpuinfo", "re", &fopen_cache);
	if (!f)
		return 0;

	while (getline(&line, &linelen, f) != -1) {
		ssize_t l;

		if (firstline) {
			firstline = false;
			if (strstr(line, "IBM/S390") != NULL) {
				is_s390x = true;
				am_printing = true;
				continue;
			}
		}

		if (strncmp(line, "# processors:", 12) == 0)
			continue;

		if (is_processor_line(line)) {
			if (use_view && max_cpus > 0 && (curcpu + 1) == max_cpus)
				break;

			am_printing = cpuline_in_cpuset(line, cpuset);
			if (am_printing) {
				curcpu++;

				l = snprintf(cache, cache_size, "processor : %d\n", curcpu);
				if (l < 0)
					return log_error(0, "Failed to write cache");
				if (l >= cache_size)
					return log_error(0, "Write to cache was truncated");

				cache += l;
				cache_size -= l;
				total_len += l;
			}
			continue;
		} else if (is_s390x && sscanf(line, "processor %d:", &cpu) == 1) {
			char *p;

			if (use_view && max_cpus > 0 && (curcpu + 1) == max_cpus)
				break;

			if (!cpu_in_cpuset(cpu, cpuset))
				continue;

			curcpu++;

			p = strchr(line, ':');
			if (!p || !*p)
				return 0;
			p++;

			l = snprintf(cache, cache_size, "processor %d:%s", curcpu, p);
			if (l < 0)
				return log_error(0, "Failed to write cache");
			if (l >= cache_size)
				return log_error(0, "Write to cache was truncated");

			cache += l;
			cache_size -= l;
			total_len += l;
			continue;
		}

		if (am_printing) {
			l = snprintf(cache, cache_size, "%s", line);
			if (l < 0)
				return log_error(0, "Failed to write cache");
			if (l >= cache_size)
				return log_error(0, "Write to cache was truncated");

			cache += l;
			cache_size -= l;
			total_len += l;
		}
	}

	if (is_s390x) {
		__do_free char *origcache = d->buf;
		ssize_t l;

		d->buf = malloc(d->buflen);
		if (!d->buf) {
			d->buf = move_ptr(origcache);
			return 0;
		}

		cache = d->buf;
		cache_size = d->buflen;
		total_len = 0;

		l = snprintf(cache, cache_size, "vendor_id : IBM/S390\n");
		if (l < 0 || l >= cache_size)
			return 0;

		cache_size -= l;
		cache += l;
		total_len += l;

		l = snprintf(cache, cache_size, "# processors : %d\n", curcpu + 1);
		if (l < 0 || l >= cache_size)
			return 0;

		cache_size -= l;
		cache += l;
		total_len += l;

		l = snprintf(cache, cache_size, "%s", origcache);
		if (l < 0 || l >= cache_size)
			return 0;
		total_len += l;
	}

	d->cached = 1;
	d->size = total_len;
	if (total_len > size)
		total_len = size;

	/* read from off 0 */
	memcpy(buf, d->buf, total_len);

	return total_len;
}
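
/*
 * Example added for orientation (not part of the upstream file): the view
 * keeps only the "processor" stanzas whose CPU is in the cgroup's cpuset
 * (at most max_cpus of them when CFS quotas are honoured) and renumbers
 * them from 0, so a container pinned to host CPUs 4 and 5 still sees
 * "processor : 0" and "processor : 1" in its /proc/cpuinfo.
 */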
/*
 * Returns 0 on success.
 * It is the caller's responsibility to free `return_usage`, unless this
 * function returns an error.
 */
int read_cpuacct_usage_all(char *cg, char *cpuset,
			   struct cpuacct_usage **return_usage, int *size)
{
	__do_free char *usage_str = NULL;
	__do_free struct cpuacct_usage *cpu_usage = NULL;
	int i = 0, j = 0, read_pos = 0, read_cnt = 0;
	int cpucount;
	int ret;
	int cg_cpu;
	uint64_t cg_user, cg_system;
	int64_t ticks_per_sec;

	ticks_per_sec = sysconf(_SC_CLK_TCK);
	if (ticks_per_sec < 0 && errno == EINVAL) {
		lxcfs_debug("%m - Failed to determine number of ticks per second");
		return -1;
	}

	cpucount = get_nprocs_conf();
	cpu_usage = malloc(sizeof(struct cpuacct_usage) * cpucount);
	if (!cpu_usage)
		return -ENOMEM;

	memset(cpu_usage, 0, sizeof(struct cpuacct_usage) * cpucount);
	if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_all", &usage_str)) {
		char *sep = " \t\n";
		char *tok;

		/* Read cpuacct.usage_percpu instead. */
		lxcfs_debug("Falling back to cpuacct.usage_percpu");
		if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_percpu", &usage_str))
			return -1;

		lxc_iterate_parts(tok, usage_str, sep) {
			uint64_t percpu_user;

			if (i >= cpucount)
				break;

			tok = trim_whitespace_in_place(tok);
			ret = safe_uint64(tok, &percpu_user, 10);
			if (ret)
				return -1;

			/* Convert the time from nanoseconds to USER_HZ */
			cpu_usage[i].user = percpu_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
			cpu_usage[i].system = cpu_usage[i].user;
			i++;
			lxcfs_debug("cpu%d with time %s", i, tok);
		}
	} else {
		if (sscanf(usage_str, "cpu user system\n%n", &read_cnt) != 0)
			return log_error(-1, "read_cpuacct_usage_all reading first line from %s/cpuacct.usage_all failed", cg);

		read_pos += read_cnt;

		for (i = 0, j = 0; i < cpucount; i++) {
			ret = sscanf(usage_str + read_pos,
				     "%d %" PRIu64 " %" PRIu64 "\n%n", &cg_cpu,
				     &cg_user, &cg_system, &read_cnt);
			if (ret == EOF)
				break;

			if (ret != 3)
				return log_error(-EINVAL, "Failed to parse cpuacct.usage_all line %s from cgroup %s",
						 usage_str + read_pos, cg);

			read_pos += read_cnt;

			/* Convert the time from nanoseconds to USER_HZ */
			cpu_usage[j].user = cg_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
			cpu_usage[j].system = cg_system / 1000.0 / 1000 / 1000 * ticks_per_sec;
			j++;
		}
	}

	*size = cpucount;
	*return_usage = move_ptr(cpu_usage);

	return 0;
}
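
/*
 * Example added for orientation (not part of the upstream file):
 * cpuacct.usage_all looks like
 *
 *	cpu user system
 *	0 123456789 987654321
 *	1 ...
 *
 * with per-CPU times in nanoseconds. With the common _SC_CLK_TCK of 100,
 * 123456789 ns * 100 / 1e9 is roughly 12 USER_HZ ticks, which is what the
 * division chain above computes.
 */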
static bool cpuview_init_head(struct cg_proc_stat_head **head)
{
	*head = malloc(sizeof(struct cg_proc_stat_head));
	if (!(*head))
		return log_error(false, "%s", strerror(errno));

	(*head)->lastcheck = time(NULL);
	(*head)->next = NULL;

	if (pthread_rwlock_init(&(*head)->lock, NULL) != 0) {
		free_disarm(*head);
		return log_error(false, "Failed to initialize list lock");
	}

	return true;
}
bool init_cpuview(void)
{
	int i;

	for (i = 0; i < CPUVIEW_HASH_SIZE; i++)
		proc_stat_history[i] = NULL;

	for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		if (!cpuview_init_head(&proc_stat_history[i]))
			goto err;
	}

	return true;

err:
	for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		if (proc_stat_history[i])
			free_disarm(proc_stat_history[i]);
	}

	return false;
}
static void cpuview_free_head(struct cg_proc_stat_head *head)
{
	struct cg_proc_stat *node;

	if (head->next) {
		node = head->next;

		do {
			struct cg_proc_stat *cur = node;
			node = node->next;
			free_proc_stat_node(cur);
		} while (node);
	}

	pthread_rwlock_destroy(&head->lock);
	free_disarm(head);
}
void free_cpuview(void)
{
	for (int i = 0; i < CPUVIEW_HASH_SIZE; i++)
		if (proc_stat_history[i])
			cpuview_free_head(proc_stat_history[i]);
}