/* SPDX-License-Identifier: LGPL-2.1+ */
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#ifndef FUSE_USE_VERSION
-#define FUSE_USE_VERSION 26
-#endif
-
-#define _FILE_OFFSET_BITS 64
+#include "config.h"
-#define __STDC_FORMAT_MACROS
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
-#include <fuse.h>
#include <inttypes.h>
#include <libgen.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <sys/vfs.h>
+#include "proc_cpuview.h"
+
#include "bindings.h"
-#include "config.h"
#include "cgroup_fuse.h"
#include "cpuset_parse.h"
#include "cgroups/cgroup.h"
/* Data for CPU view */
struct cg_proc_stat {
char *cg;
- struct cpuacct_usage *usage; // Real usage as read from the host's /proc/stat
- struct cpuacct_usage *view; // Usage stats reported to the container
+ struct cpuacct_usage *usage; /* Real usage as read from the host's /proc/stat. */
+ struct cpuacct_usage *view; /* Usage stats reported to the container. */
int cpu_count;
- pthread_mutex_t lock; // For node manipulation
+ pthread_mutex_t lock; /* For node manipulation. */
struct cg_proc_stat *next;
};
__do_free struct cpuacct_usage *new_usage = NULL, *new_view = NULL;
/* Allocate new memory */
- new_usage = malloc(sizeof(struct cpuacct_usage) * cpu_count);
+ new_usage = zalloc(sizeof(struct cpuacct_usage) * cpu_count);
if (!new_usage)
return false;
- new_view = malloc(sizeof(struct cpuacct_usage) * cpu_count);
+ new_view = zalloc(sizeof(struct cpuacct_usage) * cpu_count);
if (!new_view)
return false;
/* Copy existing data & initialize new elements */
for (int i = 0; i < cpu_count; i++) {
if (i < node->cpu_count) {
- new_usage[i].user = node->usage[i].user;
- new_usage[i].system = node->usage[i].system;
- new_usage[i].idle = node->usage[i].idle;
-
- new_view[i].user = node->view[i].user;
- new_view[i].system = node->view[i].system;
- new_view[i].idle = node->view[i].idle;
- } else {
- new_usage[i].user = 0;
- new_usage[i].system = 0;
- new_usage[i].idle = 0;
+ new_usage[i].user = node->usage[i].user;
+ new_usage[i].system = node->usage[i].system;
+ new_usage[i].idle = node->usage[i].idle;
- new_view[i].user = 0;
- new_view[i].system = 0;
- new_view[i].idle = 0;
+ new_view[i].user = node->view[i].user;
+ new_view[i].system = node->view[i].system;
+ new_view[i].idle = node->view[i].idle;
}
}
static void free_proc_stat_node(struct cg_proc_stat *node)
{
- pthread_mutex_destroy(&node->lock);
- free_disarm(node->cg);
- free_disarm(node->usage);
- free_disarm(node->view);
- free_disarm(node);
+ if (node) {
+ /*
+ * We're abusing the usage pointer to indicate that
+ * pthread_mutex_init() was successful. Don't judge me.
+ */
+ if (node->usage)
+ pthread_mutex_destroy(&node->lock);
+ free_disarm(node->cg);
+ free_disarm(node->usage);
+ free_disarm(node->view);
+ free_disarm(node);
+ }
}
+define_cleanup_function(struct cg_proc_stat *, free_proc_stat_node);
+
static struct cg_proc_stat *add_proc_stat_node(struct cg_proc_stat *new_node)
{
- int hash = calc_hash(new_node->cg) % CPUVIEW_HASH_SIZE;
+ call_cleaner(free_proc_stat_node) struct cg_proc_stat *new = new_node;
+ struct cg_proc_stat *rv = new_node;
+ int hash = calc_hash(new->cg) % CPUVIEW_HASH_SIZE;
struct cg_proc_stat_head *head = proc_stat_history[hash];
- struct cg_proc_stat *node, *rv = new_node;
+ struct cg_proc_stat *cur;
pthread_rwlock_wrlock(&head->lock);
if (!head->next) {
- head->next = new_node;
- goto out;
+ head->next = move_ptr(new);
+ goto out_rwlock_unlock;
}
- node = head->next;
+ cur = head->next;
for (;;) {
- if (strcmp(node->cg, new_node->cg) == 0) {
- /* The node is already present, return it */
- free_proc_stat_node(new_node);
- rv = node;
- goto out;
+ /*
+ * The node to be added is already present in the list, so
+ * free the newly allocated one and return the one we found.
+ */
+ if (strcmp(cur->cg, new->cg) == 0) {
+ rv = cur;
+ goto out_rwlock_unlock;
}
- if (node->next) {
- node = node->next;
+ /* Keep walking. */
+ if (cur->next) {
+ cur = cur->next;
continue;
}
- node->next = new_node;
- goto out;
+ /* Add new node to end of list. */
+ cur->next = move_ptr(new);
+ goto out_rwlock_unlock;
}
-out:
+out_rwlock_unlock:
pthread_rwlock_unlock(&head->lock);
- return rv;
+ return move_ptr(rv);
}
-static struct cg_proc_stat *new_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
+static struct cg_proc_stat *new_proc_stat_node(struct cpuacct_usage *usage,
+ int cpu_count, const char *cg)
{
- struct cg_proc_stat *node;
- int i;
+ call_cleaner(free_proc_stat_node) struct cg_proc_stat *node = NULL;
+ __do_free struct cpuacct_usage *new_usage = NULL;
- node = malloc(sizeof(struct cg_proc_stat));
+ node = zalloc(sizeof(struct cg_proc_stat));
if (!node)
- goto err;
-
- node->cg = NULL;
- node->usage = NULL;
- node->view = NULL;
+ return NULL;
- node->cg = malloc(strlen(cg) + 1);
+ node->cg = strdup(cg);
if (!node->cg)
- goto err;
-
- strcpy(node->cg, cg);
-
- node->usage = malloc(sizeof(struct cpuacct_usage) * cpu_count);
- if (!node->usage)
- goto err;
+ return NULL;
- memcpy(node->usage, usage, sizeof(struct cpuacct_usage) * cpu_count);
+ new_usage = memdup(usage, sizeof(struct cpuacct_usage) * cpu_count);
+ if (!new_usage)
+ return NULL;
- node->view = malloc(sizeof(struct cpuacct_usage) * cpu_count);
+ node->view = zalloc(sizeof(struct cpuacct_usage) * cpu_count);
if (!node->view)
- goto err;
+ return NULL;
node->cpu_count = cpu_count;
- node->next = NULL;
- if (pthread_mutex_init(&node->lock, NULL) != 0)
- log_error(goto err, "Failed to initialize node lock");
-
- for (i = 0; i < cpu_count; i++) {
- node->view[i].user = 0;
- node->view[i].system = 0;
- node->view[i].idle = 0;
- }
-
- return node;
+ if (pthread_mutex_init(&node->lock, NULL))
+ return NULL;
+ /*
+ * We're abusing the usage pointer to indicate that
+ * pthread_mutex_init() was successful. Don't judge me.
+ */
+ node->usage = move_ptr(new_usage);
-err:
- if (node && node->cg)
- free(node->cg);
- if (node && node->usage)
- free(node->usage);
- if (node && node->view)
- free(node->view);
- if (node)
- free(node);
-
- return NULL;
+ return move_ptr(node);
}
-static bool cgfs_param_exist(const char *controller, const char *cgroup,
- const char *file)
+static bool cgroup_supports(const char *controller, const char *cgroup,
+ const char *file)
{
__do_free char *path = NULL;
int cfd;
if (cfd < 0)
return false;
- if (is_relative(cgroup))
- path = must_make_path(cgroup, file, NULL);
- else
- path = must_make_path(".", cgroup, file, NULL);
-
- return (faccessat(cfd, path, F_OK, 0) == 0);
+ path = must_make_path_relative(cgroup, file, NULL);
+ return faccessat(cfd, path, F_OK, 0) == 0;
}
static struct cg_proc_stat *prune_proc_stat_list(struct cg_proc_stat *node)
struct cg_proc_stat *first = NULL;
for (struct cg_proc_stat *prev = NULL; node; ) {
- if (!cgfs_param_exist("cpu", node->cg, "cpu.shares")) {
- struct cg_proc_stat *tmp = node;
-
- lxcfs_debug("Removing stat node for %s\n", node->cg);
+ if (!cgroup_supports("cpu", node->cg, "cpu.shares")) {
+ struct cg_proc_stat *cur = node;
if (prev)
prev->next = node->next;
else
first = node->next;
node = node->next;
- free_proc_stat_node(tmp);
+ lxcfs_debug("Removing stat node for %s\n", cur->cg);
+
+ free_proc_stat_node(cur);
} else {
if (!first)
first = node;
return node;
}
-static struct cg_proc_stat *find_or_create_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
+static struct cg_proc_stat *find_or_create_proc_stat_node(struct cpuacct_usage *usage,
+ int cpu_count, const char *cg)
{
int hash = calc_hash(cg) % CPUVIEW_HASH_SIZE;
struct cg_proc_stat_head *head = proc_stat_history[hash];
pthread_mutex_lock(&node->lock);
- /* If additional CPUs on the host have been enabled, CPU usage counter
- * arrays have to be expanded */
+ /*
+ * If additional CPUs on the host have been enabled, CPU usage counter
+ * arrays have to be expanded.
+ */
if (node->cpu_count < cpu_count) {
lxcfs_debug("Expanding stat node %d->%d for %s\n",
- node->cpu_count, cpu_count, cg);
+ node->cpu_count, cpu_count, cg);
if (!expand_proc_stat_node(node, cpu_count)) {
pthread_mutex_unlock(&node->lock);
if (free_space > usage->idle)
free_space = usage->idle;
- to_add = free_space > *surplus ? *surplus : free_space;
+ if (free_space > *surplus)
+ to_add = *surplus;
+ else
+ to_add = free_space;
*counter += to_add;
usage->idle -= to_add;
/*
* Read cgroup CPU quota parameters from `cpu.cfs_quota_us` or
* `cpu.cfs_period_us`, depending on `param`. Parameter value is returned
- * throuh `value`.
+ * through `value`.
*/
static bool read_cpu_cfs_param(const char *cg, const char *param, int64_t *value)
{
__do_free char *str = NULL;
- char file[11 + 6 + 1]; /* cpu.cfs__us + quota/period + \0 */
+ char file[STRLITERALLEN("cpu.cfs_period_us") + 1];
bool first = true;
+ int ret;
- if (!pure_unified_layout(cgroup_ops)) {
- snprintf(file, sizeof(file), "cpu.cfs_%s_us", param);
- } else {
- strcpy(file, "cpu.max");
+ if (pure_unified_layout(cgroup_ops)) {
first = !strcmp(param, "quota");
+ ret = snprintf(file, sizeof(file), "cpu.max");
+ } else {
+ ret = snprintf(file, sizeof(file), "cpu.cfs_%s_us", param);
}
-
- if (!cgroup_ops->get(cgroup_ops, "cpu", cg, file, &str))
+ if (ret < 0 || (size_t)ret >= sizeof(file))
return false;
- if (sscanf(str, first ? "%" PRId64 : "%*" PRId64 " %" PRId64, value) != 1)
+ if (!cgroup_ops->get(cgroup_ops, "cpu", cg, file, &str))
return false;
- return true;
+ return sscanf(str, first ? "%" PRId64 : "%*" PRId64 " %" PRId64, value) == 1;
}
/*
int nprocs;
int64_t cfs_quota, cfs_period;
- read_cpu_cfs_param(cg, "quota", &cfs_quota);
- read_cpu_cfs_param(cg, "period", &cfs_period);
+ if (!read_cpu_cfs_param(cg, "quota", &cfs_quota))
+ return 0;
+
+ if (!read_cpu_cfs_param(cg, "period", &cfs_period))
+ return 0;
if (cfs_quota <= 0 || cfs_period <= 0)
return 0;
int64_t cfs_quota, cfs_period;
int nr_cpus_in_cpuset = 0;
- read_cpu_cfs_param(cg, "quota", &cfs_quota);
- read_cpu_cfs_param(cg, "period", &cfs_period);
+ if (!read_cpu_cfs_param(cg, "quota", &cfs_quota))
+ return 0;
+
+ if (!read_cpu_cfs_param(cg, "period", &cfs_period))
+ return 0;
cpuset = get_cpuset(cg);
if (cpuset)
nr_cpus_in_cpuset = cpu_number_in_cpuset(cpuset);
- if (cfs_quota <= 0 || cfs_period <= 0){
+ if (cfs_quota <= 0 || cfs_period <= 0) {
if (nr_cpus_in_cpuset > 0)
return nr_cpus_in_cpuset;
rv = cfs_quota / cfs_period;
- /* In case quota/period does not yield a whole number, add one CPU for
+ /*
+ * In case quota/period does not yield a whole number, add one CPU for
* the remainder.
*/
if ((cfs_quota % cfs_period) > 0)
if (rv > nprocs)
rv = nprocs;
- /* use min value in cpu quota and cpuset */
+ /* Use min value in cpu quota and cpuset. */
if (nr_cpus_in_cpuset > 0 && nr_cpus_in_cpuset < rv)
rv = nr_cpus_in_cpuset;
if (all_used >= cg_used) {
cg_cpu_usage[curcpu].idle = idle + (all_used - cg_used);
-
} else {
- lxcfs_error("cpu%d from %s has unexpected cpu time: %" PRIu64 " in /proc/stat, %" PRIu64 " in cpuacct.usage_all; unable to determine idle time",
- curcpu, cg, all_used, cg_used);
+ lxcfs_v("cpu%d from %s has unexpected cpu time: %" PRIu64 " in /proc/stat, %" PRIu64 " in cpuacct.usage_all; unable to determine idle time",
+ curcpu, cg, all_used, cg_used);
cg_cpu_usage[curcpu].idle = idle;
}
}
if (max_cpus > cpu_cnt || !max_cpus)
max_cpus = cpu_cnt;
+ /* takes lock pthread_mutex_lock(&node->lock) */
stat_node = find_or_create_proc_stat_node(cg_cpu_usage, nprocs, cg);
if (!stat_node)
return log_error(0, "Failed to find/create stat node for %s", cg);
- diff = malloc(sizeof(struct cpuacct_usage) * nprocs);
+ diff = zalloc(sizeof(struct cpuacct_usage) * nprocs);
if (!diff)
- return 0;
+ goto out_pthread_mutex_unlock;
/*
* If the new values are LOWER than values stored in memory, it means
i++;
- stat_node->usage[curcpu].user += diff[curcpu].user;
+ stat_node->usage[curcpu].user += diff[curcpu].user;
stat_node->usage[curcpu].system += diff[curcpu].system;
- stat_node->usage[curcpu].idle += diff[curcpu].idle;
+ stat_node->usage[curcpu].idle += diff[curcpu].idle;
if (max_cpus > 0 && i >= max_cpus) {
- user_surplus += diff[curcpu].user;
- system_surplus += diff[curcpu].system;
+ user_surplus += diff[curcpu].user;
+ system_surplus += diff[curcpu].system;
}
}
uint64_t max_diff_idle = 0;
uint64_t max_diff_idle_index = 0;
double exact_cpus;
-
/* threshold = maximum usage per cpu, including idle */
threshold = total_sum / cpu_cnt * max_cpus;
if (i == max_cpus)
break;
- stat_node->view[curcpu].user += diff[curcpu].user;
- stat_node->view[curcpu].system += diff[curcpu].system;
- stat_node->view[curcpu].idle += diff[curcpu].idle;
+ stat_node->view[curcpu].user += diff[curcpu].user;
+ stat_node->view[curcpu].system += diff[curcpu].system;
+ stat_node->view[curcpu].idle += diff[curcpu].idle;
- user_sum += stat_node->view[curcpu].user;
- system_sum += stat_node->view[curcpu].system;
- idle_sum += stat_node->view[curcpu].idle;
+ user_sum += stat_node->view[curcpu].user;
+ system_sum += stat_node->view[curcpu].system;
+ idle_sum += stat_node->view[curcpu].idle;
- diff_user += diff[curcpu].user;
- diff_system += diff[curcpu].system;
- diff_idle += diff[curcpu].idle;
+ diff_user += diff[curcpu].user;
+ diff_system += diff[curcpu].system;
+ diff_idle += diff[curcpu].idle;
if (diff[curcpu].idle > max_diff_idle) {
- max_diff_idle = diff[curcpu].idle;
- max_diff_idle_index = curcpu;
+ max_diff_idle = diff[curcpu].idle;
+ max_diff_idle_index = curcpu;
}
lxcfs_v("curcpu: %d, diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", curcpu, diff[curcpu].user, diff[curcpu].system, diff[curcpu].idle);
lxcfs_v("revising cpu usage view to match the exact cpu count [%f]\n", exact_cpus);
lxcfs_v("delta: %lu\n", delta);
lxcfs_v("idle_sum before: %lu\n", idle_sum);
- idle_sum = idle_sum > delta ? idle_sum - delta : 0;
+ if (idle_sum > delta)
+ idle_sum = idle_sum - delta;
+ else
+ idle_sum = 0;
lxcfs_v("idle_sum after: %lu\n", idle_sum);
curcpu = max_diff_idle_index;
lxcfs_v("curcpu: %d, idle before: %lu\n", curcpu, stat_node->view[curcpu].idle);
- stat_node->view[curcpu].idle = stat_node->view[curcpu].idle > delta ? stat_node->view[curcpu].idle - delta : 0;
+ if (stat_node->view[curcpu].idle > delta)
+ stat_node->view[curcpu].idle = stat_node->view[curcpu].idle - delta;
+ else
+ stat_node->view[curcpu].idle = 0;
lxcfs_v("curcpu: %d, idle after: %lu\n", curcpu, stat_node->view[curcpu].idle);
}
} else {
if (!stat_node->usage[curcpu].online)
continue;
- stat_node->view[curcpu].user = stat_node->usage[curcpu].user;
- stat_node->view[curcpu].system = stat_node->usage[curcpu].system;
- stat_node->view[curcpu].idle = stat_node->usage[curcpu].idle;
+ stat_node->view[curcpu].user = stat_node->usage[curcpu].user;
+ stat_node->view[curcpu].system = stat_node->usage[curcpu].system;
+ stat_node->view[curcpu].idle = stat_node->usage[curcpu].idle;
- user_sum += stat_node->view[curcpu].user;
- system_sum += stat_node->view[curcpu].system;
- idle_sum += stat_node->view[curcpu].idle;
+ user_sum += stat_node->view[curcpu].user;
+ system_sum += stat_node->view[curcpu].system;
+ idle_sum += stat_node->view[curcpu].idle;
}
}
"cpu %" PRIu64 " 0 %" PRIu64 " %" PRIu64 " 0 0 0 0 0 0\n",
user_sum, system_sum, idle_sum);
lxcfs_v("cpu-all: %s\n", buf);
- if (l < 0)
- return log_error(0, "Failed to write cache");
- if (l >= buf_size)
- return log_error(0, "Write to cache was truncated");
+ if (l < 0) {
+ lxcfs_error("Failed to write cache");
+ total_len = 0;
+ goto out_pthread_mutex_unlock;
+ }
+ if ((size_t)l >= buf_size) {
+ lxcfs_error("Write to cache was truncated");
+ total_len = 0;
+ goto out_pthread_mutex_unlock;
+ }
buf += l;
buf_size -= l;
stat_node->view[curcpu].system,
stat_node->view[curcpu].idle);
lxcfs_v("cpu: %s\n", buf);
- if (l < 0)
- return log_error(0, "Failed to write cache");
- if (l >= buf_size)
- return log_error(0, "Write to cache was truncated");
+ if (l < 0) {
+ lxcfs_error("Failed to write cache");
+ total_len = 0;
+ goto out_pthread_mutex_unlock;
+ }
+ if ((size_t)l >= buf_size) {
+ lxcfs_error("Write to cache was truncated");
+ total_len = 0;
+ goto out_pthread_mutex_unlock;
+ }
buf += l;
buf_size -= l;
/* Pass the rest of /proc/stat, start with the last line read */
l = snprintf(buf, buf_size, "%s", line);
- if (l < 0)
- return log_error(0, "Failed to write cache");
- if (l >= buf_size)
- return log_error(0, "Write to cache was truncated");
+ if (l < 0) {
+ lxcfs_error("Failed to write cache");
+ total_len = 0;
+ goto out_pthread_mutex_unlock;
+ }
+ if ((size_t)l >= buf_size) {
+ lxcfs_error("Write to cache was truncated");
+ total_len = 0;
+ goto out_pthread_mutex_unlock;
+ }
buf += l;
buf_size -= l;
/* Pass the rest of the host's /proc/stat */
while (getline(&line, &linelen, f) != -1) {
l = snprintf(buf, buf_size, "%s", line);
- if (l < 0)
- return log_error(0, "Failed to write cache");
- if (l >= buf_size)
- return log_error(0, "Write to cache was truncated");
+ if (l < 0) {
+ lxcfs_error("Failed to write cache");
+ total_len = 0;
+ goto out_pthread_mutex_unlock;
+ }
+ if ((size_t)l >= buf_size) {
+ lxcfs_error("Write to cache was truncated");
+ total_len = 0;
+ goto out_pthread_mutex_unlock;
+ }
buf += l;
buf_size -= l;
total_len += l;
}
+out_pthread_mutex_unlock:
if (stat_node)
pthread_mutex_unlock(&stat_node->lock);
size_t cache_size = d->buflen;
if (offset) {
- int left;
+ size_t left;
if (offset > d->size)
return -EINVAL;
l = snprintf(cache, cache_size, "processor : %d\n", curcpu);
if (l < 0)
return log_error(0, "Failed to write cache");
- if (l >= cache_size)
+ if ((size_t)l >= cache_size)
return log_error(0, "Write to cache was truncated");
cache += l;
cache_size -= l;
l = snprintf(cache, cache_size, "processor %d:%s", curcpu, p);
if (l < 0)
return log_error(0, "Failed to write cache");
- if (l >= cache_size)
+ if ((size_t)l >= cache_size)
return log_error(0, "Write to cache was truncated");
cache += l;
l = snprintf(cache, cache_size, "%s", line);
if (l < 0)
return log_error(0, "Failed to write cache");
- if (l >= cache_size)
+ if ((size_t)l >= cache_size)
return log_error(0, "Write to cache was truncated");
cache += l;
cache_size = d->buflen;
total_len = 0;
l = snprintf(cache, cache_size, "vendor_id : IBM/S390\n");
- if (l < 0 || l >= cache_size)
+ if (l < 0 || (size_t)l >= cache_size)
return 0;
cache_size -= l;
cache += l;
total_len += l;
l = snprintf(cache, cache_size, "# processors : %d\n", curcpu + 1);
- if (l < 0 || l >= cache_size)
+ if (l < 0 || (size_t)l >= cache_size)
return 0;
cache_size -= l;
cache += l;
total_len += l;
l = snprintf(cache, cache_size, "%s", origcache);
- if (l < 0 || l >= cache_size)
+ if (l < 0 || (size_t)l >= cache_size)
return 0;
total_len += l;
}
{
__do_free char *usage_str = NULL;
__do_free struct cpuacct_usage *cpu_usage = NULL;
- int cpucount;
int i = 0, j = 0, read_pos = 0, read_cnt = 0;
+ int cpucount;
int ret;
int cg_cpu;
uint64_t cg_user, cg_system;
ticks_per_sec = sysconf(_SC_CLK_TCK);
if (ticks_per_sec < 0 && errno == EINVAL) {
- lxcfs_v(
- "%s\n",
- "read_cpuacct_usage_all failed to determine number of clock ticks "
- "in a second");
+ lxcfs_debug("%m - Failed to determine number of ticks per second");
return -1;
}
memset(cpu_usage, 0, sizeof(struct cpuacct_usage) * cpucount);
if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_all", &usage_str)) {
- char *data = NULL;
- size_t sz = 0, asz = 0;
+ char *sep = " \t\n";
+ char *tok;
- /* read cpuacct.usage_percpu instead. */
- lxcfs_v("failed to read cpuacct.usage_all. reading cpuacct.usage_percpu instead\n%s", "");
+ /* Read cpuacct.usage_percpu instead. */
+ lxcfs_debug("Falling back to cpuacct.usage_percpu");
if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_percpu", &usage_str))
return -1;
- lxcfs_v("usage_str: %s\n", usage_str);
- /* convert cpuacct.usage_percpu into cpuacct.usage_all. */
- lxcfs_v("converting cpuacct.usage_percpu into cpuacct.usage_all\n%s", "");
+ lxc_iterate_parts(tok, usage_str, sep) {
+ uint64_t percpu_user;
+
+ if (i >= cpucount)
+ break;
- must_strcat(&data, &sz, &asz, "cpu user system\n");
+ tok = trim_whitespace_in_place(tok);
+ ret = safe_uint64(tok, &percpu_user, 10);
+ if (ret)
+ return -1;
- while (sscanf(usage_str + read_pos, "%" PRIu64 " %n", &cg_user, &read_cnt) > 0) {
- lxcfs_debug("i: %d, cg_user: %" PRIu64 ", read_pos: %d, read_cnt: %d\n", i, cg_user, read_pos, read_cnt);
- must_strcat(&data, &sz, &asz, "%d %lu 0\n", i, cg_user);
+ /* Convert the time from nanoseconds to USER_HZ */
+ cpu_usage[i].user = percpu_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
+ cpu_usage[i].system = cpu_usage[i].user;
i++;
- read_pos += read_cnt;
+ lxcfs_debug("cpu%d with time %s", i - 1, tok);
}
+ } else {
+ if (sscanf(usage_str, "cpu user system\n%n", &read_cnt) != 0)
+ return log_error(-1, "read_cpuacct_usage_all reading first line from %s/cpuacct.usage_all failed", cg);
- usage_str = data;
-
- lxcfs_v("usage_str: %s\n", usage_str);
- }
-
- if (sscanf(usage_str, "cpu user system\n%n", &read_cnt) != 0)
- return log_error(-1, "read_cpuacct_usage_all reading first line from %s/cpuacct.usage_all failed", cg);
-
- read_pos += read_cnt;
+ read_pos += read_cnt;
- for (i = 0, j = 0; i < cpucount; i++) {
- ret = sscanf(usage_str + read_pos,
- "%d %" PRIu64 " %" PRIu64 "\n%n", &cg_cpu,
- &cg_user, &cg_system, &read_cnt);
+ for (i = 0, j = 0; i < cpucount; i++) {
+ ret = sscanf(usage_str + read_pos,
+ "%d %" PRIu64 " %" PRIu64 "\n%n", &cg_cpu,
+ &cg_user, &cg_system, &read_cnt);
- if (ret == EOF)
- break;
+ if (ret == EOF)
+ break;
- if (ret != 3)
- return log_error(-1, "read_cpuacct_usage_all reading from %s/cpuacct.usage_all failed", cg);
+ if (ret != 3)
+ return log_error(-EINVAL, "Failed to parse cpuacct.usage_all line %s from cgroup %s",
+ usage_str + read_pos, cg);
- read_pos += read_cnt;
+ read_pos += read_cnt;
- /* Convert the time from nanoseconds to USER_HZ */
- cpu_usage[j].user = cg_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
- cpu_usage[j].system = cg_system / 1000.0 / 1000 / 1000 * ticks_per_sec;
- j++;
+ /* Convert the time from nanoseconds to USER_HZ */
+ cpu_usage[j].user = cg_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
+ cpu_usage[j].system = cg_system / 1000.0 / 1000 / 1000 * ticks_per_sec;
+ j++;
+ }
}
*return_usage = move_ptr(cpu_usage);
static bool cpuview_init_head(struct cg_proc_stat_head **head)
{
- *head = malloc(sizeof(struct cg_proc_stat_head));
- if (!(*head))
- return log_error(false, "%s", strerror(errno));
+ __do_free struct cg_proc_stat_head *h;
- (*head)->lastcheck = time(NULL);
- (*head)->next = NULL;
+ h = zalloc(sizeof(struct cg_proc_stat_head));
+ if (!h)
+ return false;
- if (pthread_rwlock_init(&(*head)->lock, NULL) != 0) {
- free_disarm(*head);
- return log_error(false, "Failed to initialize list lock");
- }
+ if (pthread_rwlock_init(&h->lock, NULL))
+ return false;
+ h->lastcheck = time(NULL);
+
+ *head = move_ptr(h);
return true;
}
static void cpuview_free_head(struct cg_proc_stat_head *head)
{
- struct cg_proc_stat *node, *tmp;
+ struct cg_proc_stat *node;
if (head->next) {
node = head->next;
for (;;) {
- tmp = node;
+ struct cg_proc_stat *cur = node;
node = node->next;
- free_proc_stat_node(tmp);
-
+ free_proc_stat_node(cur);
if (!node)
break;
}