return rv;
}
+/* Grow the per-CPU usage/view counter arrays of @node to @cpu_count entries.
+ *
+ * Existing per-CPU counters are preserved; newly added entries are
+ * zero-initialized.  On success the node owns the new arrays and
+ * node->cpu_count is updated; on failure the node is left untouched.
+ * Assumes node->cpu_count <= cpu_count (the caller only invokes this when
+ * expanding) and that the caller holds node->lock.
+ *
+ * Returns true on success, false on allocation failure.
+ */
+static bool expand_proc_stat_node(struct cg_proc_stat *node, int cpu_count)
+{
+	struct cpuacct_usage *new_usage, *new_view;
+
+	/* calloc() zero-initializes the trailing new elements and, unlike
+	 * malloc(count * size), checks the multiplication for overflow. */
+	new_usage = calloc(cpu_count, sizeof(struct cpuacct_usage));
+	if (!new_usage)
+		return false;
+
+	new_view = calloc(cpu_count, sizeof(struct cpuacct_usage));
+	if (!new_view) {
+		free(new_usage);
+		return false;
+	}
+
+	/* Copy the existing entries wholesale so every member of
+	 * struct cpuacct_usage is preserved, not just a hand-picked
+	 * subset of fields. */
+	memcpy(new_usage, node->usage,
+	       sizeof(struct cpuacct_usage) * node->cpu_count);
+	memcpy(new_view, node->view,
+	       sizeof(struct cpuacct_usage) * node->cpu_count);
+
+	free(node->usage);
+	free(node->view);
+
+	node->usage = new_usage;
+	node->view = new_view;
+	node->cpu_count = cpu_count;
+
+	return true;
+}
+
static struct cg_proc_stat *find_or_create_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
{
int hash = calc_hash(cg) % CPUVIEW_HASH_SIZE;
}
pthread_mutex_lock(&node->lock);
+
+ /* If additional CPUs on the host have been enabled, CPU usage counter
+ * arrays have to be expanded */
+ if (node->cpu_count < cpu_count) {
+ lxcfs_debug("Expanding stat node %d->%d for %s\n",
+ node->cpu_count, cpu_count, cg);
+
+ /* Expansion can only fail on allocation failure; in that case the
+ * node is left unchanged — drop the lock and signal the caller by
+ * returning NULL. */
+ if (!expand_proc_stat_node(node, cpu_count)) {
+ pthread_mutex_unlock(&node->lock);
+ lxcfs_debug("Unable to expand stat node %d->%d for %s\n",
+ node->cpu_count, cpu_count, cg);
+ return NULL;
+ }
+ }
+
+ /* NOTE(review): node is returned with node->lock held — presumably the
+ * caller is responsible for unlocking; confirm against call sites. */
return node;
}