CPU view: handle CPU hotplug at runtime
diff --git a/bindings.c b/bindings.c
index b20eaa053b92877683163a27206143c8c9eed36a..e838441c01213255c479057c1a5b2c7f1b2ac829 100644
--- a/bindings.c
+++ b/bindings.c
@@ -80,6 +80,12 @@ struct file_info {
        int cached;
 };
 
+struct cpuacct_usage {
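+       /* Cumulative per-CPU times, kept in USER_HZ ticks once converted. */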
+       uint64_t user;
+       uint64_t system;
+       uint64_t idle;
+};
+
 /* The function of hash table.*/
 #define LOAD_SIZE 100 /*the size of hash_table */
 #define FLUSH_TIME 5  /*the flush rate */
@@ -92,12 +98,13 @@ struct file_info {
 #define EXP_15         2037            /* 1/exp(5sec/15min) */
 #define LOAD_INT(x) ((x) >> FSHIFT)
 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-/* 
+/*
  * This parameter is used for proc_loadavg_read().
  * 1 means use loadavg, 0 means not use.
  */
 static int loadavg = 0;
-static int calc_hash(char *name)
+static volatile sig_atomic_t loadavg_stop = 0;
+static int calc_hash(const char *name)
 {
        unsigned int hash = 0;
        unsigned int x = 0;
@@ -109,7 +116,7 @@ static int calc_hash(char *name)
                        hash ^= (x >> 24);
                hash &= ~x;
        }
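+       /* Callers reduce the result modulo their own table size
+        * (LOAD_SIZE or CPUVIEW_HASH_SIZE). */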
-       return ((hash & 0x7fffffff) % LOAD_SIZE);
+       return (hash & 0x7fffffff);
 }
 
 struct load_node {
@@ -247,7 +254,7 @@ static struct load_node *del_node(struct load_node *n, int locate)
        return g;
 }
 
-void load_free(void)
+static void load_free(void)
 {
        int i;
        struct load_node *f, *p;
@@ -279,6 +286,115 @@ void load_free(void)
                pthread_rwlock_destroy(&load_hash[i].rdlock);
        }
 }
+
+/* Data for CPU view */
+struct cg_proc_stat {
+       char *cg;
+       struct cpuacct_usage *usage; // Real usage as read from the host (cpuacct + /proc/stat)
+       struct cpuacct_usage *view; // Usage stats reported to the container
+       int cpu_count;
+       pthread_mutex_t lock; // For node manipulation
+       struct cg_proc_stat *next;
+};
+
+struct cg_proc_stat_head {
+       struct cg_proc_stat *next;
+       time_t lastcheck;
+
+       /*
+        * For access to the list. Reading can be parallel, pruning is exclusive.
+        */
+       pthread_rwlock_t lock;
+};
+
+#define CPUVIEW_HASH_SIZE 100
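+/* Per-cgroup CPU usage history, hashed by cgroup path via
+ * calc_hash() % CPUVIEW_HASH_SIZE. */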
+static struct cg_proc_stat_head *proc_stat_history[CPUVIEW_HASH_SIZE];
+
+static bool cpuview_init_head(struct cg_proc_stat_head **head)
+{
+       *head = malloc(sizeof(struct cg_proc_stat_head));
+       if (!(*head)) {
+               lxcfs_error("%s\n", strerror(errno));
+               return false;
+       }
+
+       (*head)->lastcheck = time(NULL);
+       (*head)->next = NULL;
+
+       if (pthread_rwlock_init(&(*head)->lock, NULL) != 0) {
+               lxcfs_error("%s\n", "Failed to initialize list lock");
+               free(*head);
+               return false;
+       }
+
+       return true;
+}
+
+static bool init_cpuview()
+{
+       int i;
+
+       for (i = 0; i < CPUVIEW_HASH_SIZE; i++)
+               proc_stat_history[i] = NULL;
+
+       for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
+               if (!cpuview_init_head(&proc_stat_history[i]))
+                       goto err;
+       }
+
+       return true;
+
+err:
+       for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
+               if (proc_stat_history[i]) {
+                       free(proc_stat_history[i]);
+                       proc_stat_history[i] = NULL;
+               }
+       }
+
+       return false;
+}
+
+static void free_proc_stat_node(struct cg_proc_stat *node)
+{
+       pthread_mutex_destroy(&node->lock);
+       free(node->cg);
+       free(node->usage);
+       free(node->view);
+       free(node);
+}
+
+static void cpuview_free_head(struct cg_proc_stat_head *head)
+{
+       struct cg_proc_stat *node, *tmp;
+
+       if (head->next) {
+               node = head->next;
+
+               for (;;) {
+                       tmp = node;
+                       node = node->next;
+                       free_proc_stat_node(tmp);
+
+                       if (!node)
+                               break;
+               }
+       }
+
+       pthread_rwlock_destroy(&head->lock);
+       free(head);
+}
+
+static void free_cpuview()
+{
+       int i;
+
+       for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
+               if (proc_stat_history[i])
+                       cpuview_free_head(proc_stat_history[i]);
+       }
+}
+
 /* Reserve buffer size to account for file size changes. */
 #define BUF_RESERVE_SIZE 512
 
@@ -578,19 +694,24 @@ static bool write_string(const char *fnam, const char *string, int fd)
        FILE *f;
        size_t len, ret;
 
-       if (!(f = fdopen(fd, "w")))
+       f = fdopen(fd, "w");
+       if (!f)
                return false;
+
        len = strlen(string);
        ret = fwrite(string, 1, len, f);
        if (ret != len) {
-               lxcfs_error("Error writing to file: %s\n", strerror(errno));
+               lxcfs_error("%s - Error writing \"%s\" to \"%s\"\n",
+                           strerror(errno), string, fnam);
                fclose(f);
                return false;
        }
+
        if (fclose(f) < 0) {
-               lxcfs_error("Error writing to file: %s\n", strerror(errno));
+               lxcfs_error("%s - Failed to close \"%s\"\n", strerror(errno), fnam);
                return false;
        }
+
        return true;
 }
 
@@ -1082,6 +1203,28 @@ bool cgfs_get_value(const char *controller, const char *cgroup, const char *file
        return *value != NULL;
 }
 
+bool cgfs_param_exist(const char *controller, const char *cgroup, const char *file)
+{
+       int ret, cfd;
+       size_t len;
+       char *fnam, *tmpc;
+
+       tmpc = find_mounted_controller(controller, &cfd);
+       if (!tmpc)
+               return false;
+
+       /* Make sure we pass a relative path to *at() family of functions.
+        * . + /cgroup + / + file + \0
+        */
+       len = strlen(cgroup) + strlen(file) + 3;
+       fnam = alloca(len);
+       ret = snprintf(fnam, len, "%s%s/%s", *cgroup == '/' ? "." : "", cgroup, file);
+       if (ret < 0 || (size_t)ret >= len)
+               return false;
+
+       return (faccessat(cfd, fnam, F_OK, 0) == 0);
+}
+
 struct cgfs_files *cgfs_get_key(const char *controller, const char *cgroup, const char *file)
 {
        int ret, cfd;
@@ -2043,6 +2186,7 @@ static void do_release_file_info(struct fuse_file_info *fi)
        free(f->buf);
        f->buf = NULL;
        free(f);
+       f = NULL;
 }
 
 int cg_releasedir(const char *path, struct fuse_file_info *fi)
@@ -3156,7 +3300,7 @@ static bool startswith(const char *line, const char *pref)
 static void parse_memstat(char *memstat, unsigned long *cached,
                unsigned long *active_anon, unsigned long *inactive_anon,
                unsigned long *active_file, unsigned long *inactive_file,
-               unsigned long *unevictable)
+               unsigned long *unevictable, unsigned long *shmem)
 {
        char *eol;
 
@@ -3179,6 +3323,9 @@ static void parse_memstat(char *memstat, unsigned long *cached,
                } else if (startswith(memstat, "total_unevictable")) {
                        sscanf(memstat + 17, "%lu", unevictable);
                        *unevictable /= 1024;
+               } else if (startswith(memstat, "total_shmem")) {
+                       sscanf(memstat + 11, "%lu", shmem);
+                       *shmem /= 1024;
                }
                eol = strchr(memstat, '\n');
                if (!eol)
@@ -3295,7 +3442,7 @@ static int proc_meminfo_read(char *buf, size_t size, off_t offset,
                *memswlimit_str = NULL, *memswusage_str = NULL;
        unsigned long memlimit = 0, memusage = 0, memswlimit = 0, memswusage = 0,
                cached = 0, hosttotal = 0, active_anon = 0, inactive_anon = 0,
-               active_file = 0, inactive_file = 0, unevictable = 0,
+               active_file = 0, inactive_file = 0, unevictable = 0, shmem = 0,
                hostswtotal = 0;
        char *line = NULL;
        size_t linelen = 0, total_len = 0, rv = 0;
@@ -3346,7 +3493,7 @@ static int proc_meminfo_read(char *buf, size_t size, off_t offset,
 
        parse_memstat(memstat_str, &cached, &active_anon,
                        &inactive_anon, &active_file, &inactive_file,
-                       &unevictable);
+                       &unevictable, &shmem);
 
        f = fopen("/proc/meminfo", "r");
        if (!f)
@@ -3422,6 +3569,15 @@ static int proc_meminfo_read(char *buf, size_t size, off_t offset,
                } else if (startswith(line, "SUnreclaim")) {
                        snprintf(lbuf, 100, "SUnreclaim:     %8lu kB\n", 0UL);
                        printme = lbuf;
+               } else if (startswith(line, "Shmem:")) {
+                       snprintf(lbuf, 100, "Shmem:          %8lu kB\n", shmem);
+                       printme = lbuf;
+               } else if (startswith(line, "ShmemHugePages")) {
+                       snprintf(lbuf, 100, "ShmemHugePages: %8lu kB\n", 0UL);
+                       printme = lbuf;
+               } else if (startswith(line, "ShmemPmdMapped")) {
+                       snprintf(lbuf, 100, "ShmemPmdMapped: %8lu kB\n", 0UL);
+                       printme = lbuf;
                } else
                        printme = line;
 
@@ -3485,6 +3641,85 @@ static bool cpuline_in_cpuset(const char *line, const char *cpuset)
        return cpu_in_cpuset(cpu, cpuset);
 }
 
+/*
+ * Read cgroup CPU quota parameters from `cpu.cfs_quota_us` or `cpu.cfs_period_us`,
+ * depending on `param`. The parameter value is returned through `value`.
+ */
+static bool read_cpu_cfs_param(const char *cg, const char *param, int64_t *value)
+{
+       bool rv = false;
+       char file[11 + 6 + 1]; // "cpu.cfs__us" (11) + "quota"/"period" (max 6) + \0
+       char *str = NULL;
+
+       sprintf(file, "cpu.cfs_%s_us", param);
+
+       if (!cgfs_get_value("cpu", cg, file, &str))
+               goto err;
+
+       if (sscanf(str, "%ld", value) != 1)
+               goto err;
+
+       rv = true;
+
+err:
+       if (str)
+               free(str);
+       return rv;
+}
+
+/*
+ * Return the maximum number of visible CPUs based on CPU quotas.
+ * If there is no quota set, zero is returned.
+ */
+int max_cpu_count(const char *cg)
+{
+       int rv, nprocs;
+       int64_t cfs_quota, cfs_period;
+
+       if (!read_cpu_cfs_param(cg, "quota", &cfs_quota))
+               return 0;
+
+       if (!read_cpu_cfs_param(cg, "period", &cfs_period))
+               return 0;
+
+       if (cfs_quota <= 0 || cfs_period <= 0)
+               return 0;
+
+       rv = cfs_quota / cfs_period;
+
+       /* In case quota/period does not yield a whole number, add one CPU for
+        * the remainder.
+        */
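+       /* For example, a quota of 150000us with a period of 100000us is
+        * reported as 2 CPUs. */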
+       if ((cfs_quota % cfs_period) > 0)
+               rv += 1;
+
+       nprocs = get_nprocs();
+
+       if (rv > nprocs)
+               rv = nprocs;
+
+       return rv;
+}
+
+/*
+ * Determine whether CPU views should be used or not.
+ */
+bool use_cpuview(const char *cg)
+{
+       int cfd;
+       char *tmpc;
+
+       tmpc = find_mounted_controller("cpu", &cfd);
+       if (!tmpc)
+               return false;
+
+       tmpc = find_mounted_controller("cpuacct", &cfd);
+       if (!tmpc)
+               return false;
+
+       return true;
+}
+
 /*
  * check whether this is a '^processor" line in /proc/cpuinfo
  */
@@ -3507,7 +3742,8 @@ static int proc_cpuinfo_read(char *buf, size_t size, off_t offset,
        char *line = NULL;
        size_t linelen = 0, total_len = 0, rv = 0;
        bool am_printing = false, firstline = true, is_s390x = false;
-       int curcpu = -1, cpu;
+       int curcpu = -1, cpu, max_cpus = 0;
+       bool use_view;
        char *cache = d->buf;
        size_t cache_size = d->buflen;
        FILE *f = NULL;
@@ -3535,6 +3771,11 @@ static int proc_cpuinfo_read(char *buf, size_t size, off_t offset,
        if (!cpuset)
                goto err;
 
+       use_view = use_cpuview(cg);
+
+       if (use_view)
+               max_cpus = max_cpu_count(cg);
+
        f = fopen("/proc/cpuinfo", "r");
        if (!f)
                goto err;
@@ -3552,6 +3793,8 @@ static int proc_cpuinfo_read(char *buf, size_t size, off_t offset,
                if (strncmp(line, "# processors:", 12) == 0)
                        continue;
                if (is_processor_line(line)) {
+                       if (use_view && max_cpus > 0 && (curcpu+1) == max_cpus)
+                               break;
                        am_printing = cpuline_in_cpuset(line, cpuset);
                        if (am_printing) {
                                curcpu ++;
@@ -3573,6 +3816,8 @@ static int proc_cpuinfo_read(char *buf, size_t size, off_t offset,
                        continue;
                } else if (is_s390x && sscanf(line, "processor %d:", &cpu) == 1) {
                        char *p;
+                       if (use_view && max_cpus > 0 && (curcpu+1) == max_cpus)
+                               break;
                        if (!cpu_in_cpuset(cpu, cpuset))
                                continue;
                        curcpu ++;
@@ -3793,152 +4038,872 @@ static uint64_t get_reaper_age(pid_t pid)
        return procage;
 }
 
-#define CPUALL_MAX_SIZE (BUF_RESERVE_SIZE / 2)
-static int proc_stat_read(char *buf, size_t size, off_t offset,
-               struct fuse_file_info *fi)
+/*
+ * Returns 0 on success.
+ * It is the caller's responsibility to free `return_usage`, unless this
+ * function returns an error.
+ */
+static int read_cpuacct_usage_all(char *cg, char *cpuset, struct cpuacct_usage **return_usage)
 {
-       struct fuse_context *fc = fuse_get_context();
-       struct file_info *d = (struct file_info *)fi->fh;
-       char *cg;
-       char *cpuset = NULL;
-       char *line = NULL;
-       size_t linelen = 0, total_len = 0, rv = 0;
-       int curcpu = -1; /* cpu numbering starts at 0 */
-       unsigned long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
-       unsigned long user_sum = 0, nice_sum = 0, system_sum = 0, idle_sum = 0, iowait_sum = 0,
-                                       irq_sum = 0, softirq_sum = 0, steal_sum = 0, guest_sum = 0, guest_nice_sum = 0;
-       char cpuall[CPUALL_MAX_SIZE];
-       /* reserve for cpu all */
-       char *cache = d->buf + CPUALL_MAX_SIZE;
-       size_t cache_size = d->buflen - CPUALL_MAX_SIZE;
-       FILE *f = NULL;
+       int cpucount = get_nprocs();
+       struct cpuacct_usage *cpu_usage;
+       int rv = 0, i, j, ret, read_pos = 0, read_cnt;
+       int cg_cpu;
+       uint64_t cg_user, cg_system;
+       int64_t ticks_per_sec;
+       char *usage_str = NULL;
 
-       if (offset){
-               if (offset > d->size)
-                       return -EINVAL;
-               if (!d->cached)
-                       return 0;
-               int left = d->size - offset;
-               total_len = left > size ? size: left;
-               memcpy(buf, d->buf + offset, total_len);
-               return total_len;
-       }
+       ticks_per_sec = sysconf(_SC_CLK_TCK);
 
-       pid_t initpid = lookup_initpid_in_store(fc->pid);
-       if (initpid <= 0)
-               initpid = fc->pid;
-       cg = get_pid_cgroup(initpid, "cpuset");
-       if (!cg)
-               return read_file("/proc/stat", buf, size, d);
-       prune_init_slice(cg);
+       if (ticks_per_sec < 0 && errno == EINVAL) {
+               lxcfs_debug(
+                       "%s\n",
+                       "read_cpuacct_usage_all failed to determine number of clock ticks "
+                       "in a second");
+               return -1;
+       }
 
-       cpuset = get_cpuset(cg);
-       if (!cpuset)
-               goto err;
+       cpu_usage = malloc(sizeof(struct cpuacct_usage) * cpucount);
+       if (!cpu_usage)
+               return -ENOMEM;
 
-       f = fopen("/proc/stat", "r");
-       if (!f)
+       if (!cgfs_get_value("cpuacct", cg, "cpuacct.usage_all", &usage_str)) {
+               rv = -1;
                goto err;
+       }
 
-       //skip first line
-       if (getline(&line, &linelen, f) < 0) {
-               lxcfs_error("%s\n", "proc_stat_read read first line failed.");
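+       /* cpuacct.usage_all holds cumulative per-CPU times in nanoseconds:
+        *   cpu user system
+        *   0 123456789 987654321
+        *   ...
+        */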
+       if (sscanf(usage_str, "cpu user system\n%n", &read_cnt) != 0) {
+               lxcfs_error("read_cpuacct_usage_all reading first line from "
+                               "%s/cpuacct.usage_all failed.\n", cg);
+               rv = -1;
                goto err;
        }
 
-       while (getline(&line, &linelen, f) != -1) {
-               ssize_t l;
-               int cpu;
-               char cpu_char[10]; /* That's a lot of cores */
-               char *c;
-
-               if (strlen(line) == 0)
-                       continue;
-               if (sscanf(line, "cpu%9[^ ]", cpu_char) != 1) {
-                       /* not a ^cpuN line containing a number N, just print it */
-                       l = snprintf(cache, cache_size, "%s", line);
-                       if (l < 0) {
-                               perror("Error writing to cache");
-                               rv = 0;
-                               goto err;
-                       }
-                       if (l >= cache_size) {
-                               lxcfs_error("%s\n", "Internal error: truncated write to cache.");
-                               rv = 0;
-                               goto err;
-                       }
-                       cache += l;
-                       cache_size -= l;
-                       total_len += l;
-                       continue;
-               }
+       read_pos += read_cnt;
 
-               if (sscanf(cpu_char, "%d", &cpu) != 1)
-                       continue;
-               if (!cpu_in_cpuset(cpu, cpuset))
-                       continue;
-               curcpu ++;
+       for (i = 0, j = 0; i < cpucount; i++) {
+               ret = sscanf(usage_str + read_pos, "%d %lu %lu\n%n", &cg_cpu, &cg_user,
+                               &cg_system, &read_cnt);
 
-               c = strchr(line, ' ');
-               if (!c)
-                       continue;
-               l = snprintf(cache, cache_size, "cpu%d%s", curcpu, c);
-               if (l < 0) {
-                       perror("Error writing to cache");
-                       rv = 0;
-                       goto err;
+               if (ret == EOF)
+                       break;
 
-               }
-               if (l >= cache_size) {
-                       lxcfs_error("%s\n", "Internal error: truncated write to cache.");
-                       rv = 0;
+               if (ret != 3) {
+                       lxcfs_error("read_cpuacct_usage_all reading from %s/cpuacct.usage_all "
+                                       "failed.\n", cg);
+                       rv = -1;
                        goto err;
                }
 
-               cache += l;
-               cache_size -= l;
-               total_len += l;
+               read_pos += read_cnt;
 
-               if (sscanf(line, "%*s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
-                          &user,
-                          &nice,
-                          &system,
-                          &idle,
-                          &iowait,
-                          &irq,
-                          &softirq,
-                          &steal,
-                          &guest,
-                          &guest_nice) != 10)
+               if (!cpu_in_cpuset(i, cpuset))
                        continue;
-               user_sum += user;
-               nice_sum += nice;
-               system_sum += system;
-               idle_sum += idle;
-               iowait_sum += iowait;
-               irq_sum += irq;
-               softirq_sum += softirq;
-               steal_sum += steal;
-               guest_sum += guest;
-               guest_nice_sum += guest_nice;
+
+               /* Convert the time from nanoseconds to USER_HZ */
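+               /* e.g. 2,500,000,000ns of user time at 100 ticks/sec becomes 250 ticks */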
+               cpu_usage[j].user = cg_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
+               cpu_usage[j].system = cg_system / 1000.0 / 1000 / 1000 * ticks_per_sec;
+               j++;
        }
 
-       cache = d->buf;
+       rv = 0;
+       *return_usage = cpu_usage;
 
-       int cpuall_len = snprintf(cpuall, CPUALL_MAX_SIZE, "cpu  %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
-                       user_sum,
-                       nice_sum,
-                       system_sum,
-                       idle_sum,
-                       iowait_sum,
-                       irq_sum,
-                       softirq_sum,
-                       steal_sum,
-                       guest_sum,
-                       guest_nice_sum);
-       if (cpuall_len > 0 && cpuall_len < CPUALL_MAX_SIZE) {
-               memcpy(cache, cpuall, cpuall_len);
-               cache += cpuall_len;
+err:
+       if (usage_str)
+               free(usage_str);
+
+       if (rv != 0) {
+               free(cpu_usage);
+               *return_usage = NULL;
+       }
+
+       return rv;
+}
+
+static unsigned long diff_cpu_usage(struct cpuacct_usage *older, struct cpuacct_usage *newer, struct cpuacct_usage *diff, int cpu_count)
+{
+       int i;
+       unsigned long sum = 0;
+
+       for (i = 0; i < cpu_count; i++) {
+               /* When cpuset is changed on the fly, the CPUs might get reordered.
+                * We could either reset all counters, or check that the subtractions
+                * below will return expected results.
+                */
+               if (newer[i].user > older[i].user)
+                       diff[i].user = newer[i].user - older[i].user;
+               else
+                       diff[i].user = 0;
+
+               if (newer[i].system > older[i].system)
+                       diff[i].system = newer[i].system - older[i].system;
+               else
+                       diff[i].system = 0;
+
+               if (newer[i].idle > older[i].idle)
+                       diff[i].idle = newer[i].idle - older[i].idle;
+               else
+                       diff[i].idle = 0;
+
+               sum += diff[i].user;
+               sum += diff[i].system;
+               sum += diff[i].idle;
+       }
+
+       return sum;
+}
+
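+/*
+ * Move time from a surplus counter onto a single CPU's counter. The amount
+ * moved is bounded by the CPU's remaining idle time and by its headroom
+ * below `threshold`.
+ */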
+static void add_cpu_usage(unsigned long *surplus, struct cpuacct_usage *usage, unsigned long *counter, unsigned long threshold)
+{
+       unsigned long free_space, to_add;
+
+       free_space = threshold - usage->user - usage->system;
+
+       if (free_space > usage->idle)
+               free_space = usage->idle;
+
+       to_add = free_space > *surplus ? *surplus : free_space;
+
+       *counter += to_add;
+       usage->idle -= to_add;
+       *surplus -= to_add;
+}
+
+static struct cg_proc_stat *prune_proc_stat_list(struct cg_proc_stat *node)
+{
+       struct cg_proc_stat *first = NULL, *prev, *tmp;
+
+       for (prev = NULL; node; ) {
+               if (!cgfs_param_exist("cpu", node->cg, "cpu.shares")) {
+                       tmp = node;
+                       lxcfs_debug("Removing stat node for %s\n", node->cg);
+
+                       if (prev)
+                               prev->next = node->next;
+                       else
+                               first = node->next;
+
+                       node = node->next;
+                       free_proc_stat_node(tmp);
+               } else {
+                       if (!first)
+                               first = node;
+                       prev = node;
+                       node = node->next;
+               }
+       }
+
+       return first;
+}
+
+#define PROC_STAT_PRUNE_INTERVAL 10
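+/* Drop stat nodes whose cpu cgroup has disappeared; each list is pruned at
+ * most once per PROC_STAT_PRUNE_INTERVAL seconds. */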
+static void prune_proc_stat_history(void)
+{
+       int i;
+       time_t now = time(NULL);
+
+       for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
+               pthread_rwlock_wrlock(&proc_stat_history[i]->lock);
+
+               if ((proc_stat_history[i]->lastcheck + PROC_STAT_PRUNE_INTERVAL) > now) {
+                       pthread_rwlock_unlock(&proc_stat_history[i]->lock);
+                       return;
+               }
+
+               if (proc_stat_history[i]->next) {
+                       proc_stat_history[i]->next = prune_proc_stat_list(proc_stat_history[i]->next);
+                       proc_stat_history[i]->lastcheck = now;
+               }
+
+               pthread_rwlock_unlock(&proc_stat_history[i]->lock);
+       }
+}
+
+static struct cg_proc_stat *find_proc_stat_node(struct cg_proc_stat_head *head, const char *cg)
+{
+       struct cg_proc_stat *node;
+
+       pthread_rwlock_rdlock(&head->lock);
+
+       if (!head->next) {
+               pthread_rwlock_unlock(&head->lock);
+               return NULL;
+       }
+
+       node = head->next;
+
+       do {
+               if (strcmp(cg, node->cg) == 0)
+                       goto out;
+       } while ((node = node->next));
+
+       node = NULL;
+
+out:
+       pthread_rwlock_unlock(&head->lock);
+       prune_proc_stat_history();
+       return node;
+}
+
+static struct cg_proc_stat *new_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
+{
+       struct cg_proc_stat *node;
+       int i;
+
+       node = malloc(sizeof(struct cg_proc_stat));
+       if (!node)
+               goto err;
+
+       node->cg = NULL;
+       node->usage = NULL;
+       node->view = NULL;
+
+       node->cg = malloc(strlen(cg) + 1);
+       if (!node->cg)
+               goto err;
+
+       strcpy(node->cg, cg);
+
+       node->usage = malloc(sizeof(struct cpuacct_usage) * cpu_count);
+       if (!node->usage)
+               goto err;
+
+       memcpy(node->usage, usage, sizeof(struct cpuacct_usage) * cpu_count);
+
+       node->view = malloc(sizeof(struct cpuacct_usage) * cpu_count);
+       if (!node->view)
+               goto err;
+
+       node->cpu_count = cpu_count;
+       node->next = NULL;
+
+       if (pthread_mutex_init(&node->lock, NULL) != 0) {
+               lxcfs_error("%s\n", "Failed to initialize node lock");
+               goto err;
+       }
+
+       for (i = 0; i < cpu_count; i++) {
+               node->view[i].user = 0;
+               node->view[i].system = 0;
+               node->view[i].idle = 0;
+       }
+
+       return node;
+
+err:
+       if (node && node->cg)
+               free(node->cg);
+       if (node && node->usage)
+               free(node->usage);
+       if (node && node->view)
+               free(node->view);
+       if (node)
+               free(node);
+
+       return NULL;
+}
+
+static struct cg_proc_stat *add_proc_stat_node(struct cg_proc_stat *new_node)
+{
+       int hash = calc_hash(new_node->cg) % CPUVIEW_HASH_SIZE;
+       struct cg_proc_stat_head *head = proc_stat_history[hash];
+       struct cg_proc_stat *node, *rv = new_node;
+
+       pthread_rwlock_wrlock(&head->lock);
+
+       if (!head->next) {
+               head->next = new_node;
+               goto out;
+       }
+
+       node = head->next;
+
+       for (;;) {
+               if (strcmp(node->cg, new_node->cg) == 0) {
+                       /* The node is already present, return it */
+                       free_proc_stat_node(new_node);
+                       rv = node;
+                       goto out;
+               }
+
+               if (node->next) {
+                       node = node->next;
+                       continue;
+               }
+
+               node->next = new_node;
+               goto out;
+       }
+
+out:
+       pthread_rwlock_unlock(&head->lock);
+       return rv;
+}
+
+static bool expand_proc_stat_node(struct cg_proc_stat *node, int cpu_count)
+{
+       struct cpuacct_usage *new_usage, *new_view;
+       int i;
+
+       /* Allocate new memory */
+       new_usage = malloc(sizeof(struct cpuacct_usage) * cpu_count);
+       if (!new_usage)
+               return false;
+
+       new_view = malloc(sizeof(struct cpuacct_usage) * cpu_count);
+       if (!new_view) {
+               free(new_usage);
+               return false;
+       }
+
+       /* Copy existing data & initialize new elements */
+       for (i = 0; i < cpu_count; i++) {
+               if (i < node->cpu_count) {
+                       new_usage[i].user = node->usage[i].user;
+                       new_usage[i].system = node->usage[i].system;
+                       new_usage[i].idle = node->usage[i].idle;
+
+                       new_view[i].user = node->view[i].user;
+                       new_view[i].system = node->view[i].system;
+                       new_view[i].idle = node->view[i].idle;
+               } else {
+                       new_usage[i].user = 0;
+                       new_usage[i].system = 0;
+                       new_usage[i].idle = 0;
+
+                       new_view[i].user = 0;
+                       new_view[i].system = 0;
+                       new_view[i].idle = 0;
+               }
+       }
+
+       free(node->usage);
+       free(node->view);
+
+       node->usage = new_usage;
+       node->view = new_view;
+       node->cpu_count = cpu_count;
+
+       return true;
+}
+
+static struct cg_proc_stat *find_or_create_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
+{
+       int hash = calc_hash(cg) % CPUVIEW_HASH_SIZE;
+       struct cg_proc_stat_head *head = proc_stat_history[hash];
+       struct cg_proc_stat *node;
+
+       node = find_proc_stat_node(head, cg);
+
+       if (!node) {
+               node = new_proc_stat_node(usage, cpu_count, cg);
+               if (!node)
+                       return NULL;
+
+               node = add_proc_stat_node(node);
+               lxcfs_debug("New stat node (%d) for %s\n", cpu_count, cg);
+       }
+
+       pthread_mutex_lock(&node->lock);
+
+       /* If additional CPUs on the host have been enabled, CPU usage counter
+        * arrays have to be expanded */
+       if (node->cpu_count < cpu_count) {
+               lxcfs_debug("Expanding stat node %d->%d for %s\n",
+                               node->cpu_count, cpu_count, cg);
+
+               if (!expand_proc_stat_node(node, cpu_count)) {
+                       pthread_mutex_unlock(&node->lock);
+                       lxcfs_debug("Unable to expand stat node %d->%d for %s\n",
+                                       node->cpu_count, cpu_count, cg);
+                       return NULL;
+               }
+       }
+
+       return node;
+}
+
+static void reset_proc_stat_node(struct cg_proc_stat *node, struct cpuacct_usage *usage, int cpu_count)
+{
+       int i;
+
+       lxcfs_debug("Resetting stat node for %s\n", node->cg);
+       memcpy(node->usage, usage, sizeof(struct cpuacct_usage) * cpu_count);
+
+       for (i = 0; i < cpu_count; i++) {
+               node->view[i].user = 0;
+               node->view[i].system = 0;
+               node->view[i].idle = 0;
+       }
+
+       node->cpu_count = cpu_count;
+}
+
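+/*
+ * Render the cpu lines of the container's /proc/stat from cpuacct data.
+ * Time accrued on CPUs above the quota-derived limit is collected as a
+ * surplus and redistributed onto the visible CPUs (up to a per-CPU
+ * threshold), and only max_cpus cpuN entries are emitted.
+ */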
+static int cpuview_proc_stat(const char *cg, const char *cpuset, struct cpuacct_usage *cg_cpu_usage, FILE *f, char *buf, size_t buf_size)
+{
+       char *line = NULL;
+       size_t linelen = 0, total_len = 0, rv = 0;
+       ssize_t l;
+       int curcpu = -1; /* cpu numbering starts at 0 */
+       int max_cpus = max_cpu_count(cg), cpu_cnt = 0;
+       unsigned long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
+       unsigned long user_sum = 0, system_sum = 0, idle_sum = 0;
+       unsigned long user_surplus = 0, system_surplus = 0;
+       unsigned long total_sum, threshold;
+       struct cg_proc_stat *stat_node;
+       struct cpuacct_usage *diff = NULL;
+       int nprocs = get_nprocs();
+
+       /* Read all CPU stats and stop when we've encountered other lines */
+       while (getline(&line, &linelen, f) != -1) {
+               int cpu, ret;
+               char cpu_char[10]; /* That's a lot of cores */
+               uint64_t all_used, cg_used;
+
+               if (strlen(line) == 0)
+                       continue;
+               if (sscanf(line, "cpu%9[^ ]", cpu_char) != 1) {
+                       /* not a ^cpuN line containing a number N */
+                       break;
+               }
+
+               if (sscanf(cpu_char, "%d", &cpu) != 1)
+                       continue;
+               if (!cpu_in_cpuset(cpu, cpuset))
+                       continue;
+               curcpu ++;
+               cpu_cnt ++;
+
+               ret = sscanf(line, "%*s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
+                          &user,
+                          &nice,
+                          &system,
+                          &idle,
+                          &iowait,
+                          &irq,
+                          &softirq,
+                          &steal,
+                          &guest,
+                          &guest_nice);
+
+               if (ret != 10)
+                       continue;
+
+               all_used = user + nice + system + iowait + irq + softirq + steal + guest + guest_nice;
+               cg_used = cg_cpu_usage[curcpu].user + cg_cpu_usage[curcpu].system;
+
+               if (all_used >= cg_used) {
+                       cg_cpu_usage[curcpu].idle = idle + (all_used - cg_used);
+
+               } else {
+                       lxcfs_error("cpu%d from %s has unexpected cpu time: %lu in /proc/stat, "
+                                       "%lu in cpuacct.usage_all; unable to determine idle time\n",
+                                       curcpu, cg, all_used, cg_used);
+                       cg_cpu_usage[curcpu].idle = idle;
+               }
+       }
+
+       /* Cannot use more CPUs than are available in the cpuset */
+       if (max_cpus > cpu_cnt)
+               max_cpus = cpu_cnt;
+
+       stat_node = find_or_create_proc_stat_node(cg_cpu_usage, nprocs, cg);
+
+       if (!stat_node) {
+               lxcfs_error("unable to find/create stat node for %s\n", cg);
+               rv = 0;
+               goto err;
+       }
+
+       diff = malloc(sizeof(struct cpuacct_usage) * nprocs);
+       if (!diff) {
+               rv = 0;
+               goto err;
+       }
+
+       /*
+        * If the new values are LOWER than values stored in memory, it means
+        * the cgroup has been reset/recreated and we should reset too.
+        */
+       if (cg_cpu_usage[0].user < stat_node->usage[0].user)
+               reset_proc_stat_node(stat_node, cg_cpu_usage, nprocs);
+
+       total_sum = diff_cpu_usage(stat_node->usage, cg_cpu_usage, diff, cpu_cnt);
+
+       for (curcpu = 0; curcpu < cpu_cnt; curcpu++) {
+               stat_node->usage[curcpu].user += diff[curcpu].user;
+               stat_node->usage[curcpu].system += diff[curcpu].system;
+               stat_node->usage[curcpu].idle += diff[curcpu].idle;
+
+               if (max_cpus > 0 && curcpu >= max_cpus) {
+                       user_surplus += diff[curcpu].user;
+                       system_surplus += diff[curcpu].system;
+               }
+       }
+
+       /* Calculate usage counters of visible CPUs */
+       if (max_cpus > 0) {
+               /* threshold = maximum usage per cpu, including idle */
+               threshold = total_sum / cpu_cnt * max_cpus;
+
+               for (curcpu = 0; curcpu < max_cpus; curcpu++) {
+                       if (diff[curcpu].user + diff[curcpu].system >= threshold)
+                               continue;
+
+                       /* Add user */
+                       add_cpu_usage(
+                                       &user_surplus,
+                                       &diff[curcpu],
+                                       &diff[curcpu].user,
+                                       threshold);
+
+                       if (diff[curcpu].user + diff[curcpu].system >= threshold)
+                               continue;
+
+                       /* If there is still room, add system */
+                       add_cpu_usage(
+                                       &system_surplus,
+                                       &diff[curcpu],
+                                       &diff[curcpu].system,
+                                       threshold);
+               }
+
+               if (user_surplus > 0)
+                       lxcfs_debug("leftover user: %lu for %s\n", user_surplus, cg);
+               if (system_surplus > 0)
+                       lxcfs_debug("leftover system: %lu for %s\n", system_surplus, cg);
+
+               for (curcpu = 0; curcpu < max_cpus; curcpu++) {
+                       stat_node->view[curcpu].user += diff[curcpu].user;
+                       stat_node->view[curcpu].system += diff[curcpu].system;
+                       stat_node->view[curcpu].idle += diff[curcpu].idle;
+
+                       user_sum += stat_node->view[curcpu].user;
+                       system_sum += stat_node->view[curcpu].system;
+                       idle_sum += stat_node->view[curcpu].idle;
+               }
+
+       } else {
+               for (curcpu = 0; curcpu < cpu_cnt; curcpu++) {
+                       stat_node->view[curcpu].user = stat_node->usage[curcpu].user;
+                       stat_node->view[curcpu].system = stat_node->usage[curcpu].system;
+                       stat_node->view[curcpu].idle = stat_node->usage[curcpu].idle;
+
+                       user_sum += stat_node->view[curcpu].user;
+                       system_sum += stat_node->view[curcpu].system;
+                       idle_sum += stat_node->view[curcpu].idle;
+               }
+       }
+
+       /* Render the file */
+       /* cpu-all */
+       l = snprintf(buf, buf_size, "cpu  %lu 0 %lu %lu 0 0 0 0 0 0\n",
+                       user_sum,
+                       system_sum,
+                       idle_sum);
+
+       if (l < 0) {
+               perror("Error writing to cache");
+               rv = 0;
+               goto err;
+
+       }
+       if (l >= buf_size) {
+               lxcfs_error("%s\n", "Internal error: truncated write to cache.");
+               rv = 0;
+               goto err;
+       }
+
+       buf += l;
+       buf_size -= l;
+       total_len += l;
+
+       /* Render visible CPUs */
+       for (curcpu = 0; curcpu < cpu_cnt; curcpu++) {
+               if (max_cpus > 0 && curcpu == max_cpus)
+                       break;
+
+               l = snprintf(buf, buf_size, "cpu%d %lu 0 %lu %lu 0 0 0 0 0 0\n",
+                               curcpu,
+                               stat_node->view[curcpu].user,
+                               stat_node->view[curcpu].system,
+                               stat_node->view[curcpu].idle);
+
+               if (l < 0) {
+                       perror("Error writing to cache");
+                       rv = 0;
+                       goto err;
+
+               }
+               if (l >= buf_size) {
+                       lxcfs_error("%s\n", "Internal error: truncated write to cache.");
+                       rv = 0;
+                       goto err;
+               }
+
+               buf += l;
+               buf_size -= l;
+               total_len += l;
+       }
+
+       /* Pass the rest of /proc/stat, start with the last line read */
+       l = snprintf(buf, buf_size, "%s", line);
+
+       if (l < 0) {
+               perror("Error writing to cache");
+               rv = 0;
+               goto err;
+
+       }
+       if (l >= buf_size) {
+               lxcfs_error("%s\n", "Internal error: truncated write to cache.");
+               rv = 0;
+               goto err;
+       }
+
+       buf += l;
+       buf_size -= l;
+       total_len += l;
+
+       /* Pass the rest of the host's /proc/stat */
+       while (getline(&line, &linelen, f) != -1) {
+               l = snprintf(buf, buf_size, "%s", line);
+               if (l < 0) {
+                       perror("Error writing to cache");
+                       rv = 0;
+                       goto err;
+               }
+               if (l >= buf_size) {
+                       lxcfs_error("%s\n", "Internal error: truncated write to cache.");
+                       rv = 0;
+                       goto err;
+               }
+               buf += l;
+               buf_size -= l;
+               total_len += l;
+       }
+
+       rv = total_len;
+
+err:
+       if (stat_node)
+               pthread_mutex_unlock(&stat_node->lock);
+       if (line)
+               free(line);
+       if (diff)
+               free(diff);
+       return rv;
+}
+
+#define CPUALL_MAX_SIZE (BUF_RESERVE_SIZE / 2)
+static int proc_stat_read(char *buf, size_t size, off_t offset,
+               struct fuse_file_info *fi)
+{
+       struct fuse_context *fc = fuse_get_context();
+       struct file_info *d = (struct file_info *)fi->fh;
+       char *cg;
+       char *cpuset = NULL;
+       char *line = NULL;
+       size_t linelen = 0, total_len = 0, rv = 0;
+       int curcpu = -1; /* cpu numbering starts at 0 */
+       unsigned long user = 0, nice = 0, system = 0, idle = 0, iowait = 0, irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
+       unsigned long user_sum = 0, nice_sum = 0, system_sum = 0, idle_sum = 0, iowait_sum = 0,
+                                       irq_sum = 0, softirq_sum = 0, steal_sum = 0, guest_sum = 0, guest_nice_sum = 0;
+       char cpuall[CPUALL_MAX_SIZE];
+       /* reserve for cpu all */
+       char *cache = d->buf + CPUALL_MAX_SIZE;
+       size_t cache_size = d->buflen - CPUALL_MAX_SIZE;
+       FILE *f = NULL;
+       struct cpuacct_usage *cg_cpu_usage = NULL;
+
+       if (offset){
+               if (offset > d->size)
+                       return -EINVAL;
+               if (!d->cached)
+                       return 0;
+               int left = d->size - offset;
+               total_len = left > size ? size: left;
+               memcpy(buf, d->buf + offset, total_len);
+               return total_len;
+       }
+
+       pid_t initpid = lookup_initpid_in_store(fc->pid);
+       if (initpid <= 0)
+               initpid = fc->pid;
+       cg = get_pid_cgroup(initpid, "cpuset");
+       if (!cg)
+               return read_file("/proc/stat", buf, size, d);
+       prune_init_slice(cg);
+
+       cpuset = get_cpuset(cg);
+       if (!cpuset)
+               goto err;
+
+       /*
+        * Read cpuacct.usage_all for all CPUs.
+        * If the cpuacct cgroup is present, it is used to calculate the container's
+        * CPU usage. If not, values from the host's /proc/stat are used.
+        */
+       if (read_cpuacct_usage_all(cg, cpuset, &cg_cpu_usage) != 0) {
+               lxcfs_debug("%s\n", "proc_stat_read failed to read from cpuacct, "
+                               "falling back to the host's /proc/stat");
+       }
+
+       f = fopen("/proc/stat", "r");
+       if (!f)
+               goto err;
+
+       //skip first line
+       if (getline(&line, &linelen, f) < 0) {
+               lxcfs_error("%s\n", "proc_stat_read read first line failed.");
+               goto err;
+       }
+
+       if (use_cpuview(cg) && cg_cpu_usage) {
+               total_len = cpuview_proc_stat(cg, cpuset, cg_cpu_usage, f, d->buf, d->buflen);
+               goto out;
+       }
+
+       while (getline(&line, &linelen, f) != -1) {
+               ssize_t l;
+               int cpu;
+               char cpu_char[10]; /* That's a lot of cores */
+               char *c;
+               uint64_t all_used, cg_used, new_idle;
+               int ret;
+
+               if (strlen(line) == 0)
+                       continue;
+               if (sscanf(line, "cpu%9[^ ]", cpu_char) != 1) {
+                       /* not a ^cpuN line containing a number N, just print it */
+                       l = snprintf(cache, cache_size, "%s", line);
+                       if (l < 0) {
+                               perror("Error writing to cache");
+                               rv = 0;
+                               goto err;
+                       }
+                       if (l >= cache_size) {
+                               lxcfs_error("%s\n", "Internal error: truncated write to cache.");
+                               rv = 0;
+                               goto err;
+                       }
+                       cache += l;
+                       cache_size -= l;
+                       total_len += l;
+                       continue;
+               }
+
+               if (sscanf(cpu_char, "%d", &cpu) != 1)
+                       continue;
+               if (!cpu_in_cpuset(cpu, cpuset))
+                       continue;
+               curcpu ++;
+
+               ret = sscanf(line, "%*s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
+                          &user,
+                          &nice,
+                          &system,
+                          &idle,
+                          &iowait,
+                          &irq,
+                          &softirq,
+                          &steal,
+                          &guest,
+                          &guest_nice);
+
+               if (ret != 10 || !cg_cpu_usage) {
+                       c = strchr(line, ' ');
+                       if (!c)
+                               continue;
+                       l = snprintf(cache, cache_size, "cpu%d%s", curcpu, c);
+                       if (l < 0) {
+                               perror("Error writing to cache");
+                               rv = 0;
+                               goto err;
+
+                       }
+                       if (l >= cache_size) {
+                               lxcfs_error("%s\n", "Internal error: truncated write to cache.");
+                               rv = 0;
+                               goto err;
+                       }
+
+                       cache += l;
+                       cache_size -= l;
+                       total_len += l;
+
+                       if (ret != 10)
+                               continue;
+               }
+
+               if (cg_cpu_usage) {
+                       all_used = user + nice + system + iowait + irq + softirq + steal + guest + guest_nice;
+                       cg_used = cg_cpu_usage[curcpu].user + cg_cpu_usage[curcpu].system;
+
+                       if (all_used >= cg_used) {
+                               new_idle = idle + (all_used - cg_used);
+
+                       } else {
+                               lxcfs_error("cpu%d from %s has unexpected cpu time: %lu in /proc/stat, "
+                                               "%lu in cpuacct.usage_all; unable to determine idle time\n",
+                                               curcpu, cg, all_used, cg_used);
+                               new_idle = idle;
+                       }
+
+                       l = snprintf(cache, cache_size, "cpu%d %lu 0 %lu %lu 0 0 0 0 0 0\n",
+                                       curcpu, cg_cpu_usage[curcpu].user, cg_cpu_usage[curcpu].system,
+                                       new_idle);
+
+                       if (l < 0) {
+                               perror("Error writing to cache");
+                               rv = 0;
+                               goto err;
+
+                       }
+                       if (l >= cache_size) {
+                               lxcfs_error("%s\n", "Internal error: truncated write to cache.");
+                               rv = 0;
+                               goto err;
+                       }
+
+                       cache += l;
+                       cache_size -= l;
+                       total_len += l;
+
+                       user_sum += cg_cpu_usage[curcpu].user;
+                       system_sum += cg_cpu_usage[curcpu].system;
+                       idle_sum += new_idle;
+
+               } else {
+                       user_sum += user;
+                       nice_sum += nice;
+                       system_sum += system;
+                       idle_sum += idle;
+                       iowait_sum += iowait;
+                       irq_sum += irq;
+                       softirq_sum += softirq;
+                       steal_sum += steal;
+                       guest_sum += guest;
+                       guest_nice_sum += guest_nice;
+               }
+       }
+
+       cache = d->buf;
+
+       int cpuall_len = snprintf(cpuall, CPUALL_MAX_SIZE, "cpu  %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+                       user_sum,
+                       nice_sum,
+                       system_sum,
+                       idle_sum,
+                       iowait_sum,
+                       irq_sum,
+                       softirq_sum,
+                       steal_sum,
+                       guest_sum,
+                       guest_nice_sum);
+       if (cpuall_len > 0 && cpuall_len < CPUALL_MAX_SIZE) {
+               memcpy(cache, cpuall, cpuall_len);
+               cache += cpuall_len;
        } else {
                /* shouldn't happen */
                lxcfs_error("proc_stat_read copy cpuall failed, cpuall_len=%d.", cpuall_len);
@@ -3947,6 +4912,8 @@ static int proc_stat_read(char *buf, size_t size, off_t offset,
 
        memmove(cache, d->buf + CPUALL_MAX_SIZE, total_len);
        total_len += cpuall_len;
+
+out:
        d->cached = 1;
        d->size = total_len;
        if (total_len > size)
@@ -3958,6 +4925,8 @@ static int proc_stat_read(char *buf, size_t size, off_t offset,
 err:
        if (f)
                fclose(f);
+       if (cg_cpu_usage)
+               free(cg_cpu_usage);
        free(line);
        free(cpuset);
        free(cg);
@@ -4372,6 +5341,8 @@ static int calc_pid(char ***pid_buf, char *dpath, int depth, int sum, int cfd)
        }
        fclose(f);
 out:
+       if (line)
+               free(line);
        free(path);
        return sum;
 }
@@ -4480,7 +5451,7 @@ static int refresh_load(struct load_node *p, char *path)
        p->last_pid = last_pid;
 
        free(line);
-err_out:       
+err_out:
        for (; i > 0; i--)
                free(idbuf[i-1]);
 out:
@@ -4500,6 +5471,9 @@ void *load_begin(void *arg)
        clock_t time1, time2;
 
        while (1) {
+               if (loadavg_stop == 1)
+                       return NULL;
+
                time1 = clock();
                for (i = 0; i < LOAD_SIZE; i++) {
                        pthread_mutex_lock(&load_hash[i].lock);
@@ -4536,6 +5510,10 @@ out:                                     f = f->next;
                                }
                        }
                }
+
+               if (loadavg_stop == 1)
+                       return NULL;
+
                time2 = clock();
                usleep(FLUSH_TIME * 1000000 - (int)((time2 - time1) * 1000000 / CLOCKS_PER_SEC));
        }
@@ -4552,7 +5530,7 @@ static int proc_loadavg_read(char *buf, size_t size, off_t offset,
        char *cache = d->buf;
        struct load_node *n;
        int hash;
-       int cfd;
+       int cfd, rv = 0;
        unsigned long a, b, c;
 
        if (offset) {
@@ -4576,7 +5554,7 @@ static int proc_loadavg_read(char *buf, size_t size, off_t offset,
                return read_file("/proc/loadavg", buf, size, d);
 
        prune_init_slice(cg);
-       hash = calc_hash(cg);
+       hash = calc_hash(cg) % LOAD_SIZE;
        n = locate_node(cg, hash);
 
        /* First time */
@@ -4587,7 +5565,8 @@ static int proc_loadavg_read(char *buf, size_t size, off_t offset,
                         * because delete is not allowed before read has ended.
                         */
                        pthread_rwlock_unlock(&load_hash[hash].rdlock);
-                       return 0;
+                       rv = 0;
+                       goto err;
                }
                do {
                        n = malloc(sizeof(struct load_node));
@@ -4617,7 +5596,8 @@ static int proc_loadavg_read(char *buf, size_t size, off_t offset,
        pthread_rwlock_unlock(&load_hash[hash].rdlock);
        if (total_len < 0 || total_len >=  d->buflen) {
                lxcfs_error("%s\n", "Failed to write to cache");
-               return 0;
+               rv = 0;
+               goto err;
        }
        d->size = (int)total_len;
        d->cached = 1;
@@ -4625,7 +5605,11 @@ static int proc_loadavg_read(char *buf, size_t size, off_t offset,
        if (total_len > size)
                total_len = size;
        memcpy(buf, d->buf, total_len);
-       return total_len;
+       rv = total_len;
+
+err:
+       free(cg);
+       return rv;
 }
 /* Return a positive number on success, return 0 on failure.*/
 pthread_t load_daemon(int load_use)
@@ -4649,6 +5633,26 @@ pthread_t load_daemon(int load_use)
        return pid;
 }
 
+/* Returns 0 on success. */
+int stop_load_daemon(pthread_t pid)
+{
+       int s;
+
+       /* Signal the thread to gracefully stop */
+       loadavg_stop = 1;
+
+       s = pthread_join(pid, NULL); /* Wait for the subthread to exit. */
+       if (s != 0) {
+               lxcfs_error("%s\n", "stop_load_daemon error: failed to join");
+               return -1;
+       }
+
+       load_free();
+       loadavg_stop = 0;
+
+       return 0;
+}
+
 static off_t get_procfile_size(const char *which)
 {
        FILE *f = fopen(which, "r");
@@ -5210,6 +6214,11 @@ static void __attribute__((constructor)) collect_and_mount_subsystems(void)
        if (!cret || chdir(cwd) < 0)
                lxcfs_debug("Could not change back to original working directory: %s.\n", strerror(errno));
 
+       if (!init_cpuview()) {
+               lxcfs_error("%s\n", "failed to init CPU view");
+               goto out;
+       }
+
        print_subsystems();
 
 out:
@@ -5233,6 +6242,7 @@ static void __attribute__((destructor)) free_subsystems(void)
        }
        free(hierarchies);
        free(fd_hierarchies);
+       free_cpuview();
 
        if (cgroup_mount_ns_fd >= 0)
                close(cgroup_mount_ns_fd);