]> git.proxmox.com Git - mirror_lxcfs.git/blame - src/proc_loadavg.c
Merge pull request #372 from brauner/2020-03-16/cgroup_fixes
[mirror_lxcfs.git] / src / proc_loadavg.c
CommitLineData
/* SPDX-License-Identifier: LGPL-2.1+ */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#ifndef FUSE_USE_VERSION
#define FUSE_USE_VERSION 26
#endif

#define _FILE_OFFSET_BITS 64

#define __STDC_FORMAT_MACROS
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fuse.h>
#include <inttypes.h>
#include <libgen.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <wait.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <sys/epoll.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/vfs.h>

#include "bindings.h"
#include "config.h"
#include "cgroup_fuse.h"
#include "cgroups/cgroup.h"
#include "cgroups/cgroup_utils.h"
#include "memory_utils.h"
#include "utils.h"
49
/*
 * This parameter is used for proc_loadavg_read().
 * 1 means the per-container loadavg machinery is active (set by
 * load_daemon()), 0 means the host /proc/loadavg is passed through.
 */
static int loadavg = 0;

/* Hash-table tunables. */
#define LOAD_SIZE 100 /*the size of hash_table */
#define FLUSH_TIME 5 /*the flush rate of the refresh thread, in seconds */
#define DEPTH_DIR 3 /*the cgroup directory depth scanned per container */
/*
 * Fixed-point load-average arithmetic, mirroring the kernel's
 * loadavg code but with a 5 second sampling period.
 */
#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
#define EXP_5 2014 /* 1/exp(5sec/5min) */
#define EXP_15 2037 /* 1/exp(5sec/15min) */
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
/* Set by stop_load_daemon() to tell load_begin() to exit its loop. */
static volatile sig_atomic_t loadavg_stop = 0;
69
/* One tracked container, hashed by its cgroup path. */
struct load_node {
	/* cgroup */
	char *cg;
	/* Load averages (1, 5, 15 min) as FSHIFT fixed-point values */
	uint64_t avenrun[3];
	unsigned int run_pid;	/* tasks counted as running by refresh_load() */
	unsigned int total_pid;	/* total number of tasks in the cgroup */
	unsigned int last_pid;	/* biggest pid seen in the cgroup */
	/* The file descriptor of the mounted cgroup */
	int cfd;
	/*
	 * Intrusive doubly linked bucket chain: pre points at the previous
	 * node's next pointer (or the bucket head), so del_node() can
	 * unlink without rescanning the bucket.
	 */
	struct load_node *next;
	struct load_node **pre;
};
83
/* Per-bucket head of the load hash table, with its three locks. */
struct load_head {
	/*
	 * The lock is about insert load_node and refresh load_node. To the first
	 * load_node of each hash bucket, insert and refresh in this hash bucket is
	 * mutually exclusive.
	 */
	pthread_mutex_t lock;
	/*
	 * The rdlock is about read loadavg and delete load_node. To each hash
	 * bucket, read and delete is mutually exclusive. But at the same time, we
	 * allow paratactic read operation. This rdlock is at list level.
	 */
	pthread_rwlock_t rdlock;
	/*
	 * The rilock is about read loadavg and insert load_node. To the first
	 * load_node of each hash bucket, read and insert is mutually exclusive.
	 * But at the same time, we allow paratactic read operation.
	 */
	pthread_rwlock_t rilock;
	struct load_node *next;
};

static struct load_head load_hash[LOAD_SIZE]; /* hash table */
107
/*
 * locate_node() finds special node. Not return NULL means success.
 * It should be noted that rdlock isn't unlocked at the end of code
 * because this function is used to read special node. Delete is not
 * allowed before read has ended.
 * unlock rdlock only in proc_loadavg_read().
 */
static struct load_node *locate_node(char *cg, int locate)
{
	struct load_node *f = NULL;
	int i = 0;

	/* rilock guards the bucket head against concurrent inserts;
	 * rdlock (kept held on return, see above) guards against deletes.
	 */
	pthread_rwlock_rdlock(&load_hash[locate].rilock);
	pthread_rwlock_rdlock(&load_hash[locate].rdlock);
	if (load_hash[locate].next == NULL) {
		/* Empty bucket: drop rilock, rdlock stays held for the caller. */
		pthread_rwlock_unlock(&load_hash[locate].rilock);
		return f;
	}
	f = load_hash[locate].next;
	/* Head pointer captured; inserts can proceed again. */
	pthread_rwlock_unlock(&load_hash[locate].rilock);
	/* Walk the chain for an exact cgroup-path match. */
	while (f && ((i = strcmp(f->cg, cg)) != 0))
		f = f->next;
	return f;
}
132
/* Push the node *n onto the front of hash bucket 'locate'. */
static void insert_node(struct load_node **n, int locate)
{
	struct load_node *f;

	/* lock serializes with the refresh thread, rilock with readers
	 * that are sampling the bucket head (see struct load_head).
	 */
	pthread_mutex_lock(&load_hash[locate].lock);
	pthread_rwlock_wrlock(&load_hash[locate].rilock);
	f = load_hash[locate].next;
	load_hash[locate].next = *n;

	/* Link *n in front of the old head f. */
	(*n)->pre = &(load_hash[locate].next);
	if (f)
		f->pre = &((*n)->next);
	(*n)->next = f;
	pthread_mutex_unlock(&load_hash[locate].lock);
	pthread_rwlock_unlock(&load_hash[locate].rilock);
}
149
/*
 * Hash a cgroup path with the classic ELF hash.
 * Returns a non-negative value; callers reduce it modulo LOAD_SIZE.
 */
int calc_hash(const char *name)
{
	unsigned int hash = 0;

	for (; *name; name++) {
		unsigned int overflow;

		hash = (hash << 4) + *name;
		/* Fold the top nibble back in so long strings keep mixing. */
		overflow = hash & 0xf0000000;
		if (overflow)
			hash ^= overflow >> 24;
		hash &= ~overflow;
	}

	/* Mask the sign bit so the result is always non-negative. */
	return (hash & 0x7fffffff);
}
166
167int proc_loadavg_read(char *buf, size_t size, off_t offset,
168 struct fuse_file_info *fi)
169{
b7604bf9 170 __do_free char *cg = NULL;
1f5596dd 171 struct fuse_context *fc = fuse_get_context();
99b183fb 172 struct file_info *d = INTTYPE_TO_PTR(fi->fh);
1f5596dd 173 pid_t initpid;
4f18a602 174 ssize_t total_len = 0;
1f5596dd
CB
175 char *cache = d->buf;
176 struct load_node *n;
177 int hash;
b7604bf9 178 int cfd;
1ba088ae 179 uint64_t a, b, c;
1f5596dd
CB
180
181 if (offset) {
182 int left;
183
184 if (offset > d->size)
185 return -EINVAL;
186
187 if (!d->cached)
188 return 0;
189
190 left = d->size - offset;
191 total_len = left > size ? size : left;
192 memcpy(buf, cache + offset, total_len);
193
194 return total_len;
195 }
196 if (!loadavg)
197 return read_file_fuse("/proc/loadavg", buf, size, d);
198
199 initpid = lookup_initpid_in_store(fc->pid);
200 if (initpid <= 1 || is_shared_pidns(initpid))
201 initpid = fc->pid;
202
203 cg = get_pid_cgroup(initpid, "cpu");
204 if (!cg)
205 return read_file_fuse("/proc/loadavg", buf, size, d);
206
207 prune_init_slice(cg);
208 hash = calc_hash(cg) % LOAD_SIZE;
209 n = locate_node(cg, hash);
210
211 /* First time */
212 if (n == NULL) {
213 cfd = get_cgroup_fd("cpu");
214 if (cfd >= 0) {
215 /*
216 * In locate_node() above, pthread_rwlock_unlock() isn't used
217 * because delete is not allowed before read has ended.
218 */
219 pthread_rwlock_unlock(&load_hash[hash].rdlock);
b7604bf9 220 return 0;
1f5596dd 221 }
b7604bf9 222
1f5596dd
CB
223 do {
224 n = malloc(sizeof(struct load_node));
225 } while (!n);
226
227 do {
228 n->cg = malloc(strlen(cg)+1);
229 } while (!n->cg);
b7604bf9 230
1f5596dd
CB
231 strcpy(n->cg, cg);
232 n->avenrun[0] = 0;
233 n->avenrun[1] = 0;
234 n->avenrun[2] = 0;
235 n->run_pid = 0;
236 n->total_pid = 1;
237 n->last_pid = initpid;
238 n->cfd = cfd;
239 insert_node(&n, hash);
240 }
241 a = n->avenrun[0] + (FIXED_1/200);
242 b = n->avenrun[1] + (FIXED_1/200);
243 c = n->avenrun[2] + (FIXED_1/200);
b7604bf9
CB
244 total_len = snprintf(d->buf, d->buflen,
245 "%lu.%02lu "
246 "%lu.%02lu "
247 "%lu.%02lu "
248 "%d/"
249 "%d"
250 "%d\n",
251 LOAD_INT(a),
252 LOAD_FRAC(a),
253 LOAD_INT(b),
254 LOAD_FRAC(b),
255 LOAD_INT(c),
256 LOAD_FRAC(c),
257 n->run_pid,
258 n->total_pid,
259 n->last_pid);
1f5596dd 260 pthread_rwlock_unlock(&load_hash[hash].rdlock);
b7604bf9
CB
261 if (total_len < 0 || total_len >= d->buflen)
262 return log_error(0, "Failed to write to cache");
263
1f5596dd
CB
264 d->size = (int)total_len;
265 d->cached = 1;
266
267 if (total_len > size)
268 total_len = size;
1f5596dd 269
b7604bf9
CB
270 memcpy(buf, d->buf, total_len);
271 return total_len;
1f5596dd
CB
272}
273
274/*
275 * Find the process pid from cgroup path.
276 * eg:from /sys/fs/cgroup/cpu/docker/containerid/cgroup.procs to find the process pid.
277 * @pid_buf : put pid to pid_buf.
278 * @dpath : the path of cgroup. eg: /docker/containerid or /docker/containerid/child-cgroup ...
279 * @depth : the depth of cgroup in container.
280 * @sum : return the number of pid.
281 * @cfd : the file descriptor of the mounted cgroup. eg: /sys/fs/cgroup/cpu
282 */
283static int calc_pid(char ***pid_buf, char *dpath, int depth, int sum, int cfd)
284{
285 __do_free char *path = NULL;
9b817e41 286 __do_free void *fdopen_cache = NULL;
1f5596dd
CB
287 __do_close_prot_errno int fd = -EBADF;
288 __do_fclose FILE *f = NULL;
289 __do_closedir DIR *dir = NULL;
290 struct dirent *file;
291 size_t linelen = 0;
292 char *line = NULL;
293 int pd;
294 char **pid;
295
296 /* path = dpath + "/cgroup.procs" + /0 */
297 path = malloc(strlen(dpath) + 20);
298 if (!path)
299 return sum;
300
301 strcpy(path, dpath);
302 fd = openat(cfd, path, O_RDONLY | O_CLOEXEC | O_NOFOLLOW);
303 if (fd < 0)
304 return sum;
305
306 dir = fdopendir(move_fd(fd));
307 if (!dir)
308 return sum;
309
310 while (((file = readdir(dir)) != NULL) && depth > 0) {
311 if (strcmp(file->d_name, ".") == 0)
312 continue;
313
314 if (strcmp(file->d_name, "..") == 0)
315 continue;
316
317 if (file->d_type == DT_DIR) {
318 __do_free char *path_dir = NULL;
319
320 /* path + '/' + d_name +/0 */
321 path_dir = malloc(strlen(path) + 2 + sizeof(file->d_name));
322 if (!path_dir)
323 return sum;
324
325 strcpy(path_dir, path);
326 strcat(path_dir, "/");
327 strcat(path_dir, file->d_name);
328 pd = depth - 1;
329 sum = calc_pid(pid_buf, path_dir, pd, sum, cfd);
330 }
331 }
332
333 strcat(path, "/cgroup.procs");
334 fd = openat(cfd, path, O_RDONLY);
335 if (fd < 0)
336 return sum;
337
9b817e41 338 f = fdopen_cached(fd, "re", &fdopen_cache);
1f5596dd
CB
339 if (!f)
340 return sum;
341
342 while (getline(&line, &linelen, f) != -1) {
343 pid = realloc(*pid_buf, sizeof(char *) * (sum + 1));
344 if (!pid)
345 return sum;
346 *pid_buf = pid;
347
348 *(*pid_buf + sum) = malloc(strlen(line) + 1);
349 if (!*(*pid_buf + sum))
350 return sum;
351
352 strcpy(*(*pid_buf + sum), line);
353 sum++;
354 }
355
356 return sum;
357}
358
359/*
360 * calc_load calculates the load according to the following formula:
361 * load1 = load0 * exp + active * (1 - exp)
362 *
363 * @load1: the new loadavg.
364 * @load0: the former loadavg.
365 * @active: the total number of running pid at this moment.
366 * @exp: the fixed-point defined in the beginning.
367 */
1ba088ae 368static uint64_t calc_load(uint64_t load, uint64_t exp, uint64_t active)
1f5596dd 369{
1ba088ae 370 uint64_t newload;
1f5596dd
CB
371
372 active = active > 0 ? active * FIXED_1 : 0;
373 newload = load * exp + active * (FIXED_1 - exp);
374 if (active >= load)
375 newload += FIXED_1 - 1;
376
377 return newload / FIXED_1;
378}
379
380/*
381 * Return 0 means that container p->cg is closed.
382 * Return -1 means that error occurred in refresh.
383 * Positive num equals the total number of pid.
384 */
385static int refresh_load(struct load_node *p, char *path)
386{
387 __do_free char *line = NULL;
388 char **idbuf;
389 char proc_path[256];
390 int i, ret, run_pid = 0, total_pid = 0, last_pid = 0;
391 size_t linelen = 0;
392 int sum, length;
393 struct dirent *file;
394
395 idbuf = malloc(sizeof(char *));
396 if (!idbuf)
397 return -1;
398
399 sum = calc_pid(&idbuf, path, DEPTH_DIR, 0, p->cfd);
400 /* normal exit */
401 if (sum == 0)
402 goto out;
403
404 for (i = 0; i < sum; i++) {
405 __do_closedir DIR *dp = NULL;
406
407 /*clean up '\n' */
acff9786 408 length = strlen(idbuf[i]) - 1;
1f5596dd
CB
409 idbuf[i][length] = '\0';
410 ret = snprintf(proc_path, 256, "/proc/%s/task", idbuf[i]);
411 if (ret < 0 || ret > 255) {
1f5596dd
CB
412 i = sum;
413 sum = -1;
b7604bf9 414 log_error(goto err_out, "snprintf() failed in refresh_load");
1f5596dd
CB
415 }
416
417 dp = opendir(proc_path);
b7604bf9
CB
418 if (!dp)
419 log_error(continue, "Open proc_path failed in refresh_load");
420
1f5596dd 421 while ((file = readdir(dp)) != NULL) {
757a63e7 422 __do_free void *fopen_cache = NULL;
1f5596dd
CB
423 __do_fclose FILE *f = NULL;
424
425 if (strncmp(file->d_name, ".", 1) == 0)
426 continue;
b7604bf9 427
1f5596dd
CB
428 if (strncmp(file->d_name, "..", 1) == 0)
429 continue;
b7604bf9 430
1f5596dd 431 total_pid++;
b7604bf9 432
1f5596dd
CB
433 /* We make the biggest pid become last_pid.*/
434 ret = atof(file->d_name);
435 last_pid = (ret > last_pid) ? ret : last_pid;
436
acff9786
CB
437 ret = snprintf(proc_path, 256, "/proc/%s/task/%s/status",
438 idbuf[i], file->d_name);
1f5596dd 439 if (ret < 0 || ret > 255) {
1f5596dd
CB
440 i = sum;
441 sum = -1;
b7604bf9 442 log_error(goto err_out, "snprintf() failed in refresh_load");
1f5596dd
CB
443 }
444
757a63e7 445 f = fopen_cached(proc_path, "re", &fopen_cache);
1f5596dd
CB
446 if (f != NULL) {
447 while (getline(&line, &linelen, f) != -1) {
448 /* Find State */
acff9786
CB
449 if ((strncmp(line, "State", 5) == 0) &&
450 (strncmp(line, "State R", 7) == 0 ||
451 strncmp(line, "State D", 7) == 0))
452 run_pid++;
453 break;
1f5596dd 454 }
1f5596dd
CB
455 }
456 }
457 }
458 /*Calculate the loadavg.*/
459 p->avenrun[0] = calc_load(p->avenrun[0], EXP_1, run_pid);
460 p->avenrun[1] = calc_load(p->avenrun[1], EXP_5, run_pid);
461 p->avenrun[2] = calc_load(p->avenrun[2], EXP_15, run_pid);
462 p->run_pid = run_pid;
463 p->total_pid = total_pid;
464 p->last_pid = last_pid;
465
466err_out:
467 for (; i > 0; i--)
b7604bf9 468 free(idbuf[i - 1]);
1f5596dd
CB
469out:
470 free(idbuf);
471 return sum;
472}
473
/* Delete the load_node n and return the next node of it. */
static struct load_node *del_node(struct load_node *n, int locate)
{
	struct load_node *g;

	/* Exclude readers of this bucket while the chain is rewritten. */
	pthread_rwlock_wrlock(&load_hash[locate].rdlock);
	if (n->next == NULL) {
		/* n is the tail: just clear the predecessor's next pointer. */
		*(n->pre) = NULL;
	} else {
		/* Splice n out and fix the successor's back-pointer. */
		*(n->pre) = n->next;
		n->next->pre = n->pre;
	}
	g = n->next;
	free_disarm(n->cg);
	free_disarm(n);
	pthread_rwlock_unlock(&load_hash[locate].rdlock);
	return g;
}
492
/*
 * Refresh-thread entry point: traverse the hash table roughly every
 * FLUSH_TIME seconds and update (or drop) every tracked cgroup, until
 * loadavg_stop is set by stop_load_daemon().
 */
static void *load_begin(void *arg)
{

	int i, sum, length, ret;
	struct load_node *f;
	int first_node;
	clock_t time1, time2;

	while (1) {
		if (loadavg_stop == 1)
			return NULL;

		time1 = clock();
		for (i = 0; i < LOAD_SIZE; i++) {
			pthread_mutex_lock(&load_hash[i].lock);
			if (load_hash[i].next == NULL) {
				pthread_mutex_unlock(&load_hash[i].lock);
				continue;
			}
			f = load_hash[i].next;
			first_node = 1;
			while (f) {
				__do_free char *path = NULL;

				length = strlen(f->cg) + 2;
				/* strlen(f->cg) + '.' or '' + \0 */
				path = malloc(length);
				if (!path)
					goto out;

				/* dot_or_empty() decides whether the path needs a leading '.'. */
				ret = snprintf(path, length, "%s%s", dot_or_empty(f->cg), f->cg);
				/* Ignore the node if snprintf fails.*/
				if (ret < 0 || ret > length - 1)
					log_error(goto out, "Refresh node %s failed for snprintf()", f->cg);

				sum = refresh_load(f, path);
				if (sum == 0)
					/* Cgroup is gone: unlink the node and advance. */
					f = del_node(f, i);
				else
					/* 'out' doubles as the error path above:
					 * skip this node and move on.
					 */
out:					f = f->next;
				/* load_hash[i].lock locks only on the first node.*/
				if (first_node == 1) {
					first_node = 0;
					pthread_mutex_unlock(&load_hash[i].lock);
				}
			}
		}

		if (loadavg_stop == 1)
			return NULL;

		/* Sleep out the remainder of the FLUSH_TIME window; note that
		 * clock() measures CPU time, not wall time.
		 */
		time2 = clock();
		usleep(FLUSH_TIME * 1000000 - (int)((time2 - time1) * 1000000 / CLOCKS_PER_SEC));
	}
}
551
/*
 * init_load initialize the hash table.
 * Return 0 on success, return -1 on failure.
 */
static int init_load(void)
{
	int i;
	int ret;

	for (i = 0; i < LOAD_SIZE; i++) {
		load_hash[i].next = NULL;
		ret = pthread_mutex_init(&load_hash[i].lock, NULL);
		if (ret) {
			lxcfs_error("Failed to initialize lock");
			goto out3;
		}

		ret = pthread_rwlock_init(&load_hash[i].rdlock, NULL);
		if (ret) {
			lxcfs_error("Failed to initialize rdlock");
			goto out2;
		}

		ret = pthread_rwlock_init(&load_hash[i].rilock, NULL);
		if (ret) {
			lxcfs_error("Failed to initialize rilock");
			goto out1;
		}
	}

	return 0;

	/*
	 * Unwind ladder: first destroy whatever succeeded in the failing
	 * bucket i (entered at the label matching the failure point)...
	 */
out1:
	pthread_rwlock_destroy(&load_hash[i].rdlock);
out2:
	pthread_mutex_destroy(&load_hash[i].lock);
out3:
	/* ...then tear down all fully-initialized buckets before i. */
	while (i > 0) {
		i--;
		pthread_mutex_destroy(&load_hash[i].lock);
		pthread_rwlock_destroy(&load_hash[i].rdlock);
		pthread_rwlock_destroy(&load_hash[i].rilock);
	}

	return -1;
}
598
/* Tear down the hash table: free every node and destroy all bucket locks. */
static void load_free(void)
{
	struct load_node *f, *p;

	for (int i = 0; i < LOAD_SIZE; i++) {
		/* Take all three locks so neither readers nor the refresh
		 * thread can touch the bucket while it is dismantled.
		 */
		pthread_mutex_lock(&load_hash[i].lock);
		pthread_rwlock_wrlock(&load_hash[i].rilock);
		pthread_rwlock_wrlock(&load_hash[i].rdlock);
		if (load_hash[i].next == NULL) {
			pthread_mutex_unlock(&load_hash[i].lock);
			pthread_mutex_destroy(&load_hash[i].lock);
			pthread_rwlock_unlock(&load_hash[i].rilock);
			pthread_rwlock_destroy(&load_hash[i].rilock);
			pthread_rwlock_unlock(&load_hash[i].rdlock);
			pthread_rwlock_destroy(&load_hash[i].rdlock);
			continue;
		}

		/* Free the whole chain of this bucket. */
		for (f = load_hash[i].next; f;) {
			free_disarm(f->cg);
			p = f->next;
			free_disarm(f);
			f = p;
		}

		pthread_mutex_unlock(&load_hash[i].lock);
		pthread_mutex_destroy(&load_hash[i].lock);
		pthread_rwlock_unlock(&load_hash[i].rilock);
		pthread_rwlock_destroy(&load_hash[i].rilock);
		pthread_rwlock_unlock(&load_hash[i].rdlock);
		pthread_rwlock_destroy(&load_hash[i].rdlock);
	}
}
632
633/* Return a positive number on success, return 0 on failure.*/
634pthread_t load_daemon(int load_use)
635{
636 int ret;
637 pthread_t pid;
638
639 ret = init_load();
b7604bf9
CB
640 if (ret == -1)
641 return log_error(0, "Initialize hash_table fails in load_daemon!");
642
1f5596dd
CB
643 ret = pthread_create(&pid, NULL, load_begin, NULL);
644 if (ret != 0) {
1f5596dd 645 load_free();
b7604bf9 646 return log_error(0, "Create pthread fails in load_daemon!");
1f5596dd 647 }
b7604bf9 648
1f5596dd
CB
649 /* use loadavg, here loadavg = 1*/
650 loadavg = load_use;
651 return pid;
652}
653
654/* Returns 0 on success. */
655int stop_load_daemon(pthread_t pid)
656{
657 int s;
658
659 /* Signal the thread to gracefully stop */
660 loadavg_stop = 1;
661
662 s = pthread_join(pid, NULL); /* Make sure sub thread has been canceled. */
b7604bf9
CB
663 if (s)
664 return log_error(-1, "stop_load_daemon error: failed to join");
1f5596dd
CB
665
666 load_free();
667 loadavg_stop = 0;
668
669 return 0;
670}