/* lxcfs
 *
 * Copyright © 2014-2016 Canonical, Inc
 * Author: Serge Hallyn <serge.hallyn@ubuntu.com>
 *
 * See COPYING file for details.
 */

#define FUSE_USE_VERSION 26

#define __STDC_FORMAT_MACROS
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fuse.h>
#include <inttypes.h>
#include <libgen.h>
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <wait.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <sys/epoll.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <sys/vfs.h>

#include "bindings.h"
#include "cgroups/cgroup.h"
#include "cgroups/cgroup_utils.h"
#include "memory_utils.h"
#include "config.h"

/* Define pivot_root() if missing from the C library */
#ifndef HAVE_PIVOT_ROOT
static int pivot_root(const char *new_root, const char *put_old)
{
#ifdef __NR_pivot_root
	return syscall(__NR_pivot_root, new_root, put_old);
#else
	errno = ENOSYS;
	return -1;
#endif
}
#else
extern int pivot_root(const char *new_root, const char *put_old);
#endif

struct cpuacct_usage {
	uint64_t user;
	uint64_t system;
	uint64_t idle;
	bool online;
};

/* Parameters of the load hash table. */
#define LOAD_SIZE 100	/* the number of hash buckets */
#define FLUSH_TIME 5	/* the flush interval in seconds */
#define DEPTH_DIR 3	/* the scan depth per cgroup */
/* Fixed-point arithmetic used to calculate loadavg. */
#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1 << FSHIFT)	/* 1.0 as fixed-point */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
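
/*
 * Worked example (illustrative, not part of the original source): with
 * FSHIFT = 11 we have FIXED_1 = 2048, so a raw fixed-point average of 3277
 * represents 3277/2048 ~= 1.60 and the macros above split it into an integer
 * part LOAD_INT(3277) = 1 and a truncated fractional part LOAD_FRAC(3277) = 60.
 */
static inline void print_fixed_point_load(unsigned long raw_avenrun)
{
	printf("%lu.%02lu\n", LOAD_INT(raw_avenrun), LOAD_FRAC(raw_avenrun));
}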

/*
 * Whether loadavg virtualization is enabled; used by proc_loadavg_read().
 * 1 means loadavg is virtualized, 0 means it is not.
 */
static int loadavg = 0;
static volatile sig_atomic_t loadavg_stop = 0;

static int calc_hash(const char *name)
{
	unsigned int hash = 0;
	unsigned int x = 0;

	/* ELF hash algorithm. */
	while (*name) {
		hash = (hash << 4) + *name++;
		x = hash & 0xf0000000;
		if (x != 0)
			hash ^= (x >> 24);
		hash &= ~x;
	}
	return (hash & 0x7fffffff);
}
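
/*
 * Illustrative sketch (not part of the original source): the callers of
 * calc_hash() reduce the 31-bit ELF hash of a cgroup path modulo LOAD_SIZE
 * to pick a bucket in the load_hash table defined below.
 */
static inline int load_hash_bucket(const char *cgroup_path)
{
	return calc_hash(cgroup_path) % LOAD_SIZE;
}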

struct load_node {
	char *cg;			/* cgroup path */
	unsigned long avenrun[3];	/* Load averages */
	unsigned int run_pid;
	unsigned int total_pid;
	unsigned int last_pid;
	int cfd;			/* The file descriptor of the mounted cgroup */
	struct load_node *next;
	struct load_node **pre;		/* points at the previous node's next pointer (or the bucket head) */
};

struct load_head {
	/*
	 * The lock serializes inserting and refreshing load_node entries.
	 * For the first load_node of each hash bucket, insertion and refresh
	 * are mutually exclusive.
	 */
	pthread_mutex_t lock;
	/*
	 * The rdlock serializes reading loadavg against deleting load_node
	 * entries. Within each hash bucket, reads and deletes are mutually
	 * exclusive, but concurrent reads are allowed. This rdlock is at
	 * list level.
	 */
	pthread_rwlock_t rdlock;
	/*
	 * The rilock serializes reading loadavg against inserting load_node
	 * entries. For the first load_node of each hash bucket, reads and
	 * inserts are mutually exclusive, but concurrent reads are allowed.
	 */
	pthread_rwlock_t rilock;
	struct load_node *next;
};

static struct load_head load_hash[LOAD_SIZE]; /* hash table */

/*
 * init_load initializes the hash table.
 * Returns 0 on success, -1 on failure.
 */
static int init_load(void)
{
	int i;
	int ret;

	for (i = 0; i < LOAD_SIZE; i++) {
		load_hash[i].next = NULL;
		ret = pthread_mutex_init(&load_hash[i].lock, NULL);
		if (ret != 0) {
			lxcfs_error("%s\n", "Failed to initialize lock");
			goto out3;
		}
		ret = pthread_rwlock_init(&load_hash[i].rdlock, NULL);
		if (ret != 0) {
			lxcfs_error("%s\n", "Failed to initialize rdlock");
			goto out2;
		}
		ret = pthread_rwlock_init(&load_hash[i].rilock, NULL);
		if (ret != 0) {
			lxcfs_error("%s\n", "Failed to initialize rilock");
			goto out1;
		}
	}
	return 0;

out1:
	pthread_rwlock_destroy(&load_hash[i].rdlock);
out2:
	pthread_mutex_destroy(&load_hash[i].lock);
out3:
	while (i > 0) {
		i--;
		pthread_mutex_destroy(&load_hash[i].lock);
		pthread_rwlock_destroy(&load_hash[i].rdlock);
		pthread_rwlock_destroy(&load_hash[i].rilock);
	}
	return -1;
}

static void insert_node(struct load_node **n, int locate)
{
	struct load_node *f;

	pthread_mutex_lock(&load_hash[locate].lock);
	pthread_rwlock_wrlock(&load_hash[locate].rilock);
	f = load_hash[locate].next;
	load_hash[locate].next = *n;

	(*n)->pre = &(load_hash[locate].next);
	if (f)
		f->pre = &((*n)->next);
	(*n)->next = f;
	pthread_mutex_unlock(&load_hash[locate].lock);
	pthread_rwlock_unlock(&load_hash[locate].rilock);
}

/*
 * locate_node() finds a specific node; a non-NULL return means success.
 * Note that rdlock is deliberately not unlocked at the end of this function:
 * it is taken in order to read a specific node, and deletion must not happen
 * before that read has finished. rdlock is unlocked only in
 * proc_loadavg_read().
 */
static struct load_node *locate_node(char *cg, int locate)
{
	struct load_node *f = NULL;
	int i = 0;

	pthread_rwlock_rdlock(&load_hash[locate].rilock);
	pthread_rwlock_rdlock(&load_hash[locate].rdlock);
	if (load_hash[locate].next == NULL) {
		pthread_rwlock_unlock(&load_hash[locate].rilock);
		return f;
	}
	f = load_hash[locate].next;
	pthread_rwlock_unlock(&load_hash[locate].rilock);
	while (f && ((i = strcmp(f->cg, cg)) != 0))
		f = f->next;
	return f;
}

/* Delete the load_node n and return the node that followed it. */
static struct load_node *del_node(struct load_node *n, int locate)
{
	struct load_node *g;

	pthread_rwlock_wrlock(&load_hash[locate].rdlock);
	if (n->next == NULL) {
		*(n->pre) = NULL;
	} else {
		*(n->pre) = n->next;
		n->next->pre = n->pre;
	}
	g = n->next;
	free_disarm(n->cg);
	free_disarm(n);
	pthread_rwlock_unlock(&load_hash[locate].rdlock);
	return g;
}

static void load_free(void)
{
	struct load_node *f, *p;

	for (int i = 0; i < LOAD_SIZE; i++) {
		pthread_mutex_lock(&load_hash[i].lock);
		pthread_rwlock_wrlock(&load_hash[i].rilock);
		pthread_rwlock_wrlock(&load_hash[i].rdlock);
		if (load_hash[i].next == NULL) {
			pthread_mutex_unlock(&load_hash[i].lock);
			pthread_mutex_destroy(&load_hash[i].lock);
			pthread_rwlock_unlock(&load_hash[i].rilock);
			pthread_rwlock_destroy(&load_hash[i].rilock);
			pthread_rwlock_unlock(&load_hash[i].rdlock);
			pthread_rwlock_destroy(&load_hash[i].rdlock);
			continue;
		}

		for (f = load_hash[i].next; f;) {
			free_disarm(f->cg);
			p = f->next;
			free_disarm(f);
			f = p;
		}

		pthread_mutex_unlock(&load_hash[i].lock);
		pthread_mutex_destroy(&load_hash[i].lock);
		pthread_rwlock_unlock(&load_hash[i].rilock);
		pthread_rwlock_destroy(&load_hash[i].rilock);
		pthread_rwlock_unlock(&load_hash[i].rdlock);
		pthread_rwlock_destroy(&load_hash[i].rdlock);
	}
}

/* Data for CPU view */
struct cg_proc_stat {
	char *cg;
	struct cpuacct_usage *usage;	// Real usage as read from the host's /proc/stat
	struct cpuacct_usage *view;	// Usage stats reported to the container
	int cpu_count;
	pthread_mutex_t lock;		// For node manipulation
	struct cg_proc_stat *next;
};

struct cg_proc_stat_head {
	struct cg_proc_stat *next;
	time_t lastcheck;

	/*
	 * For access to the list. Reading can be parallel, pruning is exclusive.
	 */
	pthread_rwlock_t lock;
};

#define CPUVIEW_HASH_SIZE 100
static struct cg_proc_stat_head *proc_stat_history[CPUVIEW_HASH_SIZE];

static bool cpuview_init_head(struct cg_proc_stat_head **head)
{
	*head = malloc(sizeof(struct cg_proc_stat_head));
	if (!(*head)) {
		lxcfs_error("%s\n", strerror(errno));
		return false;
	}

	(*head)->lastcheck = time(NULL);
	(*head)->next = NULL;

	if (pthread_rwlock_init(&(*head)->lock, NULL) != 0) {
		lxcfs_error("%s\n", "Failed to initialize list lock");
		free_disarm(*head);
		return false;
	}

	return true;
}

static bool init_cpuview(void)
{
	int i;

	for (i = 0; i < CPUVIEW_HASH_SIZE; i++)
		proc_stat_history[i] = NULL;

	for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		if (!cpuview_init_head(&proc_stat_history[i]))
			goto err;
	}

	return true;

err:
	for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		if (proc_stat_history[i])
			free_disarm(proc_stat_history[i]);
	}

	return false;
}

static void free_proc_stat_node(struct cg_proc_stat *node)
{
	pthread_mutex_destroy(&node->lock);
	free_disarm(node->cg);
	free_disarm(node->usage);
	free_disarm(node->view);
	free_disarm(node);
}

static void cpuview_free_head(struct cg_proc_stat_head *head)
{
	struct cg_proc_stat *node, *tmp;

	if (head->next) {
		node = head->next;

		for (;;) {
			tmp = node;
			node = node->next;
			free_proc_stat_node(tmp);

			if (!node)
				break;
		}
	}

	pthread_rwlock_destroy(&head->lock);
	free_disarm(head);
}

static void free_cpuview(void)
{
	int i;

	for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
		if (proc_stat_history[i])
			cpuview_free_head(proc_stat_history[i]);
	}
}

/*
 * A table caching which pid is init for a pid namespace.
 * When looking up which pid is init for $qpid, we first
 * 1. Stat /proc/$qpid/ns/pid.
 * 2. Check whether the ino_t is in our store.
 *    a. if not, fork a child in qpid's ns to send us
 *       ucred.pid = 1, and read the initpid. Cache
 *       initpid and creation time for /proc/initpid
 *       in a new store entry.
 *    b. if so, verify that /proc/initpid still matches
 *       what we have saved. If not, clear the store
 *       entry and go back to a. If so, return the
 *       cached initpid.
 */
struct pidns_init_store {
	ino_t ino;	// inode number for /proc/$pid/ns/pid
	pid_t initpid;	// the pid of init in that ns
	long int ctime;	// the time at which /proc/$initpid was created
	struct pidns_init_store *next;
	long int lastcheck;
};
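
/*
 * Condensed sketch (illustrative, not part of the original source) of the
 * lookup protocol described above; the real implementation is
 * lookup_initpid_in_store() further down, which additionally takes
 * store_lock and prunes stale entries. Kept under #if 0 because the helpers
 * it calls are defined later in this file.
 */
#if 0
static pid_t initpid_lookup_sketch(pid_t qpid)
{
	struct stat sb;
	struct pidns_init_store *e;
	char path[100];

	snprintf(path, sizeof(path), "/proc/%d/ns/pid", qpid);	/* step 1 */
	if (stat(path, &sb) < 0)
		return 0;
	e = lookup_verify_initpid(&sb);		/* step 2b: cached and verified */
	if (e)
		return e->initpid;
	return get_init_pid_for_task(qpid);	/* step 2a: slow path */
}
#endif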

/* lol - look at how they are allocated in the kernel */
#define PIDNS_HASH_SIZE 4096
#define HASH(x) ((x) % PIDNS_HASH_SIZE)

static struct pidns_init_store *pidns_hash_table[PIDNS_HASH_SIZE];
static pthread_mutex_t pidns_store_mutex = PTHREAD_MUTEX_INITIALIZER;

static void lock_mutex(pthread_mutex_t *l)
{
	int ret;

	if ((ret = pthread_mutex_lock(l)) != 0) {
		lxcfs_error("returned:%d %s\n", ret, strerror(ret));
		exit(1);
	}
}

struct cgroup_ops *cgroup_ops;

static int cgroup_mount_ns_fd = -1;

static void unlock_mutex(pthread_mutex_t *l)
{
	int ret;

	if ((ret = pthread_mutex_unlock(l)) != 0) {
		lxcfs_error("returned:%d %s\n", ret, strerror(ret));
		exit(1);
	}
}

static void store_lock(void)
{
	lock_mutex(&pidns_store_mutex);
}

static void store_unlock(void)
{
	unlock_mutex(&pidns_store_mutex);
}

/* Must be called under store_lock */
static bool initpid_still_valid(struct pidns_init_store *e, struct stat *nsfdsb)
{
	struct stat initsb;
	char fnam[100];

	snprintf(fnam, 100, "/proc/%d", e->initpid);
	if (stat(fnam, &initsb) < 0)
		return false;

	lxcfs_debug("Comparing ctime %ld == %ld for pid %d.\n", e->ctime,
		    initsb.st_ctime, e->initpid);

	if (e->ctime != initsb.st_ctime)
		return false;
	return true;
}

/* Must be called under store_lock */
static void remove_initpid(struct pidns_init_store *e)
{
	struct pidns_init_store *tmp;
	int h;

	lxcfs_debug("Remove_initpid: removing entry for %d.\n", e->initpid);

	h = HASH(e->ino);
	if (pidns_hash_table[h] == e) {
		pidns_hash_table[h] = e->next;
		free_disarm(e);
		return;
	}

	tmp = pidns_hash_table[h];
	while (tmp) {
		if (tmp->next == e) {
			tmp->next = e->next;
			free_disarm(e);
			return;
		}
		tmp = tmp->next;
	}
}

#define PURGE_SECS 5
/* Must be called under store_lock */
static void prune_initpid_store(void)
{
	static long int last_prune = 0;
	struct pidns_init_store *e, *prev, *delme;
	long int now, threshold;
	int i;

	if (!last_prune) {
		last_prune = time(NULL);
		return;
	}
	now = time(NULL);
	if (now < last_prune + PURGE_SECS)
		return;

	lxcfs_debug("%s\n", "Pruning.");

	last_prune = now;
	threshold = now - 2 * PURGE_SECS;

	for (i = 0; i < PIDNS_HASH_SIZE; i++) {
		for (prev = NULL, e = pidns_hash_table[i]; e; ) {
			if (e->lastcheck < threshold) {
				lxcfs_debug("Removing cached entry for %d.\n", e->initpid);

				delme = e;
				if (prev)
					prev->next = e->next;
				else
					pidns_hash_table[i] = e->next;
				e = e->next;
				free_disarm(delme);
			} else {
				prev = e;
				e = e->next;
			}
		}
	}
}

/* Must be called under store_lock */
static void save_initpid(struct stat *sb, pid_t pid)
{
	struct pidns_init_store *e;
	char fpath[100];
	struct stat procsb;
	int h;

	lxcfs_debug("Save_initpid: adding entry for %d.\n", pid);

	snprintf(fpath, 100, "/proc/%d", pid);
	if (stat(fpath, &procsb) < 0)
		return;
	do {
		e = malloc(sizeof(*e));
	} while (!e);
	e->ino = sb->st_ino;
	e->initpid = pid;
	e->ctime = procsb.st_ctime;
	h = HASH(e->ino);
	e->next = pidns_hash_table[h];
	e->lastcheck = time(NULL);
	pidns_hash_table[h] = e;
}

/*
 * Given the stat(2) info for a nsfd pid inode, look up the init_pid_store
 * entry for the inode number and creation time. Verify that the init pid
 * is still valid. If not, remove it. Return the entry if valid, NULL
 * otherwise.
 * Must be called under store_lock.
 */
static struct pidns_init_store *lookup_verify_initpid(struct stat *sb)
{
	int h = HASH(sb->st_ino);
	struct pidns_init_store *e = pidns_hash_table[h];

	while (e) {
		if (e->ino == sb->st_ino) {
			if (initpid_still_valid(e, sb)) {
				e->lastcheck = time(NULL);
				return e;
			}
			remove_initpid(e);
			return NULL;
		}
		e = e->next;
	}

	return NULL;
}

static int is_dir(const char *path, int fd)
{
	struct stat statbuf;
	int ret = fstatat(fd, path, &statbuf, 0);
	if (ret == 0 && S_ISDIR(statbuf.st_mode))
		return 1;
	return 0;
}

static int preserve_ns(const int pid, const char *ns)
{
	int ret;
	/* 5 /proc + 21 /int_as_str + 3 /ns + 20 /NS_NAME + 1 \0 */
#define __NS_PATH_LEN 50
	char path[__NS_PATH_LEN];

	/* This way we can use this function to also check whether namespaces
	 * are supported by the kernel by passing in NULL or the empty
	 * string.
	 */
	ret = snprintf(path, __NS_PATH_LEN, "/proc/%d/ns%s%s", pid,
		       !ns || strcmp(ns, "") == 0 ? "" : "/",
		       !ns || strcmp(ns, "") == 0 ? "" : ns);
	if (ret < 0 || (size_t)ret >= __NS_PATH_LEN) {
		errno = EFBIG;
		return -1;
	}

	return open(path, O_RDONLY | O_CLOEXEC);
}
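
/*
 * Illustrative sketch (not part of the original source): as the comment in
 * preserve_ns() notes, passing the empty string opens /proc/<pid>/ns itself,
 * which doubles as a probe for kernel namespace support. This relies on the
 * __do_close_prot_errno cleanup helper already used elsewhere in this file.
 */
static inline bool kernel_supports_namespaces(pid_t pid)
{
	__do_close_prot_errno int fd = preserve_ns(pid, "");

	return fd >= 0;
}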

/**
 * in_same_namespace - Check whether two processes are in the same namespace.
 * @pid1 - PID of the first process.
 * @pid2 - PID of the second process.
 * @ns   - Name of the namespace to check. Must correspond to one of the names
 *         for the namespaces as shown in /proc/<pid>/ns/
 *
 * If the two processes are not in the same namespace, returns an fd to the
 * namespace of the second process identified by @pid2. If the two processes
 * are in the same namespace, returns -EINVAL; returns -1 if an error occurred.
 */
static int in_same_namespace(pid_t pid1, pid_t pid2, const char *ns)
{
	__do_close_prot_errno int ns_fd1 = -1, ns_fd2 = -1;
	int ret = -1;
	struct stat ns_st1, ns_st2;

	ns_fd1 = preserve_ns(pid1, ns);
	if (ns_fd1 < 0) {
		/* The kernel does not support this namespace. This is not an
		 * error.
		 */
		if (errno == ENOENT)
			return -EINVAL;

		return -1;
	}

	ns_fd2 = preserve_ns(pid2, ns);
	if (ns_fd2 < 0)
		return -1;

	ret = fstat(ns_fd1, &ns_st1);
	if (ret < 0)
		return -1;

	ret = fstat(ns_fd2, &ns_st2);
	if (ret < 0)
		return -1;

	/* processes are in the same namespace */
	if ((ns_st1.st_dev == ns_st2.st_dev) && (ns_st1.st_ino == ns_st2.st_ino))
		return -EINVAL;

	/* processes are in different namespaces */
	return move_fd(ns_fd2);
}

static bool is_shared_pidns(pid_t pid)
{
	if (pid != 1)
		return false;

	if (in_same_namespace(pid, getpid(), "pid") == -EINVAL)
		return true;

	return false;
}

static bool write_string(const char *fnam, const char *string, int fd)
{
	FILE *f;
	size_t len, ret;

	f = fdopen(fd, "w");
	if (!f) {
		close(fd);
		return false;
	}

	len = strlen(string);
	ret = fwrite(string, 1, len, f);
	if (ret != len) {
		lxcfs_error("%s - Error writing \"%s\" to \"%s\"\n",
			    strerror(errno), string, fnam);
		fclose(f);
		return false;
	}

	if (fclose(f) < 0) {
		lxcfs_error("%s - Failed to close \"%s\"\n", strerror(errno), fnam);
		return false;
	}

	return true;
}

struct cgfs_files {
	char *name;
	uint32_t uid, gid;
	uint32_t mode;
};

static void print_subsystems(void)
{
	int i = 0;

	fprintf(stderr, "mount namespace: %d\n", cgroup_mount_ns_fd);
	fprintf(stderr, "hierarchies:\n");
	for (struct hierarchy **h = cgroup_ops->hierarchies; h && *h; h++, i++) {
		__do_free char *controllers = lxc_string_join(",", (const char **)(*h)->controllers, false);
		fprintf(stderr, " %2d: fd: %3d: %s\n", i, (*h)->fd, controllers ?: "");
	}
}

/* do we need to do any massaging here? I'm not sure... */
/* Return the open file descriptor referring to the mountpoint of the given
 * controller in the private lxcfs namespace, or -EBADF if the controller is
 * not mounted.
 */
static int find_mounted_controller(const char *controller)
{
	struct hierarchy *h;

	h = cgroup_ops->get_hierarchy(cgroup_ops, controller);
	return h ? h->fd : -EBADF;
}

bool cgfs_set_value(const char *controller, const char *cgroup, const char *file,
		    const char *value)
{
	int ret, fd, cfd;
	size_t len;
	char *fnam;

	cfd = find_mounted_controller(controller);
	if (cfd < 0)
		return false;

	/* Make sure we pass a relative path to the *at() family of functions.
	 * . + /cgroup + / + file + \0
	 */
	len = strlen(cgroup) + strlen(file) + 3;
	fnam = alloca(len);
	ret = snprintf(fnam, len, "%s%s/%s", dot_or_empty(cgroup), cgroup, file);
	if (ret < 0 || (size_t)ret >= len)
		return false;

	fd = openat(cfd, fnam, O_WRONLY);
	if (fd < 0)
		return false;

	return write_string(fnam, value, fd);
}

// Chown all the files in the cgroup directory. We do this when we create
// a cgroup on behalf of a user.
static void chown_all_cgroup_files(const char *dirname, uid_t uid, gid_t gid, int fd)
{
	struct dirent *direntp;
	char path[MAXPATHLEN];
	size_t len;
	DIR *d;
	int fd1, ret;

	len = strlen(dirname);
	if (len >= MAXPATHLEN) {
		lxcfs_error("Pathname too long: %s\n", dirname);
		return;
	}

	fd1 = openat(fd, dirname, O_DIRECTORY);
	if (fd1 < 0)
		return;

	d = fdopendir(fd1);
	if (!d) {
		lxcfs_error("Failed to open %s\n", dirname);
		close(fd1);
		return;
	}

	while ((direntp = readdir(d))) {
		if (!strcmp(direntp->d_name, ".") || !strcmp(direntp->d_name, ".."))
			continue;
		ret = snprintf(path, MAXPATHLEN, "%s/%s", dirname, direntp->d_name);
		if (ret < 0 || ret >= MAXPATHLEN) {
			lxcfs_error("Pathname too long under %s\n", dirname);
			continue;
		}
		if (fchownat(fd, path, uid, gid, 0) < 0)
			lxcfs_error("Failed to chown file %s to %u:%u\n", path, uid, gid);
	}
	closedir(d);
}

int cgfs_create(const char *controller, const char *cg, uid_t uid, gid_t gid)
{
	int cfd;
	size_t len;
	char *dirnam;

	cfd = find_mounted_controller(controller);
	if (cfd < 0)
		return -EINVAL;

	/* Make sure we pass a relative path to the *at() family of functions.
	 * . + /cg + \0
	 */
	len = strlen(cg) + 2;
	dirnam = alloca(len);
	snprintf(dirnam, len, "%s%s", dot_or_empty(cg), cg);

	if (mkdirat(cfd, dirnam, 0755) < 0)
		return -errno;

	if (uid == 0 && gid == 0)
		return 0;

	if (fchownat(cfd, dirnam, uid, gid, 0) < 0)
		return -errno;

	chown_all_cgroup_files(dirnam, uid, gid, cfd);

	return 0;
}

static bool recursive_rmdir(const char *dirname, int fd, const int cfd)
{
	struct dirent *direntp;
	DIR *dir;
	bool ret = false;
	char pathname[MAXPATHLEN];
	int dupfd;

	dupfd = dup(fd); // fdopendir() takes ownership of the fd, so work on a duplicate.
	if (dupfd < 0)
		return false;

	dir = fdopendir(dupfd);
	if (!dir) {
		lxcfs_debug("Failed to open %s: %s.\n", dirname, strerror(errno));
		close(dupfd);
		return false;
	}

	while ((direntp = readdir(dir))) {
		struct stat mystat;
		int rc;

		if (!strcmp(direntp->d_name, ".") ||
		    !strcmp(direntp->d_name, ".."))
			continue;

		rc = snprintf(pathname, MAXPATHLEN, "%s/%s", dirname, direntp->d_name);
		if (rc < 0 || rc >= MAXPATHLEN) {
			lxcfs_error("%s\n", "Pathname too long.");
			continue;
		}

		rc = fstatat(cfd, pathname, &mystat, AT_SYMLINK_NOFOLLOW);
		if (rc) {
			lxcfs_debug("Failed to stat %s: %s.\n", pathname, strerror(errno));
			continue;
		}
		if (S_ISDIR(mystat.st_mode))
			if (!recursive_rmdir(pathname, fd, cfd))
				lxcfs_debug("Error removing %s.\n", pathname);
	}

	ret = true;
	if (closedir(dir) < 0) {
		lxcfs_error("Failed to close directory %s: %s\n", dirname, strerror(errno));
		ret = false;
	}

	if (unlinkat(cfd, dirname, AT_REMOVEDIR) < 0) {
		lxcfs_debug("Failed to delete %s: %s.\n", dirname, strerror(errno));
		ret = false;
	}

	close(dupfd);

	return ret;
}

bool cgfs_remove(const char *controller, const char *cg)
{
	int fd, cfd;
	size_t len;
	char *dirnam;
	bool bret;

	cfd = find_mounted_controller(controller);
	if (cfd < 0)
		return false;

	/* Make sure we pass a relative path to the *at() family of functions.
	 * . + /cg + \0
	 */
	len = strlen(cg) + 2;
	dirnam = alloca(len);
	snprintf(dirnam, len, "%s%s", dot_or_empty(cg), cg);

	fd = openat(cfd, dirnam, O_DIRECTORY);
	if (fd < 0)
		return false;

	bret = recursive_rmdir(dirnam, fd, cfd);
	close(fd);
	return bret;
}

bool cgfs_chmod_file(const char *controller, const char *file, mode_t mode)
{
	int cfd;
	size_t len;
	char *pathname;

	cfd = find_mounted_controller(controller);
	if (cfd < 0)
		return false;

	/* Make sure we pass a relative path to the *at() family of functions.
	 * . + /file + \0
	 */
	len = strlen(file) + 2;
	pathname = alloca(len);
	snprintf(pathname, len, "%s%s", dot_or_empty(file), file);
	if (fchmodat(cfd, pathname, mode, 0) < 0)
		return false;
	return true;
}

static int chown_tasks_files(const char *dirname, uid_t uid, gid_t gid, int fd)
{
	size_t len;
	char *fname;

	len = strlen(dirname) + strlen("/cgroup.procs") + 1;
	fname = alloca(len);
	snprintf(fname, len, "%s/tasks", dirname);
	if (fchownat(fd, fname, uid, gid, 0) != 0)
		return -errno;
	snprintf(fname, len, "%s/cgroup.procs", dirname);
	if (fchownat(fd, fname, uid, gid, 0) != 0)
		return -errno;
	return 0;
}

int cgfs_chown_file(const char *controller, const char *file, uid_t uid, gid_t gid)
{
	int cfd;
	size_t len;
	char *pathname;

	cfd = find_mounted_controller(controller);
	if (cfd < 0)
		return -EINVAL;

	/* Make sure we pass a relative path to the *at() family of functions.
	 * . + /file + \0
	 */
	len = strlen(file) + 2;
	pathname = alloca(len);
	snprintf(pathname, len, "%s%s", dot_or_empty(file), file);
	if (fchownat(cfd, pathname, uid, gid, 0) < 0)
		return -errno;

	if (is_dir(pathname, cfd))
		// like cgmanager did, we want to chown the tasks file as well
		return chown_tasks_files(pathname, uid, gid, cfd);

	return 0;
}

FILE *open_pids_file(const char *controller, const char *cgroup)
{
	int fd, cfd;
	size_t len;
	char *pathname;

	cfd = find_mounted_controller(controller);
	if (cfd < 0)
		return NULL;

	/* Make sure we pass a relative path to the *at() family of functions.
	 * . + /cgroup + / + "cgroup.procs" + \0
	 */
	len = strlen(cgroup) + strlen("cgroup.procs") + 3;
	pathname = alloca(len);
	snprintf(pathname, len, "%s%s/cgroup.procs", dot_or_empty(cgroup), cgroup);

	fd = openat(cfd, pathname, O_WRONLY);
	if (fd < 0)
		return NULL;

	return fdopen(fd, "w");
}

static bool cgfs_iterate_cgroup(const char *controller, const char *cgroup, bool directories,
				void ***list, size_t typesize,
				void *(*iterator)(const char *, const char *, const char *))
{
	int cfd, fd, ret;
	size_t len;
	char *cg;
	char pathname[MAXPATHLEN];
	size_t sz = 0, asz = 0;
	struct dirent *dirent;
	DIR *dir;

	cfd = find_mounted_controller(controller);
	*list = NULL;
	if (cfd < 0)
		return false;

	/* Make sure we pass a relative path to the *at() family of functions. */
	len = strlen(cgroup) + 1 /* . */ + 1 /* \0 */;
	cg = alloca(len);
	ret = snprintf(cg, len, "%s%s", dot_or_empty(cgroup), cgroup);
	if (ret < 0 || (size_t)ret >= len) {
		lxcfs_error("Pathname too long under %s\n", cgroup);
		return false;
	}

	fd = openat(cfd, cg, O_DIRECTORY);
	if (fd < 0)
		return false;

	dir = fdopendir(fd);
	if (!dir) {
		close(fd);
		return false;
	}

	while ((dirent = readdir(dir))) {
		struct stat mystat;

		if (!strcmp(dirent->d_name, ".") ||
		    !strcmp(dirent->d_name, ".."))
			continue;

		ret = snprintf(pathname, MAXPATHLEN, "%s/%s", cg, dirent->d_name);
		if (ret < 0 || ret >= MAXPATHLEN) {
			lxcfs_error("Pathname too long under %s\n", cg);
			continue;
		}

		ret = fstatat(cfd, pathname, &mystat, AT_SYMLINK_NOFOLLOW);
		if (ret) {
			lxcfs_error("Failed to stat %s: %s\n", pathname, strerror(errno));
			continue;
		}
		if ((!directories && !S_ISREG(mystat.st_mode)) ||
		    (directories && !S_ISDIR(mystat.st_mode)))
			continue;

		if (sz + 2 >= asz) {
			void **tmp;
			asz += BATCH_SIZE;
			do {
				tmp = realloc(*list, asz * typesize);
			} while (!tmp);
			*list = tmp;
		}
		(*list)[sz] = (*iterator)(controller, cg, dirent->d_name);
		(*list)[sz + 1] = NULL;
		sz++;
	}
	if (closedir(dir) < 0) {
		lxcfs_error("Failed closedir for %s: %s\n", cgroup, strerror(errno));
		return false;
	}
	return true;
}

static void *make_children_list_entry(const char *controller, const char *cgroup, const char *dir_entry)
{
	char *dup;
	do {
		dup = strdup(dir_entry);
	} while (!dup);
	return dup;
}

bool cgfs_list_children(const char *controller, const char *cgroup, char ***list)
{
	return cgfs_iterate_cgroup(controller, cgroup, true, (void ***)list, sizeof(*list), &make_children_list_entry);
}

void free_key(struct cgfs_files *k)
{
	if (!k)
		return;
	free_disarm(k->name);
	free_disarm(k);
}

void free_keys(struct cgfs_files **keys)
{
	int i;

	if (!keys)
		return;
	for (i = 0; keys[i]; i++) {
		free_key(keys[i]);
	}
	free_disarm(keys);
}

bool cgfs_param_exist(const char *controller, const char *cgroup, const char *file)
{
	int ret, cfd;
	size_t len;
	char *fnam;

	cfd = find_mounted_controller(controller);
	if (cfd < 0)
		return false;

	/* Make sure we pass a relative path to the *at() family of functions.
	 * . + /cgroup + / + file + \0
	 */
	len = strlen(cgroup) + strlen(file) + 3;
	fnam = alloca(len);
	ret = snprintf(fnam, len, "%s%s/%s", dot_or_empty(cgroup), cgroup, file);
	if (ret < 0 || (size_t)ret >= len)
		return false;

	return (faccessat(cfd, fnam, F_OK, 0) == 0);
}

struct cgfs_files *cgfs_get_key(const char *controller, const char *cgroup, const char *file)
{
	int ret, cfd;
	size_t len;
	char *fnam;
	struct stat sb;
	struct cgfs_files *newkey;

	cfd = find_mounted_controller(controller);
	if (cfd < 0)
		return NULL;

	if (file && *file == '/')
		file++;

	if (file && strchr(file, '/'))
		return NULL;

	/* Make sure we pass a relative path to the *at() family of functions.
	 * . + /cgroup + / + file + \0
	 */
	len = strlen(cgroup) + 3;
	if (file)
		len += strlen(file) + 1;
	fnam = alloca(len);
	snprintf(fnam, len, "%s%s%s%s", dot_or_empty(cgroup), cgroup,
		 file ? "/" : "", file ? file : "");

	ret = fstatat(cfd, fnam, &sb, 0);
	if (ret < 0)
		return NULL;

	do {
		newkey = malloc(sizeof(struct cgfs_files));
	} while (!newkey);
	if (file)
		newkey->name = must_copy_string(file);
	else if (strrchr(cgroup, '/'))
		newkey->name = must_copy_string(strrchr(cgroup, '/'));
	else
		newkey->name = must_copy_string(cgroup);
	newkey->uid = sb.st_uid;
	newkey->gid = sb.st_gid;
	newkey->mode = sb.st_mode;

	return newkey;
}

static void *make_key_list_entry(const char *controller, const char *cgroup, const char *dir_entry)
{
	struct cgfs_files *entry = cgfs_get_key(controller, cgroup, dir_entry);
	if (!entry) {
		lxcfs_error("Error getting files under %s:%s\n", controller,
			    cgroup);
	}
	return entry;
}

bool cgfs_list_keys(const char *controller, const char *cgroup, struct cgfs_files ***keys)
{
	return cgfs_iterate_cgroup(controller, cgroup, false, (void ***)keys, sizeof(*keys), &make_key_list_entry);
}

bool is_child_cgroup(const char *controller, const char *cgroup, const char *f)
{
	int cfd;
	size_t len;
	char *fnam;
	int ret;
	struct stat sb;

	cfd = find_mounted_controller(controller);
	if (cfd < 0)
		return false;

	/* Make sure we pass a relative path to the *at() family of functions.
	 * . + /cgroup + / + f + \0
	 */
	len = strlen(cgroup) + strlen(f) + 3;
	fnam = alloca(len);
	ret = snprintf(fnam, len, "%s%s/%s", dot_or_empty(cgroup), cgroup, f);
	if (ret < 0 || (size_t)ret >= len)
		return false;

	ret = fstatat(cfd, fnam, &sb, 0);
	if (ret < 0 || !S_ISDIR(sb.st_mode))
		return false;

	return true;
}

#define SEND_CREDS_OK 0
#define SEND_CREDS_NOTSK 1
#define SEND_CREDS_FAIL 2
static bool recv_creds(int sock, struct ucred *cred, char *v);
static int wait_for_pid(pid_t pid);
static int send_creds(int sock, struct ucred *cred, char v, bool pingfirst);
static int send_creds_clone_wrapper(void *arg);

/*
 * clone a task which switches to @task's namespace and writes '1'
 * over a unix sock so we can read the task's reaper's pid in our
 * namespace.
 *
 * Note: glibc's fork() does not respect pidns, which can lead to failed
 * assertions inside glibc (and thus failed forks) if the child's pid in
 * the pidns and the parent pid outside are identical. Using clone prevents
 * this issue.
 */
static void write_task_init_pid_exit(int sock, pid_t target)
{
	char fnam[100];
	pid_t pid;
	int fd, ret;
	size_t stack_size = sysconf(_SC_PAGESIZE);
	void *stack = alloca(stack_size);

	ret = snprintf(fnam, sizeof(fnam), "/proc/%d/ns/pid", (int)target);
	if (ret < 0 || (size_t)ret >= sizeof(fnam))
		_exit(1);

	fd = open(fnam, O_RDONLY);
	if (fd < 0) {
		perror("write_task_init_pid_exit open of ns/pid");
		_exit(1);
	}
	if (setns(fd, 0)) {
		perror("write_task_init_pid_exit setns 1");
		close(fd);
		_exit(1);
	}
	pid = clone(send_creds_clone_wrapper, stack + stack_size, SIGCHLD, &sock);
	if (pid < 0)
		_exit(1);
	if (pid != 0) {
		if (wait_for_pid(pid) != 0)
			_exit(1);
		_exit(0);
	}
}

static int send_creds_clone_wrapper(void *arg)
{
	struct ucred cred;
	char v;
	int sock = *(int *)arg;

	/* we are the child */
	cred.uid = 0;
	cred.gid = 0;
	cred.pid = 1;
	v = '1';
	if (send_creds(sock, &cred, v, true) != SEND_CREDS_OK)
		return 1;
	return 0;
}

static pid_t get_init_pid_for_task(pid_t task)
{
	int sock[2];
	pid_t pid;
	pid_t ret = -1;
	char v = '0';
	struct ucred cred;

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sock) < 0) {
		perror("socketpair");
		return -1;
	}

	pid = fork();
	if (pid < 0)
		goto out;
	if (!pid) {
		close(sock[1]);
		write_task_init_pid_exit(sock[0], task);
		_exit(0);
	}

	if (!recv_creds(sock[1], &cred, &v))
		goto out;
	ret = cred.pid;

out:
	close(sock[0]);
	close(sock[1]);
	if (pid > 0)
		wait_for_pid(pid);
	return ret;
}

pid_t lookup_initpid_in_store(pid_t qpid)
{
	pid_t answer = 0;
	struct stat sb;
	struct pidns_init_store *e;
	char fnam[100];

	snprintf(fnam, 100, "/proc/%d/ns/pid", qpid);
	store_lock();
	if (stat(fnam, &sb) < 0)
		goto out;
	e = lookup_verify_initpid(&sb);
	if (e) {
		answer = e->initpid;
		goto out;
	}
	answer = get_init_pid_for_task(qpid);
	if (answer > 0)
		save_initpid(&sb, answer);

out:
	/* we prune at the end so that pruning cannot free the entry whose
	 * value we are about to return */
	prune_initpid_store();
	store_unlock();
	return answer;
}

static int wait_for_pid(pid_t pid)
{
	int status, ret;

	if (pid <= 0)
		return -1;

again:
	ret = waitpid(pid, &status, 0);
	if (ret == -1) {
		if (errno == EINTR)
			goto again;
		return -1;
	}
	if (ret != pid)
		goto again;
	if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
		return -1;
	return 0;
}

/*
 * Append the given formatted string to *src.
 * src: a pointer to a char* in which to append the formatted string.
 * sz: the number of characters printed so far, minus trailing \0.
 * asz: the allocated size so far
 * format: string format. See printf for details.
 * ...: varargs. See printf for details.
 */
static void must_strcat(char **src, size_t *sz, size_t *asz, const char *format, ...)
{
	char tmp[BUF_RESERVE_SIZE];
	va_list args;
	int tmplen;

	va_start(args, format);
	tmplen = vsnprintf(tmp, BUF_RESERVE_SIZE, format, args);
	va_end(args);

	if (!*src || tmplen + *sz + 1 >= *asz) {
		char *str;
		do {
			str = realloc(*src, *asz + BUF_RESERVE_SIZE);
		} while (!str);
		*src = str;
		*asz += BUF_RESERVE_SIZE;
	}
	memcpy((*src) + *sz, tmp, tmplen + 1); /* include the \0 */
	*sz += tmplen;
}

/*
 * Append pid to *src.
 * src: a pointer to a char* in which to append the pid.
 * sz: the number of characters printed so far, minus trailing \0.
 * asz: the allocated size so far
 * pid: the pid to append
 */
static void must_strcat_pid(char **src, size_t *sz, size_t *asz, pid_t pid)
{
	must_strcat(src, sz, asz, "%d\n", (int)pid);
}

/*
 * Given an open FILE * to /proc/pid/{u,g}id_map, and an id
 * valid in the caller's namespace, return the id mapped into
 * pid's namespace.
 * Returns the mapped id, or -1 on error.
 */
unsigned int convert_id_to_ns(FILE *idfile, unsigned int in_id)
{
	unsigned int nsuid,	// base id for a range in the idfile's namespace
		     hostuid,	// base id for a range in the caller's namespace
		     count;	// number of ids in this range
	char line[400];
	int ret;

	fseek(idfile, 0L, SEEK_SET);
	while (fgets(line, 400, idfile)) {
		ret = sscanf(line, "%u %u %u\n", &nsuid, &hostuid, &count);
		if (ret != 3)
			continue;
		if (hostuid + count < hostuid || nsuid + count < nsuid) {
			/*
			 * ids wrapped around - unexpected as this is a procfile,
			 * so just bail.
			 */
			lxcfs_error("id wraparound at entry %u %u %u in %s\n",
				    nsuid, hostuid, count, line);
			return -1;
		}
		if (hostuid <= in_id && hostuid + count > in_id) {
			/*
			 * now since hostuid <= in_id < hostuid+count, and
			 * hostuid+count and nsuid+count do not wrap around,
			 * we know that nsuid+(in_id-hostuid), which must be
			 * less than nsuid+count, does not wrap around either
			 */
			return (in_id - hostuid) + nsuid;
		}
	}

	// no answer found
	return -1;
}
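
/*
 * Worked example (illustrative, not part of the original source): with a
 * uid_map line of "0 100000 65536", a caller-side (host) uid of 100123
 * satisfies hostuid <= in_id < hostuid + count, so convert_id_to_ns()
 * returns (100123 - 100000) + 0 = 123, i.e. uid 123 inside the namespace.
 */
static inline unsigned int example_map_host_uid(FILE *uid_map)
{
	return convert_id_to_ns(uid_map, 100123);
}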

/*
 * For is_privileged_over, specify whether we require the calling uid to be
 * root in his namespace.
 */
#define NS_ROOT_REQD true
#define NS_ROOT_OPT false

#define PROCLEN 100

static bool is_privileged_over(pid_t pid, uid_t uid, uid_t victim, bool req_ns_root)
{
	char fpath[PROCLEN];
	int ret;
	bool answer = false;
	uid_t nsuid;

	if (victim == -1 || uid == -1)
		return false;

	/*
	 * If the request is one not requiring root in the namespace,
	 * then having the same uid suffices. (i.e. uid 1000 has write
	 * access to files owned by uid 1000.)
	 */
	if (!req_ns_root && uid == victim)
		return true;

	ret = snprintf(fpath, PROCLEN, "/proc/%d/uid_map", pid);
	if (ret < 0 || ret >= PROCLEN)
		return false;
	FILE *f = fopen(fpath, "r");
	if (!f)
		return false;

	/* if the caller is not root in his namespace, reject */
	nsuid = convert_id_to_ns(f, uid);
	if (nsuid)
		goto out;

	/*
	 * If victim is not mapped into caller's ns, reject.
	 * XXX I'm not sure this check is needed given that fuse
	 * will be sending requests where the vfs has converted
	 */
	nsuid = convert_id_to_ns(f, victim);
	if (nsuid == -1)
		goto out;

	answer = true;

out:
	fclose(f);
	return answer;
}

static bool perms_include(int fmode, mode_t req_mode)
{
	mode_t r;

	switch (req_mode & O_ACCMODE) {
	case O_RDONLY:
		r = S_IROTH;
		break;
	case O_WRONLY:
		r = S_IWOTH;
		break;
	case O_RDWR:
		r = S_IROTH | S_IWOTH;
		break;
	default:
		return false;
	}
	return ((fmode & r) == r);
}

/*
 * taskcg is a/b/c
 * querycg is /a/b/c/d/e
 * we return 'd'
 */
static char *get_next_cgroup_dir(const char *taskcg, const char *querycg)
{
	char *start, *end;

	if (strlen(taskcg) <= strlen(querycg)) {
		lxcfs_error("%s\n", "I was fed bad input.");
		return NULL;
	}

	if ((strcmp(querycg, "/") == 0) || (strcmp(querycg, "./") == 0))
		start = strdup(taskcg + 1);
	else
		start = strdup(taskcg + strlen(querycg) + 1);
	if (!start)
		return NULL;
	end = strchr(start, '/');
	if (end)
		*end = '\0';
	return start;
}

char *get_pid_cgroup(pid_t pid, const char *contrl)
{
	int cfd;

	cfd = find_mounted_controller(contrl);
	if (cfd < 0)
		return NULL;

	if (pure_unified_layout(cgroup_ops))
		return cg_unified_get_current_cgroup(pid);

	return cg_legacy_get_current_cgroup(pid, contrl);
}

/*
 * Check whether a fuse context may access a cgroup dir or file.
 *
 * If file is not null, it is a cgroup file to check under cg.
 * If file is null, then we are checking perms on cg itself.
 *
 * For files we can check the mode of the list_keys result.
 * For cgroups, we must make assumptions based on the files under the
 * cgroup, because cgmanager doesn't tell us ownership/perms of cgroups
 * yet.
 */
static bool fc_may_access(struct fuse_context *fc, const char *contrl, const char *cg, const char *file, mode_t mode)
{
	struct cgfs_files *k = NULL;
	bool ret = false;

	k = cgfs_get_key(contrl, cg, file);
	if (!k)
		return false;

	if (is_privileged_over(fc->pid, fc->uid, k->uid, NS_ROOT_OPT)) {
		if (perms_include(k->mode >> 6, mode)) {
			ret = true;
			goto out;
		}
	}
	if (fc->gid == k->gid) {
		if (perms_include(k->mode >> 3, mode)) {
			ret = true;
			goto out;
		}
	}
	ret = perms_include(k->mode, mode);

out:
	free_key(k);
	return ret;
}

#define INITSCOPE "/init.scope"
void prune_init_slice(char *cg)
{
	char *point;
	size_t cg_len = strlen(cg), initscope_len = strlen(INITSCOPE);

	if (cg_len < initscope_len)
		return;

	point = cg + cg_len - initscope_len;
	if (strcmp(point, INITSCOPE) == 0) {
		if (point == cg)
			*(point + 1) = '\0';
		else
			*point = '\0';
	}
}
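
/*
 * Illustrative sketch (not part of the original source): prune_init_slice()
 * strips a trailing "/init.scope" in place, so "/user.slice/init.scope"
 * becomes "/user.slice", while a bare "/init.scope" collapses to "/".
 */
static inline void prune_init_slice_example(void)
{
	char cg[] = "/user.slice/init.scope";

	prune_init_slice(cg);	/* cg is now "/user.slice" */
}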
1624
1625 /*
1626 * If pid is in /a/b/c/d, he may only act on things under cg=/a/b/c/d.
1627 * If pid is in /a, he may act on /a/b, but not on /b.
1628 * if the answer is false and nextcg is not NULL, then *nextcg will point
1629 * to a string containing the next cgroup directory under cg, which must be
1630 * freed by the caller.
1631 */
1632 static bool caller_is_in_ancestor(pid_t pid, const char *contrl, const char *cg, char **nextcg)
1633 {
1634 bool answer = false;
1635 char *c2 = get_pid_cgroup(pid, contrl);
1636 char *linecmp;
1637
1638 if (!c2)
1639 return false;
1640 prune_init_slice(c2);
1641
1642 /*
1643 * callers pass in '/' or './' (openat()) for root cgroup, otherwise
1644 * they pass in a cgroup without leading '/'
1645 *
1646 * The original line here was:
1647 * linecmp = *cg == '/' ? c2 : c2+1;
1648 * TODO: I'm not sure why you'd want to increment when *cg != '/'?
1649 * Serge, do you know?
1650 */
1651 if (*cg == '/' || !strncmp(cg, "./", 2))
1652 linecmp = c2;
1653 else
1654 linecmp = c2 + 1;
1655 if (strncmp(linecmp, cg, strlen(linecmp)) != 0) {
1656 if (nextcg) {
1657 *nextcg = get_next_cgroup_dir(linecmp, cg);
1658 }
1659 goto out;
1660 }
1661 answer = true;
1662
1663 out:
1664 free(c2);
1665 return answer;
1666 }
1667
1668 /*
1669 * If pid is in /a/b/c, he may see that /a exists, but not /b or /a/c.
1670 */
1671 static bool caller_may_see_dir(pid_t pid, const char *contrl, const char *cg)
1672 {
1673 bool answer = false;
1674 char *c2, *task_cg;
1675 size_t target_len, task_len;
1676
1677 if (strcmp(cg, "/") == 0 || strcmp(cg, "./") == 0)
1678 return true;
1679
1680 c2 = get_pid_cgroup(pid, contrl);
1681 if (!c2)
1682 return false;
1683 prune_init_slice(c2);
1684
1685 task_cg = c2 + 1;
1686 target_len = strlen(cg);
1687 task_len = strlen(task_cg);
1688 if (task_len == 0) {
1689 /* Task is in the root cg, it can see everything. This case is
1690 * not handled by the strmcps below, since they test for the
1691 * last /, but that is the first / that we've chopped off
1692 * above.
1693 */
1694 answer = true;
1695 goto out;
1696 }
1697 if (strcmp(cg, task_cg) == 0) {
1698 answer = true;
1699 goto out;
1700 }
1701 if (target_len < task_len) {
1702 /* looking up a parent dir */
1703 if (strncmp(task_cg, cg, target_len) == 0 && task_cg[target_len] == '/')
1704 answer = true;
1705 goto out;
1706 }
1707 if (target_len > task_len) {
1708 /* looking up a child dir */
1709 if (strncmp(task_cg, cg, task_len) == 0 && cg[task_len] == '/')
1710 answer = true;
1711 goto out;
1712 }
1713
1714 out:
1715 free(c2);
1716 return answer;
1717 }
1718
1719 /*
1720 * given /cgroup/freezer/a/b, return "freezer".
1721 * the returned char* should NOT be freed.
1722 */
1723 static char *pick_controller_from_path(struct fuse_context *fc, const char *path)
1724 {
1725 const char *p1;
1726 char *contr, *slash;
1727
1728 if (strlen(path) < 9) {
1729 errno = EACCES;
1730 return NULL;
1731 }
1732 if (*(path + 7) != '/') {
1733 errno = EINVAL;
1734 return NULL;
1735 }
1736 p1 = path + 8;
1737 contr = strdupa(p1);
1738 if (!contr) {
1739 errno = ENOMEM;
1740 return NULL;
1741 }
1742 slash = strstr(contr, "/");
1743 if (slash)
1744 *slash = '\0';
1745
1746 for (struct hierarchy **h = cgroup_ops->hierarchies; h && *h; h++) {
1747 if ((*h)->__controllers && strcmp((*h)->__controllers, contr) == 0)
1748 return (*h)->__controllers;
1749 }
1750 errno = ENOENT;
1751 return NULL;
1752 }
1753
1754 /*
1755 * Find the start of cgroup in /cgroup/controller/the/cgroup/path
1756 * Note that the returned value may include files (keynames) etc
1757 */
1758 static const char *find_cgroup_in_path(const char *path)
1759 {
1760 const char *p1;
1761
1762 if (strlen(path) < 9) {
1763 errno = EACCES;
1764 return NULL;
1765 }
1766 p1 = strstr(path + 8, "/");
1767 if (!p1) {
1768 errno = EINVAL;
1769 return NULL;
1770 }
1771 errno = 0;
1772 return p1 + 1;
1773 }
1774
1775 /*
1776 * split the last path element from the path in @cg.
1777 * @dir is newly allocated and should be freed, @last not
1778 */
1779 static void get_cgdir_and_path(const char *cg, char **dir, char **last)
1780 {
1781 char *p;
1782
1783 do {
1784 *dir = strdup(cg);
1785 } while (!*dir);
1786 *last = strrchr(cg, '/');
1787 if (!*last) {
1788 *last = NULL;
1789 return;
1790 }
1791 p = strrchr(*dir, '/');
1792 *p = '\0';
1793 }
1794
1795 /*
1796 * FUSE ops for /cgroup
1797 */
1798
1799 int cg_getattr(const char *path, struct stat *sb)
1800 {
1801 struct timespec now;
1802 struct fuse_context *fc = fuse_get_context();
1803 char * cgdir = NULL;
1804 char *last = NULL, *path1, *path2;
1805 struct cgfs_files *k = NULL;
1806 const char *cgroup;
1807 const char *controller = NULL;
1808 int ret = -ENOENT;
1809
1810
1811 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
1812 return -EIO;
1813
1814 memset(sb, 0, sizeof(struct stat));
1815
1816 if (clock_gettime(CLOCK_REALTIME, &now) < 0)
1817 return -EINVAL;
1818
1819 sb->st_uid = sb->st_gid = 0;
1820 sb->st_atim = sb->st_mtim = sb->st_ctim = now;
1821 sb->st_size = 0;
1822
1823 if (strcmp(path, "/cgroup") == 0) {
1824 sb->st_mode = S_IFDIR | 00755;
1825 sb->st_nlink = 2;
1826 return 0;
1827 }
1828
1829 controller = pick_controller_from_path(fc, path);
1830 if (!controller)
1831 return -errno;
1832 cgroup = find_cgroup_in_path(path);
1833 if (!cgroup) {
1834 /* this is just /cgroup/controller, return it as a dir */
1835 sb->st_mode = S_IFDIR | 00755;
1836 sb->st_nlink = 2;
1837 return 0;
1838 }
1839
1840 get_cgdir_and_path(cgroup, &cgdir, &last);
1841
1842 if (!last) {
1843 path1 = "/";
1844 path2 = cgdir;
1845 } else {
1846 path1 = cgdir;
1847 path2 = last;
1848 }
1849
1850 pid_t initpid = lookup_initpid_in_store(fc->pid);
1851 if (initpid <= 1 || is_shared_pidns(initpid))
1852 initpid = fc->pid;
1853 /* check that cgcopy is either a child cgroup of cgdir, or listed in its keys.
1854 * Then check that caller's cgroup is under path if last is a child
1855 * cgroup, or cgdir if last is a file */
1856
1857 if (is_child_cgroup(controller, path1, path2)) {
1858 if (!caller_may_see_dir(initpid, controller, cgroup)) {
1859 ret = -ENOENT;
1860 goto out;
1861 }
1862 if (!caller_is_in_ancestor(initpid, controller, cgroup, NULL)) {
1863 /* this is just /cgroup/controller, return it as a dir */
1864 sb->st_mode = S_IFDIR | 00555;
1865 sb->st_nlink = 2;
1866 ret = 0;
1867 goto out;
1868 }
1869 if (!fc_may_access(fc, controller, cgroup, NULL, O_RDONLY)) {
1870 ret = -EACCES;
1871 goto out;
1872 }
1873
1874 // get uid, gid, from '/tasks' file and make up a mode
1875 // That is a hack, until cgmanager gains a GetCgroupPerms fn.
1876 sb->st_mode = S_IFDIR | 00755;
1877 k = cgfs_get_key(controller, cgroup, NULL);
1878 if (!k) {
1879 sb->st_uid = sb->st_gid = 0;
1880 } else {
1881 sb->st_uid = k->uid;
1882 sb->st_gid = k->gid;
1883 }
1884 free_key(k);
1885 sb->st_nlink = 2;
1886 ret = 0;
1887 goto out;
1888 }
1889
1890 if ((k = cgfs_get_key(controller, path1, path2)) != NULL) {
1891 sb->st_mode = S_IFREG | k->mode;
1892 sb->st_nlink = 1;
1893 sb->st_uid = k->uid;
1894 sb->st_gid = k->gid;
1895 sb->st_size = 0;
1896 free_key(k);
1897 if (!caller_is_in_ancestor(initpid, controller, path1, NULL)) {
1898 ret = -ENOENT;
1899 goto out;
1900 }
1901 ret = 0;
1902 }
1903
1904 out:
1905 free(cgdir);
1906 return ret;
1907 }
1908
1909 int cg_opendir(const char *path, struct fuse_file_info *fi)
1910 {
1911 struct fuse_context *fc = fuse_get_context();
1912 const char *cgroup;
1913 struct file_info *dir_info;
1914 char *controller = NULL;
1915
1916 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
1917 return -EIO;
1918
1919 if (strcmp(path, "/cgroup") == 0) {
1920 cgroup = NULL;
1921 controller = NULL;
1922 } else {
1923 // return list of keys for the controller, and list of child cgroups
1924 controller = pick_controller_from_path(fc, path);
1925 if (!controller)
1926 return -errno;
1927
1928 cgroup = find_cgroup_in_path(path);
1929 if (!cgroup) {
1930 /* this is just /cgroup/controller, return its contents */
1931 cgroup = "/";
1932 }
1933 }
1934
1935 pid_t initpid = lookup_initpid_in_store(fc->pid);
1936 if (initpid <= 1 || is_shared_pidns(initpid))
1937 initpid = fc->pid;
1938 if (cgroup) {
1939 if (!caller_may_see_dir(initpid, controller, cgroup))
1940 return -ENOENT;
1941 if (!fc_may_access(fc, controller, cgroup, NULL, O_RDONLY))
1942 return -EACCES;
1943 }
1944
1945 /* we'll free this at cg_releasedir */
1946 dir_info = malloc(sizeof(*dir_info));
1947 if (!dir_info)
1948 return -ENOMEM;
1949 dir_info->controller = must_copy_string(controller);
1950 dir_info->cgroup = must_copy_string(cgroup);
1951 dir_info->type = LXC_TYPE_CGDIR;
1952 dir_info->buf = NULL;
1953 dir_info->file = NULL;
1954 dir_info->buflen = 0;
1955
1956 fi->fh = (unsigned long)dir_info;
1957 return 0;
1958 }
1959
1960 int cg_readdir(const char *path, void *buf, fuse_fill_dir_t filler, off_t offset,
1961 struct fuse_file_info *fi)
1962 {
1963 struct file_info *d = (struct file_info *)fi->fh;
1964 struct cgfs_files **list = NULL;
1965 int i, ret;
1966 char *nextcg = NULL;
1967 struct fuse_context *fc = fuse_get_context();
1968 char **clist = NULL;
1969
1970 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
1971 return -EIO;
1972
1973 if (filler(buf, ".", NULL, 0) != 0 || filler(buf, "..", NULL, 0) != 0)
1974 return -EIO;
1975
1976 if (d->type != LXC_TYPE_CGDIR) {
1977 lxcfs_error("%s\n", "Internal error: file cache info used in readdir.");
1978 return -EIO;
1979 }
1980 if (!d->cgroup && !d->controller) {
1981 /*
1982 * ls /var/lib/lxcfs/cgroup - just show list of controllers.
1983 * This only works with the legacy hierarchy.
1984 */
1985 for (struct hierarchy **h = cgroup_ops->hierarchies; h && *h; h++) {
1986 if (is_unified_hierarchy(*h))
1987 continue;
1988
1989 if ((*h)->__controllers && filler(buf, (*h)->__controllers, NULL, 0))
1990 return -EIO;
1991 }
1992
1993 return 0;
1994 }
1995
1996 if (!cgfs_list_keys(d->controller, d->cgroup, &list)) {
1997 // not a valid cgroup
1998 ret = -EINVAL;
1999 goto out;
2000 }
2001
2002 pid_t initpid = lookup_initpid_in_store(fc->pid);
2003 if (initpid <= 1 || is_shared_pidns(initpid))
2004 initpid = fc->pid;
2005 if (!caller_is_in_ancestor(initpid, d->controller, d->cgroup, &nextcg)) {
2006 if (nextcg) {
2007 ret = filler(buf, nextcg, NULL, 0);
2008 free(nextcg);
2009 if (ret != 0) {
2010 ret = -EIO;
2011 goto out;
2012 }
2013 }
2014 ret = 0;
2015 goto out;
2016 }
2017
2018 for (i = 0; list && list[i]; i++) {
2019 if (filler(buf, list[i]->name, NULL, 0) != 0) {
2020 ret = -EIO;
2021 goto out;
2022 }
2023 }
2024
2025 // now get the list of child cgroups
2026
2027 if (!cgfs_list_children(d->controller, d->cgroup, &clist)) {
2028 ret = 0;
2029 goto out;
2030 }
2031 if (clist) {
2032 for (i = 0; clist[i]; i++) {
2033 if (filler(buf, clist[i], NULL, 0) != 0) {
2034 ret = -EIO;
2035 goto out;
2036 }
2037 }
2038 }
2039 ret = 0;
2040
2041 out:
2042 free_keys(list);
2043 if (clist) {
2044 for (i = 0; clist[i]; i++)
2045 free(clist[i]);
2046 free(clist);
2047 }
2048 return ret;
2049 }
2050
2051 void do_release_file_info(struct fuse_file_info *fi)
2052 {
2053 struct file_info *f = (struct file_info *)fi->fh;
2054
2055 if (!f)
2056 return;
2057
2058 fi->fh = 0;
2059
2060 free_disarm(f->controller);
2061 free_disarm(f->cgroup);
2062 free_disarm(f->file);
2063 free_disarm(f->buf);
2064 free_disarm(f);
2065 }
2066
2067 int cg_releasedir(const char *path, struct fuse_file_info *fi)
2068 {
2069 do_release_file_info(fi);
2070 return 0;
2071 }
2072
2073 int cg_open(const char *path, struct fuse_file_info *fi)
2074 {
2075 const char *cgroup;
2076 char *last = NULL, *path1, *path2, * cgdir = NULL, *controller;
2077 struct cgfs_files *k = NULL;
2078 struct file_info *file_info;
2079 struct fuse_context *fc = fuse_get_context();
2080 int ret;
2081
2082 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
2083 return -EIO;
2084
2085 controller = pick_controller_from_path(fc, path);
2086 if (!controller)
2087 return -errno;
2088 cgroup = find_cgroup_in_path(path);
2089 if (!cgroup)
2090 return -errno;
2091
2092 get_cgdir_and_path(cgroup, &cgdir, &last);
2093 if (!last) {
2094 path1 = "/";
2095 path2 = cgdir;
2096 } else {
2097 path1 = cgdir;
2098 path2 = last;
2099 }
2100
2101 k = cgfs_get_key(controller, path1, path2);
2102 if (!k) {
2103 ret = -EINVAL;
2104 goto out;
2105 }
2106 free_key(k);
2107
2108 pid_t initpid = lookup_initpid_in_store(fc->pid);
2109 if (initpid <= 1 || is_shared_pidns(initpid))
2110 initpid = fc->pid;
2111 if (!caller_may_see_dir(initpid, controller, path1)) {
2112 ret = -ENOENT;
2113 goto out;
2114 }
2115 if (!fc_may_access(fc, controller, path1, path2, fi->flags)) {
2116 ret = -EACCES;
2117 goto out;
2118 }
2119
2120 /* we'll free this at cg_release */
2121 file_info = malloc(sizeof(*file_info));
2122 if (!file_info) {
2123 ret = -ENOMEM;
2124 goto out;
2125 }
2126 file_info->controller = must_copy_string(controller);
2127 file_info->cgroup = must_copy_string(path1);
2128 file_info->file = must_copy_string(path2);
2129 file_info->type = LXC_TYPE_CGFILE;
2130 file_info->buf = NULL;
2131 file_info->buflen = 0;
2132
2133 fi->fh = (unsigned long)file_info;
2134 ret = 0;
2135
2136 out:
2137 free(cgdir);
2138 return ret;
2139 }
2140
2141 int cg_access(const char *path, int mode)
2142 {
2143 int ret;
2144 const char *cgroup;
2145 char *path1, *path2, *controller;
2146 char *last = NULL, *cgdir = NULL;
2147 struct cgfs_files *k = NULL;
2148 struct fuse_context *fc = fuse_get_context();
2149
2150 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
2151 return -EIO;
2152
2153 if (strcmp(path, "/cgroup") == 0)
2154 return 0;
2155
2156 controller = pick_controller_from_path(fc, path);
2157 if (!controller)
2158 return -errno;
2159 cgroup = find_cgroup_in_path(path);
2160 if (!cgroup) {
2161 // access("/sys/fs/cgroup/systemd", mode) - rx allowed, w not
2162 if ((mode & W_OK) == 0)
2163 return 0;
2164 return -EACCES;
2165 }
2166
2167 get_cgdir_and_path(cgroup, &cgdir, &last);
2168 if (!last) {
2169 path1 = "/";
2170 path2 = cgdir;
2171 } else {
2172 path1 = cgdir;
2173 path2 = last;
2174 }
2175
2176 k = cgfs_get_key(controller, path1, path2);
2177 if (!k) {
2178 if ((mode & W_OK) == 0)
2179 ret = 0;
2180 else
2181 ret = -EACCES;
2182 goto out;
2183 }
2184 free_key(k);
2185
2186 pid_t initpid = lookup_initpid_in_store(fc->pid);
2187 if (initpid <= 1 || is_shared_pidns(initpid))
2188 initpid = fc->pid;
2189 if (!caller_may_see_dir(initpid, controller, path1)) {
2190 ret = -ENOENT;
2191 goto out;
2192 }
2193 if (!fc_may_access(fc, controller, path1, path2, mode)) {
2194 ret = -EACCES;
2195 goto out;
2196 }
2197
2198 ret = 0;
2199
2200 out:
2201 free(cgdir);
2202 return ret;
2203 }
2204
2205 int cg_release(const char *path, struct fuse_file_info *fi)
2206 {
2207 do_release_file_info(fi);
2208 return 0;
2209 }
2210
2211 #define POLLIN_SET ( EPOLLIN | EPOLLHUP | EPOLLRDHUP )
2212
2213 static bool wait_for_sock(int sock, int timeout)
2214 {
2215 struct epoll_event ev;
	int epfd, ret, saved_errno;
	time_t now, starttime, deltatime;
2217
2218 if ((starttime = time(NULL)) < 0)
2219 return false;
2220
2221 if ((epfd = epoll_create(1)) < 0) {
2222 lxcfs_error("%s\n", "Failed to create epoll socket: %m.");
2223 return false;
2224 }
2225
2226 ev.events = POLLIN_SET;
2227 ev.data.fd = sock;
2228 if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev) < 0) {
2229 lxcfs_error("%s\n", "Failed adding socket to epoll: %m.");
2230 close(epfd);
2231 return false;
2232 }
2233
2234 again:
2235 if ((now = time(NULL)) < 0) {
2236 close(epfd);
2237 return false;
2238 }
2239
2240 deltatime = (starttime + timeout) - now;
2241 if (deltatime < 0) { // timeout
2242 errno = 0;
2243 close(epfd);
2244 return false;
2245 }
2246 ret = epoll_wait(epfd, &ev, 1, 1000*deltatime + 1);
2247 if (ret < 0 && errno == EINTR)
2248 goto again;
2249 saved_errno = errno;
2250 close(epfd);
2251
2252 if (ret <= 0) {
2253 errno = saved_errno;
2254 return false;
2255 }
2256 return true;
2257 }
2258
2259 static int msgrecv(int sockfd, void *buf, size_t len)
2260 {
2261 if (!wait_for_sock(sockfd, 2))
2262 return -1;
2263 return recv(sockfd, buf, len, MSG_DONTWAIT);
2264 }
2265
2266 static int send_creds(int sock, struct ucred *cred, char v, bool pingfirst)
2267 {
2268 struct msghdr msg = { 0 };
2269 struct iovec iov;
2270 struct cmsghdr *cmsg;
2271 char cmsgbuf[CMSG_SPACE(sizeof(*cred))];
2272 char buf[1];
2273 buf[0] = 'p';
2274
2275 if (pingfirst) {
2276 if (msgrecv(sock, buf, 1) != 1) {
2277 lxcfs_error("%s\n", "Error getting reply from server over socketpair.");
2278 return SEND_CREDS_FAIL;
2279 }
2280 }
2281
2282 msg.msg_control = cmsgbuf;
2283 msg.msg_controllen = sizeof(cmsgbuf);
2284
2285 cmsg = CMSG_FIRSTHDR(&msg);
2286 cmsg->cmsg_len = CMSG_LEN(sizeof(struct ucred));
2287 cmsg->cmsg_level = SOL_SOCKET;
2288 cmsg->cmsg_type = SCM_CREDENTIALS;
2289 memcpy(CMSG_DATA(cmsg), cred, sizeof(*cred));
2290
2291 msg.msg_name = NULL;
2292 msg.msg_namelen = 0;
2293
2294 buf[0] = v;
2295 iov.iov_base = buf;
2296 iov.iov_len = sizeof(buf);
2297 msg.msg_iov = &iov;
2298 msg.msg_iovlen = 1;
2299
2300 if (sendmsg(sock, &msg, 0) < 0) {
		lxcfs_error("Failed at sendmsg: %s.\n", strerror(errno));
		if (errno == ESRCH)
2303 return SEND_CREDS_NOTSK;
2304 return SEND_CREDS_FAIL;
2305 }
2306
2307 return SEND_CREDS_OK;
2308 }
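/*
 * The pid translation machinery below leans on a kernel property of
 * SCM_CREDENTIALS: when a struct ucred crosses an AF_UNIX socket between
 * processes in different pid namespaces, the kernel rewrites cred->pid
 * into the receiver's pid namespace. A minimal sketch of that round trip
 * (illustrative only; "sock" is a socketpair, "pid_in_sender_ns" is a
 * made-up name, and the ping handshake and error handling are omitted):
 */
#if 0
	struct ucred cred = { .pid = pid_in_sender_ns, .uid = 0, .gid = 0 };
	char v = '0';

	/* sender side: pid is expressed in the sender's pidns */
	send_creds(sock[0], &cred, v, false);

	/* receiver side (in the target pidns): cred.pid now names the same
	 * task, but numbered in the receiver's pidns */
	recv_creds(sock[1], &cred, &v);
#endif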
2309
2310 static bool recv_creds(int sock, struct ucred *cred, char *v)
2311 {
2312 struct msghdr msg = { 0 };
2313 struct iovec iov;
2314 struct cmsghdr *cmsg;
2315 char cmsgbuf[CMSG_SPACE(sizeof(*cred))];
2316 char buf[1];
2317 int ret;
2318 int optval = 1;
2319
2320 *v = '1';
2321
2322 cred->pid = -1;
2323 cred->uid = -1;
2324 cred->gid = -1;
2325
2326 if (setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &optval, sizeof(optval)) == -1) {
2327 lxcfs_error("Failed to set passcred: %s\n", strerror(errno));
2328 return false;
2329 }
2330 buf[0] = '1';
2331 if (write(sock, buf, 1) != 1) {
2332 lxcfs_error("Failed to start write on scm fd: %s\n", strerror(errno));
2333 return false;
2334 }
2335
2336 msg.msg_name = NULL;
2337 msg.msg_namelen = 0;
2338 msg.msg_control = cmsgbuf;
2339 msg.msg_controllen = sizeof(cmsgbuf);
2340
2341 iov.iov_base = buf;
2342 iov.iov_len = sizeof(buf);
2343 msg.msg_iov = &iov;
2344 msg.msg_iovlen = 1;
2345
2346 if (!wait_for_sock(sock, 2)) {
2347 lxcfs_error("Timed out waiting for scm_cred: %s\n", strerror(errno));
2348 return false;
2349 }
2350 ret = recvmsg(sock, &msg, MSG_DONTWAIT);
2351 if (ret < 0) {
2352 lxcfs_error("Failed to receive scm_cred: %s\n", strerror(errno));
2353 return false;
2354 }
2355
2356 cmsg = CMSG_FIRSTHDR(&msg);
2357
2358 if (cmsg && cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)) &&
2359 cmsg->cmsg_level == SOL_SOCKET &&
2360 cmsg->cmsg_type == SCM_CREDENTIALS) {
2361 memcpy(cred, CMSG_DATA(cmsg), sizeof(*cred));
2362 }
2363 *v = buf[0];
2364
2365 return true;
2366 }
2367
2368 struct pid_ns_clone_args {
2369 int *cpipe;
2370 int sock;
2371 pid_t tpid;
2372 int (*wrapped) (int, pid_t); // pid_from_ns or pid_to_ns
2373 };
2374
2375 /*
2376 * pid_ns_clone_wrapper - wraps pid_to_ns or pid_from_ns for usage
2377 * with clone(). This simply writes '1' as ACK back to the parent
2378 * before calling the actual wrapped function.
2379 */
2380 static int pid_ns_clone_wrapper(void *arg) {
2381 struct pid_ns_clone_args* args = (struct pid_ns_clone_args *) arg;
2382 char b = '1';
2383
2384 close(args->cpipe[0]);
2385 if (write(args->cpipe[1], &b, sizeof(char)) < 0)
2386 lxcfs_error("(child): error on write: %s.\n", strerror(errno));
2387 close(args->cpipe[1]);
2388 return args->wrapped(args->sock, args->tpid);
2389 }
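/*
 * Note for the callers below: clone() expects a pointer to the *top* of
 * the child's stack, since the stack grows downward on the architectures
 * lxcfs runs on, which is why they pass "stack + stack_size" rather than
 * "stack".
 */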
2390
/*
 * pid_to_ns - reads pids as SCM_CREDENTIALS ucreds over a socket; the
 * kernel translates each pid into the receiving pidns, and the translated
 * int value is written back over the socket. This shifts the pid from the
 * sender's pidns into tpid's pidns.
 */
2396 static int pid_to_ns(int sock, pid_t tpid)
2397 {
2398 char v = '0';
2399 struct ucred cred;
2400
2401 while (recv_creds(sock, &cred, &v)) {
2402 if (v == '1')
2403 return 0;
2404 if (write(sock, &cred.pid, sizeof(pid_t)) != sizeof(pid_t))
2405 return 1;
2406 }
2407 return 0;
2408 }
2409
2410
2411 /*
2412 * pid_to_ns_wrapper: when you setns into a pidns, you yourself remain
2413 * in your old pidns. Only children which you clone will be in the target
2414 * pidns. So the pid_to_ns_wrapper does the setns, then clones a child to
2415 * actually convert pids.
2416 *
2417 * Note: glibc's fork() does not respect pidns, which can lead to failed
2418 * assertions inside glibc (and thus failed forks) if the child's pid in
2419 * the pidns and the parent pid outside are identical. Using clone prevents
2420 * this issue.
2421 */
2422 static void pid_to_ns_wrapper(int sock, pid_t tpid)
2423 {
2424 int newnsfd = -1, ret, cpipe[2];
2425 char fnam[100];
2426 pid_t cpid;
2427 char v;
2428
2429 ret = snprintf(fnam, sizeof(fnam), "/proc/%d/ns/pid", tpid);
2430 if (ret < 0 || ret >= sizeof(fnam))
2431 _exit(1);
2432 newnsfd = open(fnam, O_RDONLY);
2433 if (newnsfd < 0)
2434 _exit(1);
2435 if (setns(newnsfd, 0) < 0)
2436 _exit(1);
2437 close(newnsfd);
2438
2439 if (pipe(cpipe) < 0)
2440 _exit(1);
2441
2442 struct pid_ns_clone_args args = {
2443 .cpipe = cpipe,
2444 .sock = sock,
2445 .tpid = tpid,
2446 .wrapped = &pid_to_ns
2447 };
2448 size_t stack_size = sysconf(_SC_PAGESIZE);
2449 void *stack = alloca(stack_size);
2450
2451 cpid = clone(pid_ns_clone_wrapper, stack + stack_size, SIGCHLD, &args);
2452 if (cpid < 0)
2453 _exit(1);
2454
	// give the child one second to finish forking and write its ack
2457 if (!wait_for_sock(cpipe[0], 1))
2458 _exit(1);
2459 ret = read(cpipe[0], &v, 1);
2460 if (ret != sizeof(char) || v != '1')
2461 _exit(1);
2462
2463 if (!wait_for_pid(cpid))
2464 _exit(1);
2465 _exit(0);
2466 }
2467
/*
 * To read pid-containing cgroup files for a container, we read the file on
 * the host, then fork a child which setns()es into the reader's pidns and
 * translates each pid for us (see pid_to_ns_wrapper).
 */
2473 bool do_read_pids(pid_t tpid, const char *contrl, const char *cg, const char *file, char **d)
2474 {
2475 int sock[2] = {-1, -1};
2476 char *tmpdata = NULL;
2477 int ret;
2478 pid_t qpid, cpid = -1;
2479 bool answer = false;
2480 char v = '0';
2481 struct ucred cred;
2482 size_t sz = 0, asz = 0;
2483
2484 if (!cgroup_ops->get(cgroup_ops, contrl, cg, file, &tmpdata))
2485 return false;
2486
2487 /*
2488 * Now we read the pids from returned data one by one, pass
2489 * them into a child in the target namespace, read back the
2490 * translated pids, and put them into our to-return data
2491 */
2492
2493 if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sock) < 0) {
2494 perror("socketpair");
2495 free(tmpdata);
2496 return false;
2497 }
2498
2499 cpid = fork();
2500 if (cpid == -1)
2501 goto out;
2502
2503 if (!cpid) // child - exits when done
2504 pid_to_ns_wrapper(sock[1], tpid);
2505
2506 char *ptr = tmpdata;
2507 cred.uid = 0;
2508 cred.gid = 0;
2509 while (sscanf(ptr, "%d\n", &qpid) == 1) {
2510 cred.pid = qpid;
2511 ret = send_creds(sock[0], &cred, v, true);
2512
2513 if (ret == SEND_CREDS_NOTSK)
2514 goto next;
2515 if (ret == SEND_CREDS_FAIL)
2516 goto out;
2517
2518 // read converted results
2519 if (!wait_for_sock(sock[0], 2)) {
2520 lxcfs_error("Timed out waiting for pid from child: %s.\n", strerror(errno));
2521 goto out;
2522 }
2523 if (read(sock[0], &qpid, sizeof(qpid)) != sizeof(qpid)) {
2524 lxcfs_error("Error reading pid from child: %s.\n", strerror(errno));
2525 goto out;
2526 }
2527 must_strcat_pid(d, &sz, &asz, qpid);
2528 next:
2529 ptr = strchr(ptr, '\n');
2530 if (!ptr)
2531 break;
2532 ptr++;
2533 }
2534
2535 cred.pid = getpid();
2536 v = '1';
2537 if (send_creds(sock[0], &cred, v, true) != SEND_CREDS_OK) {
2538 // failed to ask child to exit
2539 lxcfs_error("Failed to ask child to exit: %s.\n", strerror(errno));
2540 goto out;
2541 }
2542
2543 answer = true;
2544
2545 out:
2546 free(tmpdata);
2547 if (cpid != -1)
2548 wait_for_pid(cpid);
2549 if (sock[0] != -1) {
2550 close(sock[0]);
2551 close(sock[1]);
2552 }
2553 return answer;
2554 }
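/*
 * Wire protocol used above, one datagram per pid: the parent sends each
 * pid from the cgroup file as an SCM_CREDENTIALS message with v == '0';
 * the child, running in the target pidns, receives the kernel-translated
 * pid and writes the plain integer back. A final message with v == '1'
 * asks the child to exit.
 */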
2555
2556 int cg_read(const char *path, char *buf, size_t size, off_t offset,
2557 struct fuse_file_info *fi)
2558 {
2559 struct fuse_context *fc = fuse_get_context();
2560 struct file_info *f = (struct file_info *)fi->fh;
2561 struct cgfs_files *k = NULL;
2562 char *data = NULL;
2563 int ret, s;
2564 bool r;
2565
2566 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
2567 return -EIO;
2568
2569 if (f->type != LXC_TYPE_CGFILE) {
2570 lxcfs_error("%s\n", "Internal error: directory cache info used in cg_read.");
2571 return -EIO;
2572 }
2573
2574 if (offset)
2575 return 0;
2576
2577 if (!f->controller)
2578 return -EINVAL;
2579
2580 if ((k = cgfs_get_key(f->controller, f->cgroup, f->file)) == NULL) {
2581 return -EINVAL;
2582 }
2583 free_key(k);
2584
2585
2586 if (!fc_may_access(fc, f->controller, f->cgroup, f->file, O_RDONLY)) {
2587 ret = -EACCES;
2588 goto out;
2589 }
2590
2591 if (strcmp(f->file, "tasks") == 0 ||
2592 strcmp(f->file, "/tasks") == 0 ||
2593 strcmp(f->file, "/cgroup.procs") == 0 ||
2594 strcmp(f->file, "cgroup.procs") == 0)
2595 // special case - we have to translate the pids
2596 r = do_read_pids(fc->pid, f->controller, f->cgroup, f->file, &data);
2597 else
2598 r = cgroup_ops->get(cgroup_ops, f->controller, f->cgroup, f->file, &data);
2599
2600 if (!r) {
2601 ret = -EINVAL;
2602 goto out;
2603 }
2604
2605 if (!data) {
2606 ret = 0;
2607 goto out;
2608 }
2609 s = strlen(data);
2610 if (s > size)
2611 s = size;
2612 memcpy(buf, data, s);
2613 if (s > 0 && s < size && data[s-1] != '\n')
2614 buf[s++] = '\n';
2615
2616 ret = s;
2617
2618 out:
2619 free(data);
2620 return ret;
2621 }
2622
2623 static int pid_from_ns(int sock, pid_t tpid)
2624 {
2625 pid_t vpid;
2626 struct ucred cred;
2627 char v;
2628 int ret;
2629
2630 cred.uid = 0;
2631 cred.gid = 0;
2632 while (1) {
2633 if (!wait_for_sock(sock, 2)) {
2634 lxcfs_error("%s\n", "Timeout reading from parent.");
2635 return 1;
2636 }
2637 if ((ret = read(sock, &vpid, sizeof(pid_t))) != sizeof(pid_t)) {
2638 lxcfs_error("Bad read from parent: %s.\n", strerror(errno));
2639 return 1;
2640 }
2641 if (vpid == -1) // done
2642 break;
2643 v = '0';
2644 cred.pid = vpid;
2645 if (send_creds(sock, &cred, v, true) != SEND_CREDS_OK) {
2646 v = '1';
2647 cred.pid = getpid();
2648 if (send_creds(sock, &cred, v, false) != SEND_CREDS_OK)
2649 return 1;
2650 }
2651 }
2652 return 0;
2653 }
2654
2655 static void pid_from_ns_wrapper(int sock, pid_t tpid)
2656 {
2657 int newnsfd = -1, ret, cpipe[2];
2658 char fnam[100];
2659 pid_t cpid;
2660 char v;
2661
2662 ret = snprintf(fnam, sizeof(fnam), "/proc/%d/ns/pid", tpid);
2663 if (ret < 0 || ret >= sizeof(fnam))
2664 _exit(1);
2665 newnsfd = open(fnam, O_RDONLY);
2666 if (newnsfd < 0)
2667 _exit(1);
2668 if (setns(newnsfd, 0) < 0)
2669 _exit(1);
2670 close(newnsfd);
2671
2672 if (pipe(cpipe) < 0)
2673 _exit(1);
2674
2675 struct pid_ns_clone_args args = {
2676 .cpipe = cpipe,
2677 .sock = sock,
2678 .tpid = tpid,
2679 .wrapped = &pid_from_ns
2680 };
2681 size_t stack_size = sysconf(_SC_PAGESIZE);
2682 void *stack = alloca(stack_size);
2683
2684 cpid = clone(pid_ns_clone_wrapper, stack + stack_size, SIGCHLD, &args);
2685 if (cpid < 0)
2686 _exit(1);
2687
	// give the child one second to finish forking and write its ack
2690 if (!wait_for_sock(cpipe[0], 1))
2691 _exit(1);
2692 ret = read(cpipe[0], &v, 1);
2693 if (ret != sizeof(char) || v != '1')
2694 _exit(1);
2695
2696 if (!wait_for_pid(cpid))
2697 _exit(1);
2698 _exit(0);
2699 }
2700
/*
 * Given host @uid, write the uid to which it maps in @pid's user
 * namespace into @answer. Returns false if there is no mapping.
 */
2705 bool hostuid_to_ns(uid_t uid, pid_t pid, uid_t *answer)
2706 {
2707 FILE *f;
2708 char line[400];
2709
2710 sprintf(line, "/proc/%d/uid_map", pid);
2711 if ((f = fopen(line, "r")) == NULL) {
2712 return false;
2713 }
2714
2715 *answer = convert_id_to_ns(f, uid);
2716 fclose(f);
2717
2718 if (*answer == -1)
2719 return false;
2720 return true;
2721 }
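/*
 * Worked example (illustrative values): if /proc/<pid>/uid_map reads
 *
 *	0 100000 65536
 *
 * i.e. "ns-start host-start count", then host uid 100005 maps to ns uid 5,
 * while host uid 99999 falls outside the mapped range, *answer becomes -1
 * and the function returns false.
 */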
2722
2723 /*
2724 * get_pid_creds: get the real uid and gid of @pid from
2725 * /proc/$$/status
2726 * (XXX should we use euid here?)
2727 */
2728 void get_pid_creds(pid_t pid, uid_t *uid, gid_t *gid)
2729 {
2730 char line[400];
2731 uid_t u;
2732 gid_t g;
2733 FILE *f;
2734
2735 *uid = -1;
2736 *gid = -1;
2737 sprintf(line, "/proc/%d/status", pid);
2738 if ((f = fopen(line, "r")) == NULL) {
2739 lxcfs_error("Error opening %s: %s\n", line, strerror(errno));
2740 return;
2741 }
2742 while (fgets(line, 400, f)) {
2743 if (strncmp(line, "Uid:", 4) == 0) {
2744 if (sscanf(line+4, "%u", &u) != 1) {
2745 lxcfs_error("bad uid line for pid %u\n", pid);
2746 fclose(f);
2747 return;
2748 }
2749 *uid = u;
2750 } else if (strncmp(line, "Gid:", 4) == 0) {
2751 if (sscanf(line+4, "%u", &g) != 1) {
2752 lxcfs_error("bad gid line for pid %u\n", pid);
2753 fclose(f);
2754 return;
2755 }
2756 *gid = g;
2757 }
2758 }
2759 fclose(f);
2760 }
2761
2762 /*
2763 * May the requestor @r move victim @v to a new cgroup?
2764 * This is allowed if
2765 * . they are the same task
 * . they are owned by the same uid
2767 * . @r is root on the host, or
2768 * . @v's uid is mapped into @r's where @r is root.
2769 */
2770 bool may_move_pid(pid_t r, uid_t r_uid, pid_t v)
2771 {
2772 uid_t v_uid, tmpuid;
2773 gid_t v_gid;
2774
2775 if (r == v)
2776 return true;
2777 if (r_uid == 0)
2778 return true;
2779 get_pid_creds(v, &v_uid, &v_gid);
2780 if (r_uid == v_uid)
2781 return true;
2782 if (hostuid_to_ns(r_uid, r, &tmpuid) && tmpuid == 0
2783 && hostuid_to_ns(v_uid, r, &tmpuid))
2784 return true;
2785 return false;
2786 }
2787
2788 static bool do_write_pids(pid_t tpid, uid_t tuid, const char *contrl, const char *cg,
2789 const char *file, const char *buf)
2790 {
2791 int sock[2] = {-1, -1};
2792 pid_t qpid, cpid = -1;
2793 FILE *pids_file = NULL;
2794 bool answer = false, fail = false;
2795
2796 pids_file = open_pids_file(contrl, cg);
2797 if (!pids_file)
2798 return false;
2799
2800 /*
2801 * write the pids to a socket, have helper in writer's pidns
2802 * call movepid for us
2803 */
2804 if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sock) < 0) {
2805 perror("socketpair");
2806 goto out;
2807 }
2808
2809 cpid = fork();
2810 if (cpid == -1)
2811 goto out;
2812
2813 if (!cpid) { // child
2814 fclose(pids_file);
2815 pid_from_ns_wrapper(sock[1], tpid);
2816 }
2817
2818 const char *ptr = buf;
2819 while (sscanf(ptr, "%d", &qpid) == 1) {
2820 struct ucred cred;
2821 char v;
2822
2823 if (write(sock[0], &qpid, sizeof(qpid)) != sizeof(qpid)) {
2824 lxcfs_error("Error writing pid to child: %s.\n", strerror(errno));
2825 goto out;
2826 }
2827
2828 if (recv_creds(sock[0], &cred, &v)) {
2829 if (v == '0') {
2830 if (!may_move_pid(tpid, tuid, cred.pid)) {
2831 fail = true;
2832 break;
2833 }
2834 if (fprintf(pids_file, "%d", (int) cred.pid) < 0)
2835 fail = true;
2836 }
2837 }
2838
2839 ptr = strchr(ptr, '\n');
2840 if (!ptr)
2841 break;
2842 ptr++;
2843 }
2844
	/* Tell the child to exit by sending an impossible pid (-1) */
	qpid = -1;
	if (write(sock[0], &qpid, sizeof(qpid)) != sizeof(qpid))
2848 lxcfs_error("%s\n", "Warning: failed to ask child to exit.");
2849
2850 if (!fail)
2851 answer = true;
2852
2853 out:
2854 if (cpid != -1)
2855 wait_for_pid(cpid);
2856 if (sock[0] != -1) {
2857 close(sock[0]);
2858 close(sock[1]);
2859 }
2860 if (pids_file) {
2861 if (fclose(pids_file) != 0)
2862 answer = false;
2863 }
2864 return answer;
2865 }
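/*
 * The handshake above mirrors do_read_pids in the other direction: the
 * parent writes each raw pid over the socketpair, the child (which has
 * setns()ed into the writer's pidns) sends it back as an SCM_CREDENTIALS
 * message so the kernel translates it into the parent's pidns, and the
 * parent then appends the translated pid to the cgroup pids file. A pid
 * of -1 tells the child to exit.
 */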
2866
2867 int cg_write(const char *path, const char *buf, size_t size, off_t offset,
2868 struct fuse_file_info *fi)
2869 {
2870 struct fuse_context *fc = fuse_get_context();
	__do_free char *localbuf = NULL;
2872 struct cgfs_files *k = NULL;
2873 struct file_info *f = (struct file_info *)fi->fh;
2874 bool r;
2875
2876 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
2877 return -EIO;
2878
2879 if (f->type != LXC_TYPE_CGFILE) {
2880 lxcfs_error("%s\n", "Internal error: directory cache info used in cg_write.");
2881 return -EIO;
2882 }
2883
2884 if (offset)
2885 return 0;
2886
	/* Heap-allocate: size is caller-controlled, so alloca() could blow
	 * the stack. */
	localbuf = malloc(size + 1);
	if (!localbuf)
		return -ENOMEM;
	localbuf[size] = '\0';
	memcpy(localbuf, buf, size);
2890
2891 if ((k = cgfs_get_key(f->controller, f->cgroup, f->file)) == NULL) {
2892 size = -EINVAL;
2893 goto out;
2894 }
2895
2896 if (!fc_may_access(fc, f->controller, f->cgroup, f->file, O_WRONLY)) {
2897 size = -EACCES;
2898 goto out;
2899 }
2900
2901 if (strcmp(f->file, "tasks") == 0 ||
2902 strcmp(f->file, "/tasks") == 0 ||
2903 strcmp(f->file, "/cgroup.procs") == 0 ||
2904 strcmp(f->file, "cgroup.procs") == 0)
2905 // special case - we have to translate the pids
2906 r = do_write_pids(fc->pid, fc->uid, f->controller, f->cgroup, f->file, localbuf);
2907 else
2908 r = cgfs_set_value(f->controller, f->cgroup, f->file, localbuf);
2909
2910 if (!r)
2911 size = -EINVAL;
2912
2913 out:
2914 free_key(k);
2915 return size;
2916 }
2917
2918 int cg_chown(const char *path, uid_t uid, gid_t gid)
2919 {
2920 struct fuse_context *fc = fuse_get_context();
2921 char *cgdir = NULL, *last = NULL, *path1, *path2, *controller;
2922 struct cgfs_files *k = NULL;
2923 const char *cgroup;
2924 int ret;
2925
2926 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
2927 return -EIO;
2928
2929 if (strcmp(path, "/cgroup") == 0)
2930 return -EPERM;
2931
2932 controller = pick_controller_from_path(fc, path);
2933 if (!controller)
2934 return errno == ENOENT ? -EPERM : -errno;
2935
2936 cgroup = find_cgroup_in_path(path);
2937 if (!cgroup)
2938 /* this is just /cgroup/controller */
2939 return -EPERM;
2940
2941 get_cgdir_and_path(cgroup, &cgdir, &last);
2942
2943 if (!last) {
2944 path1 = "/";
2945 path2 = cgdir;
2946 } else {
2947 path1 = cgdir;
2948 path2 = last;
2949 }
2950
2951 if (is_child_cgroup(controller, path1, path2)) {
		// get uid, gid from the 'tasks' file and make up a mode
		// That is a hack, until cgmanager gains a GetCgroupPerms fn.
2954 k = cgfs_get_key(controller, cgroup, "tasks");
2955
2956 } else
2957 k = cgfs_get_key(controller, path1, path2);
2958
2959 if (!k) {
2960 ret = -EINVAL;
2961 goto out;
2962 }
2963
2964 /*
2965 * This being a fuse request, the uid and gid must be valid
2966 * in the caller's namespace. So we can just check to make
2967 * sure that the caller is root in his uid, and privileged
2968 * over the file's current owner.
2969 */
2970 if (!is_privileged_over(fc->pid, fc->uid, k->uid, NS_ROOT_REQD)) {
2971 ret = -EACCES;
2972 goto out;
2973 }
2974
2975 ret = cgfs_chown_file(controller, cgroup, uid, gid);
2976
2977 out:
2978 free_key(k);
2979 free(cgdir);
2980
2981 return ret;
2982 }
2983
2984 int cg_chmod(const char *path, mode_t mode)
2985 {
2986 struct fuse_context *fc = fuse_get_context();
	char *cgdir = NULL, *last = NULL, *path1, *path2, *controller;
2988 struct cgfs_files *k = NULL;
2989 const char *cgroup;
2990 int ret;
2991
2992 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
2993 return -EIO;
2994
2995 if (strcmp(path, "/cgroup") == 0)
2996 return -EPERM;
2997
2998 controller = pick_controller_from_path(fc, path);
2999 if (!controller)
3000 return errno == ENOENT ? -EPERM : -errno;
3001
3002 cgroup = find_cgroup_in_path(path);
3003 if (!cgroup)
3004 /* this is just /cgroup/controller */
3005 return -EPERM;
3006
3007 get_cgdir_and_path(cgroup, &cgdir, &last);
3008
3009 if (!last) {
3010 path1 = "/";
3011 path2 = cgdir;
3012 } else {
3013 path1 = cgdir;
3014 path2 = last;
3015 }
3016
3017 if (is_child_cgroup(controller, path1, path2)) {
		// get uid, gid from the 'tasks' file and make up a mode
		// That is a hack, until cgmanager gains a GetCgroupPerms fn.
3020 k = cgfs_get_key(controller, cgroup, "tasks");
3021
3022 } else
3023 k = cgfs_get_key(controller, path1, path2);
3024
3025 if (!k) {
3026 ret = -EINVAL;
3027 goto out;
3028 }
3029
3030 /*
3031 * This being a fuse request, the uid and gid must be valid
3032 * in the caller's namespace. So we can just check to make
3033 * sure that the caller is root in his uid, and privileged
3034 * over the file's current owner.
3035 */
3036 if (!is_privileged_over(fc->pid, fc->uid, k->uid, NS_ROOT_OPT)) {
3037 ret = -EPERM;
3038 goto out;
3039 }
3040
3041 if (!cgfs_chmod_file(controller, cgroup, mode)) {
3042 ret = -EINVAL;
3043 goto out;
3044 }
3045
3046 ret = 0;
3047 out:
3048 free_key(k);
3049 free(cgdir);
3050 return ret;
3051 }
3052
3053 int cg_mkdir(const char *path, mode_t mode)
3054 {
3055 struct fuse_context *fc = fuse_get_context();
3056 char *last = NULL, *path1, *cgdir = NULL, *controller, *next = NULL;
3057 const char *cgroup;
3058 int ret;
3059
3060 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
3061 return -EIO;
3062
3063 controller = pick_controller_from_path(fc, path);
3064 if (!controller)
3065 return errno == ENOENT ? -EPERM : -errno;
3066
3067 cgroup = find_cgroup_in_path(path);
3068 if (!cgroup)
3069 return -errno;
3070
3071 get_cgdir_and_path(cgroup, &cgdir, &last);
3072 if (!last)
3073 path1 = "/";
3074 else
3075 path1 = cgdir;
3076
3077 pid_t initpid = lookup_initpid_in_store(fc->pid);
3078 if (initpid <= 1 || is_shared_pidns(initpid))
3079 initpid = fc->pid;
3080 if (!caller_is_in_ancestor(initpid, controller, path1, &next)) {
3081 if (!next)
3082 ret = -EINVAL;
3083 else if (last && strcmp(next, last) == 0)
3084 ret = -EEXIST;
3085 else
3086 ret = -EPERM;
3087 goto out;
3088 }
3089
3090 if (!fc_may_access(fc, controller, path1, NULL, O_RDWR)) {
3091 ret = -EACCES;
3092 goto out;
3093 }
3094 if (!caller_is_in_ancestor(initpid, controller, path1, NULL)) {
3095 ret = -EACCES;
3096 goto out;
3097 }
3098
3099 ret = cgfs_create(controller, cgroup, fc->uid, fc->gid);
3100
3101 out:
3102 free(cgdir);
3103 free(next);
3104 return ret;
3105 }
3106
3107 int cg_rmdir(const char *path)
3108 {
3109 struct fuse_context *fc = fuse_get_context();
3110 char *last = NULL, *cgdir = NULL, *controller, *next = NULL;
3111 const char *cgroup;
3112 int ret;
3113
3114 if (!fc || !cgroup_ops || pure_unified_layout(cgroup_ops))
3115 return -EIO;
3116
3117 controller = pick_controller_from_path(fc, path);
3118 if (!controller) /* Someone's trying to delete "/cgroup". */
3119 return -EPERM;
3120
3121 cgroup = find_cgroup_in_path(path);
3122 if (!cgroup) /* Someone's trying to delete a controller e.g. "/blkio". */
3123 return -EPERM;
3124
3125 get_cgdir_and_path(cgroup, &cgdir, &last);
3126 if (!last) {
3127 /* Someone's trying to delete a cgroup on the same level as the
3128 * "/lxc" cgroup e.g. rmdir "/cgroup/blkio/lxc" or
3129 * rmdir "/cgroup/blkio/init.slice".
3130 */
3131 ret = -EPERM;
3132 goto out;
3133 }
3134
3135 pid_t initpid = lookup_initpid_in_store(fc->pid);
3136 if (initpid <= 1 || is_shared_pidns(initpid))
3137 initpid = fc->pid;
3138 if (!caller_is_in_ancestor(initpid, controller, cgroup, &next)) {
3139 if (!last || (next && (strcmp(next, last) == 0)))
3140 ret = -EBUSY;
3141 else
3142 ret = -ENOENT;
3143 goto out;
3144 }
3145
3146 if (!fc_may_access(fc, controller, cgdir, NULL, O_WRONLY)) {
3147 ret = -EACCES;
3148 goto out;
3149 }
3150 if (!caller_is_in_ancestor(initpid, controller, cgroup, NULL)) {
3151 ret = -EACCES;
3152 goto out;
3153 }
3154
3155 if (!cgfs_remove(controller, cgroup)) {
3156 ret = -EINVAL;
3157 goto out;
3158 }
3159
3160 ret = 0;
3161
3162 out:
3163 free(cgdir);
3164 free(next);
3165 return ret;
3166 }
3167
3168 static bool startswith(const char *line, const char *pref)
3169 {
3170 if (strncmp(line, pref, strlen(pref)) == 0)
3171 return true;
3172 return false;
3173 }
3174
3175 /* Note that "memory.stat" in cgroup2 is hierarchical by default. */
3176 static void parse_memstat(int version,
3177 char *memstat,
3178 unsigned long *cached,
3179 unsigned long *active_anon,
3180 unsigned long *inactive_anon,
3181 unsigned long *active_file,
3182 unsigned long *inactive_file,
3183 unsigned long *unevictable,
3184 unsigned long *shmem)
3185 {
3186 char *eol;
3187
	while (*memstat) {
		const char *key;

		/* The sscanf() offset must match whichever key (unified or
		 * legacy) actually matched, so derive it from the key instead
		 * of hard-coding the legacy "total_*" lengths. */
		key = is_unified_controller(version) ? "cache" : "total_cache";
		if (startswith(memstat, key)) {
			sscanf(memstat + strlen(key), "%lu", cached);
			*cached /= 1024;
		}

		key = is_unified_controller(version) ? "active_anon"
						     : "total_active_anon";
		if (startswith(memstat, key)) {
			sscanf(memstat + strlen(key), "%lu", active_anon);
			*active_anon /= 1024;
		}

		key = is_unified_controller(version) ? "inactive_anon"
						     : "total_inactive_anon";
		if (startswith(memstat, key)) {
			sscanf(memstat + strlen(key), "%lu", inactive_anon);
			*inactive_anon /= 1024;
		}

		key = is_unified_controller(version) ? "active_file"
						     : "total_active_file";
		if (startswith(memstat, key)) {
			sscanf(memstat + strlen(key), "%lu", active_file);
			*active_file /= 1024;
		}

		key = is_unified_controller(version) ? "inactive_file"
						     : "total_inactive_file";
		if (startswith(memstat, key)) {
			sscanf(memstat + strlen(key), "%lu", inactive_file);
			*inactive_file /= 1024;
		}

		key = is_unified_controller(version) ? "unevictable"
						     : "total_unevictable";
		if (startswith(memstat, key)) {
			sscanf(memstat + strlen(key), "%lu", unevictable);
			*unevictable /= 1024;
		}

		key = is_unified_controller(version) ? "shmem" : "total_shmem";
		if (startswith(memstat, key)) {
			sscanf(memstat + strlen(key), "%lu", shmem);
			*shmem /= 1024;
		}

		eol = strchr(memstat, '\n');
		if (!eol)
			return;
		memstat = eol + 1;
	}
3230 }
3231
3232 static void get_blkio_io_value(char *str, unsigned major, unsigned minor, char *iotype, unsigned long *v)
3233 {
3234 char *eol;
3235 char key[32];
3236
3237 memset(key, 0, 32);
3238 snprintf(key, 32, "%u:%u %s", major, minor, iotype);
3239
3240 size_t len = strlen(key);
3241 *v = 0;
3242
3243 while (*str) {
3244 if (startswith(str, key)) {
3245 sscanf(str + len, "%lu", v);
3246 return;
3247 }
3248 eol = strchr(str, '\n');
3249 if (!eol)
3250 return;
3251 str = eol+1;
3252 }
3253 }
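/*
 * The blkio stat files parsed here consist of one "major:minor Op value"
 * line per device, e.g. (illustrative):
 *
 *	8:0 Read 131072
 *	8:0 Write 65536
 *
 * so with major=8, minor=0, iotype="Read" the key becomes "8:0 Read" and
 * *v is parsed as 131072.
 */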
3254
3255 int read_file_fuse(const char *path, char *buf, size_t size, struct file_info *d)
3256 {
3257 __do_free char *line = NULL;
3258 __do_fclose FILE *f = NULL;
3259 size_t linelen = 0, total_len = 0;
3260 char *cache = d->buf;
3261 size_t cache_size = d->buflen;
3262
3263 f = fopen(path, "r");
3264 if (!f)
3265 return 0;
3266
3267 while (getline(&line, &linelen, f) != -1) {
3268 ssize_t l = snprintf(cache, cache_size, "%s", line);
3269 if (l < 0) {
3270 perror("Error writing to cache");
3271 return 0;
3272 }
3273 if (l >= cache_size) {
3274 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3275 return 0;
3276 }
3277 cache += l;
3278 cache_size -= l;
3279 total_len += l;
3280 }
3281
3282 d->size = total_len;
3283 if (total_len > size)
3284 total_len = size;
3285
3286 /* read from off 0 */
3287 memcpy(buf, d->buf, total_len);
3288
3289 if (d->size > total_len)
3290 d->cached = d->size - total_len;
3291 return total_len;
3292 }
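/*
 * A note on the caching contract shared by the proc handlers: a read at
 * offset 0 synthesizes the whole file into d->buf and records its length
 * in d->size. d->cached is set non-zero while cached data remains (here it
 * holds the count of bytes not yet returned; some handlers simply use it
 * as a flag), and reads at non-zero offsets are then served straight from
 * d->buf.
 */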
3293
3294 /*
3295 * FUSE ops for /proc
3296 */
3297
3298 static unsigned long get_memlimit(const char *cgroup, bool swap)
3299 {
3300 int ret;
3301 __do_free char *memlimit_str = NULL;
3302 unsigned long memlimit = -1;
3303
3304 if (swap)
3305 ret = cgroup_ops->get_memory_swap_max(cgroup_ops, cgroup, &memlimit_str);
3306 else
3307 ret = cgroup_ops->get_memory_max(cgroup_ops, cgroup, &memlimit_str);
3308 if (ret > 0)
3309 memlimit = strtoul(memlimit_str, NULL, 10);
3310
3311 return memlimit;
3312 }
3313
3314 static unsigned long get_min_memlimit(const char *cgroup, bool swap)
3315 {
3316 __do_free char *copy = NULL;
3317 unsigned long memlimit = 0;
3318 unsigned long retlimit;
3319
3320 copy = strdup(cgroup);
3321 retlimit = get_memlimit(copy, swap);
3322
3323 while (strcmp(copy, "/") != 0) {
3324 char *it = copy;
3325
3326 it = dirname(it);
3327 memlimit = get_memlimit(it, swap);
3328 if (memlimit != -1 && memlimit < retlimit)
3329 retlimit = memlimit;
	}
3331
3332 return retlimit;
3333 }
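/*
 * Example: for cgroup "/a/b" this queries the limits of "/a/b", "/a" and
 * "/" in turn and returns the smallest one that is set, since the
 * effective limit of a cgroup is the minimum along its path to the root.
 */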
3334
3335 static int proc_meminfo_read(char *buf, size_t size, off_t offset,
3336 struct fuse_file_info *fi)
3337 {
3338 __do_free char *cgroup = NULL, *line = NULL,
3339 *memusage_str = NULL, *memstat_str = NULL,
3340 *memswlimit_str = NULL, *memswusage_str = NULL;
3341 __do_fclose FILE *f = NULL;
3342 struct fuse_context *fc = fuse_get_context();
	struct lxcfs_opts *opts = (struct lxcfs_opts *)fc->private_data;
3344 struct file_info *d = (struct file_info *)fi->fh;
3345 unsigned long memlimit = 0, memusage = 0, memswlimit = 0,
3346 memswusage = 0, cached = 0, hosttotal = 0, active_anon = 0,
3347 inactive_anon = 0, active_file = 0, inactive_file = 0,
3348 unevictable = 0, shmem = 0, hostswtotal = 0;
3349 size_t linelen = 0, total_len = 0;
3350 char *cache = d->buf;
3351 size_t cache_size = d->buflen;
3352 int ret;
3353
3354 if (offset) {
3355 int left;
3356
3357 if (offset > d->size)
3358 return -EINVAL;
3359
3360 if (!d->cached)
3361 return 0;
3362
3363 left = d->size - offset;
3364 total_len = left > size ? size : left;
3365 memcpy(buf, cache + offset, total_len);
3366
3367 return total_len;
3368 }
3369
3370 pid_t initpid = lookup_initpid_in_store(fc->pid);
3371 if (initpid <= 1 || is_shared_pidns(initpid))
3372 initpid = fc->pid;
3373
3374 cgroup = get_pid_cgroup(initpid, "memory");
3375 if (!cgroup)
3376 return read_file_fuse("/proc/meminfo", buf, size, d);
3377
3378 prune_init_slice(cgroup);
3379
3380 memlimit = get_min_memlimit(cgroup, false);
3381
3382 ret = cgroup_ops->get_memory_current(cgroup_ops, cgroup, &memusage_str);
3383 if (ret < 0)
3384 return 0;
3385
3386 ret = cgroup_ops->get_memory_stats(cgroup_ops, cgroup, &memstat_str);
3387 if (ret < 0)
3388 return 0;
3389 parse_memstat(ret, memstat_str, &cached, &active_anon, &inactive_anon,
3390 &active_file, &inactive_file, &unevictable, &shmem);
3391
	/*
	 * The following values are allowed to fail, because swap accounting
	 * might be disabled in the current kernel.
	 */
3396 ret = cgroup_ops->get_memory_swap_max(cgroup_ops, cgroup, &memswlimit_str);
3397 if (ret >= 0)
3398 ret = cgroup_ops->get_memory_swap_current(cgroup_ops, cgroup, &memswusage_str);
3399 if (ret >= 0) {
3400 memswlimit = get_min_memlimit(cgroup, true);
3401 memswusage = strtoul(memswusage_str, NULL, 10);
3402 memswlimit = memswlimit / 1024;
3403 memswusage = memswusage / 1024;
3404 }
3405
3406 memusage = strtoul(memusage_str, NULL, 10);
3407 memlimit /= 1024;
3408 memusage /= 1024;
3409
3410 f = fopen("/proc/meminfo", "r");
3411 if (!f)
3412 return 0;
3413
3414 while (getline(&line, &linelen, f) != -1) {
3415 ssize_t l;
3416 char *printme, lbuf[100];
3417
3418 memset(lbuf, 0, 100);
3419 if (startswith(line, "MemTotal:")) {
3420 sscanf(line+sizeof("MemTotal:")-1, "%lu", &hosttotal);
3421 if (hosttotal < memlimit)
3422 memlimit = hosttotal;
3423 snprintf(lbuf, 100, "MemTotal: %8lu kB\n", memlimit);
3424 printme = lbuf;
3425 } else if (startswith(line, "MemFree:")) {
3426 snprintf(lbuf, 100, "MemFree: %8lu kB\n", memlimit - memusage);
3427 printme = lbuf;
3428 } else if (startswith(line, "MemAvailable:")) {
3429 snprintf(lbuf, 100, "MemAvailable: %8lu kB\n", memlimit - memusage + cached);
3430 printme = lbuf;
3431 } else if (startswith(line, "SwapTotal:") && memswlimit > 0 &&
3432 opts && opts->swap_off == false) {
3433 sscanf(line+sizeof("SwapTotal:")-1, "%lu", &hostswtotal);
3434 if (hostswtotal < memswlimit)
3435 memswlimit = hostswtotal;
3436 snprintf(lbuf, 100, "SwapTotal: %8lu kB\n", memswlimit);
3437 printme = lbuf;
3438 } else if (startswith(line, "SwapTotal:") && opts && opts->swap_off == true) {
3439 snprintf(lbuf, 100, "SwapTotal: %8lu kB\n", 0UL);
3440 printme = lbuf;
3441 } else if (startswith(line, "SwapFree:") && memswlimit > 0 &&
3442 memswusage > 0 && opts && opts->swap_off == false) {
3443 unsigned long swaptotal = memswlimit,
3444 swapusage = memusage > memswusage
3445 ? 0
3446 : memswusage - memusage,
3447 swapfree = swapusage < swaptotal
3448 ? swaptotal - swapusage
3449 : 0;
3450 snprintf(lbuf, 100, "SwapFree: %8lu kB\n", swapfree);
3451 printme = lbuf;
3452 } else if (startswith(line, "SwapFree:") && opts && opts->swap_off == true) {
3453 snprintf(lbuf, 100, "SwapFree: %8lu kB\n", 0UL);
3454 printme = lbuf;
3455 } else if (startswith(line, "Slab:")) {
3456 snprintf(lbuf, 100, "Slab: %8lu kB\n", 0UL);
3457 printme = lbuf;
3458 } else if (startswith(line, "Buffers:")) {
3459 snprintf(lbuf, 100, "Buffers: %8lu kB\n", 0UL);
3460 printme = lbuf;
3461 } else if (startswith(line, "Cached:")) {
3462 snprintf(lbuf, 100, "Cached: %8lu kB\n", cached);
3463 printme = lbuf;
3464 } else if (startswith(line, "SwapCached:")) {
3465 snprintf(lbuf, 100, "SwapCached: %8lu kB\n", 0UL);
3466 printme = lbuf;
3467 } else if (startswith(line, "Active:")) {
3468 snprintf(lbuf, 100, "Active: %8lu kB\n",
3469 active_anon + active_file);
3470 printme = lbuf;
3471 } else if (startswith(line, "Inactive:")) {
3472 snprintf(lbuf, 100, "Inactive: %8lu kB\n",
3473 inactive_anon + inactive_file);
3474 printme = lbuf;
3475 } else if (startswith(line, "Active(anon)")) {
3476 snprintf(lbuf, 100, "Active(anon): %8lu kB\n", active_anon);
3477 printme = lbuf;
3478 } else if (startswith(line, "Inactive(anon)")) {
3479 snprintf(lbuf, 100, "Inactive(anon): %8lu kB\n", inactive_anon);
3480 printme = lbuf;
3481 } else if (startswith(line, "Active(file)")) {
3482 snprintf(lbuf, 100, "Active(file): %8lu kB\n", active_file);
3483 printme = lbuf;
3484 } else if (startswith(line, "Inactive(file)")) {
3485 snprintf(lbuf, 100, "Inactive(file): %8lu kB\n", inactive_file);
3486 printme = lbuf;
3487 } else if (startswith(line, "Unevictable")) {
3488 snprintf(lbuf, 100, "Unevictable: %8lu kB\n", unevictable);
3489 printme = lbuf;
3490 } else if (startswith(line, "SReclaimable")) {
3491 snprintf(lbuf, 100, "SReclaimable: %8lu kB\n", 0UL);
3492 printme = lbuf;
3493 } else if (startswith(line, "SUnreclaim")) {
3494 snprintf(lbuf, 100, "SUnreclaim: %8lu kB\n", 0UL);
3495 printme = lbuf;
3496 } else if (startswith(line, "Shmem:")) {
3497 snprintf(lbuf, 100, "Shmem: %8lu kB\n", shmem);
3498 printme = lbuf;
3499 } else if (startswith(line, "ShmemHugePages")) {
3500 snprintf(lbuf, 100, "ShmemHugePages: %8lu kB\n", 0UL);
3501 printme = lbuf;
3502 } else if (startswith(line, "ShmemPmdMapped")) {
3503 snprintf(lbuf, 100, "ShmemPmdMapped: %8lu kB\n", 0UL);
3504 printme = lbuf;
3505 } else
3506 printme = line;
3507
3508 l = snprintf(cache, cache_size, "%s", printme);
3509 if (l < 0) {
3510 perror("Error writing to cache");
3511 return 0;
3512
3513 }
3514 if (l >= cache_size) {
3515 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3516 return 0;
3517 }
3518
3519 cache += l;
3520 cache_size -= l;
3521 total_len += l;
3522 }
3523
3524 d->cached = 1;
3525 d->size = total_len;
	if (total_len > size)
		total_len = size;
3527 memcpy(buf, d->buf, total_len);
3528
3529 return total_len;
3530 }
3531
3532 /*
3533 * Read the cpuset.cpus for cg
3534 * Return the answer in a newly allocated string which must be freed
3535 */
3536 char *get_cpuset(const char *cg)
3537 {
3538 char *value = NULL;
3539 int ret;
3540
3541 ret = cgroup_ops->get_cpuset_cpus(cgroup_ops, cg, &value);
3542 if (ret < 0)
3543 return NULL;
3544
3545 return value;
3546 }
3547
3548 bool cpu_in_cpuset(int cpu, const char *cpuset);
3549
3550 static bool cpuline_in_cpuset(const char *line, const char *cpuset)
3551 {
3552 int cpu;
3553
3554 if (sscanf(line, "processor : %d", &cpu) != 1)
3555 return false;
3556 return cpu_in_cpuset(cpu, cpuset);
3557 }
3558
/*
 * Read cgroup CPU quota parameters from `cpu.cfs_quota_us` or
 * `cpu.cfs_period_us`, depending on `param`. The parameter value is
 * returned through `value`.
 */
3563 static bool read_cpu_cfs_param(const char *cg, const char *param, int64_t *value)
3564 {
3565 __do_free char *str = NULL;
3566 char file[11 + 6 + 1]; /* cpu.cfs__us + quota/period + \0 */
3567
3568 snprintf(file, sizeof(file), "cpu.cfs_%s_us", param);
3569
3570 if (!cgroup_ops->get(cgroup_ops, "cpu", cg, file, &str))
3571 return false;
3572
	if (sscanf(str, "%" SCNd64, value) != 1)
3574 return false;
3575
3576 return true;
3577 }
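/*
 * Typical use (an illustrative sketch, not called anywhere): in cgroup v1
 * cpu.cfs_quota_us is -1 when no quota is set, so callers treat
 * non-positive values as "no limit".
 */
#if 0
	int64_t quota, period;

	if (read_cpu_cfs_param(cg, "quota", &quota) &&
	    read_cpu_cfs_param(cg, "period", &period) &&
	    quota > 0 && period > 0)
		printf("limit: %.2f cpus\n", (double)quota / (double)period);
#endif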
3578
3579 /*
3580 * Return the maximum number of visible CPUs based on CPU quotas.
3581 * If there is no quota set, zero is returned.
3582 */
3583 int max_cpu_count(const char *cg)
3584 {
3585 int rv, nprocs;
3586 int64_t cfs_quota, cfs_period;
3587 int nr_cpus_in_cpuset = 0;
3588 char *cpuset = NULL;
3589
3590 if (!read_cpu_cfs_param(cg, "quota", &cfs_quota))
3591 return 0;
3592
3593 if (!read_cpu_cfs_param(cg, "period", &cfs_period))
3594 return 0;
3595
3596 cpuset = get_cpuset(cg);
3597 if (cpuset)
3598 nr_cpus_in_cpuset = cpu_number_in_cpuset(cpuset);
3599
	if (cfs_quota <= 0 || cfs_period <= 0) {
3601 if (nr_cpus_in_cpuset > 0)
3602 return nr_cpus_in_cpuset;
3603
3604 return 0;
3605 }
3606
3607 rv = cfs_quota / cfs_period;
3608
3609 /* In case quota/period does not yield a whole number, add one CPU for
3610 * the remainder.
3611 */
3612 if ((cfs_quota % cfs_period) > 0)
3613 rv += 1;
3614
3615 nprocs = get_nprocs();
3616
3617 if (rv > nprocs)
3618 rv = nprocs;
3619
3620 /* use min value in cpu quota and cpuset */
3621 if (nr_cpus_in_cpuset > 0 && nr_cpus_in_cpuset < rv)
3622 rv = nr_cpus_in_cpuset;
3623
3624 return rv;
3625 }
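/*
 * Worked example: with cfs_quota = 250000 and cfs_period = 100000 the
 * cgroup may use 2.5 CPUs, so rv starts at 250000 / 100000 = 2 and the
 * non-zero remainder bumps it to 3. The result is then clamped to the
 * number of online processors and, if smaller, to the number of CPUs in
 * the cpuset.
 */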
3626
3627 /*
3628 * Return the exact number of visible CPUs based on CPU quotas.
3629 * If there is no quota set, zero is returned.
3630 */
3631 static double exact_cpu_count(const char *cg)
3632 {
3633 double rv;
3634 int nprocs;
3635 int64_t cfs_quota, cfs_period;
3636
3637 if (!read_cpu_cfs_param(cg, "quota", &cfs_quota))
3638 return 0;
3639
3640 if (!read_cpu_cfs_param(cg, "period", &cfs_period))
3641 return 0;
3642
3643 if (cfs_quota <= 0 || cfs_period <= 0)
3644 return 0;
3645
3646 rv = (double)cfs_quota / (double)cfs_period;
3647
3648 nprocs = get_nprocs();
3649
3650 if (rv > nprocs)
3651 rv = nprocs;
3652
3653 return rv;
3654 }
3655
/*
 * check whether this is a "processor : N" line in /proc/cpuinfo
 */
3659 static bool is_processor_line(const char *line)
3660 {
3661 int cpu;
3662
3663 if (sscanf(line, "processor : %d", &cpu) == 1)
3664 return true;
3665 return false;
3666 }
3667
3668 static int proc_cpuinfo_read(char *buf, size_t size, off_t offset,
3669 struct fuse_file_info *fi)
3670 {
3671 __do_free char *cg = NULL, *cpuset = NULL, *line = NULL;
3672 __do_fclose FILE *f = NULL;
3673 struct fuse_context *fc = fuse_get_context();
3674 struct file_info *d = (struct file_info *)fi->fh;
3675 size_t linelen = 0, total_len = 0;
3676 bool am_printing = false, firstline = true, is_s390x = false;
3677 int curcpu = -1, cpu, max_cpus = 0;
3678 bool use_view;
3679 char *cache = d->buf;
3680 size_t cache_size = d->buflen;
3681
	if (offset) {
3683 int left;
3684
3685 if (offset > d->size)
3686 return -EINVAL;
3687
3688 if (!d->cached)
3689 return 0;
3690
3691 left = d->size - offset;
		total_len = left > size ? size : left;
3693 memcpy(buf, cache + offset, total_len);
3694
3695 return total_len;
3696 }
3697
3698 pid_t initpid = lookup_initpid_in_store(fc->pid);
3699 if (initpid <= 1 || is_shared_pidns(initpid))
3700 initpid = fc->pid;
3701 cg = get_pid_cgroup(initpid, "cpuset");
3702 if (!cg)
		return read_file_fuse("/proc/cpuinfo", buf, size, d);
3704 prune_init_slice(cg);
3705
3706 cpuset = get_cpuset(cg);
3707 if (!cpuset)
3708 return 0;
3709
3710 use_view = cgroup_ops->can_use_cpuview(cgroup_ops);
3711 if (use_view)
3712 max_cpus = max_cpu_count(cg);
3713
3714 f = fopen("/proc/cpuinfo", "r");
3715 if (!f)
3716 return 0;
3717
3718 while (getline(&line, &linelen, f) != -1) {
3719 ssize_t l;
3720 if (firstline) {
3721 firstline = false;
3722 if (strstr(line, "IBM/S390") != NULL) {
3723 is_s390x = true;
3724 am_printing = true;
3725 continue;
3726 }
3727 }
		if (strncmp(line, "# processors", 12) == 0)
3729 continue;
3730 if (is_processor_line(line)) {
3731 if (use_view && max_cpus > 0 && (curcpu+1) == max_cpus)
3732 break;
3733 am_printing = cpuline_in_cpuset(line, cpuset);
3734 if (am_printing) {
				curcpu++;
3736 l = snprintf(cache, cache_size, "processor : %d\n", curcpu);
3737 if (l < 0) {
3738 perror("Error writing to cache");
3739 return 0;
3740 }
3741 if (l >= cache_size) {
3742 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3743 return 0;
3744 }
3745 cache += l;
3746 cache_size -= l;
3747 total_len += l;
3748 }
3749 continue;
3750 } else if (is_s390x && sscanf(line, "processor %d:", &cpu) == 1) {
3751 char *p;
3752 if (use_view && max_cpus > 0 && (curcpu+1) == max_cpus)
3753 break;
3754 if (!cpu_in_cpuset(cpu, cpuset))
3755 continue;
			curcpu++;
3757 p = strchr(line, ':');
3758 if (!p || !*p)
3759 return 0;
3760 p++;
3761 l = snprintf(cache, cache_size, "processor %d:%s", curcpu, p);
3762 if (l < 0) {
3763 perror("Error writing to cache");
3764 return 0;
3765 }
3766 if (l >= cache_size) {
3767 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3768 return 0;
3769 }
3770 cache += l;
3771 cache_size -= l;
3772 total_len += l;
3773 continue;
3774
3775 }
3776 if (am_printing) {
3777 l = snprintf(cache, cache_size, "%s", line);
3778 if (l < 0) {
3779 perror("Error writing to cache");
3780 return 0;
3781 }
3782 if (l >= cache_size) {
3783 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3784 return 0;
3785 }
3786 cache += l;
3787 cache_size -= l;
3788 total_len += l;
3789 }
3790 }
3791
3792 if (is_s390x) {
3793 __do_free char *origcache = d->buf;
3794 ssize_t l;
3795
3796 d->buf = malloc(d->buflen);
3797 if (!d->buf) {
3798 d->buf = move_ptr(origcache);
3799 return 0;
3800 }
3801
3802 cache = d->buf;
3803 cache_size = d->buflen;
3804 total_len = 0;
3805 l = snprintf(cache, cache_size, "vendor_id : IBM/S390\n");
3806 if (l < 0 || l >= cache_size)
3807 return 0;
3808
3809 cache_size -= l;
3810 cache += l;
3811 total_len += l;
3812 l = snprintf(cache, cache_size, "# processors : %d\n", curcpu + 1);
3813 if (l < 0 || l >= cache_size)
3814 return 0;
3815
3816 cache_size -= l;
3817 cache += l;
3818 total_len += l;
3819 l = snprintf(cache, cache_size, "%s", origcache);
3820 if (l < 0 || l >= cache_size)
3821 return 0;
3822 total_len += l;
3823 }
3824
3825 d->cached = 1;
3826 d->size = total_len;
	if (total_len > size)
		total_len = size;
3828
3829 /* read from off 0 */
3830 memcpy(buf, d->buf, total_len);
3831 return total_len;
3832 }
3833
3834 static uint64_t get_reaper_start_time(pid_t pid)
3835 {
3836 int ret;
3837 FILE *f;
3838 uint64_t starttime;
3839 /* strlen("/proc/") = 6
3840 * +
3841 * LXCFS_NUMSTRLEN64
3842 * +
3843 * strlen("/stat") = 5
3844 * +
3845 * \0 = 1
3846 * */
3847 #define __PROC_PID_STAT_LEN (6 + LXCFS_NUMSTRLEN64 + 5 + 1)
3848 char path[__PROC_PID_STAT_LEN];
3849 pid_t qpid;
3850
3851 qpid = lookup_initpid_in_store(pid);
3852 if (qpid <= 0) {
3853 /* Caller can check for EINVAL on 0. */
3854 errno = EINVAL;
3855 return 0;
3856 }
3857
3858 ret = snprintf(path, __PROC_PID_STAT_LEN, "/proc/%d/stat", qpid);
3859 if (ret < 0 || ret >= __PROC_PID_STAT_LEN) {
3860 /* Caller can check for EINVAL on 0. */
3861 errno = EINVAL;
3862 return 0;
3863 }
3864
3865 f = fopen(path, "r");
3866 if (!f) {
3867 /* Caller can check for EINVAL on 0. */
3868 errno = EINVAL;
3869 return 0;
3870 }
3871
	/* Note that the *scanf() argument suppression requires that length
3873 * modifiers such as "l" are omitted. Otherwise some compilers will yell
3874 * at us. It's like telling someone you're not married and then asking
3875 * if you can bring your wife to the party.
3876 */
3877 ret = fscanf(f, "%*d " /* (1) pid %d */
3878 "%*s " /* (2) comm %s */
3879 "%*c " /* (3) state %c */
3880 "%*d " /* (4) ppid %d */
3881 "%*d " /* (5) pgrp %d */
3882 "%*d " /* (6) session %d */
3883 "%*d " /* (7) tty_nr %d */
3884 "%*d " /* (8) tpgid %d */
3885 "%*u " /* (9) flags %u */
3886 "%*u " /* (10) minflt %lu */
3887 "%*u " /* (11) cminflt %lu */
3888 "%*u " /* (12) majflt %lu */
3889 "%*u " /* (13) cmajflt %lu */
3890 "%*u " /* (14) utime %lu */
3891 "%*u " /* (15) stime %lu */
3892 "%*d " /* (16) cutime %ld */
3893 "%*d " /* (17) cstime %ld */
3894 "%*d " /* (18) priority %ld */
3895 "%*d " /* (19) nice %ld */
3896 "%*d " /* (20) num_threads %ld */
3897 "%*d " /* (21) itrealvalue %ld */
3898 "%" PRIu64, /* (22) starttime %llu */
3899 &starttime);
3900 if (ret != 1) {
3901 fclose(f);
3902 /* Caller can check for EINVAL on 0. */
3903 errno = EINVAL;
3904 return 0;
3905 }
3906
3907 fclose(f);
3908
3909 errno = 0;
3910 return starttime;
3911 }
3912
3913 static double get_reaper_start_time_in_sec(pid_t pid)
3914 {
3915 uint64_t clockticks, ticks_per_sec;
3916 int64_t ret;
3917 double res = 0;
3918
3919 clockticks = get_reaper_start_time(pid);
3920 if (clockticks == 0 && errno == EINVAL) {
3921 lxcfs_debug("failed to retrieve start time of pid %d\n", pid);
3922 return 0;
3923 }
3924
3925 ret = sysconf(_SC_CLK_TCK);
3926 if (ret < 0 && errno == EINVAL) {
3927 lxcfs_debug(
3928 "%s\n",
3929 "failed to determine number of clock ticks in a second");
3930 return 0;
3931 }
3932
3933 ticks_per_sec = (uint64_t)ret;
3934 res = (double)clockticks / ticks_per_sec;
3935 return res;
3936 }
3937
3938 static double get_reaper_age(pid_t pid)
3939 {
3940 uint64_t uptime_ms;
3941 double procstart, procage;
3942
	/* The reaper's start time is measured relative to system boot, so we
	 * subtract it from the current uptime to get the actual reaper age.
	 */
3947 procstart = get_reaper_start_time_in_sec(pid);
3948 procage = procstart;
3949 if (procstart > 0) {
3950 int ret;
3951 struct timespec spec;
3952
3953 ret = clock_gettime(CLOCK_BOOTTIME, &spec);
3954 if (ret < 0)
3955 return 0;
3956
		/* Convert the uptime to milliseconds, folding the nanosecond
		 * remainder of the timespec in as well, then subtract the
		 * reaper's start time (also converted to milliseconds).
		 */
		uptime_ms = (spec.tv_sec * 1000) + (spec.tv_nsec * 1e-6);
3963 procage = (uptime_ms - (procstart * 1000)) / 1000;
3964 }
3965
3966 return procage;
3967 }
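/*
 * Worked example: a reaper that started 1250 clock ticks after boot with
 * _SC_CLK_TCK == 100 has procstart == 12.5s; with a CLOCK_BOOTTIME uptime
 * of 100s its age comes out as (100000 - 12500) / 1000 = 87.5 seconds.
 */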
3968
3969 /*
3970 * Returns 0 on success.
3971 * It is the caller's responsibility to free `return_usage`, unless this
3972 * function returns an error.
3973 */
3974 static int read_cpuacct_usage_all(char *cg, char *cpuset, struct cpuacct_usage **return_usage, int *size)
3975 {
3976 __do_free char *usage_str = NULL;
3977 __do_free struct cpuacct_usage *cpu_usage = NULL;
3978 int cpucount = get_nprocs_conf();
	int read_pos = 0, read_cnt = 0;
3980 int i, j, ret;
3981 int cg_cpu;
3982 uint64_t cg_user, cg_system;
3983 int64_t ticks_per_sec;
3984
3985 ticks_per_sec = sysconf(_SC_CLK_TCK);
3986
3987 if (ticks_per_sec < 0 && errno == EINVAL) {
3988 lxcfs_v(
3989 "%s\n",
3990 "read_cpuacct_usage_all failed to determine number of clock ticks "
3991 "in a second");
3992 return -1;
3993 }
3994
3995 cpu_usage = malloc(sizeof(struct cpuacct_usage) * cpucount);
3996 if (!cpu_usage)
3997 return -ENOMEM;
3998
3999 memset(cpu_usage, 0, sizeof(struct cpuacct_usage) * cpucount);
4000 if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_all", &usage_str)) {
4001 char *data = NULL;
		int i = 0, read_pos = 0, read_cnt = 0;
4003 size_t sz = 0, asz = 0;
4004
4005 /* read cpuacct.usage_percpu instead. */
4006 lxcfs_v("failed to read cpuacct.usage_all. reading cpuacct.usage_percpu instead\n%s", "");
4007 if (!cgroup_ops->get(cgroup_ops, "cpuacct", cg, "cpuacct.usage_percpu", &usage_str))
4008 return -1;
4009 lxcfs_v("usage_str: %s\n", usage_str);
4010
4011 /* convert cpuacct.usage_percpu into cpuacct.usage_all. */
4012 lxcfs_v("converting cpuacct.usage_percpu into cpuacct.usage_all\n%s", "");
4013
4014 must_strcat(&data, &sz, &asz, "cpu user system\n");
4015
		while (sscanf(usage_str + read_pos, "%" SCNu64 " %n", &cg_user, &read_cnt) > 0) {
			lxcfs_debug("i: %d, cg_user: %" PRIu64 ", read_pos: %d, read_cnt: %d\n", i, cg_user, read_pos, read_cnt);
			must_strcat(&data, &sz, &asz, "%d %" PRIu64 " 0\n", i, cg_user);
4019 i++;
4020 read_pos += read_cnt;
4021 }
4022
4023 usage_str = data;
4024
4025 lxcfs_v("usage_str: %s\n", usage_str);
4026 }
4027
	/* sscanf() with only a %n directive returns 0 even on a matching
	 * failure, so detect a missing header via %n itself. */
	read_cnt = 0;
	sscanf(usage_str, "cpu user system\n%n", &read_cnt);
	if (read_cnt == 0) {
		lxcfs_error("read_cpuacct_usage_all reading first line from "
				"%s/cpuacct.usage_all failed.\n", cg);
		return -1;
	}
4033
4034 read_pos += read_cnt;
4035
4036 for (i = 0, j = 0; i < cpucount; i++) {
		ret = sscanf(usage_str + read_pos,
			     "%d %" SCNu64 " %" SCNu64 "\n%n",
			     &cg_cpu, &cg_user, &cg_system, &read_cnt);
4039
4040 if (ret == EOF)
4041 break;
4042
4043 if (ret != 3) {
4044 lxcfs_error("read_cpuacct_usage_all reading from %s/cpuacct.usage_all "
4045 "failed.\n", cg);
4046 return -1;
4047 }
4048
4049 read_pos += read_cnt;
4050
4051 /* Convert the time from nanoseconds to USER_HZ */
4052 cpu_usage[j].user = cg_user / 1000.0 / 1000 / 1000 * ticks_per_sec;
4053 cpu_usage[j].system = cg_system / 1000.0 / 1000 / 1000 * ticks_per_sec;
4054 j++;
4055 }
4056
4057 *return_usage = move_ptr(cpu_usage);
4058 *size = cpucount;
4059 return 0;
4060 }
4061
4062 static unsigned long diff_cpu_usage(struct cpuacct_usage *older, struct cpuacct_usage *newer, struct cpuacct_usage *diff, int cpu_count)
4063 {
4064 int i;
4065 unsigned long sum = 0;
4066
4067 for (i = 0; i < cpu_count; i++) {
4068 if (!newer[i].online)
4069 continue;
4070
		/* When cpuset is changed on the fly, the CPUs might get
		 * reordered. We could either reset all counters, or check
		 * that the subtractions below will return expected results.
		 */
4075 if (newer[i].user > older[i].user)
4076 diff[i].user = newer[i].user - older[i].user;
4077 else
4078 diff[i].user = 0;
4079
4080 if (newer[i].system > older[i].system)
4081 diff[i].system = newer[i].system - older[i].system;
4082 else
4083 diff[i].system = 0;
4084
4085 if (newer[i].idle > older[i].idle)
4086 diff[i].idle = newer[i].idle - older[i].idle;
4087 else
4088 diff[i].idle = 0;
4089
4090 sum += diff[i].user;
4091 sum += diff[i].system;
4092 sum += diff[i].idle;
4093 }
4094
4095 return sum;
4096 }
4097
4098 static void add_cpu_usage(unsigned long *surplus, struct cpuacct_usage *usage, unsigned long *counter, unsigned long threshold)
4099 {
4100 unsigned long free_space, to_add;
4101
4102 free_space = threshold - usage->user - usage->system;
4103
4104 if (free_space > usage->idle)
4105 free_space = usage->idle;
4106
4107 to_add = free_space > *surplus ? *surplus : free_space;
4108
4109 *counter += to_add;
4110 usage->idle -= to_add;
4111 *surplus -= to_add;
4112 }
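/*
 * Worked example: with threshold = 100, user = 30, system = 20, idle = 40
 * and *surplus = 60, free_space is 100 - 30 - 20 = 50, capped to
 * idle = 40; 40 ticks then move from idle into *counter and *surplus
 * drops to 20.
 */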
4113
4114 static struct cg_proc_stat *prune_proc_stat_list(struct cg_proc_stat *node)
4115 {
4116 struct cg_proc_stat *first = NULL, *prev, *tmp;
4117
4118 for (prev = NULL; node; ) {
4119 if (!cgfs_param_exist("cpu", node->cg, "cpu.shares")) {
4120 tmp = node;
4121 lxcfs_debug("Removing stat node for %s\n", node->cg);
4122
4123 if (prev)
4124 prev->next = node->next;
4125 else
4126 first = node->next;
4127
4128 node = node->next;
4129 free_proc_stat_node(tmp);
4130 } else {
4131 if (!first)
4132 first = node;
4133 prev = node;
4134 node = node->next;
4135 }
4136 }
4137
4138 return first;
4139 }
4140
4141 #define PROC_STAT_PRUNE_INTERVAL 10
4142 static void prune_proc_stat_history(void)
4143 {
4144 int i;
4145 time_t now = time(NULL);
4146
4147 for (i = 0; i < CPUVIEW_HASH_SIZE; i++) {
4148 pthread_rwlock_wrlock(&proc_stat_history[i]->lock);
4149
4150 if ((proc_stat_history[i]->lastcheck + PROC_STAT_PRUNE_INTERVAL) > now) {
4151 pthread_rwlock_unlock(&proc_stat_history[i]->lock);
4152 return;
4153 }
4154
4155 if (proc_stat_history[i]->next) {
4156 proc_stat_history[i]->next = prune_proc_stat_list(proc_stat_history[i]->next);
4157 proc_stat_history[i]->lastcheck = now;
4158 }
4159
4160 pthread_rwlock_unlock(&proc_stat_history[i]->lock);
4161 }
4162 }
4163
4164 static struct cg_proc_stat *find_proc_stat_node(struct cg_proc_stat_head *head, const char *cg)
4165 {
4166 struct cg_proc_stat *node;
4167
4168 pthread_rwlock_rdlock(&head->lock);
4169
4170 if (!head->next) {
4171 pthread_rwlock_unlock(&head->lock);
4172 return NULL;
4173 }
4174
4175 node = head->next;
4176
4177 do {
4178 if (strcmp(cg, node->cg) == 0)
4179 goto out;
4180 } while ((node = node->next));
4181
4182 node = NULL;
4183
4184 out:
4185 pthread_rwlock_unlock(&head->lock);
4186 prune_proc_stat_history();
4187 return node;
4188 }
4189
4190 static struct cg_proc_stat *new_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
4191 {
4192 struct cg_proc_stat *node;
4193 int i;
4194
4195 node = malloc(sizeof(struct cg_proc_stat));
4196 if (!node)
4197 goto err;
4198
4199 node->cg = NULL;
4200 node->usage = NULL;
4201 node->view = NULL;
4202
4203 node->cg = malloc(strlen(cg) + 1);
4204 if (!node->cg)
4205 goto err;
4206
4207 strcpy(node->cg, cg);
4208
4209 node->usage = malloc(sizeof(struct cpuacct_usage) * cpu_count);
4210 if (!node->usage)
4211 goto err;
4212
4213 memcpy(node->usage, usage, sizeof(struct cpuacct_usage) * cpu_count);
4214
4215 node->view = malloc(sizeof(struct cpuacct_usage) * cpu_count);
4216 if (!node->view)
4217 goto err;
4218
4219 node->cpu_count = cpu_count;
4220 node->next = NULL;
4221
4222 if (pthread_mutex_init(&node->lock, NULL) != 0) {
4223 lxcfs_error("%s\n", "Failed to initialize node lock");
4224 goto err;
4225 }
4226
4227 for (i = 0; i < cpu_count; i++) {
4228 node->view[i].user = 0;
4229 node->view[i].system = 0;
4230 node->view[i].idle = 0;
4231 }
4232
4233 return node;
4234
4235 err:
4236 if (node && node->cg)
4237 free(node->cg);
4238 if (node && node->usage)
4239 free(node->usage);
4240 if (node && node->view)
4241 free(node->view);
4242 if (node)
4243 free(node);
4244
4245 return NULL;
4246 }
4247
4248 static struct cg_proc_stat *add_proc_stat_node(struct cg_proc_stat *new_node)
4249 {
4250 int hash = calc_hash(new_node->cg) % CPUVIEW_HASH_SIZE;
4251 struct cg_proc_stat_head *head = proc_stat_history[hash];
4252 struct cg_proc_stat *node, *rv = new_node;
4253
4254 pthread_rwlock_wrlock(&head->lock);
4255
4256 if (!head->next) {
4257 head->next = new_node;
4258 goto out;
4259 }
4260
4261 node = head->next;
4262
4263 for (;;) {
4264 if (strcmp(node->cg, new_node->cg) == 0) {
4265 /* The node is already present, return it */
4266 free_proc_stat_node(new_node);
4267 rv = node;
4268 goto out;
4269 }
4270
4271 if (node->next) {
4272 node = node->next;
4273 continue;
4274 }
4275
4276 node->next = new_node;
4277 goto out;
4278 }
4279
4280 out:
4281 pthread_rwlock_unlock(&head->lock);
4282 return rv;
4283 }
4284
4285 static bool expand_proc_stat_node(struct cg_proc_stat *node, int cpu_count)
4286 {
4287 __do_free struct cpuacct_usage *new_usage = NULL, *new_view = NULL;
4288
4289 /* Allocate new memory */
4290 new_usage = malloc(sizeof(struct cpuacct_usage) * cpu_count);
4291 if (!new_usage)
4292 return false;
4293
4294 new_view = malloc(sizeof(struct cpuacct_usage) * cpu_count);
4295 if (!new_view)
4296 return false;
4297
4298 /* Copy existing data & initialize new elements */
4299 for (int i = 0; i < cpu_count; i++) {
4300 if (i < node->cpu_count) {
4301 new_usage[i].user = node->usage[i].user;
4302 new_usage[i].system = node->usage[i].system;
4303 new_usage[i].idle = node->usage[i].idle;
4304
4305 new_view[i].user = node->view[i].user;
4306 new_view[i].system = node->view[i].system;
4307 new_view[i].idle = node->view[i].idle;
4308 } else {
4309 new_usage[i].user = 0;
4310 new_usage[i].system = 0;
4311 new_usage[i].idle = 0;
4312
4313 new_view[i].user = 0;
4314 new_view[i].system = 0;
4315 new_view[i].idle = 0;
4316 }
4317 }
4318
4319 free(node->usage);
4320 node->usage = move_ptr(new_usage);
4321
4322 free(node->view);
4323 node->view = move_ptr(new_view);
4324 node->cpu_count = cpu_count;
4325
4326 return true;
4327 }
4328
4329 static struct cg_proc_stat *find_or_create_proc_stat_node(struct cpuacct_usage *usage, int cpu_count, const char *cg)
4330 {
4331 int hash = calc_hash(cg) % CPUVIEW_HASH_SIZE;
4332 struct cg_proc_stat_head *head = proc_stat_history[hash];
4333 struct cg_proc_stat *node;
4334
4335 node = find_proc_stat_node(head, cg);
4336
4337 if (!node) {
4338 node = new_proc_stat_node(usage, cpu_count, cg);
4339 if (!node)
4340 return NULL;
4341
4342 node = add_proc_stat_node(node);
4343 lxcfs_debug("New stat node (%d) for %s\n", cpu_count, cg);
4344 }
4345
4346 pthread_mutex_lock(&node->lock);
4347
4348 /* If additional CPUs on the host have been enabled, CPU usage counter
4349 * arrays have to be expanded */
4350 if (node->cpu_count < cpu_count) {
4351 lxcfs_debug("Expanding stat node %d->%d for %s\n",
4352 node->cpu_count, cpu_count, cg);
4353
4354 if (!expand_proc_stat_node(node, cpu_count)) {
4355 pthread_mutex_unlock(&node->lock);
4356 lxcfs_debug("Unable to expand stat node %d->%d for %s\n",
4357 node->cpu_count, cpu_count, cg);
4358 return NULL;
4359 }
4360 }
4361
4362 return node;
4363 }
4364
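/*
 * A minimal caller sketch for find_or_create_proc_stat_node() above: on
 * success the node is returned with node->lock held, so the caller must
 * drop the lock when done. example_touch_stat_node() is illustrative only.
 */
#if 0
static void example_touch_stat_node(struct cpuacct_usage *usage,
				    int cpu_count, const char *cg)
{
	struct cg_proc_stat *node;

	node = find_or_create_proc_stat_node(usage, cpu_count, cg);
	if (!node)
		return;

	/* ... read or update node->usage / node->view here ... */

	pthread_mutex_unlock(&node->lock);
}
#endif
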
4365 static void reset_proc_stat_node(struct cg_proc_stat *node, struct cpuacct_usage *usage, int cpu_count)
4366 {
4367 int i;
4368
4369 lxcfs_debug("Resetting stat node for %s\n", node->cg);
4370 memcpy(node->usage, usage, sizeof(struct cpuacct_usage) * cpu_count);
4371
4372 for (i = 0; i < cpu_count; i++) {
4373 node->view[i].user = 0;
4374 node->view[i].system = 0;
4375 node->view[i].idle = 0;
4376 }
4377
4378 node->cpu_count = cpu_count;
4379 }
4380
4381 static int cpuview_proc_stat(const char *cg, const char *cpuset,
4382 struct cpuacct_usage *cg_cpu_usage,
4383 int cg_cpu_usage_size, FILE *f, char *buf,
4384 size_t buf_size)
4385 {
4386 __do_free char *line = NULL;
4387 __do_free struct cpuacct_usage *diff = NULL;
4388 size_t linelen = 0, total_len = 0, l;
4389 int curcpu = -1; /* cpu numbering starts at 0 */
4390 int physcpu, i;
4391 int max_cpus = max_cpu_count(cg), cpu_cnt = 0;
4392 unsigned long user = 0, nice = 0, system = 0, idle = 0, iowait = 0,
4393 irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
4394 unsigned long user_sum = 0, system_sum = 0, idle_sum = 0;
4395 unsigned long user_surplus = 0, system_surplus = 0;
4396 unsigned long total_sum, threshold;
4397 struct cg_proc_stat *stat_node;
4398 int nprocs = get_nprocs_conf();
4399
4400 if (cg_cpu_usage_size < nprocs)
4401 nprocs = cg_cpu_usage_size;
4402
	/* Read all CPU stats and stop once we encounter a non-CPU line. */
4404 while (getline(&line, &linelen, f) != -1) {
4405 int ret;
4406 char cpu_char[10]; /* That's a lot of cores */
4407 uint64_t all_used, cg_used;
4408
4409 if (strlen(line) == 0)
4410 continue;
4411
4412 /* not a ^cpuN line containing a number N */
4413 if (sscanf(line, "cpu%9[^ ]", cpu_char) != 1)
4414 break;
4415
4416 if (sscanf(cpu_char, "%d", &physcpu) != 1)
4417 continue;
4418
4419 if (physcpu >= cg_cpu_usage_size)
4420 continue;
4421
		curcpu++;
		cpu_cnt++;
4424
4425 if (!cpu_in_cpuset(physcpu, cpuset)) {
4426 for (i = curcpu; i <= physcpu; i++)
4427 cg_cpu_usage[i].online = false;
4428 continue;
4429 }
4430
4431 if (curcpu < physcpu) {
4432 /* Some CPUs may be disabled */
4433 for (i = curcpu; i < physcpu; i++)
4434 cg_cpu_usage[i].online = false;
4435
4436 curcpu = physcpu;
4437 }
4438
4439 cg_cpu_usage[curcpu].online = true;
4440
4441 ret = sscanf(line, "%*s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
4442 &user,
4443 &nice,
4444 &system,
4445 &idle,
4446 &iowait,
4447 &irq,
4448 &softirq,
4449 &steal,
4450 &guest,
4451 &guest_nice);
4452
4453 if (ret != 10)
4454 continue;
4455
4456 all_used = user + nice + system + iowait + irq + softirq + steal + guest + guest_nice;
4457 cg_used = cg_cpu_usage[curcpu].user + cg_cpu_usage[curcpu].system;
4458
4459 if (all_used >= cg_used) {
4460 cg_cpu_usage[curcpu].idle = idle + (all_used - cg_used);
4461
4462 } else {
4463 lxcfs_error("cpu%d from %s has unexpected cpu time: %lu in /proc/stat, "
4464 "%lu in cpuacct.usage_all; unable to determine idle time\n",
4465 curcpu, cg, all_used, cg_used);
4466 cg_cpu_usage[curcpu].idle = idle;
4467 }
4468 }
4469
	/* Cannot use more CPUs than are available in the cpuset. */
4471 if (max_cpus > cpu_cnt)
4472 max_cpus = cpu_cnt;
4473
4474 stat_node = find_or_create_proc_stat_node(cg_cpu_usage, nprocs, cg);
4475
4476 if (!stat_node) {
4477 lxcfs_error("unable to find/create stat node for %s\n", cg);
4478 return 0;
4479 }
4480
	diff = malloc(sizeof(struct cpuacct_usage) * nprocs);
	if (!diff) {
		pthread_mutex_unlock(&stat_node->lock);
		return 0;
	}
4485
4486 /*
4487 * If the new values are LOWER than values stored in memory, it means
4488 * the cgroup has been reset/recreated and we should reset too.
4489 */
4490 for (curcpu = 0; curcpu < nprocs; curcpu++) {
4491 if (!cg_cpu_usage[curcpu].online)
4492 continue;
4493
4494 if (cg_cpu_usage[curcpu].user < stat_node->usage[curcpu].user)
4495 reset_proc_stat_node(stat_node, cg_cpu_usage, nprocs);
4496
4497 break;
4498 }
4499
4500 total_sum = diff_cpu_usage(stat_node->usage, cg_cpu_usage, diff, nprocs);
4501
4502 for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
4503 stat_node->usage[curcpu].online = cg_cpu_usage[curcpu].online;
4504
4505 if (!stat_node->usage[curcpu].online)
4506 continue;
4507
4508 i++;
4509
4510 stat_node->usage[curcpu].user += diff[curcpu].user;
4511 stat_node->usage[curcpu].system += diff[curcpu].system;
4512 stat_node->usage[curcpu].idle += diff[curcpu].idle;
4513
4514 if (max_cpus > 0 && i >= max_cpus) {
4515 user_surplus += diff[curcpu].user;
4516 system_surplus += diff[curcpu].system;
4517 }
4518 }
4519
4520 /* Calculate usage counters of visible CPUs */
4521 if (max_cpus > 0) {
4522 unsigned long diff_user = 0;
4523 unsigned long diff_system = 0;
4524 unsigned long diff_idle = 0;
4525 unsigned long max_diff_idle = 0;
4526 unsigned long max_diff_idle_index = 0;
4527 double exact_cpus;
4528
4529 /* threshold = maximum usage per cpu, including idle */
4530 threshold = total_sum / cpu_cnt * max_cpus;
4531
4532 for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
4533 if (!stat_node->usage[curcpu].online)
4534 continue;
4535
4536 i++;
4537
4538 if (i == max_cpus)
4539 break;
4540
4541 if (diff[curcpu].user + diff[curcpu].system >= threshold)
4542 continue;
4543
4544 /* Add user */
4545 add_cpu_usage(&user_surplus, &diff[curcpu],
4546 &diff[curcpu].user, threshold);
4547
4548 if (diff[curcpu].user + diff[curcpu].system >= threshold)
4549 continue;
4550
4551 /* If there is still room, add system */
4552 add_cpu_usage(&system_surplus, &diff[curcpu],
4553 &diff[curcpu].system, threshold);
4554 }
4555
4556 if (user_surplus > 0)
4557 lxcfs_debug("leftover user: %lu for %s\n", user_surplus, cg);
4558 if (system_surplus > 0)
4559 lxcfs_debug("leftover system: %lu for %s\n", system_surplus, cg);
4560
4561 for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
4562 if (!stat_node->usage[curcpu].online)
4563 continue;
4564
4565 i++;
4566
4567 if (i == max_cpus)
4568 break;
4569
4570 stat_node->view[curcpu].user += diff[curcpu].user;
4571 stat_node->view[curcpu].system += diff[curcpu].system;
4572 stat_node->view[curcpu].idle += diff[curcpu].idle;
4573
4574 user_sum += stat_node->view[curcpu].user;
4575 system_sum += stat_node->view[curcpu].system;
4576 idle_sum += stat_node->view[curcpu].idle;
4577
4578 diff_user += diff[curcpu].user;
4579 diff_system += diff[curcpu].system;
4580 diff_idle += diff[curcpu].idle;
4581 if (diff[curcpu].idle > max_diff_idle) {
4582 max_diff_idle = diff[curcpu].idle;
4583 max_diff_idle_index = curcpu;
4584 }
4585
4586 lxcfs_v("curcpu: %d, diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", curcpu, diff[curcpu].user, diff[curcpu].system, diff[curcpu].idle);
4587 }
4588 lxcfs_v("total. diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", diff_user, diff_system, diff_idle);
4589
		/* Revise the CPU usage view to handle fractional CPU limits. */
		exact_cpus = exact_cpu_count(cg);
		if (exact_cpus < (double)max_cpus) {
4593 unsigned long delta = (unsigned long)((double)(diff_user + diff_system + diff_idle) * (1 - exact_cpus / (double)max_cpus));
4594
4595 lxcfs_v("revising cpu usage view to match the exact cpu count [%f]\n", exact_cpus);
4596 lxcfs_v("delta: %lu\n", delta);
4597 lxcfs_v("idle_sum before: %lu\n", idle_sum);
4598 idle_sum = idle_sum > delta ? idle_sum - delta : 0;
4599 lxcfs_v("idle_sum after: %lu\n", idle_sum);
4600
4601 curcpu = max_diff_idle_index;
4602 lxcfs_v("curcpu: %d, idle before: %lu\n", curcpu, stat_node->view[curcpu].idle);
4603 stat_node->view[curcpu].idle = stat_node->view[curcpu].idle > delta ? stat_node->view[curcpu].idle - delta : 0;
4604 lxcfs_v("curcpu: %d, idle after: %lu\n", curcpu, stat_node->view[curcpu].idle);
4605 }
4606 } else {
4607 for (curcpu = 0; curcpu < nprocs; curcpu++) {
4608 if (!stat_node->usage[curcpu].online)
4609 continue;
4610
4611 stat_node->view[curcpu].user = stat_node->usage[curcpu].user;
4612 stat_node->view[curcpu].system = stat_node->usage[curcpu].system;
4613 stat_node->view[curcpu].idle = stat_node->usage[curcpu].idle;
4614
4615 user_sum += stat_node->view[curcpu].user;
4616 system_sum += stat_node->view[curcpu].system;
4617 idle_sum += stat_node->view[curcpu].idle;
4618 }
4619 }
4620
4621 /* Render the file */
4622 /* cpu-all */
4623 l = snprintf(buf, buf_size, "cpu %lu 0 %lu %lu 0 0 0 0 0 0\n",
4624 user_sum,
4625 system_sum,
4626 idle_sum);
4627 lxcfs_v("cpu-all: %s\n", buf);
4628
	if (l < 0) {
		perror("Error writing to cache");
		pthread_mutex_unlock(&stat_node->lock);
		return 0;
	}
	if (l >= buf_size) {
		lxcfs_error("%s\n", "Internal error: truncated write to cache.");
		pthread_mutex_unlock(&stat_node->lock);
		return 0;
	}
4637
4638 buf += l;
4639 buf_size -= l;
4640 total_len += l;
4641
4642 /* Render visible CPUs */
4643 for (curcpu = 0, i = -1; curcpu < nprocs; curcpu++) {
4644 if (!stat_node->usage[curcpu].online)
4645 continue;
4646
4647 i++;
4648
4649 if (max_cpus > 0 && i == max_cpus)
4650 break;
4651
4652 l = snprintf(buf, buf_size, "cpu%d %lu 0 %lu %lu 0 0 0 0 0 0\n",
4653 i,
4654 stat_node->view[curcpu].user,
4655 stat_node->view[curcpu].system,
4656 stat_node->view[curcpu].idle);
4657 lxcfs_v("cpu: %s\n", buf);
4658
		if (l < 0) {
			perror("Error writing to cache");
			pthread_mutex_unlock(&stat_node->lock);
			return 0;
		}
		if (l >= buf_size) {
			lxcfs_error("%s\n", "Internal error: truncated write to cache.");
			pthread_mutex_unlock(&stat_node->lock);
			return 0;
		}
4668
4669 buf += l;
4670 buf_size -= l;
4671 total_len += l;
4672 }
4673
4674 /* Pass the rest of /proc/stat, start with the last line read */
4675 l = snprintf(buf, buf_size, "%s", line);
4676
	if (l < 0) {
		perror("Error writing to cache");
		pthread_mutex_unlock(&stat_node->lock);
		return 0;
	}
	if (l >= buf_size) {
		lxcfs_error("%s\n", "Internal error: truncated write to cache.");
		pthread_mutex_unlock(&stat_node->lock);
		return 0;
	}
4686
4687 buf += l;
4688 buf_size -= l;
4689 total_len += l;
4690
4691 /* Pass the rest of the host's /proc/stat */
4692 while (getline(&line, &linelen, f) != -1) {
4693 l = snprintf(buf, buf_size, "%s", line);
		if (l < 0) {
			perror("Error writing to cache");
			pthread_mutex_unlock(&stat_node->lock);
			return 0;
		}
		if (l >= buf_size) {
			lxcfs_error("%s\n", "Internal error: truncated write to cache.");
			pthread_mutex_unlock(&stat_node->lock);
			return 0;
		}
4702 buf += l;
4703 buf_size -= l;
4704 total_len += l;
4705 }
4706
	pthread_mutex_unlock(&stat_node->lock);
4709 return total_len;
4710 }
4711
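/*
 * A worked example of the threshold computation in cpuview_proc_stat()
 * above, with assumed numbers: given total_sum = 4000 ticks over
 * cpu_cnt = 4 online CPUs and max_cpus = 2 visible CPUs, each visible CPU
 * may absorb up to total_sum / cpu_cnt * max_cpus = 2000 ticks of
 * user + system time before the remainder is left in the surplus counters.
 */
#if 0
static void example_threshold(void)
{
	unsigned long total_sum = 4000, threshold;
	int cpu_cnt = 4, max_cpus = 2;

	threshold = total_sum / cpu_cnt * max_cpus;
	printf("threshold: %lu\n", threshold); /* prints "threshold: 2000" */
}
#endif
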
4712 #define CPUALL_MAX_SIZE (BUF_RESERVE_SIZE / 2)
4713 static int proc_stat_read(char *buf, size_t size, off_t offset,
4714 struct fuse_file_info *fi)
4715 {
4716 __do_free char *cg = NULL, *cpuset = NULL, *line = NULL;
4717 __do_free struct cpuacct_usage *cg_cpu_usage = NULL;
4718 __do_fclose FILE *f = NULL;
4719 struct fuse_context *fc = fuse_get_context();
4720 struct file_info *d = (struct file_info *)fi->fh;
4721 size_t linelen = 0, total_len = 0;
4722 int curcpu = -1; /* cpu numbering starts at 0 */
4723 int physcpu = 0;
4724 unsigned long user = 0, nice = 0, system = 0, idle = 0, iowait = 0,
4725 irq = 0, softirq = 0, steal = 0, guest = 0, guest_nice = 0;
4726 unsigned long user_sum = 0, nice_sum = 0, system_sum = 0, idle_sum = 0,
4727 iowait_sum = 0, irq_sum = 0, softirq_sum = 0,
4728 steal_sum = 0, guest_sum = 0, guest_nice_sum = 0;
4729 char cpuall[CPUALL_MAX_SIZE];
4730 /* reserve for cpu all */
4731 char *cache = d->buf + CPUALL_MAX_SIZE;
4732 size_t cache_size = d->buflen - CPUALL_MAX_SIZE;
4733 int cg_cpu_usage_size = 0;
4734
4735 if (offset){
4736 if (offset > d->size)
4737 return -EINVAL;
4738 if (!d->cached)
4739 return 0;
4740 int left = d->size - offset;
4741 total_len = left > size ? size: left;
4742 memcpy(buf, d->buf + offset, total_len);
4743 return total_len;
4744 }
4745
4746 pid_t initpid = lookup_initpid_in_store(fc->pid);
4747 lxcfs_v("initpid: %d\n", initpid);
4748 if (initpid <= 0)
4749 initpid = fc->pid;
4750
	/*
	 * When a container runs in the host's pid namespace, initpid == 1 and
	 * its cgroup is "/". In that case return the host's /proc/stat
	 * contents unchanged; for "/", cpuacct.usage_all can report more time
	 * than /proc/stat, which would skew the calculation below.
	 */
4756 if (initpid == 1) {
4757 return read_file_fuse("/proc/stat", buf, size, d);
4758 }
4759
4760 cg = get_pid_cgroup(initpid, "cpuset");
4761 lxcfs_v("cg: %s\n", cg);
4762 if (!cg)
4763 return read_file_fuse("/proc/stat", buf, size, d);
4764 prune_init_slice(cg);
4765
4766 cpuset = get_cpuset(cg);
4767 if (!cpuset)
4768 return 0;
4769
4770 /*
4771 * Read cpuacct.usage_all for all CPUs.
4772 * If the cpuacct cgroup is present, it is used to calculate the container's
4773 * CPU usage. If not, values from the host's /proc/stat are used.
4774 */
4775 if (read_cpuacct_usage_all(cg, cpuset, &cg_cpu_usage, &cg_cpu_usage_size) != 0) {
4776 lxcfs_v("%s\n", "proc_stat_read failed to read from cpuacct, "
4777 "falling back to the host's /proc/stat");
4778 }
4779
4780 f = fopen("/proc/stat", "r");
4781 if (!f)
4782 return 0;
4783
	/* Skip the first line (the aggregated "cpu" line); it is regenerated below. */
4785 if (getline(&line, &linelen, f) < 0) {
4786 lxcfs_error("%s\n", "proc_stat_read read first line failed.");
4787 return 0;
4788 }
4789
4790 if (cgroup_ops->can_use_cpuview(cgroup_ops) && cg_cpu_usage) {
4791 total_len = cpuview_proc_stat(cg, cpuset, cg_cpu_usage, cg_cpu_usage_size,
4792 f, d->buf, d->buflen);
4793 goto out;
4794 }
4795
4796 while (getline(&line, &linelen, f) != -1) {
4797 ssize_t l;
4798 char cpu_char[10]; /* That's a lot of cores */
4799 char *c;
4800 uint64_t all_used, cg_used, new_idle;
4801 int ret;
4802
4803 if (strlen(line) == 0)
4804 continue;
4805 if (sscanf(line, "cpu%9[^ ]", cpu_char) != 1) {
4806 /* not a ^cpuN line containing a number N, just print it */
4807 l = snprintf(cache, cache_size, "%s", line);
4808 if (l < 0) {
4809 perror("Error writing to cache");
4810 return 0;
4811 }
4812 if (l >= cache_size) {
4813 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4814 return 0;
4815 }
4816 cache += l;
4817 cache_size -= l;
4818 total_len += l;
4819 continue;
4820 }
4821
4822 if (sscanf(cpu_char, "%d", &physcpu) != 1)
4823 continue;
4824 if (!cpu_in_cpuset(physcpu, cpuset))
4825 continue;
4826 curcpu ++;
4827
4828 ret = sscanf(line, "%*s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
4829 &user,
4830 &nice,
4831 &system,
4832 &idle,
4833 &iowait,
4834 &irq,
4835 &softirq,
4836 &steal,
4837 &guest,
4838 &guest_nice);
4839
4840 if (ret != 10 || !cg_cpu_usage) {
4841 c = strchr(line, ' ');
4842 if (!c)
4843 continue;
4844 l = snprintf(cache, cache_size, "cpu%d%s", curcpu, c);
4845 if (l < 0) {
4846 perror("Error writing to cache");
4847 return 0;
4848
4849 }
4850 if (l >= cache_size) {
4851 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4852 return 0;
4853 }
4854
4855 cache += l;
4856 cache_size -= l;
4857 total_len += l;
4858
4859 if (ret != 10)
4860 continue;
4861 }
4862
4863 if (cg_cpu_usage) {
4864 if (physcpu >= cg_cpu_usage_size)
4865 break;
4866
4867 all_used = user + nice + system + iowait + irq + softirq + steal + guest + guest_nice;
4868 cg_used = cg_cpu_usage[physcpu].user + cg_cpu_usage[physcpu].system;
4869
4870 if (all_used >= cg_used) {
4871 new_idle = idle + (all_used - cg_used);
4872
4873 } else {
4874 lxcfs_error("cpu%d from %s has unexpected cpu time: %lu in /proc/stat, "
4875 "%lu in cpuacct.usage_all; unable to determine idle time\n",
4876 curcpu, cg, all_used, cg_used);
4877 new_idle = idle;
4878 }
4879
4880 l = snprintf(cache, cache_size, "cpu%d %lu 0 %lu %lu 0 0 0 0 0 0\n",
4881 curcpu, cg_cpu_usage[physcpu].user, cg_cpu_usage[physcpu].system,
4882 new_idle);
4883
4884 if (l < 0) {
4885 perror("Error writing to cache");
4886 return 0;
4887
4888 }
4889 if (l >= cache_size) {
4890 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4891 return 0;
4892 }
4893
4894 cache += l;
4895 cache_size -= l;
4896 total_len += l;
4897
4898 user_sum += cg_cpu_usage[physcpu].user;
4899 system_sum += cg_cpu_usage[physcpu].system;
4900 idle_sum += new_idle;
4901
4902 } else {
4903 user_sum += user;
4904 nice_sum += nice;
4905 system_sum += system;
4906 idle_sum += idle;
4907 iowait_sum += iowait;
4908 irq_sum += irq;
4909 softirq_sum += softirq;
4910 steal_sum += steal;
4911 guest_sum += guest;
4912 guest_nice_sum += guest_nice;
4913 }
4914 }
4915
4916 cache = d->buf;
4917
4918 int cpuall_len = snprintf(cpuall, CPUALL_MAX_SIZE, "cpu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
4919 user_sum,
4920 nice_sum,
4921 system_sum,
4922 idle_sum,
4923 iowait_sum,
4924 irq_sum,
4925 softirq_sum,
4926 steal_sum,
4927 guest_sum,
4928 guest_nice_sum);
4929 if (cpuall_len > 0 && cpuall_len < CPUALL_MAX_SIZE) {
4930 memcpy(cache, cpuall, cpuall_len);
4931 cache += cpuall_len;
4932 } else {
4933 /* shouldn't happen */
4934 lxcfs_error("proc_stat_read copy cpuall failed, cpuall_len=%d.", cpuall_len);
4935 cpuall_len = 0;
4936 }
4937
4938 memmove(cache, d->buf + CPUALL_MAX_SIZE, total_len);
4939 total_len += cpuall_len;
4940
4941 out:
4942 d->cached = 1;
4943 d->size = total_len;
4944 if (total_len > size)
4945 total_len = size;
4946
4947 memcpy(buf, d->buf, total_len);
4948 return total_len;
4949 }
4950
/* This function retrieves the busy time of a group of tasks by looking at
 * cpuacct.usage. Unfortunately, this only makes sense when the container has
 * been given its own cpuacct cgroup. If not, this function will take the busy
 * time of all other tasks that do not actually belong to the container into
 * account as well. If someone has a clever solution for this please send a
 * patch!
 */
4958 static double get_reaper_busy(pid_t task)
4959 {
4960 __do_free char *cgroup = NULL, *usage_str = NULL;
4961 unsigned long usage = 0;
4962 pid_t initpid;
4963
4964 initpid = lookup_initpid_in_store(task);
4965 if (initpid <= 0)
4966 return 0;
4967
4968 cgroup = get_pid_cgroup(initpid, "cpuacct");
4969 if (!cgroup)
4970 return 0;
4971 prune_init_slice(cgroup);
4972 if (!cgroup_ops->get(cgroup_ops, "cpuacct", cgroup, "cpuacct.usage",
4973 &usage_str))
4974 return 0;
4975
4976 usage = strtoul(usage_str, NULL, 10);
4977 return ((double)usage / 1000000000);
4978 }
4979
4980 #if RELOADTEST
4981 void iwashere(void)
4982 {
4983 int fd;
4984
4985 fd = creat("/tmp/lxcfs-iwashere", 0644);
4986 if (fd >= 0)
4987 close(fd);
4988 }
4989 #endif
4990
/*
 * Virtualize /proc/uptime: the first field is the age of the calling pid's
 * reaper as returned by get_reaper_age(), the second is that age minus the
 * reaper's busy time as returned by get_reaper_busy().
 */
4996 static int proc_uptime_read(char *buf, size_t size, off_t offset,
4997 struct fuse_file_info *fi)
4998 {
4999 struct fuse_context *fc = fuse_get_context();
5000 struct file_info *d = (struct file_info *)fi->fh;
5001 double busytime = get_reaper_busy(fc->pid);
5002 char *cache = d->buf;
5003 ssize_t total_len = 0;
5004 double idletime, reaperage;
5005
5006 #if RELOADTEST
5007 iwashere();
5008 #endif
5009
5010 if (offset){
5011 if (!d->cached)
5012 return 0;
5013 if (offset > d->size)
5014 return -EINVAL;
5015 int left = d->size - offset;
5016 total_len = left > size ? size: left;
5017 memcpy(buf, cache + offset, total_len);
5018 return total_len;
5019 }
5020
5021 reaperage = get_reaper_age(fc->pid);
5022 /* To understand why this is done, please read the comment to the
5023 * get_reaper_busy() function.
5024 */
5025 idletime = reaperage;
5026 if (reaperage >= busytime)
5027 idletime = reaperage - busytime;
5028
5029 total_len = snprintf(d->buf, d->buflen, "%.2lf %.2lf\n", reaperage, idletime);
5030 if (total_len < 0 || total_len >= d->buflen){
5031 lxcfs_error("%s\n", "failed to write to cache");
5032 return 0;
5033 }
5034
5035 d->size = (int)total_len;
5036 d->cached = 1;
5037
5038 if (total_len > size) total_len = size;
5039
5040 memcpy(buf, d->buf, total_len);
5041 return total_len;
5042 }
5043
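/*
 * From inside a container the two fields rendered above read back just like
 * the kernel's /proc/uptime. A minimal consumer sketch, illustrative and
 * not part of lxcfs:
 */
#if 0
static int example_read_uptime(double *up, double *idle)
{
	FILE *f = fopen("/proc/uptime", "r");
	int ret;

	if (!f)
		return -1;
	ret = fscanf(f, "%lf %lf", up, idle);
	fclose(f);
	return ret == 2 ? 0 : -1;
}
#endif
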
5044 static int proc_diskstats_read(char *buf, size_t size, off_t offset,
5045 struct fuse_file_info *fi)
5046 {
5047 __do_free char *cg = NULL, *io_serviced_str = NULL,
5048 *io_merged_str = NULL, *io_service_bytes_str = NULL,
5049 *io_wait_time_str = NULL, *io_service_time_str = NULL,
5050 *line = NULL;
5051 __do_fclose FILE *f = NULL;
5052 struct fuse_context *fc = fuse_get_context();
5053 struct file_info *d = (struct file_info *)fi->fh;
5054 unsigned long read = 0, write = 0;
5055 unsigned long read_merged = 0, write_merged = 0;
5056 unsigned long read_sectors = 0, write_sectors = 0;
5057 unsigned long read_ticks = 0, write_ticks = 0;
5058 unsigned long ios_pgr = 0, tot_ticks = 0, rq_ticks = 0;
5059 unsigned long rd_svctm = 0, wr_svctm = 0, rd_wait = 0, wr_wait = 0;
5060 char *cache = d->buf;
5061 size_t cache_size = d->buflen;
5062 size_t linelen = 0, total_len = 0;
5063 unsigned int major = 0, minor = 0;
5064 int i = 0;
5065 int ret;
5066 char dev_name[72];
5067
5068 if (offset){
5069 int left;
5070
5071 if (offset > d->size)
5072 return -EINVAL;
5073
5074 if (!d->cached)
5075 return 0;
5076
5077 left = d->size - offset;
5078 total_len = left > size ? size: left;
5079 memcpy(buf, cache + offset, total_len);
5080
5081 return total_len;
5082 }
5083
5084 pid_t initpid = lookup_initpid_in_store(fc->pid);
5085 if (initpid <= 1 || is_shared_pidns(initpid))
5086 initpid = fc->pid;
5087 cg = get_pid_cgroup(initpid, "blkio");
5088 if (!cg)
5089 return read_file_fuse("/proc/diskstats", buf, size, d);
5090 prune_init_slice(cg);
5091
5092 ret = cgroup_ops->get_io_serviced(cgroup_ops, cg, &io_serviced_str);
5093 if (ret < 0) {
5094 if (ret == -EOPNOTSUPP)
5095 return read_file_fuse("/proc/diskstats", buf, size, d);
5096 }
5097
5098 ret = cgroup_ops->get_io_merged(cgroup_ops, cg, &io_merged_str);
5099 if (ret < 0) {
5100 if (ret == -EOPNOTSUPP)
5101 return read_file_fuse("/proc/diskstats", buf, size, d);
5102 }
5103
5104 ret = cgroup_ops->get_io_service_bytes(cgroup_ops, cg, &io_service_bytes_str);
5105 if (ret < 0) {
5106 if (ret == -EOPNOTSUPP)
5107 return read_file_fuse("/proc/diskstats", buf, size, d);
5108 }
5109
5110 ret = cgroup_ops->get_io_wait_time(cgroup_ops, cg, &io_wait_time_str);
5111 if (ret < 0) {
5112 if (ret == -EOPNOTSUPP)
5113 return read_file_fuse("/proc/diskstats", buf, size, d);
5114 }
5115
5116 ret = cgroup_ops->get_io_service_time(cgroup_ops, cg, &io_service_time_str);
5117 if (ret < 0) {
5118 if (ret == -EOPNOTSUPP)
5119 return read_file_fuse("/proc/diskstats", buf, size, d);
5120 }
5121
5122 f = fopen("/proc/diskstats", "r");
5123 if (!f)
5124 return 0;
5125
5126 while (getline(&line, &linelen, f) != -1) {
5127 ssize_t l;
5128 char lbuf[256];
5129
5130 i = sscanf(line, "%u %u %71s", &major, &minor, dev_name);
5131 if (i != 3)
5132 continue;
5133
5134 get_blkio_io_value(io_serviced_str, major, minor, "Read", &read);
5135 get_blkio_io_value(io_serviced_str, major, minor, "Write", &write);
5136 get_blkio_io_value(io_merged_str, major, minor, "Read", &read_merged);
5137 get_blkio_io_value(io_merged_str, major, minor, "Write", &write_merged);
5138 get_blkio_io_value(io_service_bytes_str, major, minor, "Read", &read_sectors);
5139 read_sectors = read_sectors/512;
5140 get_blkio_io_value(io_service_bytes_str, major, minor, "Write", &write_sectors);
5141 write_sectors = write_sectors/512;
5142
5143 get_blkio_io_value(io_service_time_str, major, minor, "Read", &rd_svctm);
5144 rd_svctm = rd_svctm/1000000;
5145 get_blkio_io_value(io_wait_time_str, major, minor, "Read", &rd_wait);
5146 rd_wait = rd_wait/1000000;
5147 read_ticks = rd_svctm + rd_wait;
5148
5149 get_blkio_io_value(io_service_time_str, major, minor, "Write", &wr_svctm);
5150 wr_svctm = wr_svctm/1000000;
5151 get_blkio_io_value(io_wait_time_str, major, minor, "Write", &wr_wait);
5152 wr_wait = wr_wait/1000000;
5153 write_ticks = wr_svctm + wr_wait;
5154
5155 get_blkio_io_value(io_service_time_str, major, minor, "Total", &tot_ticks);
5156 tot_ticks = tot_ticks/1000000;
5157
5158 memset(lbuf, 0, 256);
5159 if (read || write || read_merged || write_merged || read_sectors || write_sectors || read_ticks || write_ticks)
5160 snprintf(lbuf, 256, "%u %u %s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
5161 major, minor, dev_name, read, read_merged, read_sectors, read_ticks,
5162 write, write_merged, write_sectors, write_ticks, ios_pgr, tot_ticks, rq_ticks);
5163 else
5164 continue;
5165
5166 l = snprintf(cache, cache_size, "%s", lbuf);
5167 if (l < 0) {
5168 perror("Error writing to fuse buf");
5169 return 0;
5170 }
5171 if (l >= cache_size) {
5172 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
5173 return 0;
5174 }
5175 cache += l;
5176 cache_size -= l;
5177 total_len += l;
5178 }
5179
5180 d->cached = 1;
5181 d->size = total_len;
	if (total_len > size) total_len = size;
5183 memcpy(buf, d->buf, total_len);
5184
5185 return total_len;
5186 }
5187
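/*
 * The line rendered above follows the canonical /proc/diskstats layout:
 * major, minor and device name followed by eleven counters. A minimal
 * parser sketch for that layout, illustrative only:
 */
#if 0
static int example_parse_diskstats_line(const char *line)
{
	unsigned int major, minor;
	char dev_name[72];
	unsigned long rd_ios, rd_merges, rd_sectors, rd_ticks;
	unsigned long wr_ios, wr_merges, wr_sectors, wr_ticks;
	unsigned long ios_pgr, tot_ticks, rq_ticks;

	return sscanf(line,
		      "%u %u %71s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
		      &major, &minor, dev_name,
		      &rd_ios, &rd_merges, &rd_sectors, &rd_ticks,
		      &wr_ios, &wr_merges, &wr_sectors, &wr_ticks,
		      &ios_pgr, &tot_ticks, &rq_ticks) == 14 ? 0 : -1;
}
#endif
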
5188 static int proc_swaps_read(char *buf, size_t size, off_t offset,
5189 struct fuse_file_info *fi)
5190 {
5191 __do_free char *cg = NULL, *memswlimit_str = NULL, *memusage_str = NULL,
5192 *memswusage_str = NULL;
5193 struct fuse_context *fc = fuse_get_context();
5194 struct file_info *d = (struct file_info *)fi->fh;
5195 unsigned long memswlimit = 0, memlimit = 0, memusage = 0,
5196 memswusage = 0, swap_total = 0, swap_free = 0;
5197 ssize_t total_len = 0;
5198 ssize_t l = 0;
5199 char *cache = d->buf;
5200 int ret;
5201
5202 if (offset) {
5203 int left;
5204
5205 if (offset > d->size)
5206 return -EINVAL;
5207
5208 if (!d->cached)
5209 return 0;
5210
5211 left = d->size - offset;
5212 total_len = left > size ? size: left;
5213 memcpy(buf, cache + offset, total_len);
5214
5215 return total_len;
5216 }
5217
5218 pid_t initpid = lookup_initpid_in_store(fc->pid);
5219 if (initpid <= 1 || is_shared_pidns(initpid))
5220 initpid = fc->pid;
5221 cg = get_pid_cgroup(initpid, "memory");
5222 if (!cg)
5223 return read_file_fuse("/proc/swaps", buf, size, d);
5224 prune_init_slice(cg);
5225
5226 memlimit = get_min_memlimit(cg, false);
5227
5228 ret = cgroup_ops->get_memory_current(cgroup_ops, cg, &memusage_str);
5229 if (ret < 0)
5230 return 0;
5231
5232 memusage = strtoul(memusage_str, NULL, 10);
5233
5234 ret = cgroup_ops->get_memory_swap_max(cgroup_ops, cg, &memswlimit_str);
5235 if (ret >= 0)
5236 ret = cgroup_ops->get_memory_swap_current(cgroup_ops, cg, &memswusage_str);
5237 if (ret >= 0) {
5238 memswlimit = get_min_memlimit(cg, true);
5239 memswusage = strtoul(memswusage_str, NULL, 10);
5240 swap_total = (memswlimit - memlimit) / 1024;
5241 swap_free = (memswusage - memusage) / 1024;
5242 }
5243
	total_len = snprintf(d->buf, d->buflen, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
5245
5246 /* When no mem + swap limit is specified or swapaccount=0*/
5247 if (!memswlimit) {
5248 __do_free char *line = NULL;
5249 __do_fclose FILE *f = NULL;
5250 size_t linelen = 0;
5251
5252 f = fopen("/proc/meminfo", "r");
5253 if (!f)
5254 return 0;
5255
5256 while (getline(&line, &linelen, f) != -1) {
5257 if (startswith(line, "SwapTotal:"))
5258 sscanf(line, "SwapTotal: %8lu kB", &swap_total);
5259 else if (startswith(line, "SwapFree:"))
5260 sscanf(line, "SwapFree: %8lu kB", &swap_free);
5261 }
5262 }
5263
5264 if (swap_total > 0) {
		l = snprintf(d->buf + total_len, d->buflen - total_len,
5266 "none%*svirtual\t\t%lu\t%lu\t0\n", 36, " ",
5267 swap_total, swap_free);
5268 total_len += l;
5269 }
5270
5271 if (total_len < 0 || l < 0) {
5272 perror("Error writing to cache");
5273 return 0;
5274 }
5275
5276 d->cached = 1;
5277 d->size = (int)total_len;
5278
5279 if (total_len > size) total_len = size;
5280 memcpy(buf, d->buf, total_len);
5281 return total_len;
5282 }
5283
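/*
 * The swap arithmetic above condensed into a pure helper, assuming
 * byte-denominated cgroup values; the results are in KiB as in /proc/swaps.
 * Note that the second value is the swap actually in use (the code above
 * keeps it in a variable named swap_free). example_swap_view() is
 * illustrative only.
 */
#if 0
static void example_swap_view(unsigned long memswlimit, unsigned long memlimit,
			      unsigned long memswusage, unsigned long memusage,
			      unsigned long *swap_total, unsigned long *swap_used)
{
	/* The mem+swap limit minus the memory limit leaves the swap-only part. */
	*swap_total = (memswlimit - memlimit) / 1024;
	*swap_used = (memswusage - memusage) / 1024;
}
#endif
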
/*
 * Collect the PIDs of the processes in a cgroup by walking its cgroup.procs
 * files, e.g. /sys/fs/cgroup/cpu/docker/containerid/cgroup.procs.
 * @pid_buf : receives the PIDs, one string per entry.
 * @dpath   : the cgroup path, e.g. /docker/containerid or
 *            /docker/containerid/child-cgroup ...
 * @depth   : how many levels of child cgroups to descend.
 * @sum     : the number of PIDs collected so far; the new total is returned.
 * @cfd     : the file descriptor of the mounted cgroup, e.g. /sys/fs/cgroup/cpu
 */
5293 static int calc_pid(char ***pid_buf, char *dpath, int depth, int sum, int cfd)
5294 {
5295 __do_free char *path = NULL;
5296 __do_close_prot_errno int fd = -EBADF;
5297 __do_fclose FILE *f = NULL;
5298 __do_closedir DIR *dir = NULL;
5299 struct dirent *file;
5300 size_t linelen = 0;
	__do_free char *line = NULL;
5302 int pd;
5303 char **pid;
5304
	/* path = dpath + "/cgroup.procs" + '\0' */
5306 path = malloc(strlen(dpath) + 20);
5307 if (!path)
5308 return sum;
5309
5310 strcpy(path, dpath);
5311 fd = openat(cfd, path, O_RDONLY | O_CLOEXEC | O_NOFOLLOW);
5312 if (fd < 0)
5313 return sum;
5314
5315 dir = fdopendir(move_fd(fd));
5316 if (!dir)
5317 return sum;
5318
5319 while (((file = readdir(dir)) != NULL) && depth > 0) {
5320 if (strcmp(file->d_name, ".") == 0)
5321 continue;
5322
5323 if (strcmp(file->d_name, "..") == 0)
5324 continue;
5325
5326 if (file->d_type == DT_DIR) {
5327 __do_free char *path_dir = NULL;
5328
			/* path + '/' + d_name + '\0' */
5330 path_dir = malloc(strlen(path) + 2 + sizeof(file->d_name));
5331 if (!path_dir)
5332 return sum;
5333
5334 strcpy(path_dir, path);
5335 strcat(path_dir, "/");
5336 strcat(path_dir, file->d_name);
5337 pd = depth - 1;
5338 sum = calc_pid(pid_buf, path_dir, pd, sum, cfd);
5339 }
5340 }
5341
5342 strcat(path, "/cgroup.procs");
5343 fd = openat(cfd, path, O_RDONLY);
5344 if (fd < 0)
5345 return sum;
5346
5347 f = fdopen(move_fd(fd), "r");
5348 if (!f)
5349 return sum;
5350
5351 while (getline(&line, &linelen, f) != -1) {
5352 pid = realloc(*pid_buf, sizeof(char *) * (sum + 1));
5353 if (!pid)
5354 return sum;
5355 *pid_buf = pid;
5356
5357 *(*pid_buf + sum) = malloc(strlen(line) + 1);
5358 if (!*(*pid_buf + sum))
5359 return sum;
5360
5361 strcpy(*(*pid_buf + sum), line);
5362 sum++;
5363 }
5364
5365 return sum;
5366 }
5367
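/*
 * A usage sketch for calc_pid() above: the caller owns the returned
 * strings, so every entry and the array itself must be freed. The cgroup
 * path is a placeholder; example_calc_pid() is illustrative only.
 */
#if 0
static void example_calc_pid(int cfd)
{
	char path[] = "/some/cgroup";
	char **pid_buf;
	int sum, i;

	pid_buf = malloc(sizeof(char *));
	if (!pid_buf)
		return;

	sum = calc_pid(&pid_buf, path, DEPTH_DIR, 0, cfd);
	for (i = 0; i < sum; i++)
		free(pid_buf[i]);
	free(pid_buf);
}
#endif
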
/*
 * calc_load calculates the load according to the following formula:
 * load1 = load0 * exp + active * (1 - exp)
 *
 * @load1: the new loadavg.
 * @load0: the former loadavg.
 * @active: the number of runnable tasks at this moment.
 * @exp: the fixed-point decay constant defined at the top of this file.
 */
5377 static unsigned long
5378 calc_load(unsigned long load, unsigned long exp, unsigned long active)
5379 {
5380 unsigned long newload;
5381
5382 active = active > 0 ? active * FIXED_1 : 0;
5383 newload = load * exp + active * (FIXED_1 - exp);
5384 if (active >= load)
5385 newload += FIXED_1 - 1;
5386
5387 return newload / FIXED_1;
5388 }
5389
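/*
 * A worked example of the fixed-point update above, with assumed inputs.
 * With FIXED_1 = 2048 and EXP_1 = 1884, a previous load of 1.0 (2048) and
 * 2 runnable tasks give:
 *   active  = 2 * 2048 = 4096
 *   newload = 2048 * 1884 + 4096 * (2048 - 1884) = 4530176
 *             (+ FIXED_1 - 1 because active >= load)
 *   result  = 4532223 / 2048 = 2212, i.e. roughly 1.08.
 */
#if 0
static void example_calc_load(void)
{
	unsigned long load = calc_load(2048, EXP_1, 2);

	printf("load1m: %lu.%02lu\n", LOAD_INT(load), LOAD_FRAC(load)); /* 1.08 */
}
#endif
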
/*
 * Returns 0 when the container p->cg has gone away, -1 when an error
 * occurred during the refresh, and otherwise the total number of PIDs.
 */
5395 static int refresh_load(struct load_node *p, char *path)
5396 {
5397 __do_free char *line = NULL;
5398 char **idbuf;
5399 char proc_path[256];
5400 int i, ret, run_pid = 0, total_pid = 0, last_pid = 0;
5401 size_t linelen = 0;
5402 int sum, length;
5403 struct dirent *file;
5404
5405 idbuf = malloc(sizeof(char *));
5406 if (!idbuf)
5407 return -1;
5408
5409 sum = calc_pid(&idbuf, path, DEPTH_DIR, 0, p->cfd);
5410 /* normal exit */
5411 if (sum == 0)
5412 goto out;
5413
5414 for (i = 0; i < sum; i++) {
5415 __do_closedir DIR *dp = NULL;
5416
		/* Strip the trailing '\n'. */
5418 length = strlen(idbuf[i])-1;
5419 idbuf[i][length] = '\0';
5420 ret = snprintf(proc_path, 256, "/proc/%s/task", idbuf[i]);
5421 if (ret < 0 || ret > 255) {
5422 lxcfs_error("%s\n", "snprintf() failed in refresh_load.");
5423 i = sum;
5424 sum = -1;
5425 goto err_out;
5426 }
5427
5428 dp = opendir(proc_path);
5429 if (!dp) {
5430 lxcfs_error("%s\n", "Open proc_path failed in refresh_load.");
5431 continue;
5432 }
5433 while ((file = readdir(dp)) != NULL) {
5434 __do_fclose FILE *f = NULL;
5435
5436 if (strncmp(file->d_name, ".", 1) == 0)
5437 continue;
5438 if (strncmp(file->d_name, "..", 1) == 0)
5439 continue;
5440 total_pid++;
			/* Make the biggest task ID become last_pid. */
			ret = atoi(file->d_name);
			last_pid = (ret > last_pid) ? ret : last_pid;
5444
5445 ret = snprintf(proc_path, 256, "/proc/%s/task/%s/status", idbuf[i], file->d_name);
5446 if (ret < 0 || ret > 255) {
5447 lxcfs_error("%s\n", "snprintf() failed in refresh_load.");
5448 i = sum;
5449 sum = -1;
5450 goto err_out;
5451 }
5452
5453 f = fopen(proc_path, "r");
5454 if (f != NULL) {
5455 while (getline(&line, &linelen, f) != -1) {
5456 /* Find State */
5457 if ((line[0] == 'S') && (line[1] == 't'))
5458 break;
5459 }
5460
5461 if ((line[7] == 'R') || (line[7] == 'D'))
5462 run_pid++;
5463 }
5464 }
5465 }
	/* Calculate the loadavg. */
5467 p->avenrun[0] = calc_load(p->avenrun[0], EXP_1, run_pid);
5468 p->avenrun[1] = calc_load(p->avenrun[1], EXP_5, run_pid);
5469 p->avenrun[2] = calc_load(p->avenrun[2], EXP_15, run_pid);
5470 p->run_pid = run_pid;
5471 p->total_pid = total_pid;
5472 p->last_pid = last_pid;
5473
5474 err_out:
5475 for (; i > 0; i--)
5476 free(idbuf[i-1]);
5477 out:
5478 free(idbuf);
5479 return sum;
5480 }
5481
5482 /*
5483 * Traverse the hash table and update it.
5484 */
5485 void *load_begin(void *arg)
5486 {
5487
5488 int i, sum, length, ret;
5489 struct load_node *f;
5490 int first_node;
5491 clock_t time1, time2;
5492
5493 while (1) {
5494 if (loadavg_stop == 1)
5495 return NULL;
5496
5497 time1 = clock();
5498 for (i = 0; i < LOAD_SIZE; i++) {
5499 pthread_mutex_lock(&load_hash[i].lock);
5500 if (load_hash[i].next == NULL) {
5501 pthread_mutex_unlock(&load_hash[i].lock);
5502 continue;
5503 }
5504 f = load_hash[i].next;
5505 first_node = 1;
5506 while (f) {
5507 __do_free char *path = NULL;
5508
5509 length = strlen(f->cg) + 2;
5510 /* strlen(f->cg) + '.' or '' + \0 */
5511 path = malloc(length);
5512 if (!path)
5513 goto out;
5514
5515 ret = snprintf(path, length, "%s%s", dot_or_empty(f->cg), f->cg);
5516 if (ret < 0 || ret > length - 1) {
5517 /* snprintf failed, ignore the node.*/
5518 lxcfs_error("Refresh node %s failed for snprintf().\n", f->cg);
5519 goto out;
5520 }
5521
5522 sum = refresh_load(f, path);
5523 if (sum == 0)
5524 f = del_node(f, i);
5525 else
5526 out: f = f->next;
				/* load_hash[i].lock is held only while the first node is processed. */
5528 if (first_node == 1) {
5529 first_node = 0;
5530 pthread_mutex_unlock(&load_hash[i].lock);
5531 }
5532 }
5533 }
5534
5535 if (loadavg_stop == 1)
5536 return NULL;
5537
5538 time2 = clock();
5539 usleep(FLUSH_TIME * 1000000 - (int)((time2 - time1) * 1000000 / CLOCKS_PER_SEC));
5540 }
5541 }
5542
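/*
 * Note that clock() in load_begin() above measures CPU time consumed by
 * this process, not wall-clock time, so the FLUSH_TIME pacing is only
 * approximate. A wall-clock variant could be built on CLOCK_MONOTONIC;
 * a sketch, not what lxcfs uses here:
 */
#if 0
static long example_elapsed_usec(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000000L +
	       (now.tv_nsec - start->tv_nsec) / 1000L;
}
#endif
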
5543 static int proc_loadavg_read(char *buf, size_t size, off_t offset,
5544 struct fuse_file_info *fi)
5545 {
5546 struct fuse_context *fc = fuse_get_context();
5547 struct file_info *d = (struct file_info *)fi->fh;
5548 pid_t initpid;
5549 char *cg;
5550 size_t total_len = 0;
5551 char *cache = d->buf;
5552 struct load_node *n;
5553 int hash;
5554 int cfd, rv = 0;
5555 unsigned long a, b, c;
5556
5557 if (offset) {
5558 if (offset > d->size)
5559 return -EINVAL;
5560 if (!d->cached)
5561 return 0;
5562 int left = d->size - offset;
5563 total_len = left > size ? size : left;
5564 memcpy(buf, cache + offset, total_len);
5565 return total_len;
5566 }
5567 if (!loadavg)
5568 return read_file_fuse("/proc/loadavg", buf, size, d);
5569
5570 initpid = lookup_initpid_in_store(fc->pid);
5571 if (initpid <= 1 || is_shared_pidns(initpid))
5572 initpid = fc->pid;
5573 cg = get_pid_cgroup(initpid, "cpu");
5574 if (!cg)
5575 return read_file_fuse("/proc/loadavg", buf, size, d);
5576
5577 prune_init_slice(cg);
5578 hash = calc_hash(cg) % LOAD_SIZE;
5579 n = locate_node(cg, hash);
5580
5581 /* First time */
5582 if (n == NULL) {
		cfd = find_mounted_controller("cpu");
		if (cfd < 0) {
			/*
			 * In locate_node() above, pthread_rwlock_unlock() isn't used
			 * because delete is not allowed before read has ended.
			 */
			pthread_rwlock_unlock(&load_hash[hash].rdlock);
			rv = 0;
			goto err;
		}
5593 do {
5594 n = malloc(sizeof(struct load_node));
5595 } while (!n);
5596
5597 do {
5598 n->cg = malloc(strlen(cg)+1);
5599 } while (!n->cg);
5600 strcpy(n->cg, cg);
5601 n->avenrun[0] = 0;
5602 n->avenrun[1] = 0;
5603 n->avenrun[2] = 0;
5604 n->run_pid = 0;
5605 n->total_pid = 1;
5606 n->last_pid = initpid;
5607 n->cfd = cfd;
5608 insert_node(&n, hash);
5609 }
5610 a = n->avenrun[0] + (FIXED_1/200);
5611 b = n->avenrun[1] + (FIXED_1/200);
5612 c = n->avenrun[2] + (FIXED_1/200);
5613 total_len = snprintf(d->buf, d->buflen, "%lu.%02lu %lu.%02lu %lu.%02lu %d/%d %d\n",
5614 LOAD_INT(a), LOAD_FRAC(a),
5615 LOAD_INT(b), LOAD_FRAC(b),
5616 LOAD_INT(c), LOAD_FRAC(c),
5617 n->run_pid, n->total_pid, n->last_pid);
5618 pthread_rwlock_unlock(&load_hash[hash].rdlock);
5619 if (total_len < 0 || total_len >= d->buflen) {
5620 lxcfs_error("%s\n", "Failed to write to cache");
5621 rv = 0;
5622 goto err;
5623 }
5624 d->size = (int)total_len;
5625 d->cached = 1;
5626
5627 if (total_len > size)
5628 total_len = size;
5629 memcpy(buf, d->buf, total_len);
5630 rv = total_len;
5631
5632 err:
5633 free(cg);
5634 return rv;
5635 }
/* Return the worker's thread ID (nonzero) on success, 0 on failure. */
5637 pthread_t load_daemon(int load_use)
5638 {
5639 int ret;
5640 pthread_t pid;
5641
5642 ret = init_load();
5643 if (ret == -1) {
		lxcfs_error("%s\n", "Failed to initialize the load hash table in load_daemon!");
5645 return 0;
5646 }
5647 ret = pthread_create(&pid, NULL, load_begin, NULL);
5648 if (ret != 0) {
		lxcfs_error("%s\n", "Failed to create thread in load_daemon!");
5650 load_free();
5651 return 0;
5652 }
	/* Enable the virtualized loadavg; callers pass load_use = 1. */
5654 loadavg = load_use;
5655 return pid;
5656 }
5657
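/*
 * How the daemon pair is meant to be used: keep the thread ID returned by
 * load_daemon() so the worker can be joined at shutdown. A sketch assuming
 * glibc's integral pthread_t; illustrative only:
 */
#if 0
static void example_load_lifecycle(void)
{
	pthread_t p = load_daemon(1); /* enables the virtualized loadavg */

	if (p == 0)
		return;

	/* ... serve requests ... */

	stop_load_daemon(p); /* sets loadavg_stop and joins the worker */
}
#endif
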
5658 /* Returns 0 on success. */
5659 int stop_load_daemon(pthread_t pid)
5660 {
5661 int s;
5662
5663 /* Signal the thread to gracefully stop */
5664 loadavg_stop = 1;
5665
	s = pthread_join(pid, NULL); /* Wait for the worker thread to exit. */
5667 if (s != 0) {
5668 lxcfs_error("%s\n", "stop_load_daemon error: failed to join");
5669 return -1;
5670 }
5671
5672 load_free();
5673 loadavg_stop = 0;
5674
5675 return 0;
5676 }
5677
5678 static off_t get_procfile_size(const char *which)
5679 {
5680 FILE *f = fopen(which, "r");
5681 char *line = NULL;
5682 size_t len = 0;
5683 ssize_t sz, answer = 0;
5684 if (!f)
5685 return 0;
5686
5687 while ((sz = getline(&line, &len, f)) != -1)
5688 answer += sz;
5689 fclose (f);
5690 free(line);
5691
5692 return answer;
5693 }
5694
5695 int proc_getattr(const char *path, struct stat *sb)
5696 {
5697 struct timespec now;
5698
5699 memset(sb, 0, sizeof(struct stat));
5700 if (clock_gettime(CLOCK_REALTIME, &now) < 0)
5701 return -EINVAL;
5702 sb->st_uid = sb->st_gid = 0;
5703 sb->st_atim = sb->st_mtim = sb->st_ctim = now;
5704 if (strcmp(path, "/proc") == 0) {
5705 sb->st_mode = S_IFDIR | 00555;
5706 sb->st_nlink = 2;
5707 return 0;
5708 }
5709 if (strcmp(path, "/proc/meminfo") == 0 ||
5710 strcmp(path, "/proc/cpuinfo") == 0 ||
5711 strcmp(path, "/proc/uptime") == 0 ||
5712 strcmp(path, "/proc/stat") == 0 ||
5713 strcmp(path, "/proc/diskstats") == 0 ||
5714 strcmp(path, "/proc/swaps") == 0 ||
5715 strcmp(path, "/proc/loadavg") == 0) {
5716 sb->st_size = 0;
5717 sb->st_mode = S_IFREG | 00444;
5718 sb->st_nlink = 1;
5719 return 0;
5720 }
5721
5722 return -ENOENT;
5723 }
5724
5725 int proc_readdir(const char *path, void *buf, fuse_fill_dir_t filler, off_t offset,
5726 struct fuse_file_info *fi)
5727 {
5728 if (filler(buf, ".", NULL, 0) != 0 ||
5729 filler(buf, "..", NULL, 0) != 0 ||
5730 filler(buf, "cpuinfo", NULL, 0) != 0 ||
5731 filler(buf, "meminfo", NULL, 0) != 0 ||
5732 filler(buf, "stat", NULL, 0) != 0 ||
5733 filler(buf, "uptime", NULL, 0) != 0 ||
5734 filler(buf, "diskstats", NULL, 0) != 0 ||
5735 filler(buf, "swaps", NULL, 0) != 0 ||
5736 filler(buf, "loadavg", NULL, 0) != 0)
5737 return -EINVAL;
5738 return 0;
5739 }
5740
5741 int proc_open(const char *path, struct fuse_file_info *fi)
5742 {
5743 int type = -1;
5744 struct file_info *info;
5745
5746 if (strcmp(path, "/proc/meminfo") == 0)
5747 type = LXC_TYPE_PROC_MEMINFO;
5748 else if (strcmp(path, "/proc/cpuinfo") == 0)
5749 type = LXC_TYPE_PROC_CPUINFO;
5750 else if (strcmp(path, "/proc/uptime") == 0)
5751 type = LXC_TYPE_PROC_UPTIME;
5752 else if (strcmp(path, "/proc/stat") == 0)
5753 type = LXC_TYPE_PROC_STAT;
5754 else if (strcmp(path, "/proc/diskstats") == 0)
5755 type = LXC_TYPE_PROC_DISKSTATS;
5756 else if (strcmp(path, "/proc/swaps") == 0)
5757 type = LXC_TYPE_PROC_SWAPS;
5758 else if (strcmp(path, "/proc/loadavg") == 0)
5759 type = LXC_TYPE_PROC_LOADAVG;
5760 if (type == -1)
5761 return -ENOENT;
5762
5763 info = malloc(sizeof(*info));
5764 if (!info)
5765 return -ENOMEM;
5766
5767 memset(info, 0, sizeof(*info));
5768 info->type = type;
5769
5770 info->buflen = get_procfile_size(path) + BUF_RESERVE_SIZE;
5771 do {
5772 info->buf = malloc(info->buflen);
5773 } while (!info->buf);
5774 memset(info->buf, 0, info->buflen);
5775 /* set actual size to buffer size */
5776 info->size = info->buflen;
5777
5778 fi->fh = (unsigned long)info;
5779 return 0;
5780 }
5781
5782 int proc_access(const char *path, int mask)
5783 {
5784 if (strcmp(path, "/proc") == 0 && access(path, R_OK) == 0)
5785 return 0;
5786
5787 /* these are all read-only */
5788 if ((mask & ~R_OK) != 0)
5789 return -EACCES;
5790 return 0;
5791 }
5792
5793 int proc_release(const char *path, struct fuse_file_info *fi)
5794 {
5795 do_release_file_info(fi);
5796 return 0;
5797 }
5798
5799 int proc_read(const char *path, char *buf, size_t size, off_t offset,
5800 struct fuse_file_info *fi)
5801 {
5802 struct file_info *f = (struct file_info *) fi->fh;
5803
5804 switch (f->type) {
5805 case LXC_TYPE_PROC_MEMINFO:
5806 return proc_meminfo_read(buf, size, offset, fi);
5807 case LXC_TYPE_PROC_CPUINFO:
5808 return proc_cpuinfo_read(buf, size, offset, fi);
5809 case LXC_TYPE_PROC_UPTIME:
5810 return proc_uptime_read(buf, size, offset, fi);
5811 case LXC_TYPE_PROC_STAT:
5812 return proc_stat_read(buf, size, offset, fi);
5813 case LXC_TYPE_PROC_DISKSTATS:
5814 return proc_diskstats_read(buf, size, offset, fi);
5815 case LXC_TYPE_PROC_SWAPS:
5816 return proc_swaps_read(buf, size, offset, fi);
5817 case LXC_TYPE_PROC_LOADAVG:
5818 return proc_loadavg_read(buf, size, offset, fi);
5819 default:
5820 return -EINVAL;
5821 }
5822 }
5823
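/*
 * A sketch of how these handlers would map onto a FUSE 2.6 operations
 * table. This is not lxcfs's actual wiring, which lives outside this file;
 * it only illustrates the dispatch surface.
 */
#if 0
static const struct fuse_operations example_proc_ops = {
	.getattr	= proc_getattr,
	.readdir	= proc_readdir,
	.open		= proc_open,
	.read		= proc_read,
	.access		= proc_access,
	.release	= proc_release,
};
#endif
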
5824 /*
5825 * Functions needed to setup cgroups in the __constructor__.
5826 */
5827
5828 static bool umount_if_mounted(void)
5829 {
5830 if (umount2(BASEDIR, MNT_DETACH) < 0 && errno != EINVAL) {
5831 lxcfs_error("Failed to unmount %s: %s.\n", BASEDIR, strerror(errno));
5832 return false;
5833 }
5834 return true;
5835 }
5836
5837 /* __typeof__ should be safe to use with all compilers. */
5838 typedef __typeof__(((struct statfs *)NULL)->f_type) fs_type_magic;
5839 static bool has_fs_type(const struct statfs *fs, fs_type_magic magic_val)
5840 {
5841 return (fs->f_type == (fs_type_magic)magic_val);
5842 }
5843
5844 /*
5845 * looking at fs/proc_namespace.c, it appears we can
5846 * actually expect the rootfs entry to very specifically contain
5847 * " - rootfs rootfs "
5848 * IIUC, so long as we've chrooted so that rootfs is not our root,
5849 * the rootfs entry should always be skipped in mountinfo contents.
5850 */
5851 static bool is_on_ramfs(void)
5852 {
5853 FILE *f;
5854 char *p, *p2;
5855 char *line = NULL;
5856 size_t len = 0;
5857 int i;
5858
5859 f = fopen("/proc/self/mountinfo", "r");
5860 if (!f)
5861 return false;
5862
5863 while (getline(&line, &len, f) != -1) {
5864 for (p = line, i = 0; p && i < 4; i++)
5865 p = strchr(p + 1, ' ');
5866 if (!p)
5867 continue;
5868 p2 = strchr(p + 1, ' ');
5869 if (!p2)
5870 continue;
5871 *p2 = '\0';
5872 if (strcmp(p + 1, "/") == 0) {
5873 // this is '/'. is it the ramfs?
5874 p = strchr(p2 + 1, '-');
5875 if (p && strncmp(p, "- rootfs rootfs ", 16) == 0) {
5876 free(line);
5877 fclose(f);
5878 return true;
5879 }
5880 }
5881 }
5882 free(line);
5883 fclose(f);
5884 return false;
5885 }
5886
5887 static int pivot_enter()
5888 {
5889 int ret = -1, oldroot = -1, newroot = -1;
5890
5891 oldroot = open("/", O_DIRECTORY | O_RDONLY);
5892 if (oldroot < 0) {
5893 lxcfs_error("%s\n", "Failed to open old root for fchdir.");
5894 return ret;
5895 }
5896
5897 newroot = open(ROOTDIR, O_DIRECTORY | O_RDONLY);
5898 if (newroot < 0) {
5899 lxcfs_error("%s\n", "Failed to open new root for fchdir.");
5900 goto err;
5901 }
5902
5903 /* change into new root fs */
5904 if (fchdir(newroot) < 0) {
5905 lxcfs_error("Failed to change directory to new rootfs: %s.\n", ROOTDIR);
5906 goto err;
5907 }
5908
5909 /* pivot_root into our new root fs */
5910 if (pivot_root(".", ".") < 0) {
5911 lxcfs_error("pivot_root() syscall failed: %s.\n", strerror(errno));
5912 goto err;
5913 }
5914
	/*
	 * At this point the old root is mounted on top of our new root. To
	 * unmount it we must not be chdir'd into it, so escape back to the
	 * old root first.
	 */
5920 if (fchdir(oldroot) < 0) {
5921 lxcfs_error("%s\n", "Failed to enter old root.");
5922 goto err;
5923 }
5924
5925 if (umount2(".", MNT_DETACH) < 0) {
5926 lxcfs_error("%s\n", "Failed to detach old root.");
5927 goto err;
5928 }
5929
5930 if (fchdir(newroot) < 0) {
5931 lxcfs_error("%s\n", "Failed to re-enter new root.");
5932 goto err;
5933 }
5934
5935 ret = 0;
5936
5937 err:
	if (oldroot >= 0)
		close(oldroot);
	if (newroot >= 0)
		close(newroot);
5942
5943 return ret;
5944 }
5945
5946 static int chroot_enter()
5947 {
5948 if (mount(ROOTDIR, "/", NULL, MS_REC | MS_BIND, NULL)) {
		lxcfs_error("Failed to recursively bind-mount %s into /.\n", ROOTDIR);
5950 return -1;
5951 }
5952
5953 if (chroot(".") < 0) {
5954 lxcfs_error("Call to chroot() failed: %s.\n", strerror(errno));
5955 return -1;
5956 }
5957
5958 if (chdir("/") < 0) {
5959 lxcfs_error("Failed to change directory: %s.\n", strerror(errno));
5960 return -1;
5961 }
5962
5963 return 0;
5964 }
5965
5966 static int permute_and_enter(void)
5967 {
5968 struct statfs sb;
5969
5970 if (statfs("/", &sb) < 0) {
5971 lxcfs_error("%s\n", "Could not stat / mountpoint.");
5972 return -1;
5973 }
5974
5975 /* has_fs_type() is not reliable. When the ramfs is a tmpfs it will
5976 * likely report TMPFS_MAGIC. Hence, when it reports no we still check
5977 * /proc/1/mountinfo. */
5978 if (has_fs_type(&sb, RAMFS_MAGIC) || is_on_ramfs())
5979 return chroot_enter();
5980
5981 if (pivot_enter() < 0) {
5982 lxcfs_error("%s\n", "Could not perform pivot root.");
5983 return -1;
5984 }
5985
5986 return 0;
5987 }
5988
5989 /* Prepare our new clean root. */
5990 static int permute_prepare(void)
5991 {
5992 if (mkdir(ROOTDIR, 0700) < 0 && errno != EEXIST) {
5993 lxcfs_error("%s\n", "Failed to create directory for new root.");
5994 return -1;
5995 }
5996
5997 if (mount("/", ROOTDIR, NULL, MS_BIND, 0) < 0) {
5998 lxcfs_error("Failed to bind-mount / for new root: %s.\n", strerror(errno));
5999 return -1;
6000 }
6001
6002 if (mount(RUNTIME_PATH, ROOTDIR RUNTIME_PATH, NULL, MS_BIND, 0) < 0) {
6003 lxcfs_error("Failed to bind-mount /run into new root: %s.\n", strerror(errno));
6004 return -1;
6005 }
6006
6007 if (mount(BASEDIR, ROOTDIR BASEDIR, NULL, MS_REC | MS_MOVE, 0) < 0) {
		lxcfs_error("Failed to move " BASEDIR " into new root: %s.\n", strerror(errno));
6009 return -1;
6010 }
6011
6012 return 0;
6013 }
6014
6015 /* Calls chroot() on ramfs, pivot_root() in all other cases. */
6016 static bool permute_root(void)
6017 {
6018 /* Prepare new root. */
6019 if (permute_prepare() < 0)
6020 return false;
6021
6022 /* Pivot into new root. */
6023 if (permute_and_enter() < 0)
6024 return false;
6025
6026 return true;
6027 }
6028
6029 static int preserve_mnt_ns(int pid)
6030 {
6031 int ret;
6032 size_t len = sizeof("/proc/") + 21 + sizeof("/ns/mnt");
6033 char path[len];
6034
6035 ret = snprintf(path, len, "/proc/%d/ns/mnt", pid);
6036 if (ret < 0 || (size_t)ret >= len)
6037 return -1;
6038
6039 return open(path, O_RDONLY | O_CLOEXEC);
6040 }
6041
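/*
 * preserve_mnt_ns() pairs with setns() to hop back into the saved
 * namespace; lxcfs_init() below does exactly that around the private
 * cgroup mounts. A condensed round-trip sketch, illustrative only:
 */
#if 0
static int example_ns_roundtrip(void)
{
	int fd = preserve_mnt_ns(getpid());

	if (fd < 0)
		return -1;

	if (unshare(CLONE_NEWNS) < 0 || setns(fd, 0) < 0) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}
#endif
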
6042 static bool cgfs_prepare_mounts(void)
6043 {
6044 if (!mkdir_p(BASEDIR, 0700)) {
6045 lxcfs_error("%s\n", "Failed to create lxcfs cgroup mountpoint.");
6046 return false;
6047 }
6048
6049 if (!umount_if_mounted()) {
6050 lxcfs_error("%s\n", "Failed to clean up old lxcfs cgroup mountpoint.");
6051 return false;
6052 }
6053
6054 if (unshare(CLONE_NEWNS) < 0) {
6055 lxcfs_error("Failed to unshare mount namespace: %s.\n", strerror(errno));
6056 return false;
6057 }
6058
6059 cgroup_mount_ns_fd = preserve_mnt_ns(getpid());
6060 if (cgroup_mount_ns_fd < 0) {
6061 lxcfs_error("Failed to preserve mount namespace: %s.\n", strerror(errno));
6062 return false;
6063 }
6064
6065 if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0) < 0) {
6066 lxcfs_error("Failed to remount / private: %s.\n", strerror(errno));
6067 return false;
6068 }
6069
6070 if (mount("tmpfs", BASEDIR, "tmpfs", 0, "size=100000,mode=700") < 0) {
6071 lxcfs_error("%s\n", "Failed to mount tmpfs over lxcfs cgroup mountpoint.");
6072 return false;
6073 }
6074
6075 return true;
6076 }
6077
6078 static bool cgfs_mount_hierarchies(void)
6079 {
6080 if (!mkdir_p(BASEDIR DEFAULT_CGROUP_MOUNTPOINT, 0755))
6081 return false;
6082
6083 if (!cgroup_ops->mount(cgroup_ops, BASEDIR))
6084 return false;
6085
6086 for (struct hierarchy **h = cgroup_ops->hierarchies; h && *h; h++) {
6087 __do_free char *path = must_make_path(BASEDIR, (*h)->mountpoint, NULL);
6088 (*h)->fd = open(path, O_DIRECTORY | O_CLOEXEC | O_NOFOLLOW);
6089 if ((*h)->fd < 0)
6090 return false;
6091 }
6092
6093 return true;
6094 }
6095
6096 static bool cgfs_setup_controllers(void)
6097 {
6098 if (!cgfs_prepare_mounts())
6099 return false;
6100
6101 if (!cgfs_mount_hierarchies()) {
6102 lxcfs_error("%s\n", "Failed to set up private lxcfs cgroup mounts.");
6103 return false;
6104 }
6105
6106 if (!permute_root())
6107 return false;
6108
6109 return true;
6110 }
6111
6112 static void __attribute__((constructor)) lxcfs_init(void)
6113 {
6114 __do_close_prot_errno int init_ns = -EBADF;
6115 char *cret;
6116 char cwd[MAXPATHLEN];
6117
6118 cgroup_ops = cgroup_init();
6119 if (!cgroup_ops)
6120 log_exit("Failed to initialize cgroup support");
6121
6122 /* Preserve initial namespace. */
6123 init_ns = preserve_mnt_ns(getpid());
6124 if (init_ns < 0)
6125 log_exit("Failed to preserve initial mount namespace");
6126
	cret = getcwd(cwd, MAXPATHLEN);
	if (!cret)
		log_exit("%s - Could not retrieve current working directory", strerror(errno));
6129
	/* This function unshares (CLONE_NEWNS) our initial mount namespace so
	 * that the lxcfs cgroup hierarchies can be mounted privately. */
6132 if (!cgfs_setup_controllers())
6133 log_exit("Failed to setup private cgroup mounts for lxcfs");
6134
6135 if (setns(init_ns, 0) < 0)
6136 log_exit("%s - Failed to switch back to initial mount namespace", strerror(errno));
6137
6138 if (!cret || chdir(cwd) < 0)
6139 log_exit("%s - Could not change back to original working directory", strerror(errno));
6140
6141 if (!init_cpuview())
6142 log_exit("Failed to init CPU view");
6143
6144 print_subsystems();
6145 }
6146
6147 static void __attribute__((destructor)) lxcfs_exit(void)
6148 {
6149 lxcfs_debug("%s\n", "Running destructor for liblxcfs.");
6150 free_cpuview();
6151 close_prot_errno_disarm(cgroup_mount_ns_fd);
6152 cgroup_exit(cgroup_ops);
6153 }