3 * Copyright © 2014-2016 Canonical, Inc
4 * Author: Serge Hallyn <serge.hallyn@ubuntu.com>
6 * See COPYING file for details.
9 #define FUSE_USE_VERSION 26
11 #define __STDC_FORMAT_MACROS
29 #include <linux/magic.h>
30 #include <linux/sched.h>
31 #include <sys/epoll.h>
33 #include <sys/mount.h>
34 #include <sys/param.h>
35 #include <sys/socket.h>
36 #include <sys/syscall.h>
37 #include <sys/sysinfo.h>
41 #include "cgroups/cgroup.h"
42 #include "cgroups/cgroup_utils.h"
43 #include "memory_utils.h"
46 /* Define pivot_root() if missing from the C library */
47 #ifndef HAVE_PIVOT_ROOT
48 static int pivot_root(const char * new_root
, const char * put_old
)
50 #ifdef __NR_pivot_root
51 return syscall(__NR_pivot_root
, new_root
, put_old
);
58 extern int pivot_root(const char * new_root
, const char * put_old
);
61 struct cpuacct_usage
{
68 /* The function of hash table.*/
69 #define LOAD_SIZE 100 /*the size of hash_table */
70 #define FLUSH_TIME 5 /*the flush rate */
71 #define DEPTH_DIR 3 /*the depth of per cgroup */
72 /* The function of calculate loadavg .*/
73 #define FSHIFT 11 /* nr of bits of precision */
74 #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
75 #define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
76 #define EXP_5 2014 /* 1/exp(5sec/5min) */
77 #define EXP_15 2037 /* 1/exp(5sec/15min) */
78 #define LOAD_INT(x) ((x) >> FSHIFT)
79 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
81 * This parameter is used for proc_loadavg_read().
82 * 1 means use loadavg, 0 means not use.
84 static int loadavg
= 0;
85 static volatile sig_atomic_t loadavg_stop
= 0;
/*
 * Hash a cgroup name into a non-negative int using the classic ELF
 * (PJW-style) string hash. Callers reduce the result modulo the table
 * size themselves.
 */
static int calc_hash(const char *name)
{
	unsigned int hash = 0;
	unsigned int x = 0;

	/* ELFHash algorithm. */
	for (; *name != '\0'; name++) {
		hash = (hash << 4) + *name;
		x = hash & 0xf0000000;
		if (x != 0)
			hash ^= (x >> 24);
		hash &= ~x;
	}

	/* Mask off the sign bit so the int result is never negative. */
	return (hash & 0x7fffffff);
}
103 unsigned long avenrun
[3]; /* Load averages */
104 unsigned int run_pid
;
105 unsigned int total_pid
;
106 unsigned int last_pid
;
107 int cfd
; /* The file descriptor of the mounted cgroup */
108 struct load_node
*next
;
109 struct load_node
**pre
;
114 * The lock is about insert load_node and refresh load_node.To the first
115 * load_node of each hash bucket, insert and refresh in this hash bucket is
116 * mutually exclusive.
118 pthread_mutex_t lock
;
120 * The rdlock is about read loadavg and delete load_node.To each hash
121 * bucket, read and delete is mutually exclusive. But at the same time, we
122 * allow paratactic read operation. This rdlock is at list level.
124 pthread_rwlock_t rdlock
;
126 * The rilock is about read loadavg and insert load_node.To the first
127 * load_node of each hash bucket, read and insert is mutually exclusive.
128 * But at the same time, we allow paratactic read operation.
130 pthread_rwlock_t rilock
;
131 struct load_node
*next
;
134 static struct load_head load_hash
[LOAD_SIZE
]; /* hash table */
136 * init_load initialize the hash table.
137 * Return 0 on success, return -1 on failure.
139 static int init_load(void)
144 for (i
= 0; i
< LOAD_SIZE
; i
++) {
145 load_hash
[i
].next
= NULL
;
146 ret
= pthread_mutex_init(&load_hash
[i
].lock
, NULL
);
148 lxcfs_error("%s\n", "Failed to initialize lock");
151 ret
= pthread_rwlock_init(&load_hash
[i
].rdlock
, NULL
);
153 lxcfs_error("%s\n", "Failed to initialize rdlock");
156 ret
= pthread_rwlock_init(&load_hash
[i
].rilock
, NULL
);
158 lxcfs_error("%s\n", "Failed to initialize rilock");
164 pthread_rwlock_destroy(&load_hash
[i
].rdlock
);
166 pthread_mutex_destroy(&load_hash
[i
].lock
);
170 pthread_mutex_destroy(&load_hash
[i
].lock
);
171 pthread_rwlock_destroy(&load_hash
[i
].rdlock
);
172 pthread_rwlock_destroy(&load_hash
[i
].rilock
);
177 static void insert_node(struct load_node
**n
, int locate
)
181 pthread_mutex_lock(&load_hash
[locate
].lock
);
182 pthread_rwlock_wrlock(&load_hash
[locate
].rilock
);
183 f
= load_hash
[locate
].next
;
184 load_hash
[locate
].next
= *n
;
186 (*n
)->pre
= &(load_hash
[locate
].next
);
188 f
->pre
= &((*n
)->next
);
190 pthread_mutex_unlock(&load_hash
[locate
].lock
);
191 pthread_rwlock_unlock(&load_hash
[locate
].rilock
);
194 * locate_node() finds special node. Not return NULL means success.
195 * It should be noted that rdlock isn't unlocked at the end of code
196 * because this function is used to read special node. Delete is not
197 * allowed before read has ended.
198 * unlock rdlock only in proc_loadavg_read().
200 static struct load_node
*locate_node(char *cg
, int locate
)
202 struct load_node
*f
= NULL
;
205 pthread_rwlock_rdlock(&load_hash
[locate
].rilock
);
206 pthread_rwlock_rdlock(&load_hash
[locate
].rdlock
);
207 if (load_hash
[locate
].next
== NULL
) {
208 pthread_rwlock_unlock(&load_hash
[locate
].rilock
);
211 f
= load_hash
[locate
].next
;
212 pthread_rwlock_unlock(&load_hash
[locate
].rilock
);
213 while (f
&& ((i
= strcmp(f
->cg
, cg
)) != 0))
218 /* Delete the load_node n and return the next node of it. */
219 static struct load_node
*del_node(struct load_node
*n
, int locate
)
223 pthread_rwlock_wrlock(&load_hash
[locate
].rdlock
);
224 if (n
->next
== NULL
) {
228 n
->next
->pre
= n
->pre
;
233 pthread_rwlock_unlock(&load_hash
[locate
].rdlock
);
237 static void load_free(void)
239 struct load_node
*f
, *p
;
241 for (int i
= 0; i
< LOAD_SIZE
; i
++) {
242 pthread_mutex_lock(&load_hash
[i
].lock
);
243 pthread_rwlock_wrlock(&load_hash
[i
].rilock
);
244 pthread_rwlock_wrlock(&load_hash
[i
].rdlock
);
245 if (load_hash
[i
].next
== NULL
) {
246 pthread_mutex_unlock(&load_hash
[i
].lock
);
247 pthread_mutex_destroy(&load_hash
[i
].lock
);
248 pthread_rwlock_unlock(&load_hash
[i
].rilock
);
249 pthread_rwlock_destroy(&load_hash
[i
].rilock
);
250 pthread_rwlock_unlock(&load_hash
[i
].rdlock
);
251 pthread_rwlock_destroy(&load_hash
[i
].rdlock
);
255 for (f
= load_hash
[i
].next
; f
;) {
262 pthread_mutex_unlock(&load_hash
[i
].lock
);
263 pthread_mutex_destroy(&load_hash
[i
].lock
);
264 pthread_rwlock_unlock(&load_hash
[i
].rilock
);
265 pthread_rwlock_destroy(&load_hash
[i
].rilock
);
266 pthread_rwlock_unlock(&load_hash
[i
].rdlock
);
267 pthread_rwlock_destroy(&load_hash
[i
].rdlock
);
271 /* Data for CPU view */
272 struct cg_proc_stat
{
274 struct cpuacct_usage
*usage
; // Real usage as read from the host's /proc/stat
275 struct cpuacct_usage
*view
; // Usage stats reported to the container
277 pthread_mutex_t lock
; // For node manipulation
278 struct cg_proc_stat
*next
;
281 struct cg_proc_stat_head
{
282 struct cg_proc_stat
*next
;
286 * For access to the list. Reading can be parallel, pruning is exclusive.
288 pthread_rwlock_t lock
;
291 #define CPUVIEW_HASH_SIZE 100
292 static struct cg_proc_stat_head
*proc_stat_history
[CPUVIEW_HASH_SIZE
];
294 static bool cpuview_init_head(struct cg_proc_stat_head
**head
)
296 *head
= malloc(sizeof(struct cg_proc_stat_head
));
298 lxcfs_error("%s\n", strerror(errno
));
302 (*head
)->lastcheck
= time(NULL
);
303 (*head
)->next
= NULL
;
305 if (pthread_rwlock_init(&(*head
)->lock
, NULL
) != 0) {
306 lxcfs_error("%s\n", "Failed to initialize list lock");
314 static bool init_cpuview()
318 for (i
= 0; i
< CPUVIEW_HASH_SIZE
; i
++)
319 proc_stat_history
[i
] = NULL
;
321 for (i
= 0; i
< CPUVIEW_HASH_SIZE
; i
++) {
322 if (!cpuview_init_head(&proc_stat_history
[i
]))
329 for (i
= 0; i
< CPUVIEW_HASH_SIZE
; i
++) {
330 if (proc_stat_history
[i
])
331 free_disarm(proc_stat_history
[i
]);
337 static void free_proc_stat_node(struct cg_proc_stat
*node
)
339 pthread_mutex_destroy(&node
->lock
);
340 free_disarm(node
->cg
);
341 free_disarm(node
->usage
);
342 free_disarm(node
->view
);
346 static void cpuview_free_head(struct cg_proc_stat_head
*head
)
348 struct cg_proc_stat
*node
, *tmp
;
356 free_proc_stat_node(tmp
);
363 pthread_rwlock_destroy(&head
->lock
);
367 static void free_cpuview()
371 for (i
= 0; i
< CPUVIEW_HASH_SIZE
; i
++) {
372 if (proc_stat_history
[i
])
373 cpuview_free_head(proc_stat_history
[i
]);
378 * A table caching which pid is init for a pid namespace.
379 * When looking up which pid is init for $qpid, we first
380 * 1. Stat /proc/$qpid/ns/pid.
381 * 2. Check whether the ino_t is in our store.
382 * a. if not, fork a child in qpid's ns to send us
383 * ucred.pid = 1, and read the initpid. Cache
384 * initpid and creation time for /proc/initpid
385 * in a new store entry.
386 * b. if so, verify that /proc/initpid still matches
387 * what we have saved. If not, clear the store
388 * entry and go back to a. If so, return the
391 struct pidns_init_store
{
392 ino_t ino
; // inode number for /proc/$pid/ns/pid
393 pid_t initpid
; // the pid of init in that ns
394 long int ctime
; // the time at which /proc/$initpid was created
395 struct pidns_init_store
*next
;
399 /* lol - look at how they are allocated in the kernel */
400 #define PIDNS_HASH_SIZE 4096
401 #define HASH(x) ((x) % PIDNS_HASH_SIZE)
403 static struct pidns_init_store
*pidns_hash_table
[PIDNS_HASH_SIZE
];
404 static pthread_mutex_t pidns_store_mutex
= PTHREAD_MUTEX_INITIALIZER
;
405 static void lock_mutex(pthread_mutex_t
*l
)
409 if ((ret
= pthread_mutex_lock(l
)) != 0) {
410 lxcfs_error("returned:%d %s\n", ret
, strerror(ret
));
415 struct cgroup_ops
*cgroup_ops
;
417 static int cgroup_mount_ns_fd
= -1;
419 static void unlock_mutex(pthread_mutex_t
*l
)
423 if ((ret
= pthread_mutex_unlock(l
)) != 0) {
424 lxcfs_error("returned:%d %s\n", ret
, strerror(ret
));
/* Take the global pidns-init store mutex; must be held around any
 * access to pidns_hash_table. */
static void store_lock(void)
{
	lock_mutex(&pidns_store_mutex);
}
/* Release the global pidns-init store mutex taken by store_lock(). */
static void store_unlock(void)
{
	unlock_mutex(&pidns_store_mutex);
}
439 /* Must be called under store_lock */
440 static bool initpid_still_valid(struct pidns_init_store
*e
, struct stat
*nsfdsb
)
445 snprintf(fnam
, 100, "/proc/%d", e
->initpid
);
446 if (stat(fnam
, &initsb
) < 0)
449 lxcfs_debug("Comparing ctime %ld == %ld for pid %d.\n", e
->ctime
,
450 initsb
.st_ctime
, e
->initpid
);
452 if (e
->ctime
!= initsb
.st_ctime
)
457 /* Must be called under store_lock */
458 static void remove_initpid(struct pidns_init_store
*e
)
460 struct pidns_init_store
*tmp
;
463 lxcfs_debug("Remove_initpid: removing entry for %d.\n", e
->initpid
);
466 if (pidns_hash_table
[h
] == e
) {
467 pidns_hash_table
[h
] = e
->next
;
472 tmp
= pidns_hash_table
[h
];
474 if (tmp
->next
== e
) {
484 /* Must be called under store_lock */
485 static void prune_initpid_store(void)
487 static long int last_prune
= 0;
488 struct pidns_init_store
*e
, *prev
, *delme
;
489 long int now
, threshold
;
493 last_prune
= time(NULL
);
497 if (now
< last_prune
+ PURGE_SECS
)
500 lxcfs_debug("%s\n", "Pruning.");
503 threshold
= now
- 2 * PURGE_SECS
;
505 for (i
= 0; i
< PIDNS_HASH_SIZE
; i
++) {
506 for (prev
= NULL
, e
= pidns_hash_table
[i
]; e
; ) {
507 if (e
->lastcheck
< threshold
) {
509 lxcfs_debug("Removing cached entry for %d.\n", e
->initpid
);
513 prev
->next
= e
->next
;
515 pidns_hash_table
[i
] = e
->next
;
526 /* Must be called under store_lock */
527 static void save_initpid(struct stat
*sb
, pid_t pid
)
529 struct pidns_init_store
*e
;
534 lxcfs_debug("Save_initpid: adding entry for %d.\n", pid
);
536 snprintf(fpath
, 100, "/proc/%d", pid
);
537 if (stat(fpath
, &procsb
) < 0)
540 e
= malloc(sizeof(*e
));
544 e
->ctime
= procsb
.st_ctime
;
546 e
->next
= pidns_hash_table
[h
];
547 e
->lastcheck
= time(NULL
);
548 pidns_hash_table
[h
] = e
;
552 * Given the stat(2) info for a nsfd pid inode, lookup the init_pid_store
553 * entry for the inode number and creation time. Verify that the init pid
554 * is still valid. If not, remove it. Return the entry if valid, NULL
556 * Must be called under store_lock
558 static struct pidns_init_store
*lookup_verify_initpid(struct stat
*sb
)
560 int h
= HASH(sb
->st_ino
);
561 struct pidns_init_store
*e
= pidns_hash_table
[h
];
564 if (e
->ino
== sb
->st_ino
) {
565 if (initpid_still_valid(e
, sb
)) {
566 e
->lastcheck
= time(NULL
);
/*
 * is_dir - check whether @path, resolved relative to directory fd @fd,
 * refers to a directory.
 * Returns 1 if it does, 0 otherwise (including when the stat itself fails).
 */
static int is_dir(const char *path, int fd)
{
	struct stat statbuf;
	int ret;

	/* Bug fix: the fourth argument of fstatat() is a flags bitmask.
	 * The directory fd was mistakenly passed there, which can make the
	 * call fail with EINVAL or change symlink handling unpredictably.
	 */
	ret = fstatat(fd, path, &statbuf, 0);
	if (ret == 0 && S_ISDIR(statbuf.st_mode))
		return 1;

	return 0;
}
587 static int preserve_ns(const int pid
, const char *ns
)
590 /* 5 /proc + 21 /int_as_str + 3 /ns + 20 /NS_NAME + 1 \0 */
591 #define __NS_PATH_LEN 50
592 char path
[__NS_PATH_LEN
];
594 /* This way we can use this function to also check whether namespaces
595 * are supported by the kernel by passing in the NULL or the empty
598 ret
= snprintf(path
, __NS_PATH_LEN
, "/proc/%d/ns%s%s", pid
,
599 !ns
|| strcmp(ns
, "") == 0 ? "" : "/",
600 !ns
|| strcmp(ns
, "") == 0 ? "" : ns
);
601 if (ret
< 0 || (size_t)ret
>= __NS_PATH_LEN
) {
606 return open(path
, O_RDONLY
| O_CLOEXEC
);
610 * in_same_namespace - Check whether two processes are in the same namespace.
611 * @pid1 - PID of the first process.
612 * @pid2 - PID of the second process.
613 * @ns - Name of the namespace to check. Must correspond to one of the names
614 * for the namespaces as shown in /proc/<pid/ns/
616 * If the two processes are not in the same namespace returns an fd to the
617 * namespace of the second process identified by @pid2. If the two processes are
618 * in the same namespace returns -EINVAL, -1 if an error occurred.
620 static int in_same_namespace(pid_t pid1
, pid_t pid2
, const char *ns
)
622 __do_close_prot_errno
int ns_fd1
= -1, ns_fd2
= -1;
624 struct stat ns_st1
, ns_st2
;
626 ns_fd1
= preserve_ns(pid1
, ns
);
628 /* The kernel does not support this namespace. This is not an
637 ns_fd2
= preserve_ns(pid2
, ns
);
641 ret
= fstat(ns_fd1
, &ns_st1
);
645 ret
= fstat(ns_fd2
, &ns_st2
);
649 /* processes are in the same namespace */
650 if ((ns_st1
.st_dev
== ns_st2
.st_dev
) && (ns_st1
.st_ino
== ns_st2
.st_ino
))
653 /* processes are in different namespaces */
654 return move_fd(ns_fd2
);
657 static bool is_shared_pidns(pid_t pid
)
662 if (in_same_namespace(pid
, getpid(), "pid") == -EINVAL
)
668 static bool write_string(const char *fnam
, const char *string
, int fd
)
677 len
= strlen(string
);
678 ret
= fwrite(string
, 1, len
, f
);
680 lxcfs_error("%s - Error writing \"%s\" to \"%s\"\n",
681 strerror(errno
), string
, fnam
);
687 lxcfs_error("%s - Failed to close \"%s\"\n", strerror(errno
), fnam
);
700 static void print_subsystems(void)
704 fprintf(stderr
, "mount namespace: %d\n", cgroup_mount_ns_fd
);
705 fprintf(stderr
, "hierarchies:\n");
706 for (struct hierarchy
**h
= cgroup_ops
->hierarchies
; h
&& *h
; h
++, i
++) {
707 __do_free
char *controllers
= lxc_string_join(",", (const char **)(*h
)->controllers
, false);
708 fprintf(stderr
, " %2d: fd: %3d: %s\n", i
, (*h
)->fd
, controllers
?: "");
712 /* do we need to do any massaging here? I'm not sure... */
713 /* Return the mounted controller and store the corresponding open file descriptor
714 * referring to the controller mountpoint in the private lxcfs namespace in
717 static int find_mounted_controller(const char *controller
)
721 h
= cgroup_ops
->get_hierarchy(cgroup_ops
, controller
);
722 return h
? h
->fd
: -EBADF
;
725 bool cgfs_set_value(const char *controller
, const char *cgroup
, const char *file
,
732 cfd
= find_mounted_controller(controller
);
736 /* Make sure we pass a relative path to *at() family of functions.
737 * . + /cgroup + / + file + \0
739 len
= strlen(cgroup
) + strlen(file
) + 3;
741 ret
= snprintf(fnam
, len
, "%s%s/%s", dot_or_empty(cgroup
), cgroup
, file
);
742 if (ret
< 0 || (size_t)ret
>= len
)
745 fd
= openat(cfd
, fnam
, O_WRONLY
);
749 return write_string(fnam
, value
, fd
);
752 // Chown all the files in the cgroup directory. We do this when we create
753 // a cgroup on behalf of a user.
754 static void chown_all_cgroup_files(const char *dirname
, uid_t uid
, gid_t gid
, int fd
)
756 struct dirent
*direntp
;
757 char path
[MAXPATHLEN
];
762 len
= strlen(dirname
);
763 if (len
>= MAXPATHLEN
) {
764 lxcfs_error("Pathname too long: %s\n", dirname
);
768 fd1
= openat(fd
, dirname
, O_DIRECTORY
);
774 lxcfs_error("Failed to open %s\n", dirname
);
778 while ((direntp
= readdir(d
))) {
779 if (!strcmp(direntp
->d_name
, ".") || !strcmp(direntp
->d_name
, ".."))
781 ret
= snprintf(path
, MAXPATHLEN
, "%s/%s", dirname
, direntp
->d_name
);
782 if (ret
< 0 || ret
>= MAXPATHLEN
) {
783 lxcfs_error("Pathname too long under %s\n", dirname
);
786 if (fchownat(fd
, path
, uid
, gid
, 0) < 0)
787 lxcfs_error("Failed to chown file %s to %u:%u", path
, uid
, gid
);
792 int cgfs_create(const char *controller
, const char *cg
, uid_t uid
, gid_t gid
)
798 cfd
= find_mounted_controller(controller
);
802 /* Make sure we pass a relative path to *at() family of functions.
805 len
= strlen(cg
) + 2;
806 dirnam
= alloca(len
);
807 snprintf(dirnam
, len
, "%s%s", dot_or_empty(cg
), cg
);
809 if (mkdirat(cfd
, dirnam
, 0755) < 0)
812 if (uid
== 0 && gid
== 0)
815 if (fchownat(cfd
, dirnam
, uid
, gid
, 0) < 0)
818 chown_all_cgroup_files(dirnam
, uid
, gid
, cfd
);
823 static bool recursive_rmdir(const char *dirname
, int fd
, const int cfd
)
825 struct dirent
*direntp
;
828 char pathname
[MAXPATHLEN
];
831 dupfd
= dup(fd
); // fdopendir() does bad things once it uses an fd.
835 dir
= fdopendir(dupfd
);
837 lxcfs_debug("Failed to open %s: %s.\n", dirname
, strerror(errno
));
842 while ((direntp
= readdir(dir
))) {
846 if (!strcmp(direntp
->d_name
, ".") ||
847 !strcmp(direntp
->d_name
, ".."))
850 rc
= snprintf(pathname
, MAXPATHLEN
, "%s/%s", dirname
, direntp
->d_name
);
851 if (rc
< 0 || rc
>= MAXPATHLEN
) {
852 lxcfs_error("%s\n", "Pathname too long.");
856 rc
= fstatat(cfd
, pathname
, &mystat
, AT_SYMLINK_NOFOLLOW
);
858 lxcfs_debug("Failed to stat %s: %s.\n", pathname
, strerror(errno
));
861 if (S_ISDIR(mystat
.st_mode
))
862 if (!recursive_rmdir(pathname
, fd
, cfd
))
863 lxcfs_debug("Error removing %s.\n", pathname
);
867 if (closedir(dir
) < 0) {
868 lxcfs_error("Failed to close directory %s: %s\n", dirname
, strerror(errno
));
872 if (unlinkat(cfd
, dirname
, AT_REMOVEDIR
) < 0) {
873 lxcfs_debug("Failed to delete %s: %s.\n", dirname
, strerror(errno
));
882 bool cgfs_remove(const char *controller
, const char *cg
)
889 cfd
= find_mounted_controller(controller
);
893 /* Make sure we pass a relative path to *at() family of functions.
896 len
= strlen(cg
) + 2;
897 dirnam
= alloca(len
);
898 snprintf(dirnam
, len
, "%s%s", dot_or_empty(cg
), cg
);
900 fd
= openat(cfd
, dirnam
, O_DIRECTORY
);
904 bret
= recursive_rmdir(dirnam
, fd
, cfd
);
909 bool cgfs_chmod_file(const char *controller
, const char *file
, mode_t mode
)
915 cfd
= find_mounted_controller(controller
);
919 /* Make sure we pass a relative path to *at() family of functions.
922 len
= strlen(file
) + 2;
923 pathname
= alloca(len
);
924 snprintf(pathname
, len
, "%s%s", dot_or_empty(file
), file
);
925 if (fchmodat(cfd
, pathname
, mode
, 0) < 0)
930 static int chown_tasks_files(const char *dirname
, uid_t uid
, gid_t gid
, int fd
)
935 len
= strlen(dirname
) + strlen("/cgroup.procs") + 1;
937 snprintf(fname
, len
, "%s/tasks", dirname
);
938 if (fchownat(fd
, fname
, uid
, gid
, 0) != 0)
940 snprintf(fname
, len
, "%s/cgroup.procs", dirname
);
941 if (fchownat(fd
, fname
, uid
, gid
, 0) != 0)
946 int cgfs_chown_file(const char *controller
, const char *file
, uid_t uid
, gid_t gid
)
952 cfd
= find_mounted_controller(controller
);
956 /* Make sure we pass a relative path to *at() family of functions.
959 len
= strlen(file
) + 2;
960 pathname
= alloca(len
);
961 snprintf(pathname
, len
, "%s%s", dot_or_empty(file
), file
);
962 if (fchownat(cfd
, pathname
, uid
, gid
, 0) < 0)
965 if (is_dir(pathname
, cfd
))
966 // like cgmanager did, we want to chown the tasks file as well
967 return chown_tasks_files(pathname
, uid
, gid
, cfd
);
972 FILE *open_pids_file(const char *controller
, const char *cgroup
)
978 cfd
= find_mounted_controller(controller
);
982 /* Make sure we pass a relative path to *at() family of functions.
983 * . + /cgroup + / "cgroup.procs" + \0
985 len
= strlen(cgroup
) + strlen("cgroup.procs") + 3;
986 pathname
= alloca(len
);
987 snprintf(pathname
, len
, "%s%s/cgroup.procs", dot_or_empty(cgroup
), cgroup
);
989 fd
= openat(cfd
, pathname
, O_WRONLY
);
993 return fdopen(fd
, "w");
996 static bool cgfs_iterate_cgroup(const char *controller
, const char *cgroup
, bool directories
,
997 void ***list
, size_t typesize
,
998 void* (*iterator
)(const char*, const char*, const char*))
1003 char pathname
[MAXPATHLEN
];
1004 size_t sz
= 0, asz
= 0;
1005 struct dirent
*dirent
;
1008 cfd
= find_mounted_controller(controller
);
1013 /* Make sure we pass a relative path to *at() family of functions. */
1014 len
= strlen(cgroup
) + 1 /* . */ + 1 /* \0 */;
1016 ret
= snprintf(cg
, len
, "%s%s", dot_or_empty(cgroup
), cgroup
);
1017 if (ret
< 0 || (size_t)ret
>= len
) {
1018 lxcfs_error("Pathname too long under %s\n", cgroup
);
1022 fd
= openat(cfd
, cg
, O_DIRECTORY
);
1026 dir
= fdopendir(fd
);
1030 while ((dirent
= readdir(dir
))) {
1033 if (!strcmp(dirent
->d_name
, ".") ||
1034 !strcmp(dirent
->d_name
, ".."))
1037 ret
= snprintf(pathname
, MAXPATHLEN
, "%s/%s", cg
, dirent
->d_name
);
1038 if (ret
< 0 || ret
>= MAXPATHLEN
) {
1039 lxcfs_error("Pathname too long under %s\n", cg
);
1043 ret
= fstatat(cfd
, pathname
, &mystat
, AT_SYMLINK_NOFOLLOW
);
1045 lxcfs_error("Failed to stat %s: %s\n", pathname
, strerror(errno
));
1048 if ((!directories
&& !S_ISREG(mystat
.st_mode
)) ||
1049 (directories
&& !S_ISDIR(mystat
.st_mode
)))
1056 tmp
= realloc(*list
, asz
* typesize
);
1060 (*list
)[sz
] = (*iterator
)(controller
, cg
, dirent
->d_name
);
1061 (*list
)[sz
+1] = NULL
;
1064 if (closedir(dir
) < 0) {
1065 lxcfs_error("Failed closedir for %s: %s\n", cgroup
, strerror(errno
));
1071 static void *make_children_list_entry(const char *controller
, const char *cgroup
, const char *dir_entry
)
1075 dup
= strdup(dir_entry
);
1080 bool cgfs_list_children(const char *controller
, const char *cgroup
, char ***list
)
1082 return cgfs_iterate_cgroup(controller
, cgroup
, true, (void***)list
, sizeof(*list
), &make_children_list_entry
);
1085 void free_key(struct cgfs_files
*k
)
1089 free_disarm(k
->name
);
1093 void free_keys(struct cgfs_files
**keys
)
1099 for (i
= 0; keys
[i
]; i
++) {
1105 bool cgfs_param_exist(const char *controller
, const char *cgroup
, const char *file
)
1111 cfd
= find_mounted_controller(controller
);
1115 /* Make sure we pass a relative path to *at() family of functions.
1116 * . + /cgroup + / + file + \0
1118 len
= strlen(cgroup
) + strlen(file
) + 3;
1120 ret
= snprintf(fnam
, len
, "%s%s/%s", dot_or_empty(cgroup
), cgroup
, file
);
1121 if (ret
< 0 || (size_t)ret
>= len
)
1124 return (faccessat(cfd
, fnam
, F_OK
, 0) == 0);
1127 struct cgfs_files
*cgfs_get_key(const char *controller
, const char *cgroup
, const char *file
)
1133 struct cgfs_files
*newkey
;
1135 cfd
= find_mounted_controller(controller
);
1139 if (file
&& *file
== '/')
1142 if (file
&& strchr(file
, '/'))
1145 /* Make sure we pass a relative path to *at() family of functions.
1146 * . + /cgroup + / + file + \0
1148 len
= strlen(cgroup
) + 3;
1150 len
+= strlen(file
) + 1;
1152 snprintf(fnam
, len
, "%s%s%s%s", dot_or_empty(cgroup
), cgroup
,
1153 file
? "/" : "", file
? file
: "");
1155 ret
= fstatat(cfd
, fnam
, &sb
, 0);
1160 newkey
= malloc(sizeof(struct cgfs_files
));
1163 newkey
->name
= must_copy_string(file
);
1164 else if (strrchr(cgroup
, '/'))
1165 newkey
->name
= must_copy_string(strrchr(cgroup
, '/'));
1167 newkey
->name
= must_copy_string(cgroup
);
1168 newkey
->uid
= sb
.st_uid
;
1169 newkey
->gid
= sb
.st_gid
;
1170 newkey
->mode
= sb
.st_mode
;
1175 static void *make_key_list_entry(const char *controller
, const char *cgroup
, const char *dir_entry
)
1177 struct cgfs_files
*entry
= cgfs_get_key(controller
, cgroup
, dir_entry
);
1179 lxcfs_error("Error getting files under %s:%s\n", controller
,
1185 bool cgfs_list_keys(const char *controller
, const char *cgroup
, struct cgfs_files
***keys
)
1187 return cgfs_iterate_cgroup(controller
, cgroup
, false, (void***)keys
, sizeof(*keys
), &make_key_list_entry
);
1190 bool is_child_cgroup(const char *controller
, const char *cgroup
, const char *f
)
1198 cfd
= find_mounted_controller(controller
);
1202 /* Make sure we pass a relative path to *at() family of functions.
1203 * . + /cgroup + / + f + \0
1205 len
= strlen(cgroup
) + strlen(f
) + 3;
1207 ret
= snprintf(fnam
, len
, "%s%s/%s", dot_or_empty(cgroup
), cgroup
, f
);
1208 if (ret
< 0 || (size_t)ret
>= len
)
1211 ret
= fstatat(cfd
, fnam
, &sb
, 0);
1212 if (ret
< 0 || !S_ISDIR(sb
.st_mode
))
1218 #define SEND_CREDS_OK 0
1219 #define SEND_CREDS_NOTSK 1
1220 #define SEND_CREDS_FAIL 2
1221 static bool recv_creds(int sock
, struct ucred
*cred
, char *v
);
1222 static int wait_for_pid(pid_t pid
);
1223 static int send_creds(int sock
, struct ucred
*cred
, char v
, bool pingfirst
);
1224 static int send_creds_clone_wrapper(void *arg
);
1227 * clone a task which switches to @task's namespace and writes '1'.
1228 * over a unix sock so we can read the task's reaper's pid in our
1231 * Note: glibc's fork() does not respect pidns, which can lead to failed
1232 * assertions inside glibc (and thus failed forks) if the child's pid in
1233 * the pidns and the parent pid outside are identical. Using clone prevents
1236 static void write_task_init_pid_exit(int sock
, pid_t target
)
1241 size_t stack_size
= sysconf(_SC_PAGESIZE
);
1242 void *stack
= alloca(stack_size
);
1244 ret
= snprintf(fnam
, sizeof(fnam
), "/proc/%d/ns/pid", (int)target
);
1245 if (ret
< 0 || ret
>= sizeof(fnam
))
1248 fd
= open(fnam
, O_RDONLY
);
1250 perror("write_task_init_pid_exit open of ns/pid");
1254 perror("write_task_init_pid_exit setns 1");
1258 pid
= clone(send_creds_clone_wrapper
, stack
+ stack_size
, SIGCHLD
, &sock
);
1262 if (!wait_for_pid(pid
))
1268 static int send_creds_clone_wrapper(void *arg
) {
1271 int sock
= *(int *)arg
;
1273 /* we are the child */
1278 if (send_creds(sock
, &cred
, v
, true) != SEND_CREDS_OK
)
1283 static pid_t
get_init_pid_for_task(pid_t task
)
1291 if (socketpair(AF_UNIX
, SOCK_DGRAM
, 0, sock
) < 0) {
1292 perror("socketpair");
1301 write_task_init_pid_exit(sock
[0], task
);
1305 if (!recv_creds(sock
[1], &cred
, &v
))
1317 pid_t
lookup_initpid_in_store(pid_t qpid
)
1321 struct pidns_init_store
*e
;
1324 snprintf(fnam
, 100, "/proc/%d/ns/pid", qpid
);
1326 if (stat(fnam
, &sb
) < 0)
1328 e
= lookup_verify_initpid(&sb
);
1330 answer
= e
->initpid
;
1333 answer
= get_init_pid_for_task(qpid
);
1335 save_initpid(&sb
, answer
);
1338 /* we prune at end in case we are returning
1339 * the value we were about to return */
1340 prune_initpid_store();
1345 static int wait_for_pid(pid_t pid
)
1353 ret
= waitpid(pid
, &status
, 0);
1361 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0)
1367 * append the given formatted string to *src.
1368 * src: a pointer to a char* in which to append the formatted string.
1369 * sz: the number of characters printed so far, minus trailing \0.
1370 * asz: the allocated size so far
1371 * format: string format. See printf for details.
1372 * ...: varargs. See printf for details.
1374 static void must_strcat(char **src
, size_t *sz
, size_t *asz
, const char *format
, ...)
1376 char tmp
[BUF_RESERVE_SIZE
];
1379 va_start (args
, format
);
1380 int tmplen
= vsnprintf(tmp
, BUF_RESERVE_SIZE
, format
, args
);
1383 if (!*src
|| tmplen
+ *sz
+ 1 >= *asz
) {
1386 tmp
= realloc(*src
, *asz
+ BUF_RESERVE_SIZE
);
1389 *asz
+= BUF_RESERVE_SIZE
;
1391 memcpy((*src
) +*sz
, tmp
, tmplen
+1); /* include the \0 */
1396 * append pid to *src.
1397 * src: a pointer to a char* in which to append the pid.
1398 * sz: the number of characters printed so far, minus trailing \0.
1399 * asz: the allocated size so far
1400 * pid: the pid to append
/* Append "<pid>\n" to *src via must_strcat(); *sz and *asz follow the
 * same contract as in must_strcat(). */
static void must_strcat_pid(char **src, size_t *sz, size_t *asz, pid_t pid)
{
	must_strcat(src, sz, asz, "%d\n", (int)pid);
}
1408 * Given a open file * to /proc/pid/{u,g}id_map, and an id
1409 * valid in the caller's namespace, return the id mapped into
1411 * Returns the mapped id, or -1 on error.
/*
 * Given an open FILE * to /proc/pid/{u,g}id_map and an id valid in the
 * caller's namespace, return that id translated into the map file's
 * namespace. Returns the mapped id, or -1 (as unsigned) when no mapping
 * range covers in_id or a wrapping entry is encountered.
 */
static unsigned int
convert_id_to_ns(FILE *idfile, unsigned int in_id)
{
	unsigned int nsuid;   /* base id of a range in the idfile's namespace */
	unsigned int hostuid; /* base id of a range in the caller's namespace */
	unsigned int count;   /* number of ids in this range */
	char line[400];
	int ret;

	fseek(idfile, 0L, SEEK_SET);
	while (fgets(line, 400, idfile)) {
		ret = sscanf(line, "%u %u %u\n", &nsuid, &hostuid, &count);
		if (ret != 3)
			continue;

		if (hostuid + count < hostuid || nsuid + count < nsuid) {
			/* The ids wrapped around — unexpected in a procfile,
			 * so bail out rather than trust the entry.
			 */
			lxcfs_error("pid wrapparound at entry %u %u %u in %s\n",
				    nsuid, hostuid, count, line);
			return -1;
		}

		if (hostuid <= in_id && hostuid + count > in_id) {
			/* hostuid <= in_id < hostuid+count, and neither range
			 * end wraps, so nsuid + (in_id - hostuid) cannot wrap
			 * either.
			 */
			return (in_id - hostuid) + nsuid;
		}
	}

	return -1;
}
1452 * for is_privileged_over,
1453 * specify whether we require the calling uid to be root in his
1456 #define NS_ROOT_REQD true
1457 #define NS_ROOT_OPT false
1461 static bool is_privileged_over(pid_t pid
, uid_t uid
, uid_t victim
, bool req_ns_root
)
1463 char fpath
[PROCLEN
];
1465 bool answer
= false;
1468 if (victim
== -1 || uid
== -1)
1472 * If the request is one not requiring root in the namespace,
1473 * then having the same uid suffices. (i.e. uid 1000 has write
1474 * access to files owned by uid 1000
1476 if (!req_ns_root
&& uid
== victim
)
1479 ret
= snprintf(fpath
, PROCLEN
, "/proc/%d/uid_map", pid
);
1480 if (ret
< 0 || ret
>= PROCLEN
)
1482 FILE *f
= fopen(fpath
, "r");
1486 /* if caller's not root in his namespace, reject */
1487 nsuid
= convert_id_to_ns(f
, uid
);
1492 * If victim is not mapped into caller's ns, reject.
1493 * XXX I'm not sure this check is needed given that fuse
1494 * will be sending requests where the vfs has converted
1496 nsuid
= convert_id_to_ns(f
, victim
);
/*
 * Check whether the "other" permission bits in @fmode grant the access
 * requested by @req_mode (an open(2)-style O_RDONLY/O_WRONLY/O_RDWR mode).
 */
static bool perms_include(int fmode, mode_t req_mode)
{
	mode_t acc_mode = req_mode & O_ACCMODE;
	mode_t needed;

	if (acc_mode == O_RDONLY)
		needed = S_IROTH;
	else if (acc_mode == O_WRONLY)
		needed = S_IWOTH;
	else if (acc_mode == O_RDWR)
		needed = S_IROTH | S_IWOTH;
	else
		return false;

	return ((fmode & needed) == needed);
}
1530 * querycg is /a/b/c/d/e
1533 static char *get_next_cgroup_dir(const char *taskcg
, const char *querycg
)
1537 if (strlen(taskcg
) <= strlen(querycg
)) {
1538 lxcfs_error("%s\n", "I was fed bad input.");
1542 if ((strcmp(querycg
, "/") == 0) || (strcmp(querycg
, "./") == 0))
1543 start
= strdup(taskcg
+ 1);
1545 start
= strdup(taskcg
+ strlen(querycg
) + 1);
1548 end
= strchr(start
, '/');
1554 char *get_pid_cgroup(pid_t pid
, const char *contrl
)
1558 cfd
= find_mounted_controller(contrl
);
1562 if (pure_unified_layout(cgroup_ops
))
1563 return cg_unified_get_current_cgroup(pid
);
1565 return cg_legacy_get_current_cgroup(pid
, contrl
);
1569 * check whether a fuse context may access a cgroup dir or file
1571 * If file is not null, it is a cgroup file to check under cg.
1572 * If file is null, then we are checking perms on cg itself.
1574 * For files we can check the mode of the list_keys result.
1575 * For cgroups, we must make assumptions based on the files under the
1576 * cgroup, because cgmanager doesn't tell us ownership/perms of cgroups
1579 static bool fc_may_access(struct fuse_context
*fc
, const char *contrl
, const char *cg
, const char *file
, mode_t mode
)
1581 struct cgfs_files
*k
= NULL
;
1584 k
= cgfs_get_key(contrl
, cg
, file
);
1588 if (is_privileged_over(fc
->pid
, fc
->uid
, k
->uid
, NS_ROOT_OPT
)) {
1589 if (perms_include(k
->mode
>> 6, mode
)) {
1594 if (fc
->gid
== k
->gid
) {
1595 if (perms_include(k
->mode
>> 3, mode
)) {
1600 ret
= perms_include(k
->mode
, mode
);
1607 #define INITSCOPE "/init.scope"
1608 void prune_init_slice(char *cg
)
1611 size_t cg_len
= strlen(cg
), initscope_len
= strlen(INITSCOPE
);
1613 if (cg_len
< initscope_len
)
1616 point
= cg
+ cg_len
- initscope_len
;
1617 if (strcmp(point
, INITSCOPE
) == 0) {
1626 * If pid is in /a/b/c/d, he may only act on things under cg=/a/b/c/d.
1627 * If pid is in /a, he may act on /a/b, but not on /b.
1628 * if the answer is false and nextcg is not NULL, then *nextcg will point
1629 * to a string containing the next cgroup directory under cg, which must be
1630 * freed by the caller.
1632 static bool caller_is_in_ancestor(pid_t pid
, const char *contrl
, const char *cg
, char **nextcg
)
1634 bool answer
= false;
1635 char *c2
= get_pid_cgroup(pid
, contrl
);
1640 prune_init_slice(c2
);
1643 * callers pass in '/' or './' (openat()) for root cgroup, otherwise
1644 * they pass in a cgroup without leading '/'
1646 * The original line here was:
1647 * linecmp = *cg == '/' ? c2 : c2+1;
1648 * TODO: I'm not sure why you'd want to increment when *cg != '/'?
1649 * Serge, do you know?
1651 if (*cg
== '/' || !strncmp(cg
, "./", 2))
1655 if (strncmp(linecmp
, cg
, strlen(linecmp
)) != 0) {
1657 *nextcg
= get_next_cgroup_dir(linecmp
, cg
);
1669 * If pid is in /a/b/c, he may see that /a exists, but not /b or /a/c.
1671 static bool caller_may_see_dir(pid_t pid
, const char *contrl
, const char *cg
)
1673 bool answer
= false;
1675 size_t target_len
, task_len
;
1677 if (strcmp(cg
, "/") == 0 || strcmp(cg
, "./") == 0)
1680 c2
= get_pid_cgroup(pid
, contrl
);
1683 prune_init_slice(c2
);
1686 target_len
= strlen(cg
);
1687 task_len
= strlen(task_cg
);
1688 if (task_len
== 0) {
1689 /* Task is in the root cg, it can see everything. This case is
1690 * not handled by the strmcps below, since they test for the
1691 * last /, but that is the first / that we've chopped off
1697 if (strcmp(cg
, task_cg
) == 0) {
1701 if (target_len
< task_len
) {
1702 /* looking up a parent dir */
1703 if (strncmp(task_cg
, cg
, target_len
) == 0 && task_cg
[target_len
] == '/')
1707 if (target_len
> task_len
) {
1708 /* looking up a child dir */
1709 if (strncmp(task_cg
, cg
, task_len
) == 0 && cg
[task_len
] == '/')
1720 * given /cgroup/freezer/a/b, return "freezer".
1721 * the returned char* should NOT be freed.
1723 static char *pick_controller_from_path(struct fuse_context
*fc
, const char *path
)
1726 char *contr
, *slash
;
1728 if (strlen(path
) < 9) {
1732 if (*(path
+ 7) != '/') {
1737 contr
= strdupa(p1
);
1742 slash
= strstr(contr
, "/");
1746 for (struct hierarchy
**h
= cgroup_ops
->hierarchies
; h
&& *h
; h
++) {
1747 if ((*h
)->__controllers
&& strcmp((*h
)->__controllers
, contr
) == 0)
1748 return (*h
)->__controllers
;
1755 * Find the start of cgroup in /cgroup/controller/the/cgroup/path
1756 * Note that the returned value may include files (keynames) etc
1758 static const char *find_cgroup_in_path(const char *path
)
1762 if (strlen(path
) < 9) {
1766 p1
= strstr(path
+ 8, "/");
1776 * split the last path element from the path in @cg.
1777 * @dir is newly allocated and should be freed, @last not
1779 static void get_cgdir_and_path(const char *cg
, char **dir
, char **last
)
1786 *last
= strrchr(cg
, '/');
1791 p
= strrchr(*dir
, '/');
1796 * FUSE ops for /cgroup
1799 int cg_getattr(const char *path
, struct stat
*sb
)
1801 struct timespec now
;
1802 struct fuse_context
*fc
= fuse_get_context();
1803 char * cgdir
= NULL
;
1804 char *last
= NULL
, *path1
, *path2
;
1805 struct cgfs_files
*k
= NULL
;
1807 const char *controller
= NULL
;
1811 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
1814 memset(sb
, 0, sizeof(struct stat
));
1816 if (clock_gettime(CLOCK_REALTIME
, &now
) < 0)
1819 sb
->st_uid
= sb
->st_gid
= 0;
1820 sb
->st_atim
= sb
->st_mtim
= sb
->st_ctim
= now
;
1823 if (strcmp(path
, "/cgroup") == 0) {
1824 sb
->st_mode
= S_IFDIR
| 00755;
1829 controller
= pick_controller_from_path(fc
, path
);
1832 cgroup
= find_cgroup_in_path(path
);
1834 /* this is just /cgroup/controller, return it as a dir */
1835 sb
->st_mode
= S_IFDIR
| 00755;
1840 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
1850 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
1851 if (initpid
<= 1 || is_shared_pidns(initpid
))
1853 /* check that cgcopy is either a child cgroup of cgdir, or listed in its keys.
1854 * Then check that caller's cgroup is under path if last is a child
1855 * cgroup, or cgdir if last is a file */
1857 if (is_child_cgroup(controller
, path1
, path2
)) {
1858 if (!caller_may_see_dir(initpid
, controller
, cgroup
)) {
1862 if (!caller_is_in_ancestor(initpid
, controller
, cgroup
, NULL
)) {
1863 /* this is just /cgroup/controller, return it as a dir */
1864 sb
->st_mode
= S_IFDIR
| 00555;
1869 if (!fc_may_access(fc
, controller
, cgroup
, NULL
, O_RDONLY
)) {
1874 // get uid, gid, from '/tasks' file and make up a mode
1875 // That is a hack, until cgmanager gains a GetCgroupPerms fn.
1876 sb
->st_mode
= S_IFDIR
| 00755;
1877 k
= cgfs_get_key(controller
, cgroup
, NULL
);
1879 sb
->st_uid
= sb
->st_gid
= 0;
1881 sb
->st_uid
= k
->uid
;
1882 sb
->st_gid
= k
->gid
;
1890 if ((k
= cgfs_get_key(controller
, path1
, path2
)) != NULL
) {
1891 sb
->st_mode
= S_IFREG
| k
->mode
;
1893 sb
->st_uid
= k
->uid
;
1894 sb
->st_gid
= k
->gid
;
1897 if (!caller_is_in_ancestor(initpid
, controller
, path1
, NULL
)) {
1909 int cg_opendir(const char *path
, struct fuse_file_info
*fi
)
1911 struct fuse_context
*fc
= fuse_get_context();
1913 struct file_info
*dir_info
;
1914 char *controller
= NULL
;
1916 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
1919 if (strcmp(path
, "/cgroup") == 0) {
1923 // return list of keys for the controller, and list of child cgroups
1924 controller
= pick_controller_from_path(fc
, path
);
1928 cgroup
= find_cgroup_in_path(path
);
1930 /* this is just /cgroup/controller, return its contents */
1935 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
1936 if (initpid
<= 1 || is_shared_pidns(initpid
))
1939 if (!caller_may_see_dir(initpid
, controller
, cgroup
))
1941 if (!fc_may_access(fc
, controller
, cgroup
, NULL
, O_RDONLY
))
1945 /* we'll free this at cg_releasedir */
1946 dir_info
= malloc(sizeof(*dir_info
));
1949 dir_info
->controller
= must_copy_string(controller
);
1950 dir_info
->cgroup
= must_copy_string(cgroup
);
1951 dir_info
->type
= LXC_TYPE_CGDIR
;
1952 dir_info
->buf
= NULL
;
1953 dir_info
->file
= NULL
;
1954 dir_info
->buflen
= 0;
1956 fi
->fh
= (unsigned long)dir_info
;
1960 int cg_readdir(const char *path
, void *buf
, fuse_fill_dir_t filler
, off_t offset
,
1961 struct fuse_file_info
*fi
)
1963 struct file_info
*d
= (struct file_info
*)fi
->fh
;
1964 struct cgfs_files
**list
= NULL
;
1966 char *nextcg
= NULL
;
1967 struct fuse_context
*fc
= fuse_get_context();
1968 char **clist
= NULL
;
1970 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
1973 if (filler(buf
, ".", NULL
, 0) != 0 || filler(buf
, "..", NULL
, 0) != 0)
1976 if (d
->type
!= LXC_TYPE_CGDIR
) {
1977 lxcfs_error("%s\n", "Internal error: file cache info used in readdir.");
1980 if (!d
->cgroup
&& !d
->controller
) {
1982 * ls /var/lib/lxcfs/cgroup - just show list of controllers.
1983 * This only works with the legacy hierarchy.
1985 for (struct hierarchy
**h
= cgroup_ops
->hierarchies
; h
&& *h
; h
++) {
1986 if (is_unified_hierarchy(*h
))
1989 if ((*h
)->__controllers
&& filler(buf
, (*h
)->__controllers
, NULL
, 0))
1996 if (!cgfs_list_keys(d
->controller
, d
->cgroup
, &list
)) {
1997 // not a valid cgroup
2002 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
2003 if (initpid
<= 1 || is_shared_pidns(initpid
))
2005 if (!caller_is_in_ancestor(initpid
, d
->controller
, d
->cgroup
, &nextcg
)) {
2007 ret
= filler(buf
, nextcg
, NULL
, 0);
2018 for (i
= 0; list
&& list
[i
]; i
++) {
2019 if (filler(buf
, list
[i
]->name
, NULL
, 0) != 0) {
2025 // now get the list of child cgroups
2027 if (!cgfs_list_children(d
->controller
, d
->cgroup
, &clist
)) {
2032 for (i
= 0; clist
[i
]; i
++) {
2033 if (filler(buf
, clist
[i
], NULL
, 0) != 0) {
2044 for (i
= 0; clist
[i
]; i
++)
2051 void do_release_file_info(struct fuse_file_info
*fi
)
2053 struct file_info
*f
= (struct file_info
*)fi
->fh
;
2060 free_disarm(f
->controller
);
2061 free_disarm(f
->cgroup
);
2062 free_disarm(f
->file
);
2063 free_disarm(f
->buf
);
2067 int cg_releasedir(const char *path
, struct fuse_file_info
*fi
)
2069 do_release_file_info(fi
);
2073 int cg_open(const char *path
, struct fuse_file_info
*fi
)
2076 char *last
= NULL
, *path1
, *path2
, * cgdir
= NULL
, *controller
;
2077 struct cgfs_files
*k
= NULL
;
2078 struct file_info
*file_info
;
2079 struct fuse_context
*fc
= fuse_get_context();
2082 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
2085 controller
= pick_controller_from_path(fc
, path
);
2088 cgroup
= find_cgroup_in_path(path
);
2092 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
2101 k
= cgfs_get_key(controller
, path1
, path2
);
2108 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
2109 if (initpid
<= 1 || is_shared_pidns(initpid
))
2111 if (!caller_may_see_dir(initpid
, controller
, path1
)) {
2115 if (!fc_may_access(fc
, controller
, path1
, path2
, fi
->flags
)) {
2120 /* we'll free this at cg_release */
2121 file_info
= malloc(sizeof(*file_info
));
2126 file_info
->controller
= must_copy_string(controller
);
2127 file_info
->cgroup
= must_copy_string(path1
);
2128 file_info
->file
= must_copy_string(path2
);
2129 file_info
->type
= LXC_TYPE_CGFILE
;
2130 file_info
->buf
= NULL
;
2131 file_info
->buflen
= 0;
2133 fi
->fh
= (unsigned long)file_info
;
2141 int cg_access(const char *path
, int mode
)
2145 char *path1
, *path2
, *controller
;
2146 char *last
= NULL
, *cgdir
= NULL
;
2147 struct cgfs_files
*k
= NULL
;
2148 struct fuse_context
*fc
= fuse_get_context();
2150 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
2153 if (strcmp(path
, "/cgroup") == 0)
2156 controller
= pick_controller_from_path(fc
, path
);
2159 cgroup
= find_cgroup_in_path(path
);
2161 // access("/sys/fs/cgroup/systemd", mode) - rx allowed, w not
2162 if ((mode
& W_OK
) == 0)
2167 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
2176 k
= cgfs_get_key(controller
, path1
, path2
);
2178 if ((mode
& W_OK
) == 0)
2186 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
2187 if (initpid
<= 1 || is_shared_pidns(initpid
))
2189 if (!caller_may_see_dir(initpid
, controller
, path1
)) {
2193 if (!fc_may_access(fc
, controller
, path1
, path2
, mode
)) {
2205 int cg_release(const char *path
, struct fuse_file_info
*fi
)
2207 do_release_file_info(fi
);
2211 #define POLLIN_SET ( EPOLLIN | EPOLLHUP | EPOLLRDHUP )
2213 static bool wait_for_sock(int sock
, int timeout
)
2215 struct epoll_event ev
;
2216 int epfd
, ret
, now
, starttime
, deltatime
, saved_errno
;
2218 if ((starttime
= time(NULL
)) < 0)
2221 if ((epfd
= epoll_create(1)) < 0) {
2222 lxcfs_error("%s\n", "Failed to create epoll socket: %m.");
2226 ev
.events
= POLLIN_SET
;
2228 if (epoll_ctl(epfd
, EPOLL_CTL_ADD
, sock
, &ev
) < 0) {
2229 lxcfs_error("%s\n", "Failed adding socket to epoll: %m.");
2235 if ((now
= time(NULL
)) < 0) {
2240 deltatime
= (starttime
+ timeout
) - now
;
2241 if (deltatime
< 0) { // timeout
2246 ret
= epoll_wait(epfd
, &ev
, 1, 1000*deltatime
+ 1);
2247 if (ret
< 0 && errno
== EINTR
)
2249 saved_errno
= errno
;
2253 errno
= saved_errno
;
2259 static int msgrecv(int sockfd
, void *buf
, size_t len
)
2261 if (!wait_for_sock(sockfd
, 2))
2263 return recv(sockfd
, buf
, len
, MSG_DONTWAIT
);
2266 static int send_creds(int sock
, struct ucred
*cred
, char v
, bool pingfirst
)
2268 struct msghdr msg
= { 0 };
2270 struct cmsghdr
*cmsg
;
2271 char cmsgbuf
[CMSG_SPACE(sizeof(*cred
))];
2276 if (msgrecv(sock
, buf
, 1) != 1) {
2277 lxcfs_error("%s\n", "Error getting reply from server over socketpair.");
2278 return SEND_CREDS_FAIL
;
2282 msg
.msg_control
= cmsgbuf
;
2283 msg
.msg_controllen
= sizeof(cmsgbuf
);
2285 cmsg
= CMSG_FIRSTHDR(&msg
);
2286 cmsg
->cmsg_len
= CMSG_LEN(sizeof(struct ucred
));
2287 cmsg
->cmsg_level
= SOL_SOCKET
;
2288 cmsg
->cmsg_type
= SCM_CREDENTIALS
;
2289 memcpy(CMSG_DATA(cmsg
), cred
, sizeof(*cred
));
2291 msg
.msg_name
= NULL
;
2292 msg
.msg_namelen
= 0;
2296 iov
.iov_len
= sizeof(buf
);
2300 if (sendmsg(sock
, &msg
, 0) < 0) {
2301 lxcfs_error("Failed at sendmsg: %s.\n",strerror(errno
));
2303 return SEND_CREDS_NOTSK
;
2304 return SEND_CREDS_FAIL
;
2307 return SEND_CREDS_OK
;
2310 static bool recv_creds(int sock
, struct ucred
*cred
, char *v
)
2312 struct msghdr msg
= { 0 };
2314 struct cmsghdr
*cmsg
;
2315 char cmsgbuf
[CMSG_SPACE(sizeof(*cred
))];
2326 if (setsockopt(sock
, SOL_SOCKET
, SO_PASSCRED
, &optval
, sizeof(optval
)) == -1) {
2327 lxcfs_error("Failed to set passcred: %s\n", strerror(errno
));
2331 if (write(sock
, buf
, 1) != 1) {
2332 lxcfs_error("Failed to start write on scm fd: %s\n", strerror(errno
));
2336 msg
.msg_name
= NULL
;
2337 msg
.msg_namelen
= 0;
2338 msg
.msg_control
= cmsgbuf
;
2339 msg
.msg_controllen
= sizeof(cmsgbuf
);
2342 iov
.iov_len
= sizeof(buf
);
2346 if (!wait_for_sock(sock
, 2)) {
2347 lxcfs_error("Timed out waiting for scm_cred: %s\n", strerror(errno
));
2350 ret
= recvmsg(sock
, &msg
, MSG_DONTWAIT
);
2352 lxcfs_error("Failed to receive scm_cred: %s\n", strerror(errno
));
2356 cmsg
= CMSG_FIRSTHDR(&msg
);
2358 if (cmsg
&& cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)) &&
2359 cmsg
->cmsg_level
== SOL_SOCKET
&&
2360 cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
2361 memcpy(cred
, CMSG_DATA(cmsg
), sizeof(*cred
));
2368 struct pid_ns_clone_args
{
2372 int (*wrapped
) (int, pid_t
); // pid_from_ns or pid_to_ns
2376 * pid_ns_clone_wrapper - wraps pid_to_ns or pid_from_ns for usage
2377 * with clone(). This simply writes '1' as ACK back to the parent
2378 * before calling the actual wrapped function.
2380 static int pid_ns_clone_wrapper(void *arg
) {
2381 struct pid_ns_clone_args
* args
= (struct pid_ns_clone_args
*) arg
;
2384 close(args
->cpipe
[0]);
2385 if (write(args
->cpipe
[1], &b
, sizeof(char)) < 0)
2386 lxcfs_error("(child): error on write: %s.\n", strerror(errno
));
2387 close(args
->cpipe
[1]);
2388 return args
->wrapped(args
->sock
, args
->tpid
);
2392 * pid_to_ns - reads pids from a ucred over a socket, then writes the
2393 * int value back over the socket. This shifts the pid from the
2394 * sender's pidns into tpid's pidns.
2396 static int pid_to_ns(int sock
, pid_t tpid
)
2401 while (recv_creds(sock
, &cred
, &v
)) {
2404 if (write(sock
, &cred
.pid
, sizeof(pid_t
)) != sizeof(pid_t
))
2412 * pid_to_ns_wrapper: when you setns into a pidns, you yourself remain
2413 * in your old pidns. Only children which you clone will be in the target
2414 * pidns. So the pid_to_ns_wrapper does the setns, then clones a child to
2415 * actually convert pids.
2417 * Note: glibc's fork() does not respect pidns, which can lead to failed
2418 * assertions inside glibc (and thus failed forks) if the child's pid in
2419 * the pidns and the parent pid outside are identical. Using clone prevents
2422 static void pid_to_ns_wrapper(int sock
, pid_t tpid
)
2424 int newnsfd
= -1, ret
, cpipe
[2];
2429 ret
= snprintf(fnam
, sizeof(fnam
), "/proc/%d/ns/pid", tpid
);
2430 if (ret
< 0 || ret
>= sizeof(fnam
))
2432 newnsfd
= open(fnam
, O_RDONLY
);
2435 if (setns(newnsfd
, 0) < 0)
2439 if (pipe(cpipe
) < 0)
2442 struct pid_ns_clone_args args
= {
2446 .wrapped
= &pid_to_ns
2448 size_t stack_size
= sysconf(_SC_PAGESIZE
);
2449 void *stack
= alloca(stack_size
);
2451 cpid
= clone(pid_ns_clone_wrapper
, stack
+ stack_size
, SIGCHLD
, &args
);
2455 // give the child 1 second to be done forking and
2457 if (!wait_for_sock(cpipe
[0], 1))
2459 ret
= read(cpipe
[0], &v
, 1);
2460 if (ret
!= sizeof(char) || v
!= '1')
2463 if (!wait_for_pid(cpid
))
2469 * To read cgroup files with a particular pid, we will setns into the child
2470 * pidns, open a pipe, fork a child - which will be the first to really be in
2471 * the child ns - which does the cgfs_get_value and writes the data to the pipe.
2473 bool do_read_pids(pid_t tpid
, const char *contrl
, const char *cg
, const char *file
, char **d
)
2475 int sock
[2] = {-1, -1};
2476 char *tmpdata
= NULL
;
2478 pid_t qpid
, cpid
= -1;
2479 bool answer
= false;
2482 size_t sz
= 0, asz
= 0;
2484 if (!cgroup_ops
->get(cgroup_ops
, contrl
, cg
, file
, &tmpdata
))
2488 * Now we read the pids from returned data one by one, pass
2489 * them into a child in the target namespace, read back the
2490 * translated pids, and put them into our to-return data
2493 if (socketpair(AF_UNIX
, SOCK_DGRAM
, 0, sock
) < 0) {
2494 perror("socketpair");
2503 if (!cpid
) // child - exits when done
2504 pid_to_ns_wrapper(sock
[1], tpid
);
2506 char *ptr
= tmpdata
;
2509 while (sscanf(ptr
, "%d\n", &qpid
) == 1) {
2511 ret
= send_creds(sock
[0], &cred
, v
, true);
2513 if (ret
== SEND_CREDS_NOTSK
)
2515 if (ret
== SEND_CREDS_FAIL
)
2518 // read converted results
2519 if (!wait_for_sock(sock
[0], 2)) {
2520 lxcfs_error("Timed out waiting for pid from child: %s.\n", strerror(errno
));
2523 if (read(sock
[0], &qpid
, sizeof(qpid
)) != sizeof(qpid
)) {
2524 lxcfs_error("Error reading pid from child: %s.\n", strerror(errno
));
2527 must_strcat_pid(d
, &sz
, &asz
, qpid
);
2529 ptr
= strchr(ptr
, '\n');
2535 cred
.pid
= getpid();
2537 if (send_creds(sock
[0], &cred
, v
, true) != SEND_CREDS_OK
) {
2538 // failed to ask child to exit
2539 lxcfs_error("Failed to ask child to exit: %s.\n", strerror(errno
));
2549 if (sock
[0] != -1) {
2556 int cg_read(const char *path
, char *buf
, size_t size
, off_t offset
,
2557 struct fuse_file_info
*fi
)
2559 struct fuse_context
*fc
= fuse_get_context();
2560 struct file_info
*f
= (struct file_info
*)fi
->fh
;
2561 struct cgfs_files
*k
= NULL
;
2566 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
2569 if (f
->type
!= LXC_TYPE_CGFILE
) {
2570 lxcfs_error("%s\n", "Internal error: directory cache info used in cg_read.");
2580 if ((k
= cgfs_get_key(f
->controller
, f
->cgroup
, f
->file
)) == NULL
) {
2586 if (!fc_may_access(fc
, f
->controller
, f
->cgroup
, f
->file
, O_RDONLY
)) {
2591 if (strcmp(f
->file
, "tasks") == 0 ||
2592 strcmp(f
->file
, "/tasks") == 0 ||
2593 strcmp(f
->file
, "/cgroup.procs") == 0 ||
2594 strcmp(f
->file
, "cgroup.procs") == 0)
2595 // special case - we have to translate the pids
2596 r
= do_read_pids(fc
->pid
, f
->controller
, f
->cgroup
, f
->file
, &data
);
2598 r
= cgroup_ops
->get(cgroup_ops
, f
->controller
, f
->cgroup
, f
->file
, &data
);
2612 memcpy(buf
, data
, s
);
2613 if (s
> 0 && s
< size
&& data
[s
-1] != '\n')
2623 static int pid_from_ns(int sock
, pid_t tpid
)
2633 if (!wait_for_sock(sock
, 2)) {
2634 lxcfs_error("%s\n", "Timeout reading from parent.");
2637 if ((ret
= read(sock
, &vpid
, sizeof(pid_t
))) != sizeof(pid_t
)) {
2638 lxcfs_error("Bad read from parent: %s.\n", strerror(errno
));
2641 if (vpid
== -1) // done
2645 if (send_creds(sock
, &cred
, v
, true) != SEND_CREDS_OK
) {
2647 cred
.pid
= getpid();
2648 if (send_creds(sock
, &cred
, v
, false) != SEND_CREDS_OK
)
2655 static void pid_from_ns_wrapper(int sock
, pid_t tpid
)
2657 int newnsfd
= -1, ret
, cpipe
[2];
2662 ret
= snprintf(fnam
, sizeof(fnam
), "/proc/%d/ns/pid", tpid
);
2663 if (ret
< 0 || ret
>= sizeof(fnam
))
2665 newnsfd
= open(fnam
, O_RDONLY
);
2668 if (setns(newnsfd
, 0) < 0)
2672 if (pipe(cpipe
) < 0)
2675 struct pid_ns_clone_args args
= {
2679 .wrapped
= &pid_from_ns
2681 size_t stack_size
= sysconf(_SC_PAGESIZE
);
2682 void *stack
= alloca(stack_size
);
2684 cpid
= clone(pid_ns_clone_wrapper
, stack
+ stack_size
, SIGCHLD
, &args
);
2688 // give the child 1 second to be done forking and
2690 if (!wait_for_sock(cpipe
[0], 1))
2692 ret
= read(cpipe
[0], &v
, 1);
2693 if (ret
!= sizeof(char) || v
!= '1')
2696 if (!wait_for_pid(cpid
))
2702 * Given host @uid, return the uid to which it maps in
2703 * @pid's user namespace, or -1 if none.
2705 bool hostuid_to_ns(uid_t uid
, pid_t pid
, uid_t
*answer
)
2710 sprintf(line
, "/proc/%d/uid_map", pid
);
2711 if ((f
= fopen(line
, "r")) == NULL
) {
2715 *answer
= convert_id_to_ns(f
, uid
);
2724 * get_pid_creds: get the real uid and gid of @pid from
2726 * (XXX should we use euid here?)
2728 void get_pid_creds(pid_t pid
, uid_t
*uid
, gid_t
*gid
)
2737 sprintf(line
, "/proc/%d/status", pid
);
2738 if ((f
= fopen(line
, "r")) == NULL
) {
2739 lxcfs_error("Error opening %s: %s\n", line
, strerror(errno
));
2742 while (fgets(line
, 400, f
)) {
2743 if (strncmp(line
, "Uid:", 4) == 0) {
2744 if (sscanf(line
+4, "%u", &u
) != 1) {
2745 lxcfs_error("bad uid line for pid %u\n", pid
);
2750 } else if (strncmp(line
, "Gid:", 4) == 0) {
2751 if (sscanf(line
+4, "%u", &g
) != 1) {
2752 lxcfs_error("bad gid line for pid %u\n", pid
);
2763 * May the requestor @r move victim @v to a new cgroup?
2764 * This is allowed if
2765 * . they are the same task
2766 * . they are ownedy by the same uid
2767 * . @r is root on the host, or
2768 * . @v's uid is mapped into @r's where @r is root.
2770 bool may_move_pid(pid_t r
, uid_t r_uid
, pid_t v
)
2772 uid_t v_uid
, tmpuid
;
2779 get_pid_creds(v
, &v_uid
, &v_gid
);
2782 if (hostuid_to_ns(r_uid
, r
, &tmpuid
) && tmpuid
== 0
2783 && hostuid_to_ns(v_uid
, r
, &tmpuid
))
2788 static bool do_write_pids(pid_t tpid
, uid_t tuid
, const char *contrl
, const char *cg
,
2789 const char *file
, const char *buf
)
2791 int sock
[2] = {-1, -1};
2792 pid_t qpid
, cpid
= -1;
2793 FILE *pids_file
= NULL
;
2794 bool answer
= false, fail
= false;
2796 pids_file
= open_pids_file(contrl
, cg
);
2801 * write the pids to a socket, have helper in writer's pidns
2802 * call movepid for us
2804 if (socketpair(AF_UNIX
, SOCK_DGRAM
, 0, sock
) < 0) {
2805 perror("socketpair");
2813 if (!cpid
) { // child
2815 pid_from_ns_wrapper(sock
[1], tpid
);
2818 const char *ptr
= buf
;
2819 while (sscanf(ptr
, "%d", &qpid
) == 1) {
2823 if (write(sock
[0], &qpid
, sizeof(qpid
)) != sizeof(qpid
)) {
2824 lxcfs_error("Error writing pid to child: %s.\n", strerror(errno
));
2828 if (recv_creds(sock
[0], &cred
, &v
)) {
2830 if (!may_move_pid(tpid
, tuid
, cred
.pid
)) {
2834 if (fprintf(pids_file
, "%d", (int) cred
.pid
) < 0)
2839 ptr
= strchr(ptr
, '\n');
2845 /* All good, write the value */
2847 if (write(sock
[0], &qpid
,sizeof(qpid
)) != sizeof(qpid
))
2848 lxcfs_error("%s\n", "Warning: failed to ask child to exit.");
2856 if (sock
[0] != -1) {
2861 if (fclose(pids_file
) != 0)
2867 int cg_write(const char *path
, const char *buf
, size_t size
, off_t offset
,
2868 struct fuse_file_info
*fi
)
2870 struct fuse_context
*fc
= fuse_get_context();
2871 char *localbuf
= NULL
;
2872 struct cgfs_files
*k
= NULL
;
2873 struct file_info
*f
= (struct file_info
*)fi
->fh
;
2876 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
2879 if (f
->type
!= LXC_TYPE_CGFILE
) {
2880 lxcfs_error("%s\n", "Internal error: directory cache info used in cg_write.");
2887 localbuf
= alloca(size
+1);
2888 localbuf
[size
] = '\0';
2889 memcpy(localbuf
, buf
, size
);
2891 if ((k
= cgfs_get_key(f
->controller
, f
->cgroup
, f
->file
)) == NULL
) {
2896 if (!fc_may_access(fc
, f
->controller
, f
->cgroup
, f
->file
, O_WRONLY
)) {
2901 if (strcmp(f
->file
, "tasks") == 0 ||
2902 strcmp(f
->file
, "/tasks") == 0 ||
2903 strcmp(f
->file
, "/cgroup.procs") == 0 ||
2904 strcmp(f
->file
, "cgroup.procs") == 0)
2905 // special case - we have to translate the pids
2906 r
= do_write_pids(fc
->pid
, fc
->uid
, f
->controller
, f
->cgroup
, f
->file
, localbuf
);
2908 r
= cgfs_set_value(f
->controller
, f
->cgroup
, f
->file
, localbuf
);
2918 int cg_chown(const char *path
, uid_t uid
, gid_t gid
)
2920 struct fuse_context
*fc
= fuse_get_context();
2921 char *cgdir
= NULL
, *last
= NULL
, *path1
, *path2
, *controller
;
2922 struct cgfs_files
*k
= NULL
;
2926 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
2929 if (strcmp(path
, "/cgroup") == 0)
2932 controller
= pick_controller_from_path(fc
, path
);
2934 return errno
== ENOENT
? -EPERM
: -errno
;
2936 cgroup
= find_cgroup_in_path(path
);
2938 /* this is just /cgroup/controller */
2941 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
2951 if (is_child_cgroup(controller
, path1
, path2
)) {
2952 // get uid, gid, from '/tasks' file and make up a mode
2953 // That is a hack, until cgmanager gains a GetCgroupPerms fn.
2954 k
= cgfs_get_key(controller
, cgroup
, "tasks");
2957 k
= cgfs_get_key(controller
, path1
, path2
);
2965 * This being a fuse request, the uid and gid must be valid
2966 * in the caller's namespace. So we can just check to make
2967 * sure that the caller is root in his uid, and privileged
2968 * over the file's current owner.
2970 if (!is_privileged_over(fc
->pid
, fc
->uid
, k
->uid
, NS_ROOT_REQD
)) {
2975 ret
= cgfs_chown_file(controller
, cgroup
, uid
, gid
);
2984 int cg_chmod(const char *path
, mode_t mode
)
2986 struct fuse_context
*fc
= fuse_get_context();
2987 char * cgdir
= NULL
, *last
= NULL
, *path1
, *path2
, *controller
;
2988 struct cgfs_files
*k
= NULL
;
2992 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
2995 if (strcmp(path
, "/cgroup") == 0)
2998 controller
= pick_controller_from_path(fc
, path
);
3000 return errno
== ENOENT
? -EPERM
: -errno
;
3002 cgroup
= find_cgroup_in_path(path
);
3004 /* this is just /cgroup/controller */
3007 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
3017 if (is_child_cgroup(controller
, path1
, path2
)) {
3018 // get uid, gid, from '/tasks' file and make up a mode
3019 // That is a hack, until cgmanager gains a GetCgroupPerms fn.
3020 k
= cgfs_get_key(controller
, cgroup
, "tasks");
3023 k
= cgfs_get_key(controller
, path1
, path2
);
3031 * This being a fuse request, the uid and gid must be valid
3032 * in the caller's namespace. So we can just check to make
3033 * sure that the caller is root in his uid, and privileged
3034 * over the file's current owner.
3036 if (!is_privileged_over(fc
->pid
, fc
->uid
, k
->uid
, NS_ROOT_OPT
)) {
3041 if (!cgfs_chmod_file(controller
, cgroup
, mode
)) {
3053 int cg_mkdir(const char *path
, mode_t mode
)
3055 struct fuse_context
*fc
= fuse_get_context();
3056 char *last
= NULL
, *path1
, *cgdir
= NULL
, *controller
, *next
= NULL
;
3060 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
3063 controller
= pick_controller_from_path(fc
, path
);
3065 return errno
== ENOENT
? -EPERM
: -errno
;
3067 cgroup
= find_cgroup_in_path(path
);
3071 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
3077 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
3078 if (initpid
<= 1 || is_shared_pidns(initpid
))
3080 if (!caller_is_in_ancestor(initpid
, controller
, path1
, &next
)) {
3083 else if (last
&& strcmp(next
, last
) == 0)
3090 if (!fc_may_access(fc
, controller
, path1
, NULL
, O_RDWR
)) {
3094 if (!caller_is_in_ancestor(initpid
, controller
, path1
, NULL
)) {
3099 ret
= cgfs_create(controller
, cgroup
, fc
->uid
, fc
->gid
);
3107 int cg_rmdir(const char *path
)
3109 struct fuse_context
*fc
= fuse_get_context();
3110 char *last
= NULL
, *cgdir
= NULL
, *controller
, *next
= NULL
;
3114 if (!fc
|| !cgroup_ops
|| pure_unified_layout(cgroup_ops
))
3117 controller
= pick_controller_from_path(fc
, path
);
3118 if (!controller
) /* Someone's trying to delete "/cgroup". */
3121 cgroup
= find_cgroup_in_path(path
);
3122 if (!cgroup
) /* Someone's trying to delete a controller e.g. "/blkio". */
3125 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
3127 /* Someone's trying to delete a cgroup on the same level as the
3128 * "/lxc" cgroup e.g. rmdir "/cgroup/blkio/lxc" or
3129 * rmdir "/cgroup/blkio/init.slice".
3135 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
3136 if (initpid
<= 1 || is_shared_pidns(initpid
))
3138 if (!caller_is_in_ancestor(initpid
, controller
, cgroup
, &next
)) {
3139 if (!last
|| (next
&& (strcmp(next
, last
) == 0)))
3146 if (!fc_may_access(fc
, controller
, cgdir
, NULL
, O_WRONLY
)) {
3150 if (!caller_is_in_ancestor(initpid
, controller
, cgroup
, NULL
)) {
3155 if (!cgfs_remove(controller
, cgroup
)) {
3168 static bool startswith(const char *line
, const char *pref
)
3170 if (strncmp(line
, pref
, strlen(pref
)) == 0)
3175 /* Note that "memory.stat" in cgroup2 is hierarchical by default. */
3176 static void parse_memstat(int version
,
3178 unsigned long *cached
,
3179 unsigned long *active_anon
,
3180 unsigned long *inactive_anon
,
3181 unsigned long *active_file
,
3182 unsigned long *inactive_file
,
3183 unsigned long *unevictable
,
3184 unsigned long *shmem
)
3189 if (startswith(memstat
, is_unified_controller(version
)
3192 sscanf(memstat
+ 11, "%lu", cached
);
3194 } else if (startswith(memstat
, is_unified_controller(version
)
3196 : "total_active_anon")) {
3197 sscanf(memstat
+ 17, "%lu", active_anon
);
3198 *active_anon
/= 1024;
3199 } else if (startswith(memstat
, is_unified_controller(version
)
3201 : "total_inactive_anon")) {
3202 sscanf(memstat
+ 19, "%lu", inactive_anon
);
3203 *inactive_anon
/= 1024;
3204 } else if (startswith(memstat
, is_unified_controller(version
)
3206 : "total_active_file")) {
3207 sscanf(memstat
+ 17, "%lu", active_file
);
3208 *active_file
/= 1024;
3209 } else if (startswith(memstat
, is_unified_controller(version
)
3211 : "total_inactive_file")) {
3212 sscanf(memstat
+ 19, "%lu", inactive_file
);
3213 *inactive_file
/= 1024;
3214 } else if (startswith(memstat
, is_unified_controller(version
)
3216 : "total_unevictable")) {
3217 sscanf(memstat
+ 17, "%lu", unevictable
);
3218 *unevictable
/= 1024;
3219 } else if (startswith(memstat
, is_unified_controller(version
)
3222 sscanf(memstat
+ 11, "%lu", shmem
);
3225 eol
= strchr(memstat
, '\n');
3232 static void get_blkio_io_value(char *str
, unsigned major
, unsigned minor
, char *iotype
, unsigned long *v
)
3238 snprintf(key
, 32, "%u:%u %s", major
, minor
, iotype
);
3240 size_t len
= strlen(key
);
3244 if (startswith(str
, key
)) {
3245 sscanf(str
+ len
, "%lu", v
);
3248 eol
= strchr(str
, '\n');
3255 int read_file_fuse(const char *path
, char *buf
, size_t size
, struct file_info
*d
)
3257 __do_free
char *line
= NULL
;
3258 __do_fclose
FILE *f
= NULL
;
3259 size_t linelen
= 0, total_len
= 0;
3260 char *cache
= d
->buf
;
3261 size_t cache_size
= d
->buflen
;
3263 f
= fopen(path
, "r");
3267 while (getline(&line
, &linelen
, f
) != -1) {
3268 ssize_t l
= snprintf(cache
, cache_size
, "%s", line
);
3270 perror("Error writing to cache");
3273 if (l
>= cache_size
) {
3274 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3282 d
->size
= total_len
;
3283 if (total_len
> size
)
3286 /* read from off 0 */
3287 memcpy(buf
, d
->buf
, total_len
);
3289 if (d
->size
> total_len
)
3290 d
->cached
= d
->size
- total_len
;
3295 * FUSE ops for /proc
3298 static unsigned long get_memlimit(const char *cgroup
, bool swap
)
3301 __do_free
char *memlimit_str
= NULL
;
3302 unsigned long memlimit
= -1;
3305 ret
= cgroup_ops
->get_memory_swap_max(cgroup_ops
, cgroup
, &memlimit_str
);
3307 ret
= cgroup_ops
->get_memory_max(cgroup_ops
, cgroup
, &memlimit_str
);
3309 memlimit
= strtoul(memlimit_str
, NULL
, 10);
3314 static unsigned long get_min_memlimit(const char *cgroup
, bool swap
)
3316 __do_free
char *copy
= NULL
;
3317 unsigned long memlimit
= 0;
3318 unsigned long retlimit
;
3320 copy
= strdup(cgroup
);
3321 retlimit
= get_memlimit(copy
, swap
);
3323 while (strcmp(copy
, "/") != 0) {
3327 memlimit
= get_memlimit(it
, swap
);
3328 if (memlimit
!= -1 && memlimit
< retlimit
)
3329 retlimit
= memlimit
;
3335 static int proc_meminfo_read(char *buf
, size_t size
, off_t offset
,
3336 struct fuse_file_info
*fi
)
3338 __do_free
char *cgroup
= NULL
, *line
= NULL
,
3339 *memusage_str
= NULL
, *memstat_str
= NULL
,
3340 *memswlimit_str
= NULL
, *memswusage_str
= NULL
;
3341 __do_fclose
FILE *f
= NULL
;
3342 struct fuse_context
*fc
= fuse_get_context();
3343 struct lxcfs_opts
*opts
= (struct lxcfs_opts
*) fuse_get_context()->private_data
;
3344 struct file_info
*d
= (struct file_info
*)fi
->fh
;
3345 unsigned long memlimit
= 0, memusage
= 0, memswlimit
= 0,
3346 memswusage
= 0, cached
= 0, hosttotal
= 0, active_anon
= 0,
3347 inactive_anon
= 0, active_file
= 0, inactive_file
= 0,
3348 unevictable
= 0, shmem
= 0, hostswtotal
= 0;
3349 size_t linelen
= 0, total_len
= 0;
3350 char *cache
= d
->buf
;
3351 size_t cache_size
= d
->buflen
;
3357 if (offset
> d
->size
)
3363 left
= d
->size
- offset
;
3364 total_len
= left
> size
? size
: left
;
3365 memcpy(buf
, cache
+ offset
, total_len
);
3370 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
3371 if (initpid
<= 1 || is_shared_pidns(initpid
))
3374 cgroup
= get_pid_cgroup(initpid
, "memory");
3376 return read_file_fuse("/proc/meminfo", buf
, size
, d
);
3378 prune_init_slice(cgroup
);
3380 memlimit
= get_min_memlimit(cgroup
, false);
3382 ret
= cgroup_ops
->get_memory_current(cgroup_ops
, cgroup
, &memusage_str
);
3386 ret
= cgroup_ops
->get_memory_stats(cgroup_ops
, cgroup
, &memstat_str
);
3389 parse_memstat(ret
, memstat_str
, &cached
, &active_anon
, &inactive_anon
,
3390 &active_file
, &inactive_file
, &unevictable
, &shmem
);
3393 * Following values are allowed to fail, because swapaccount might be
3394 * turned off for current kernel.
3396 ret
= cgroup_ops
->get_memory_swap_max(cgroup_ops
, cgroup
, &memswlimit_str
);
3398 ret
= cgroup_ops
->get_memory_swap_current(cgroup_ops
, cgroup
, &memswusage_str
);
3400 memswlimit
= get_min_memlimit(cgroup
, true);
3401 memswusage
= strtoul(memswusage_str
, NULL
, 10);
3402 memswlimit
= memswlimit
/ 1024;
3403 memswusage
= memswusage
/ 1024;
3406 memusage
= strtoul(memusage_str
, NULL
, 10);
3410 f
= fopen("/proc/meminfo", "r");
3414 while (getline(&line
, &linelen
, f
) != -1) {
3416 char *printme
, lbuf
[100];
3418 memset(lbuf
, 0, 100);
3419 if (startswith(line
, "MemTotal:")) {
3420 sscanf(line
+sizeof("MemTotal:")-1, "%lu", &hosttotal
);
3421 if (hosttotal
< memlimit
)
3422 memlimit
= hosttotal
;
3423 snprintf(lbuf
, 100, "MemTotal: %8lu kB\n", memlimit
);
3425 } else if (startswith(line
, "MemFree:")) {
3426 snprintf(lbuf
, 100, "MemFree: %8lu kB\n", memlimit
- memusage
);
3428 } else if (startswith(line
, "MemAvailable:")) {
3429 snprintf(lbuf
, 100, "MemAvailable: %8lu kB\n", memlimit
- memusage
+ cached
);
3431 } else if (startswith(line
, "SwapTotal:") && memswlimit
> 0 &&
3432 opts
&& opts
->swap_off
== false) {
3433 sscanf(line
+sizeof("SwapTotal:")-1, "%lu", &hostswtotal
);
3434 if (hostswtotal
< memswlimit
)
3435 memswlimit
= hostswtotal
;
3436 snprintf(lbuf
, 100, "SwapTotal: %8lu kB\n", memswlimit
);
3438 } else if (startswith(line
, "SwapTotal:") && opts
&& opts
->swap_off
== true) {
3439 snprintf(lbuf
, 100, "SwapTotal: %8lu kB\n", 0UL);
3441 } else if (startswith(line
, "SwapFree:") && memswlimit
> 0 &&
3442 memswusage
> 0 && opts
&& opts
->swap_off
== false) {
3443 unsigned long swaptotal
= memswlimit
,
3444 swapusage
= memusage
> memswusage
3446 : memswusage
- memusage
,
3447 swapfree
= swapusage
< swaptotal
3448 ? swaptotal
- swapusage
3450 snprintf(lbuf
, 100, "SwapFree: %8lu kB\n", swapfree
);
3452 } else if (startswith(line
, "SwapFree:") && opts
&& opts
->swap_off
== true) {
3453 snprintf(lbuf
, 100, "SwapFree: %8lu kB\n", 0UL);
3455 } else if (startswith(line
, "Slab:")) {
3456 snprintf(lbuf
, 100, "Slab: %8lu kB\n", 0UL);
3458 } else if (startswith(line
, "Buffers:")) {
3459 snprintf(lbuf
, 100, "Buffers: %8lu kB\n", 0UL);
3461 } else if (startswith(line
, "Cached:")) {
3462 snprintf(lbuf
, 100, "Cached: %8lu kB\n", cached
);
3464 } else if (startswith(line
, "SwapCached:")) {
3465 snprintf(lbuf
, 100, "SwapCached: %8lu kB\n", 0UL);
3467 } else if (startswith(line
, "Active:")) {
3468 snprintf(lbuf
, 100, "Active: %8lu kB\n",
3469 active_anon
+ active_file
);
3471 } else if (startswith(line
, "Inactive:")) {
3472 snprintf(lbuf
, 100, "Inactive: %8lu kB\n",
3473 inactive_anon
+ inactive_file
);
3475 } else if (startswith(line
, "Active(anon)")) {
3476 snprintf(lbuf
, 100, "Active(anon): %8lu kB\n", active_anon
);
3478 } else if (startswith(line
, "Inactive(anon)")) {
3479 snprintf(lbuf
, 100, "Inactive(anon): %8lu kB\n", inactive_anon
);
3481 } else if (startswith(line
, "Active(file)")) {
3482 snprintf(lbuf
, 100, "Active(file): %8lu kB\n", active_file
);
3484 } else if (startswith(line
, "Inactive(file)")) {
3485 snprintf(lbuf
, 100, "Inactive(file): %8lu kB\n", inactive_file
);
3487 } else if (startswith(line
, "Unevictable")) {
3488 snprintf(lbuf
, 100, "Unevictable: %8lu kB\n", unevictable
);
3490 } else if (startswith(line
, "SReclaimable")) {
3491 snprintf(lbuf
, 100, "SReclaimable: %8lu kB\n", 0UL);
3493 } else if (startswith(line
, "SUnreclaim")) {
3494 snprintf(lbuf
, 100, "SUnreclaim: %8lu kB\n", 0UL);
3496 } else if (startswith(line
, "Shmem:")) {
3497 snprintf(lbuf
, 100, "Shmem: %8lu kB\n", shmem
);
3499 } else if (startswith(line
, "ShmemHugePages")) {
3500 snprintf(lbuf
, 100, "ShmemHugePages: %8lu kB\n", 0UL);
3502 } else if (startswith(line
, "ShmemPmdMapped")) {
3503 snprintf(lbuf
, 100, "ShmemPmdMapped: %8lu kB\n", 0UL);
3508 l
= snprintf(cache
, cache_size
, "%s", printme
);
3510 perror("Error writing to cache");
3514 if (l
>= cache_size
) {
3515 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3525 d
->size
= total_len
;
3526 if (total_len
> size
) total_len
= size
;
3527 memcpy(buf
, d
->buf
, total_len
);
3533 * Read the cpuset.cpus for cg
3534 * Return the answer in a newly allocated string which must be freed
3536 char *get_cpuset(const char *cg
)
3541 ret
= cgroup_ops
->get_cpuset_cpus(cgroup_ops
, cg
, &value
);
3548 bool cpu_in_cpuset(int cpu
, const char *cpuset
);
/* True when `line` is a "processor : N" /proc/cpuinfo line whose CPU N is
 * a member of `cpuset`; false for non-processor lines.
 */
static bool cpuline_in_cpuset(const char *line, const char *cpuset)
{
	int cpu;
	bool parsed;

	parsed = sscanf(line, "processor : %d", &cpu) == 1;
	return parsed && cpu_in_cpuset(cpu, cpuset);
}
3560 * Read cgroup CPU quota parameters from `cpu.cfs_quota_us` or `cpu.cfs_period_us`,
3561 * depending on `param`. Parameter value is returned throuh `value`.
3563 static bool read_cpu_cfs_param(const char *cg
, const char *param
, int64_t *value
)
3565 __do_free
char *str
= NULL
;
3566 char file
[11 + 6 + 1]; /* cpu.cfs__us + quota/period + \0 */
3568 snprintf(file
, sizeof(file
), "cpu.cfs_%s_us", param
);
3570 if (!cgroup_ops
->get(cgroup_ops
, "cpu", cg
, file
, &str
))
3573 if (sscanf(str
, "%ld", value
) != 1)
3580 * Return the maximum number of visible CPUs based on CPU quotas.
3581 * If there is no quota set, zero is returned.
3583 int max_cpu_count(const char *cg
)
3586 int64_t cfs_quota
, cfs_period
;
3587 int nr_cpus_in_cpuset
= 0;
3588 char *cpuset
= NULL
;
3590 if (!read_cpu_cfs_param(cg
, "quota", &cfs_quota
))
3593 if (!read_cpu_cfs_param(cg
, "period", &cfs_period
))
3596 cpuset
= get_cpuset(cg
);
3598 nr_cpus_in_cpuset
= cpu_number_in_cpuset(cpuset
);
3600 if (cfs_quota
<= 0 || cfs_period
<= 0){
3601 if (nr_cpus_in_cpuset
> 0)
3602 return nr_cpus_in_cpuset
;
3607 rv
= cfs_quota
/ cfs_period
;
3609 /* In case quota/period does not yield a whole number, add one CPU for
3612 if ((cfs_quota
% cfs_period
) > 0)
3615 nprocs
= get_nprocs();
3620 /* use min value in cpu quota and cpuset */
3621 if (nr_cpus_in_cpuset
> 0 && nr_cpus_in_cpuset
< rv
)
3622 rv
= nr_cpus_in_cpuset
;
/*
 * Return the exact number of visible CPUs based on CPU quotas.
 * If there is no quota set, zero is returned.
 */
static double exact_cpu_count(const char *cg)
{
	int64_t quota, period;
	int online;
	double count;

	if (!read_cpu_cfs_param(cg, "quota", &quota) ||
	    !read_cpu_cfs_param(cg, "period", &period))
		return 0;

	if (quota <= 0 || period <= 0)
		return 0;

	/* Fractional CPU count, capped at the number of host CPUs. */
	count = (double)quota / (double)period;

	online = get_nprocs();
	if (count > online)
		count = online;

	return count;
}
/*
 * Check whether this is a '^processor' line in /proc/cpuinfo.
 */
static bool is_processor_line(const char *line)
{
	int cpu;

	return sscanf(line, "processor : %d", &cpu) == 1;
}
3668 static int proc_cpuinfo_read(char *buf
, size_t size
, off_t offset
,
3669 struct fuse_file_info
*fi
)
3671 __do_free
char *cg
= NULL
, *cpuset
= NULL
, *line
= NULL
;
3672 __do_fclose
FILE *f
= NULL
;
3673 struct fuse_context
*fc
= fuse_get_context();
3674 struct file_info
*d
= (struct file_info
*)fi
->fh
;
3675 size_t linelen
= 0, total_len
= 0;
3676 bool am_printing
= false, firstline
= true, is_s390x
= false;
3677 int curcpu
= -1, cpu
, max_cpus
= 0;
3679 char *cache
= d
->buf
;
3680 size_t cache_size
= d
->buflen
;
3685 if (offset
> d
->size
)
3691 left
= d
->size
- offset
;
3692 total_len
= left
> size
? size
: left
;
3693 memcpy(buf
, cache
+ offset
, total_len
);
3698 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
3699 if (initpid
<= 1 || is_shared_pidns(initpid
))
3701 cg
= get_pid_cgroup(initpid
, "cpuset");
3703 return read_file_fuse("proc/cpuinfo", buf
, size
, d
);
3704 prune_init_slice(cg
);
3706 cpuset
= get_cpuset(cg
);
3710 use_view
= cgroup_ops
->can_use_cpuview(cgroup_ops
);
3712 max_cpus
= max_cpu_count(cg
);
3714 f
= fopen("/proc/cpuinfo", "r");
3718 while (getline(&line
, &linelen
, f
) != -1) {
3722 if (strstr(line
, "IBM/S390") != NULL
) {
3728 if (strncmp(line
, "# processors:", 12) == 0)
3730 if (is_processor_line(line
)) {
3731 if (use_view
&& max_cpus
> 0 && (curcpu
+1) == max_cpus
)
3733 am_printing
= cpuline_in_cpuset(line
, cpuset
);
3736 l
= snprintf(cache
, cache_size
, "processor : %d\n", curcpu
);
3738 perror("Error writing to cache");
3741 if (l
>= cache_size
) {
3742 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3750 } else if (is_s390x
&& sscanf(line
, "processor %d:", &cpu
) == 1) {
3752 if (use_view
&& max_cpus
> 0 && (curcpu
+1) == max_cpus
)
3754 if (!cpu_in_cpuset(cpu
, cpuset
))
3757 p
= strchr(line
, ':');
3761 l
= snprintf(cache
, cache_size
, "processor %d:%s", curcpu
, p
);
3763 perror("Error writing to cache");
3766 if (l
>= cache_size
) {
3767 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3777 l
= snprintf(cache
, cache_size
, "%s", line
);
3779 perror("Error writing to cache");
3782 if (l
>= cache_size
) {
3783 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
3793 __do_free
char *origcache
= d
->buf
;
3796 d
->buf
= malloc(d
->buflen
);
3798 d
->buf
= move_ptr(origcache
);
3803 cache_size
= d
->buflen
;
3805 l
= snprintf(cache
, cache_size
, "vendor_id : IBM/S390\n");
3806 if (l
< 0 || l
>= cache_size
)
3812 l
= snprintf(cache
, cache_size
, "# processors : %d\n", curcpu
+ 1);
3813 if (l
< 0 || l
>= cache_size
)
3819 l
= snprintf(cache
, cache_size
, "%s", origcache
);
3820 if (l
< 0 || l
>= cache_size
)
3826 d
->size
= total_len
;
3827 if (total_len
> size
) total_len
= size
;
3829 /* read from off 0 */
3830 memcpy(buf
, d
->buf
, total_len
);
3834 static uint64_t get_reaper_start_time(pid_t pid
)
3839 /* strlen("/proc/") = 6
3843 * strlen("/stat") = 5
3847 #define __PROC_PID_STAT_LEN (6 + LXCFS_NUMSTRLEN64 + 5 + 1)
3848 char path
[__PROC_PID_STAT_LEN
];
3851 qpid
= lookup_initpid_in_store(pid
);
3853 /* Caller can check for EINVAL on 0. */
3858 ret
= snprintf(path
, __PROC_PID_STAT_LEN
, "/proc/%d/stat", qpid
);
3859 if (ret
< 0 || ret
>= __PROC_PID_STAT_LEN
) {
3860 /* Caller can check for EINVAL on 0. */
3865 f
= fopen(path
, "r");
3867 /* Caller can check for EINVAL on 0. */
3872 /* Note that the *scanf() argument supression requires that length
3873 * modifiers such as "l" are omitted. Otherwise some compilers will yell
3874 * at us. It's like telling someone you're not married and then asking
3875 * if you can bring your wife to the party.
3877 ret
= fscanf(f
, "%*d " /* (1) pid %d */
3878 "%*s " /* (2) comm %s */
3879 "%*c " /* (3) state %c */
3880 "%*d " /* (4) ppid %d */
3881 "%*d " /* (5) pgrp %d */
3882 "%*d " /* (6) session %d */
3883 "%*d " /* (7) tty_nr %d */
3884 "%*d " /* (8) tpgid %d */
3885 "%*u " /* (9) flags %u */
3886 "%*u " /* (10) minflt %lu */
3887 "%*u " /* (11) cminflt %lu */
3888 "%*u " /* (12) majflt %lu */
3889 "%*u " /* (13) cmajflt %lu */
3890 "%*u " /* (14) utime %lu */
3891 "%*u " /* (15) stime %lu */
3892 "%*d " /* (16) cutime %ld */
3893 "%*d " /* (17) cstime %ld */
3894 "%*d " /* (18) priority %ld */
3895 "%*d " /* (19) nice %ld */
3896 "%*d " /* (20) num_threads %ld */
3897 "%*d " /* (21) itrealvalue %ld */
3898 "%" PRIu64
, /* (22) starttime %llu */
3902 /* Caller can check for EINVAL on 0. */
/* Convert the reaper's starttime from clock ticks to seconds since boot.
 * Returns 0 when either the start time or the tick rate cannot be
 * determined.
 */
static double get_reaper_start_time_in_sec(pid_t pid)
{
	uint64_t clockticks, ticks_per_sec;
	int64_t ret;
	double res;

	clockticks = get_reaper_start_time(pid);
	if (clockticks == 0 && errno == EINVAL) {
		lxcfs_debug("failed to retrieve start time of pid %d\n", pid);
		return 0;
	}

	ret = sysconf(_SC_CLK_TCK);
	if (ret < 0 && errno == EINVAL) {
		lxcfs_debug("%s\n",
			    "failed to determine number of clock ticks in a second");
		return 0;
	}

	ticks_per_sec = (uint64_t)ret;
	res = (double)clockticks / ticks_per_sec;
	return res;
}
3938 static double get_reaper_age(pid_t pid
)
3941 double procstart
, procage
;
3943 /* We need to substract the time the process has started since system
3944 * boot minus the time when the system has started to get the actual
3947 procstart
= get_reaper_start_time_in_sec(pid
);
3948 procage
= procstart
;
3949 if (procstart
> 0) {
3951 struct timespec spec
;
3953 ret
= clock_gettime(CLOCK_BOOTTIME
, &spec
);
3957 /* We could make this more precise here by using the tv_nsec
3958 * field in the timespec struct and convert it to milliseconds
3959 * and then create a double for the seconds and milliseconds but
3960 * that seems more work than it is worth.
3962 uptime_ms
= (spec
.tv_sec
* 1000) + (spec
.tv_nsec
* 1e-6);
3963 procage
= (uptime_ms
- (procstart
* 1000)) / 1000;
3970 * Returns 0 on success.
3971 * It is the caller's responsibility to free `return_usage`, unless this
3972 * function returns an error.
3974 static int read_cpuacct_usage_all(char *cg
, char *cpuset
, struct cpuacct_usage
**return_usage
, int *size
)
3976 __do_free
char *usage_str
= NULL
;
3977 __do_free
struct cpuacct_usage
*cpu_usage
= NULL
;
3978 int cpucount
= get_nprocs_conf();
3979 int read_pos
= 0, read_cnt
=0;
3982 uint64_t cg_user
, cg_system
;
3983 int64_t ticks_per_sec
;
3985 ticks_per_sec
= sysconf(_SC_CLK_TCK
);
3987 if (ticks_per_sec
< 0 && errno
== EINVAL
) {
3990 "read_cpuacct_usage_all failed to determine number of clock ticks "
3995 cpu_usage
= malloc(sizeof(struct cpuacct_usage
) * cpucount
);
3999 memset(cpu_usage
, 0, sizeof(struct cpuacct_usage
) * cpucount
);
4000 if (!cgroup_ops
->get(cgroup_ops
, "cpuacct", cg
, "cpuacct.usage_all", &usage_str
)) {
4002 int i
= 0, read_pos
= 0, read_cnt
=0;
4003 size_t sz
= 0, asz
= 0;
4005 /* read cpuacct.usage_percpu instead. */
4006 lxcfs_v("failed to read cpuacct.usage_all. reading cpuacct.usage_percpu instead\n%s", "");
4007 if (!cgroup_ops
->get(cgroup_ops
, "cpuacct", cg
, "cpuacct.usage_percpu", &usage_str
))
4009 lxcfs_v("usage_str: %s\n", usage_str
);
4011 /* convert cpuacct.usage_percpu into cpuacct.usage_all. */
4012 lxcfs_v("converting cpuacct.usage_percpu into cpuacct.usage_all\n%s", "");
4014 must_strcat(&data
, &sz
, &asz
, "cpu user system\n");
4016 while (sscanf(usage_str
+ read_pos
, "%lu %n", &cg_user
, &read_cnt
) > 0) {
4017 lxcfs_debug("i: %d, cg_user: %lu, read_pos: %d, read_cnt: %d\n", i
, cg_user
, read_pos
, read_cnt
);
4018 must_strcat(&data
, &sz
, &asz
, "%d %lu 0\n", i
, cg_user
);
4020 read_pos
+= read_cnt
;
4025 lxcfs_v("usage_str: %s\n", usage_str
);
4028 if (sscanf(usage_str
, "cpu user system\n%n", &read_cnt
) != 0) {
4029 lxcfs_error("read_cpuacct_usage_all reading first line from "
4030 "%s/cpuacct.usage_all failed.\n", cg
);
4034 read_pos
+= read_cnt
;
4036 for (i
= 0, j
= 0; i
< cpucount
; i
++) {
4037 ret
= sscanf(usage_str
+ read_pos
, "%d %lu %lu\n%n", &cg_cpu
, &cg_user
,
4038 &cg_system
, &read_cnt
);
4044 lxcfs_error("read_cpuacct_usage_all reading from %s/cpuacct.usage_all "
4049 read_pos
+= read_cnt
;
4051 /* Convert the time from nanoseconds to USER_HZ */
4052 cpu_usage
[j
].user
= cg_user
/ 1000.0 / 1000 / 1000 * ticks_per_sec
;
4053 cpu_usage
[j
].system
= cg_system
/ 1000.0 / 1000 / 1000 * ticks_per_sec
;
4057 *return_usage
= move_ptr(cpu_usage
);
4062 static unsigned long diff_cpu_usage(struct cpuacct_usage
*older
, struct cpuacct_usage
*newer
, struct cpuacct_usage
*diff
, int cpu_count
)
4065 unsigned long sum
= 0;
4067 for (i
= 0; i
< cpu_count
; i
++) {
4068 if (!newer
[i
].online
)
4071 /* When cpuset is changed on the fly, the CPUs might get reordered.
4072 * We could either reset all counters, or check that the substractions
4073 * below will return expected results.
4075 if (newer
[i
].user
> older
[i
].user
)
4076 diff
[i
].user
= newer
[i
].user
- older
[i
].user
;
4080 if (newer
[i
].system
> older
[i
].system
)
4081 diff
[i
].system
= newer
[i
].system
- older
[i
].system
;
4085 if (newer
[i
].idle
> older
[i
].idle
)
4086 diff
[i
].idle
= newer
[i
].idle
- older
[i
].idle
;
4090 sum
+= diff
[i
].user
;
4091 sum
+= diff
[i
].system
;
4092 sum
+= diff
[i
].idle
;
4098 static void add_cpu_usage(unsigned long *surplus
, struct cpuacct_usage
*usage
, unsigned long *counter
, unsigned long threshold
)
4100 unsigned long free_space
, to_add
;
4102 free_space
= threshold
- usage
->user
- usage
->system
;
4104 if (free_space
> usage
->idle
)
4105 free_space
= usage
->idle
;
4107 to_add
= free_space
> *surplus
? *surplus
: free_space
;
4110 usage
->idle
-= to_add
;
4114 static struct cg_proc_stat
*prune_proc_stat_list(struct cg_proc_stat
*node
)
4116 struct cg_proc_stat
*first
= NULL
, *prev
, *tmp
;
4118 for (prev
= NULL
; node
; ) {
4119 if (!cgfs_param_exist("cpu", node
->cg
, "cpu.shares")) {
4121 lxcfs_debug("Removing stat node for %s\n", node
->cg
);
4124 prev
->next
= node
->next
;
4129 free_proc_stat_node(tmp
);
4141 #define PROC_STAT_PRUNE_INTERVAL 10
4142 static void prune_proc_stat_history(void)
4145 time_t now
= time(NULL
);
4147 for (i
= 0; i
< CPUVIEW_HASH_SIZE
; i
++) {
4148 pthread_rwlock_wrlock(&proc_stat_history
[i
]->lock
);
4150 if ((proc_stat_history
[i
]->lastcheck
+ PROC_STAT_PRUNE_INTERVAL
) > now
) {
4151 pthread_rwlock_unlock(&proc_stat_history
[i
]->lock
);
4155 if (proc_stat_history
[i
]->next
) {
4156 proc_stat_history
[i
]->next
= prune_proc_stat_list(proc_stat_history
[i
]->next
);
4157 proc_stat_history
[i
]->lastcheck
= now
;
4160 pthread_rwlock_unlock(&proc_stat_history
[i
]->lock
);
4164 static struct cg_proc_stat
*find_proc_stat_node(struct cg_proc_stat_head
*head
, const char *cg
)
4166 struct cg_proc_stat
*node
;
4168 pthread_rwlock_rdlock(&head
->lock
);
4171 pthread_rwlock_unlock(&head
->lock
);
4178 if (strcmp(cg
, node
->cg
) == 0)
4180 } while ((node
= node
->next
));
4185 pthread_rwlock_unlock(&head
->lock
);
4186 prune_proc_stat_history();
4190 static struct cg_proc_stat
*new_proc_stat_node(struct cpuacct_usage
*usage
, int cpu_count
, const char *cg
)
4192 struct cg_proc_stat
*node
;
4195 node
= malloc(sizeof(struct cg_proc_stat
));
4203 node
->cg
= malloc(strlen(cg
) + 1);
4207 strcpy(node
->cg
, cg
);
4209 node
->usage
= malloc(sizeof(struct cpuacct_usage
) * cpu_count
);
4213 memcpy(node
->usage
, usage
, sizeof(struct cpuacct_usage
) * cpu_count
);
4215 node
->view
= malloc(sizeof(struct cpuacct_usage
) * cpu_count
);
4219 node
->cpu_count
= cpu_count
;
4222 if (pthread_mutex_init(&node
->lock
, NULL
) != 0) {
4223 lxcfs_error("%s\n", "Failed to initialize node lock");
4227 for (i
= 0; i
< cpu_count
; i
++) {
4228 node
->view
[i
].user
= 0;
4229 node
->view
[i
].system
= 0;
4230 node
->view
[i
].idle
= 0;
4236 if (node
&& node
->cg
)
4238 if (node
&& node
->usage
)
4240 if (node
&& node
->view
)
4248 static struct cg_proc_stat
*add_proc_stat_node(struct cg_proc_stat
*new_node
)
4250 int hash
= calc_hash(new_node
->cg
) % CPUVIEW_HASH_SIZE
;
4251 struct cg_proc_stat_head
*head
= proc_stat_history
[hash
];
4252 struct cg_proc_stat
*node
, *rv
= new_node
;
4254 pthread_rwlock_wrlock(&head
->lock
);
4257 head
->next
= new_node
;
4264 if (strcmp(node
->cg
, new_node
->cg
) == 0) {
4265 /* The node is already present, return it */
4266 free_proc_stat_node(new_node
);
4276 node
->next
= new_node
;
4281 pthread_rwlock_unlock(&head
->lock
);
4285 static bool expand_proc_stat_node(struct cg_proc_stat
*node
, int cpu_count
)
4287 __do_free
struct cpuacct_usage
*new_usage
= NULL
, *new_view
= NULL
;
4289 /* Allocate new memory */
4290 new_usage
= malloc(sizeof(struct cpuacct_usage
) * cpu_count
);
4294 new_view
= malloc(sizeof(struct cpuacct_usage
) * cpu_count
);
4298 /* Copy existing data & initialize new elements */
4299 for (int i
= 0; i
< cpu_count
; i
++) {
4300 if (i
< node
->cpu_count
) {
4301 new_usage
[i
].user
= node
->usage
[i
].user
;
4302 new_usage
[i
].system
= node
->usage
[i
].system
;
4303 new_usage
[i
].idle
= node
->usage
[i
].idle
;
4305 new_view
[i
].user
= node
->view
[i
].user
;
4306 new_view
[i
].system
= node
->view
[i
].system
;
4307 new_view
[i
].idle
= node
->view
[i
].idle
;
4309 new_usage
[i
].user
= 0;
4310 new_usage
[i
].system
= 0;
4311 new_usage
[i
].idle
= 0;
4313 new_view
[i
].user
= 0;
4314 new_view
[i
].system
= 0;
4315 new_view
[i
].idle
= 0;
4320 node
->usage
= move_ptr(new_usage
);
4323 node
->view
= move_ptr(new_view
);
4324 node
->cpu_count
= cpu_count
;
4329 static struct cg_proc_stat
*find_or_create_proc_stat_node(struct cpuacct_usage
*usage
, int cpu_count
, const char *cg
)
4331 int hash
= calc_hash(cg
) % CPUVIEW_HASH_SIZE
;
4332 struct cg_proc_stat_head
*head
= proc_stat_history
[hash
];
4333 struct cg_proc_stat
*node
;
4335 node
= find_proc_stat_node(head
, cg
);
4338 node
= new_proc_stat_node(usage
, cpu_count
, cg
);
4342 node
= add_proc_stat_node(node
);
4343 lxcfs_debug("New stat node (%d) for %s\n", cpu_count
, cg
);
4346 pthread_mutex_lock(&node
->lock
);
4348 /* If additional CPUs on the host have been enabled, CPU usage counter
4349 * arrays have to be expanded */
4350 if (node
->cpu_count
< cpu_count
) {
4351 lxcfs_debug("Expanding stat node %d->%d for %s\n",
4352 node
->cpu_count
, cpu_count
, cg
);
4354 if (!expand_proc_stat_node(node
, cpu_count
)) {
4355 pthread_mutex_unlock(&node
->lock
);
4356 lxcfs_debug("Unable to expand stat node %d->%d for %s\n",
4357 node
->cpu_count
, cpu_count
, cg
);
4365 static void reset_proc_stat_node(struct cg_proc_stat
*node
, struct cpuacct_usage
*usage
, int cpu_count
)
4369 lxcfs_debug("Resetting stat node for %s\n", node
->cg
);
4370 memcpy(node
->usage
, usage
, sizeof(struct cpuacct_usage
) * cpu_count
);
4372 for (i
= 0; i
< cpu_count
; i
++) {
4373 node
->view
[i
].user
= 0;
4374 node
->view
[i
].system
= 0;
4375 node
->view
[i
].idle
= 0;
4378 node
->cpu_count
= cpu_count
;
4381 static int cpuview_proc_stat(const char *cg
, const char *cpuset
,
4382 struct cpuacct_usage
*cg_cpu_usage
,
4383 int cg_cpu_usage_size
, FILE *f
, char *buf
,
4386 __do_free
char *line
= NULL
;
4387 __do_free
struct cpuacct_usage
*diff
= NULL
;
4388 size_t linelen
= 0, total_len
= 0, l
;
4389 int curcpu
= -1; /* cpu numbering starts at 0 */
4391 int max_cpus
= max_cpu_count(cg
), cpu_cnt
= 0;
4392 unsigned long user
= 0, nice
= 0, system
= 0, idle
= 0, iowait
= 0,
4393 irq
= 0, softirq
= 0, steal
= 0, guest
= 0, guest_nice
= 0;
4394 unsigned long user_sum
= 0, system_sum
= 0, idle_sum
= 0;
4395 unsigned long user_surplus
= 0, system_surplus
= 0;
4396 unsigned long total_sum
, threshold
;
4397 struct cg_proc_stat
*stat_node
;
4398 int nprocs
= get_nprocs_conf();
4400 if (cg_cpu_usage_size
< nprocs
)
4401 nprocs
= cg_cpu_usage_size
;
4403 /* Read all CPU stats and stop when we've encountered other lines */
4404 while (getline(&line
, &linelen
, f
) != -1) {
4406 char cpu_char
[10]; /* That's a lot of cores */
4407 uint64_t all_used
, cg_used
;
4409 if (strlen(line
) == 0)
4412 /* not a ^cpuN line containing a number N */
4413 if (sscanf(line
, "cpu%9[^ ]", cpu_char
) != 1)
4416 if (sscanf(cpu_char
, "%d", &physcpu
) != 1)
4419 if (physcpu
>= cg_cpu_usage_size
)
4425 if (!cpu_in_cpuset(physcpu
, cpuset
)) {
4426 for (i
= curcpu
; i
<= physcpu
; i
++)
4427 cg_cpu_usage
[i
].online
= false;
4431 if (curcpu
< physcpu
) {
4432 /* Some CPUs may be disabled */
4433 for (i
= curcpu
; i
< physcpu
; i
++)
4434 cg_cpu_usage
[i
].online
= false;
4439 cg_cpu_usage
[curcpu
].online
= true;
4441 ret
= sscanf(line
, "%*s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
4456 all_used
= user
+ nice
+ system
+ iowait
+ irq
+ softirq
+ steal
+ guest
+ guest_nice
;
4457 cg_used
= cg_cpu_usage
[curcpu
].user
+ cg_cpu_usage
[curcpu
].system
;
4459 if (all_used
>= cg_used
) {
4460 cg_cpu_usage
[curcpu
].idle
= idle
+ (all_used
- cg_used
);
4463 lxcfs_error("cpu%d from %s has unexpected cpu time: %lu in /proc/stat, "
4464 "%lu in cpuacct.usage_all; unable to determine idle time\n",
4465 curcpu
, cg
, all_used
, cg_used
);
4466 cg_cpu_usage
[curcpu
].idle
= idle
;
4470 /* Cannot use more CPUs than is available due to cpuset */
4471 if (max_cpus
> cpu_cnt
)
4474 stat_node
= find_or_create_proc_stat_node(cg_cpu_usage
, nprocs
, cg
);
4477 lxcfs_error("unable to find/create stat node for %s\n", cg
);
4481 diff
= malloc(sizeof(struct cpuacct_usage
) * nprocs
);
4487 * If the new values are LOWER than values stored in memory, it means
4488 * the cgroup has been reset/recreated and we should reset too.
4490 for (curcpu
= 0; curcpu
< nprocs
; curcpu
++) {
4491 if (!cg_cpu_usage
[curcpu
].online
)
4494 if (cg_cpu_usage
[curcpu
].user
< stat_node
->usage
[curcpu
].user
)
4495 reset_proc_stat_node(stat_node
, cg_cpu_usage
, nprocs
);
4500 total_sum
= diff_cpu_usage(stat_node
->usage
, cg_cpu_usage
, diff
, nprocs
);
4502 for (curcpu
= 0, i
= -1; curcpu
< nprocs
; curcpu
++) {
4503 stat_node
->usage
[curcpu
].online
= cg_cpu_usage
[curcpu
].online
;
4505 if (!stat_node
->usage
[curcpu
].online
)
4510 stat_node
->usage
[curcpu
].user
+= diff
[curcpu
].user
;
4511 stat_node
->usage
[curcpu
].system
+= diff
[curcpu
].system
;
4512 stat_node
->usage
[curcpu
].idle
+= diff
[curcpu
].idle
;
4514 if (max_cpus
> 0 && i
>= max_cpus
) {
4515 user_surplus
+= diff
[curcpu
].user
;
4516 system_surplus
+= diff
[curcpu
].system
;
4520 /* Calculate usage counters of visible CPUs */
4522 unsigned long diff_user
= 0;
4523 unsigned long diff_system
= 0;
4524 unsigned long diff_idle
= 0;
4525 unsigned long max_diff_idle
= 0;
4526 unsigned long max_diff_idle_index
= 0;
4529 /* threshold = maximum usage per cpu, including idle */
4530 threshold
= total_sum
/ cpu_cnt
* max_cpus
;
4532 for (curcpu
= 0, i
= -1; curcpu
< nprocs
; curcpu
++) {
4533 if (!stat_node
->usage
[curcpu
].online
)
4541 if (diff
[curcpu
].user
+ diff
[curcpu
].system
>= threshold
)
4545 add_cpu_usage(&user_surplus
, &diff
[curcpu
],
4546 &diff
[curcpu
].user
, threshold
);
4548 if (diff
[curcpu
].user
+ diff
[curcpu
].system
>= threshold
)
4551 /* If there is still room, add system */
4552 add_cpu_usage(&system_surplus
, &diff
[curcpu
],
4553 &diff
[curcpu
].system
, threshold
);
4556 if (user_surplus
> 0)
4557 lxcfs_debug("leftover user: %lu for %s\n", user_surplus
, cg
);
4558 if (system_surplus
> 0)
4559 lxcfs_debug("leftover system: %lu for %s\n", system_surplus
, cg
);
4561 for (curcpu
= 0, i
= -1; curcpu
< nprocs
; curcpu
++) {
4562 if (!stat_node
->usage
[curcpu
].online
)
4570 stat_node
->view
[curcpu
].user
+= diff
[curcpu
].user
;
4571 stat_node
->view
[curcpu
].system
+= diff
[curcpu
].system
;
4572 stat_node
->view
[curcpu
].idle
+= diff
[curcpu
].idle
;
4574 user_sum
+= stat_node
->view
[curcpu
].user
;
4575 system_sum
+= stat_node
->view
[curcpu
].system
;
4576 idle_sum
+= stat_node
->view
[curcpu
].idle
;
4578 diff_user
+= diff
[curcpu
].user
;
4579 diff_system
+= diff
[curcpu
].system
;
4580 diff_idle
+= diff
[curcpu
].idle
;
4581 if (diff
[curcpu
].idle
> max_diff_idle
) {
4582 max_diff_idle
= diff
[curcpu
].idle
;
4583 max_diff_idle_index
= curcpu
;
4586 lxcfs_v("curcpu: %d, diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", curcpu
, diff
[curcpu
].user
, diff
[curcpu
].system
, diff
[curcpu
].idle
);
4588 lxcfs_v("total. diff_user: %lu, diff_system: %lu, diff_idle: %lu\n", diff_user
, diff_system
, diff_idle
);
4590 /* revise cpu usage view to support partial cpu case. */
4591 exact_cpus
= exact_cpu_count(cg
);
4592 if (exact_cpus
< (double)max_cpus
){
4593 unsigned long delta
= (unsigned long)((double)(diff_user
+ diff_system
+ diff_idle
) * (1 - exact_cpus
/ (double)max_cpus
));
4595 lxcfs_v("revising cpu usage view to match the exact cpu count [%f]\n", exact_cpus
);
4596 lxcfs_v("delta: %lu\n", delta
);
4597 lxcfs_v("idle_sum before: %lu\n", idle_sum
);
4598 idle_sum
= idle_sum
> delta
? idle_sum
- delta
: 0;
4599 lxcfs_v("idle_sum after: %lu\n", idle_sum
);
4601 curcpu
= max_diff_idle_index
;
4602 lxcfs_v("curcpu: %d, idle before: %lu\n", curcpu
, stat_node
->view
[curcpu
].idle
);
4603 stat_node
->view
[curcpu
].idle
= stat_node
->view
[curcpu
].idle
> delta
? stat_node
->view
[curcpu
].idle
- delta
: 0;
4604 lxcfs_v("curcpu: %d, idle after: %lu\n", curcpu
, stat_node
->view
[curcpu
].idle
);
4607 for (curcpu
= 0; curcpu
< nprocs
; curcpu
++) {
4608 if (!stat_node
->usage
[curcpu
].online
)
4611 stat_node
->view
[curcpu
].user
= stat_node
->usage
[curcpu
].user
;
4612 stat_node
->view
[curcpu
].system
= stat_node
->usage
[curcpu
].system
;
4613 stat_node
->view
[curcpu
].idle
= stat_node
->usage
[curcpu
].idle
;
4615 user_sum
+= stat_node
->view
[curcpu
].user
;
4616 system_sum
+= stat_node
->view
[curcpu
].system
;
4617 idle_sum
+= stat_node
->view
[curcpu
].idle
;
4621 /* Render the file */
4623 l
= snprintf(buf
, buf_size
, "cpu %lu 0 %lu %lu 0 0 0 0 0 0\n",
4627 lxcfs_v("cpu-all: %s\n", buf
);
4630 perror("Error writing to cache");
4633 if (l
>= buf_size
) {
4634 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4642 /* Render visible CPUs */
4643 for (curcpu
= 0, i
= -1; curcpu
< nprocs
; curcpu
++) {
4644 if (!stat_node
->usage
[curcpu
].online
)
4649 if (max_cpus
> 0 && i
== max_cpus
)
4652 l
= snprintf(buf
, buf_size
, "cpu%d %lu 0 %lu %lu 0 0 0 0 0 0\n",
4654 stat_node
->view
[curcpu
].user
,
4655 stat_node
->view
[curcpu
].system
,
4656 stat_node
->view
[curcpu
].idle
);
4657 lxcfs_v("cpu: %s\n", buf
);
4660 perror("Error writing to cache");
4664 if (l
>= buf_size
) {
4665 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4674 /* Pass the rest of /proc/stat, start with the last line read */
4675 l
= snprintf(buf
, buf_size
, "%s", line
);
4678 perror("Error writing to cache");
4682 if (l
>= buf_size
) {
4683 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4691 /* Pass the rest of the host's /proc/stat */
4692 while (getline(&line
, &linelen
, f
) != -1) {
4693 l
= snprintf(buf
, buf_size
, "%s", line
);
4695 perror("Error writing to cache");
4698 if (l
>= buf_size
) {
4699 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4708 pthread_mutex_unlock(&stat_node
->lock
);
4712 #define CPUALL_MAX_SIZE (BUF_RESERVE_SIZE / 2)
4713 static int proc_stat_read(char *buf
, size_t size
, off_t offset
,
4714 struct fuse_file_info
*fi
)
4716 __do_free
char *cg
= NULL
, *cpuset
= NULL
, *line
= NULL
;
4717 __do_free
struct cpuacct_usage
*cg_cpu_usage
= NULL
;
4718 __do_fclose
FILE *f
= NULL
;
4719 struct fuse_context
*fc
= fuse_get_context();
4720 struct file_info
*d
= (struct file_info
*)fi
->fh
;
4721 size_t linelen
= 0, total_len
= 0;
4722 int curcpu
= -1; /* cpu numbering starts at 0 */
4724 unsigned long user
= 0, nice
= 0, system
= 0, idle
= 0, iowait
= 0,
4725 irq
= 0, softirq
= 0, steal
= 0, guest
= 0, guest_nice
= 0;
4726 unsigned long user_sum
= 0, nice_sum
= 0, system_sum
= 0, idle_sum
= 0,
4727 iowait_sum
= 0, irq_sum
= 0, softirq_sum
= 0,
4728 steal_sum
= 0, guest_sum
= 0, guest_nice_sum
= 0;
4729 char cpuall
[CPUALL_MAX_SIZE
];
4730 /* reserve for cpu all */
4731 char *cache
= d
->buf
+ CPUALL_MAX_SIZE
;
4732 size_t cache_size
= d
->buflen
- CPUALL_MAX_SIZE
;
4733 int cg_cpu_usage_size
= 0;
4736 if (offset
> d
->size
)
4740 int left
= d
->size
- offset
;
4741 total_len
= left
> size
? size
: left
;
4742 memcpy(buf
, d
->buf
+ offset
, total_len
);
4746 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
4747 lxcfs_v("initpid: %d\n", initpid
);
4752 * when container run with host pid namespace initpid == 1, cgroup will "/"
4753 * we should return host os's /proc contents.
4754 * in some case cpuacct_usage.all in "/" will larger then /proc/stat
4757 return read_file_fuse("/proc/stat", buf
, size
, d
);
4760 cg
= get_pid_cgroup(initpid
, "cpuset");
4761 lxcfs_v("cg: %s\n", cg
);
4763 return read_file_fuse("/proc/stat", buf
, size
, d
);
4764 prune_init_slice(cg
);
4766 cpuset
= get_cpuset(cg
);
4771 * Read cpuacct.usage_all for all CPUs.
4772 * If the cpuacct cgroup is present, it is used to calculate the container's
4773 * CPU usage. If not, values from the host's /proc/stat are used.
4775 if (read_cpuacct_usage_all(cg
, cpuset
, &cg_cpu_usage
, &cg_cpu_usage_size
) != 0) {
4776 lxcfs_v("%s\n", "proc_stat_read failed to read from cpuacct, "
4777 "falling back to the host's /proc/stat");
4780 f
= fopen("/proc/stat", "r");
4785 if (getline(&line
, &linelen
, f
) < 0) {
4786 lxcfs_error("%s\n", "proc_stat_read read first line failed.");
4790 if (cgroup_ops
->can_use_cpuview(cgroup_ops
) && cg_cpu_usage
) {
4791 total_len
= cpuview_proc_stat(cg
, cpuset
, cg_cpu_usage
, cg_cpu_usage_size
,
4792 f
, d
->buf
, d
->buflen
);
4796 while (getline(&line
, &linelen
, f
) != -1) {
4798 char cpu_char
[10]; /* That's a lot of cores */
4800 uint64_t all_used
, cg_used
, new_idle
;
4803 if (strlen(line
) == 0)
4805 if (sscanf(line
, "cpu%9[^ ]", cpu_char
) != 1) {
4806 /* not a ^cpuN line containing a number N, just print it */
4807 l
= snprintf(cache
, cache_size
, "%s", line
);
4809 perror("Error writing to cache");
4812 if (l
>= cache_size
) {
4813 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4822 if (sscanf(cpu_char
, "%d", &physcpu
) != 1)
4824 if (!cpu_in_cpuset(physcpu
, cpuset
))
4828 ret
= sscanf(line
, "%*s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
4840 if (ret
!= 10 || !cg_cpu_usage
) {
4841 c
= strchr(line
, ' ');
4844 l
= snprintf(cache
, cache_size
, "cpu%d%s", curcpu
, c
);
4846 perror("Error writing to cache");
4850 if (l
>= cache_size
) {
4851 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4864 if (physcpu
>= cg_cpu_usage_size
)
4867 all_used
= user
+ nice
+ system
+ iowait
+ irq
+ softirq
+ steal
+ guest
+ guest_nice
;
4868 cg_used
= cg_cpu_usage
[physcpu
].user
+ cg_cpu_usage
[physcpu
].system
;
4870 if (all_used
>= cg_used
) {
4871 new_idle
= idle
+ (all_used
- cg_used
);
4874 lxcfs_error("cpu%d from %s has unexpected cpu time: %lu in /proc/stat, "
4875 "%lu in cpuacct.usage_all; unable to determine idle time\n",
4876 curcpu
, cg
, all_used
, cg_used
);
4880 l
= snprintf(cache
, cache_size
, "cpu%d %lu 0 %lu %lu 0 0 0 0 0 0\n",
4881 curcpu
, cg_cpu_usage
[physcpu
].user
, cg_cpu_usage
[physcpu
].system
,
4885 perror("Error writing to cache");
4889 if (l
>= cache_size
) {
4890 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
4898 user_sum
+= cg_cpu_usage
[physcpu
].user
;
4899 system_sum
+= cg_cpu_usage
[physcpu
].system
;
4900 idle_sum
+= new_idle
;
4905 system_sum
+= system
;
4907 iowait_sum
+= iowait
;
4909 softirq_sum
+= softirq
;
4912 guest_nice_sum
+= guest_nice
;
4918 int cpuall_len
= snprintf(cpuall
, CPUALL_MAX_SIZE
, "cpu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
4929 if (cpuall_len
> 0 && cpuall_len
< CPUALL_MAX_SIZE
) {
4930 memcpy(cache
, cpuall
, cpuall_len
);
4931 cache
+= cpuall_len
;
4933 /* shouldn't happen */
4934 lxcfs_error("proc_stat_read copy cpuall failed, cpuall_len=%d.", cpuall_len
);
4938 memmove(cache
, d
->buf
+ CPUALL_MAX_SIZE
, total_len
);
4939 total_len
+= cpuall_len
;
4943 d
->size
= total_len
;
4944 if (total_len
> size
)
4947 memcpy(buf
, d
->buf
, total_len
);
4951 /* This function retrieves the busy time of a group of tasks by looking at
4952 * cpuacct.usage. Unfortunately, this only makes sense when the container has
4953 * been given it's own cpuacct cgroup. If not, this function will take the busy
4954 * time of all other taks that do not actually belong to the container into
4955 * account as well. If someone has a clever solution for this please send a
4958 static double get_reaper_busy(pid_t task
)
4960 __do_free
char *cgroup
= NULL
, *usage_str
= NULL
;
4961 unsigned long usage
= 0;
4964 initpid
= lookup_initpid_in_store(task
);
4968 cgroup
= get_pid_cgroup(initpid
, "cpuacct");
4971 prune_init_slice(cgroup
);
4972 if (!cgroup_ops
->get(cgroup_ops
, "cpuacct", cgroup
, "cpuacct.usage",
4976 usage
= strtoul(usage_str
, NULL
, 10);
4977 return ((double)usage
/ 1000000000);
4985 fd
= creat("/tmp/lxcfs-iwashere", 0644);
4992 * We read /proc/uptime and reuse its second field.
4993 * For the first field, we use the mtime for the reaper for
4994 * the calling pid as returned by getreaperage
4996 static int proc_uptime_read(char *buf
, size_t size
, off_t offset
,
4997 struct fuse_file_info
*fi
)
4999 struct fuse_context
*fc
= fuse_get_context();
5000 struct file_info
*d
= (struct file_info
*)fi
->fh
;
5001 double busytime
= get_reaper_busy(fc
->pid
);
5002 char *cache
= d
->buf
;
5003 ssize_t total_len
= 0;
5004 double idletime
, reaperage
;
5013 if (offset
> d
->size
)
5015 int left
= d
->size
- offset
;
5016 total_len
= left
> size
? size
: left
;
5017 memcpy(buf
, cache
+ offset
, total_len
);
5021 reaperage
= get_reaper_age(fc
->pid
);
5022 /* To understand why this is done, please read the comment to the
5023 * get_reaper_busy() function.
5025 idletime
= reaperage
;
5026 if (reaperage
>= busytime
)
5027 idletime
= reaperage
- busytime
;
5029 total_len
= snprintf(d
->buf
, d
->buflen
, "%.2lf %.2lf\n", reaperage
, idletime
);
5030 if (total_len
< 0 || total_len
>= d
->buflen
){
5031 lxcfs_error("%s\n", "failed to write to cache");
5035 d
->size
= (int)total_len
;
5038 if (total_len
> size
) total_len
= size
;
5040 memcpy(buf
, d
->buf
, total_len
);
5044 static int proc_diskstats_read(char *buf
, size_t size
, off_t offset
,
5045 struct fuse_file_info
*fi
)
5047 __do_free
char *cg
= NULL
, *io_serviced_str
= NULL
,
5048 *io_merged_str
= NULL
, *io_service_bytes_str
= NULL
,
5049 *io_wait_time_str
= NULL
, *io_service_time_str
= NULL
,
5051 __do_fclose
FILE *f
= NULL
;
5052 struct fuse_context
*fc
= fuse_get_context();
5053 struct file_info
*d
= (struct file_info
*)fi
->fh
;
5054 unsigned long read
= 0, write
= 0;
5055 unsigned long read_merged
= 0, write_merged
= 0;
5056 unsigned long read_sectors
= 0, write_sectors
= 0;
5057 unsigned long read_ticks
= 0, write_ticks
= 0;
5058 unsigned long ios_pgr
= 0, tot_ticks
= 0, rq_ticks
= 0;
5059 unsigned long rd_svctm
= 0, wr_svctm
= 0, rd_wait
= 0, wr_wait
= 0;
5060 char *cache
= d
->buf
;
5061 size_t cache_size
= d
->buflen
;
5062 size_t linelen
= 0, total_len
= 0;
5063 unsigned int major
= 0, minor
= 0;
5071 if (offset
> d
->size
)
5077 left
= d
->size
- offset
;
5078 total_len
= left
> size
? size
: left
;
5079 memcpy(buf
, cache
+ offset
, total_len
);
5084 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
5085 if (initpid
<= 1 || is_shared_pidns(initpid
))
5087 cg
= get_pid_cgroup(initpid
, "blkio");
5089 return read_file_fuse("/proc/diskstats", buf
, size
, d
);
5090 prune_init_slice(cg
);
5092 ret
= cgroup_ops
->get_io_serviced(cgroup_ops
, cg
, &io_serviced_str
);
5094 if (ret
== -EOPNOTSUPP
)
5095 return read_file_fuse("/proc/diskstats", buf
, size
, d
);
5098 ret
= cgroup_ops
->get_io_merged(cgroup_ops
, cg
, &io_merged_str
);
5100 if (ret
== -EOPNOTSUPP
)
5101 return read_file_fuse("/proc/diskstats", buf
, size
, d
);
5104 ret
= cgroup_ops
->get_io_service_bytes(cgroup_ops
, cg
, &io_service_bytes_str
);
5106 if (ret
== -EOPNOTSUPP
)
5107 return read_file_fuse("/proc/diskstats", buf
, size
, d
);
5110 ret
= cgroup_ops
->get_io_wait_time(cgroup_ops
, cg
, &io_wait_time_str
);
5112 if (ret
== -EOPNOTSUPP
)
5113 return read_file_fuse("/proc/diskstats", buf
, size
, d
);
5116 ret
= cgroup_ops
->get_io_service_time(cgroup_ops
, cg
, &io_service_time_str
);
5118 if (ret
== -EOPNOTSUPP
)
5119 return read_file_fuse("/proc/diskstats", buf
, size
, d
);
5122 f
= fopen("/proc/diskstats", "r");
5126 while (getline(&line
, &linelen
, f
) != -1) {
5130 i
= sscanf(line
, "%u %u %71s", &major
, &minor
, dev_name
);
5134 get_blkio_io_value(io_serviced_str
, major
, minor
, "Read", &read
);
5135 get_blkio_io_value(io_serviced_str
, major
, minor
, "Write", &write
);
5136 get_blkio_io_value(io_merged_str
, major
, minor
, "Read", &read_merged
);
5137 get_blkio_io_value(io_merged_str
, major
, minor
, "Write", &write_merged
);
5138 get_blkio_io_value(io_service_bytes_str
, major
, minor
, "Read", &read_sectors
);
5139 read_sectors
= read_sectors
/512;
5140 get_blkio_io_value(io_service_bytes_str
, major
, minor
, "Write", &write_sectors
);
5141 write_sectors
= write_sectors
/512;
5143 get_blkio_io_value(io_service_time_str
, major
, minor
, "Read", &rd_svctm
);
5144 rd_svctm
= rd_svctm
/1000000;
5145 get_blkio_io_value(io_wait_time_str
, major
, minor
, "Read", &rd_wait
);
5146 rd_wait
= rd_wait
/1000000;
5147 read_ticks
= rd_svctm
+ rd_wait
;
5149 get_blkio_io_value(io_service_time_str
, major
, minor
, "Write", &wr_svctm
);
5150 wr_svctm
= wr_svctm
/1000000;
5151 get_blkio_io_value(io_wait_time_str
, major
, minor
, "Write", &wr_wait
);
5152 wr_wait
= wr_wait
/1000000;
5153 write_ticks
= wr_svctm
+ wr_wait
;
5155 get_blkio_io_value(io_service_time_str
, major
, minor
, "Total", &tot_ticks
);
5156 tot_ticks
= tot_ticks
/1000000;
5158 memset(lbuf
, 0, 256);
5159 if (read
|| write
|| read_merged
|| write_merged
|| read_sectors
|| write_sectors
|| read_ticks
|| write_ticks
)
5160 snprintf(lbuf
, 256, "%u %u %s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
5161 major
, minor
, dev_name
, read
, read_merged
, read_sectors
, read_ticks
,
5162 write
, write_merged
, write_sectors
, write_ticks
, ios_pgr
, tot_ticks
, rq_ticks
);
5166 l
= snprintf(cache
, cache_size
, "%s", lbuf
);
5168 perror("Error writing to fuse buf");
5171 if (l
>= cache_size
) {
5172 lxcfs_error("%s\n", "Internal error: truncated write to cache.");
5181 d
->size
= total_len
;
5182 if (total_len
> size
) total_len
= size
;
5183 memcpy(buf
, d
->buf
, total_len
);
5188 static int proc_swaps_read(char *buf
, size_t size
, off_t offset
,
5189 struct fuse_file_info
*fi
)
5191 __do_free
char *cg
= NULL
, *memswlimit_str
= NULL
, *memusage_str
= NULL
,
5192 *memswusage_str
= NULL
;
5193 struct fuse_context
*fc
= fuse_get_context();
5194 struct file_info
*d
= (struct file_info
*)fi
->fh
;
5195 unsigned long memswlimit
= 0, memlimit
= 0, memusage
= 0,
5196 memswusage
= 0, swap_total
= 0, swap_free
= 0;
5197 ssize_t total_len
= 0;
5199 char *cache
= d
->buf
;
5205 if (offset
> d
->size
)
5211 left
= d
->size
- offset
;
5212 total_len
= left
> size
? size
: left
;
5213 memcpy(buf
, cache
+ offset
, total_len
);
5218 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
5219 if (initpid
<= 1 || is_shared_pidns(initpid
))
5221 cg
= get_pid_cgroup(initpid
, "memory");
5223 return read_file_fuse("/proc/swaps", buf
, size
, d
);
5224 prune_init_slice(cg
);
5226 memlimit
= get_min_memlimit(cg
, false);
5228 ret
= cgroup_ops
->get_memory_current(cgroup_ops
, cg
, &memusage_str
);
5232 memusage
= strtoul(memusage_str
, NULL
, 10);
5234 ret
= cgroup_ops
->get_memory_swap_max(cgroup_ops
, cg
, &memswlimit_str
);
5236 ret
= cgroup_ops
->get_memory_swap_current(cgroup_ops
, cg
, &memswusage_str
);
5238 memswlimit
= get_min_memlimit(cg
, true);
5239 memswusage
= strtoul(memswusage_str
, NULL
, 10);
5240 swap_total
= (memswlimit
- memlimit
) / 1024;
5241 swap_free
= (memswusage
- memusage
) / 1024;
5244 total_len
= snprintf(d
->buf
, d
->size
, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
5246 /* When no mem + swap limit is specified or swapaccount=0*/
5248 __do_free
char *line
= NULL
;
5249 __do_fclose
FILE *f
= NULL
;
5252 f
= fopen("/proc/meminfo", "r");
5256 while (getline(&line
, &linelen
, f
) != -1) {
5257 if (startswith(line
, "SwapTotal:"))
5258 sscanf(line
, "SwapTotal: %8lu kB", &swap_total
);
5259 else if (startswith(line
, "SwapFree:"))
5260 sscanf(line
, "SwapFree: %8lu kB", &swap_free
);
5264 if (swap_total
> 0) {
5265 l
= snprintf(d
->buf
+ total_len
, d
->size
- total_len
,
5266 "none%*svirtual\t\t%lu\t%lu\t0\n", 36, " ",
5267 swap_total
, swap_free
);
5271 if (total_len
< 0 || l
< 0) {
5272 perror("Error writing to cache");
5277 d
->size
= (int)total_len
;
5279 if (total_len
> size
) total_len
= size
;
5280 memcpy(buf
, d
->buf
, total_len
);
5285 * Find the process pid from cgroup path.
5286 * eg:from /sys/fs/cgroup/cpu/docker/containerid/cgroup.procs to find the process pid.
5287 * @pid_buf : put pid to pid_buf.
5288 * @dpath : the path of cgroup. eg: /docker/containerid or /docker/containerid/child-cgroup ...
5289 * @depth : the depth of cgroup in container.
5290 * @sum : return the number of pid.
5291 * @cfd : the file descriptor of the mounted cgroup. eg: /sys/fs/cgroup/cpu
5293 static int calc_pid(char ***pid_buf
, char *dpath
, int depth
, int sum
, int cfd
)
5295 __do_free
char *path
= NULL
;
5296 __do_close_prot_errno
int fd
= -EBADF
;
5297 __do_fclose
FILE *f
= NULL
;
5298 __do_closedir
DIR *dir
= NULL
;
5299 struct dirent
*file
;
5305 /* path = dpath + "/cgroup.procs" + /0 */
5306 path
= malloc(strlen(dpath
) + 20);
5310 strcpy(path
, dpath
);
5311 fd
= openat(cfd
, path
, O_RDONLY
| O_CLOEXEC
| O_NOFOLLOW
);
5315 dir
= fdopendir(move_fd(fd
));
5319 while (((file
= readdir(dir
)) != NULL
) && depth
> 0) {
5320 if (strcmp(file
->d_name
, ".") == 0)
5323 if (strcmp(file
->d_name
, "..") == 0)
5326 if (file
->d_type
== DT_DIR
) {
5327 __do_free
char *path_dir
= NULL
;
5329 /* path + '/' + d_name +/0 */
5330 path_dir
= malloc(strlen(path
) + 2 + sizeof(file
->d_name
));
5334 strcpy(path_dir
, path
);
5335 strcat(path_dir
, "/");
5336 strcat(path_dir
, file
->d_name
);
5338 sum
= calc_pid(pid_buf
, path_dir
, pd
, sum
, cfd
);
5342 strcat(path
, "/cgroup.procs");
5343 fd
= openat(cfd
, path
, O_RDONLY
);
5347 f
= fdopen(move_fd(fd
), "r");
5351 while (getline(&line
, &linelen
, f
) != -1) {
5352 pid
= realloc(*pid_buf
, sizeof(char *) * (sum
+ 1));
5357 *(*pid_buf
+ sum
) = malloc(strlen(line
) + 1);
5358 if (!*(*pid_buf
+ sum
))
5361 strcpy(*(*pid_buf
+ sum
), line
);
5369 * calc_load calculates the load according to the following formula:
5370 * load1 = load0 * exp + active * (1 - exp)
5372 * @load1: the new loadavg.
5373 * @load0: the former loadavg.
5374 * @active: the total number of running pid at this moment.
5375 * @exp: the fixed-point defined in the beginning.
5377 static unsigned long
5378 calc_load(unsigned long load
, unsigned long exp
, unsigned long active
)
5380 unsigned long newload
;
5382 active
= active
> 0 ? active
* FIXED_1
: 0;
5383 newload
= load
* exp
+ active
* (FIXED_1
- exp
);
5385 newload
+= FIXED_1
- 1;
5387 return newload
/ FIXED_1
;
5391 * Return 0 means that container p->cg is closed.
5392 * Return -1 means that error occurred in refresh.
5393 * Positive num equals the total number of pid.
5395 static int refresh_load(struct load_node
*p
, char *path
)
5397 __do_free
char *line
= NULL
;
5399 char proc_path
[256];
5400 int i
, ret
, run_pid
= 0, total_pid
= 0, last_pid
= 0;
5403 struct dirent
*file
;
5405 idbuf
= malloc(sizeof(char *));
5409 sum
= calc_pid(&idbuf
, path
, DEPTH_DIR
, 0, p
->cfd
);
5414 for (i
= 0; i
< sum
; i
++) {
5415 __do_closedir
DIR *dp
= NULL
;
5418 length
= strlen(idbuf
[i
])-1;
5419 idbuf
[i
][length
] = '\0';
5420 ret
= snprintf(proc_path
, 256, "/proc/%s/task", idbuf
[i
]);
5421 if (ret
< 0 || ret
> 255) {
5422 lxcfs_error("%s\n", "snprintf() failed in refresh_load.");
5428 dp
= opendir(proc_path
);
5430 lxcfs_error("%s\n", "Open proc_path failed in refresh_load.");
5433 while ((file
= readdir(dp
)) != NULL
) {
5434 __do_fclose
FILE *f
= NULL
;
5436 if (strncmp(file
->d_name
, ".", 1) == 0)
5438 if (strncmp(file
->d_name
, "..", 1) == 0)
5441 /* We make the biggest pid become last_pid.*/
5442 ret
= atof(file
->d_name
);
5443 last_pid
= (ret
> last_pid
) ? ret
: last_pid
;
5445 ret
= snprintf(proc_path
, 256, "/proc/%s/task/%s/status", idbuf
[i
], file
->d_name
);
5446 if (ret
< 0 || ret
> 255) {
5447 lxcfs_error("%s\n", "snprintf() failed in refresh_load.");
5453 f
= fopen(proc_path
, "r");
5455 while (getline(&line
, &linelen
, f
) != -1) {
5457 if ((line
[0] == 'S') && (line
[1] == 't'))
5461 if ((line
[7] == 'R') || (line
[7] == 'D'))
5466 /*Calculate the loadavg.*/
5467 p
->avenrun
[0] = calc_load(p
->avenrun
[0], EXP_1
, run_pid
);
5468 p
->avenrun
[1] = calc_load(p
->avenrun
[1], EXP_5
, run_pid
);
5469 p
->avenrun
[2] = calc_load(p
->avenrun
[2], EXP_15
, run_pid
);
5470 p
->run_pid
= run_pid
;
5471 p
->total_pid
= total_pid
;
5472 p
->last_pid
= last_pid
;
5483 * Traverse the hash table and update it.
5485 void *load_begin(void *arg
)
5488 int i
, sum
, length
, ret
;
5489 struct load_node
*f
;
5491 clock_t time1
, time2
;
5494 if (loadavg_stop
== 1)
5498 for (i
= 0; i
< LOAD_SIZE
; i
++) {
5499 pthread_mutex_lock(&load_hash
[i
].lock
);
5500 if (load_hash
[i
].next
== NULL
) {
5501 pthread_mutex_unlock(&load_hash
[i
].lock
);
5504 f
= load_hash
[i
].next
;
5507 __do_free
char *path
= NULL
;
5509 length
= strlen(f
->cg
) + 2;
5510 /* strlen(f->cg) + '.' or '' + \0 */
5511 path
= malloc(length
);
5515 ret
= snprintf(path
, length
, "%s%s", dot_or_empty(f
->cg
), f
->cg
);
5516 if (ret
< 0 || ret
> length
- 1) {
5517 /* snprintf failed, ignore the node.*/
5518 lxcfs_error("Refresh node %s failed for snprintf().\n", f
->cg
);
5522 sum
= refresh_load(f
, path
);
5527 /* load_hash[i].lock locks only on the first node.*/
5528 if (first_node
== 1) {
5530 pthread_mutex_unlock(&load_hash
[i
].lock
);
5535 if (loadavg_stop
== 1)
5539 usleep(FLUSH_TIME
* 1000000 - (int)((time2
- time1
) * 1000000 / CLOCKS_PER_SEC
));
5543 static int proc_loadavg_read(char *buf
, size_t size
, off_t offset
,
5544 struct fuse_file_info
*fi
)
5546 struct fuse_context
*fc
= fuse_get_context();
5547 struct file_info
*d
= (struct file_info
*)fi
->fh
;
5550 size_t total_len
= 0;
5551 char *cache
= d
->buf
;
5552 struct load_node
*n
;
5555 unsigned long a
, b
, c
;
5558 if (offset
> d
->size
)
5562 int left
= d
->size
- offset
;
5563 total_len
= left
> size
? size
: left
;
5564 memcpy(buf
, cache
+ offset
, total_len
);
5568 return read_file_fuse("/proc/loadavg", buf
, size
, d
);
5570 initpid
= lookup_initpid_in_store(fc
->pid
);
5571 if (initpid
<= 1 || is_shared_pidns(initpid
))
5573 cg
= get_pid_cgroup(initpid
, "cpu");
5575 return read_file_fuse("/proc/loadavg", buf
, size
, d
);
5577 prune_init_slice(cg
);
5578 hash
= calc_hash(cg
) % LOAD_SIZE
;
5579 n
= locate_node(cg
, hash
);
5583 cfd
= find_mounted_controller("cpu");
5586 * In locate_node() above, pthread_rwlock_unlock() isn't used
5587 * because delete is not allowed before read has ended.
5589 pthread_rwlock_unlock(&load_hash
[hash
].rdlock
);
5594 n
= malloc(sizeof(struct load_node
));
5598 n
->cg
= malloc(strlen(cg
)+1);
5606 n
->last_pid
= initpid
;
5608 insert_node(&n
, hash
);
5610 a
= n
->avenrun
[0] + (FIXED_1
/200);
5611 b
= n
->avenrun
[1] + (FIXED_1
/200);
5612 c
= n
->avenrun
[2] + (FIXED_1
/200);
5613 total_len
= snprintf(d
->buf
, d
->buflen
, "%lu.%02lu %lu.%02lu %lu.%02lu %d/%d %d\n",
5614 LOAD_INT(a
), LOAD_FRAC(a
),
5615 LOAD_INT(b
), LOAD_FRAC(b
),
5616 LOAD_INT(c
), LOAD_FRAC(c
),
5617 n
->run_pid
, n
->total_pid
, n
->last_pid
);
5618 pthread_rwlock_unlock(&load_hash
[hash
].rdlock
);
5619 if (total_len
< 0 || total_len
>= d
->buflen
) {
5620 lxcfs_error("%s\n", "Failed to write to cache");
5624 d
->size
= (int)total_len
;
5627 if (total_len
> size
)
5629 memcpy(buf
, d
->buf
, total_len
);
5636 /* Return a positive number on success, return 0 on failure.*/
5637 pthread_t
load_daemon(int load_use
)
5644 lxcfs_error("%s\n", "Initialize hash_table fails in load_daemon!");
5647 ret
= pthread_create(&pid
, NULL
, load_begin
, NULL
);
5649 lxcfs_error("%s\n", "Create pthread fails in load_daemon!");
5653 /* use loadavg, here loadavg = 1*/
5658 /* Returns 0 on success. */
5659 int stop_load_daemon(pthread_t pid
)
5663 /* Signal the thread to gracefully stop */
5666 s
= pthread_join(pid
, NULL
); /* Make sure sub thread has been canceled. */
5668 lxcfs_error("%s\n", "stop_load_daemon error: failed to join");
5678 static off_t
get_procfile_size(const char *which
)
5680 FILE *f
= fopen(which
, "r");
5683 ssize_t sz
, answer
= 0;
5687 while ((sz
= getline(&line
, &len
, f
)) != -1)
5695 int proc_getattr(const char *path
, struct stat
*sb
)
5697 struct timespec now
;
5699 memset(sb
, 0, sizeof(struct stat
));
5700 if (clock_gettime(CLOCK_REALTIME
, &now
) < 0)
5702 sb
->st_uid
= sb
->st_gid
= 0;
5703 sb
->st_atim
= sb
->st_mtim
= sb
->st_ctim
= now
;
5704 if (strcmp(path
, "/proc") == 0) {
5705 sb
->st_mode
= S_IFDIR
| 00555;
5709 if (strcmp(path
, "/proc/meminfo") == 0 ||
5710 strcmp(path
, "/proc/cpuinfo") == 0 ||
5711 strcmp(path
, "/proc/uptime") == 0 ||
5712 strcmp(path
, "/proc/stat") == 0 ||
5713 strcmp(path
, "/proc/diskstats") == 0 ||
5714 strcmp(path
, "/proc/swaps") == 0 ||
5715 strcmp(path
, "/proc/loadavg") == 0) {
5717 sb
->st_mode
= S_IFREG
| 00444;
5725 int proc_readdir(const char *path
, void *buf
, fuse_fill_dir_t filler
, off_t offset
,
5726 struct fuse_file_info
*fi
)
5728 if (filler(buf
, ".", NULL
, 0) != 0 ||
5729 filler(buf
, "..", NULL
, 0) != 0 ||
5730 filler(buf
, "cpuinfo", NULL
, 0) != 0 ||
5731 filler(buf
, "meminfo", NULL
, 0) != 0 ||
5732 filler(buf
, "stat", NULL
, 0) != 0 ||
5733 filler(buf
, "uptime", NULL
, 0) != 0 ||
5734 filler(buf
, "diskstats", NULL
, 0) != 0 ||
5735 filler(buf
, "swaps", NULL
, 0) != 0 ||
5736 filler(buf
, "loadavg", NULL
, 0) != 0)
5741 int proc_open(const char *path
, struct fuse_file_info
*fi
)
5744 struct file_info
*info
;
5746 if (strcmp(path
, "/proc/meminfo") == 0)
5747 type
= LXC_TYPE_PROC_MEMINFO
;
5748 else if (strcmp(path
, "/proc/cpuinfo") == 0)
5749 type
= LXC_TYPE_PROC_CPUINFO
;
5750 else if (strcmp(path
, "/proc/uptime") == 0)
5751 type
= LXC_TYPE_PROC_UPTIME
;
5752 else if (strcmp(path
, "/proc/stat") == 0)
5753 type
= LXC_TYPE_PROC_STAT
;
5754 else if (strcmp(path
, "/proc/diskstats") == 0)
5755 type
= LXC_TYPE_PROC_DISKSTATS
;
5756 else if (strcmp(path
, "/proc/swaps") == 0)
5757 type
= LXC_TYPE_PROC_SWAPS
;
5758 else if (strcmp(path
, "/proc/loadavg") == 0)
5759 type
= LXC_TYPE_PROC_LOADAVG
;
5763 info
= malloc(sizeof(*info
));
5767 memset(info
, 0, sizeof(*info
));
5770 info
->buflen
= get_procfile_size(path
) + BUF_RESERVE_SIZE
;
5772 info
->buf
= malloc(info
->buflen
);
5773 } while (!info
->buf
);
5774 memset(info
->buf
, 0, info
->buflen
);
5775 /* set actual size to buffer size */
5776 info
->size
= info
->buflen
;
5778 fi
->fh
= (unsigned long)info
;
5782 int proc_access(const char *path
, int mask
)
5784 if (strcmp(path
, "/proc") == 0 && access(path
, R_OK
) == 0)
5787 /* these are all read-only */
5788 if ((mask
& ~R_OK
) != 0)
5793 int proc_release(const char *path
, struct fuse_file_info
*fi
)
5795 do_release_file_info(fi
);
5799 int proc_read(const char *path
, char *buf
, size_t size
, off_t offset
,
5800 struct fuse_file_info
*fi
)
5802 struct file_info
*f
= (struct file_info
*) fi
->fh
;
5805 case LXC_TYPE_PROC_MEMINFO
:
5806 return proc_meminfo_read(buf
, size
, offset
, fi
);
5807 case LXC_TYPE_PROC_CPUINFO
:
5808 return proc_cpuinfo_read(buf
, size
, offset
, fi
);
5809 case LXC_TYPE_PROC_UPTIME
:
5810 return proc_uptime_read(buf
, size
, offset
, fi
);
5811 case LXC_TYPE_PROC_STAT
:
5812 return proc_stat_read(buf
, size
, offset
, fi
);
5813 case LXC_TYPE_PROC_DISKSTATS
:
5814 return proc_diskstats_read(buf
, size
, offset
, fi
);
5815 case LXC_TYPE_PROC_SWAPS
:
5816 return proc_swaps_read(buf
, size
, offset
, fi
);
5817 case LXC_TYPE_PROC_LOADAVG
:
5818 return proc_loadavg_read(buf
, size
, offset
, fi
);
5825 * Functions needed to setup cgroups in the __constructor__.
5828 static bool umount_if_mounted(void)
5830 if (umount2(BASEDIR
, MNT_DETACH
) < 0 && errno
!= EINVAL
) {
5831 lxcfs_error("Failed to unmount %s: %s.\n", BASEDIR
, strerror(errno
));
5837 /* __typeof__ should be safe to use with all compilers. */
5838 typedef __typeof__(((struct statfs
*)NULL
)->f_type
) fs_type_magic
;
5839 static bool has_fs_type(const struct statfs
*fs
, fs_type_magic magic_val
)
5841 return (fs
->f_type
== (fs_type_magic
)magic_val
);
5845 * looking at fs/proc_namespace.c, it appears we can
5846 * actually expect the rootfs entry to very specifically contain
5847 * " - rootfs rootfs "
5848 * IIUC, so long as we've chrooted so that rootfs is not our root,
5849 * the rootfs entry should always be skipped in mountinfo contents.
5851 static bool is_on_ramfs(void)
5859 f
= fopen("/proc/self/mountinfo", "r");
5863 while (getline(&line
, &len
, f
) != -1) {
5864 for (p
= line
, i
= 0; p
&& i
< 4; i
++)
5865 p
= strchr(p
+ 1, ' ');
5868 p2
= strchr(p
+ 1, ' ');
5872 if (strcmp(p
+ 1, "/") == 0) {
5873 // this is '/'. is it the ramfs?
5874 p
= strchr(p2
+ 1, '-');
5875 if (p
&& strncmp(p
, "- rootfs rootfs ", 16) == 0) {
5887 static int pivot_enter()
5889 int ret
= -1, oldroot
= -1, newroot
= -1;
5891 oldroot
= open("/", O_DIRECTORY
| O_RDONLY
);
5893 lxcfs_error("%s\n", "Failed to open old root for fchdir.");
5897 newroot
= open(ROOTDIR
, O_DIRECTORY
| O_RDONLY
);
5899 lxcfs_error("%s\n", "Failed to open new root for fchdir.");
5903 /* change into new root fs */
5904 if (fchdir(newroot
) < 0) {
5905 lxcfs_error("Failed to change directory to new rootfs: %s.\n", ROOTDIR
);
5909 /* pivot_root into our new root fs */
5910 if (pivot_root(".", ".") < 0) {
5911 lxcfs_error("pivot_root() syscall failed: %s.\n", strerror(errno
));
5916 * At this point the old-root is mounted on top of our new-root.
5917 * To unmounted it we must not be chdir'd into it, so escape back
5920 if (fchdir(oldroot
) < 0) {
5921 lxcfs_error("%s\n", "Failed to enter old root.");
5925 if (umount2(".", MNT_DETACH
) < 0) {
5926 lxcfs_error("%s\n", "Failed to detach old root.");
5930 if (fchdir(newroot
) < 0) {
5931 lxcfs_error("%s\n", "Failed to re-enter new root.");
5946 static int chroot_enter()
5948 if (mount(ROOTDIR
, "/", NULL
, MS_REC
| MS_BIND
, NULL
)) {
5949 lxcfs_error("Failed to recursively bind-mount %s into /.", ROOTDIR
);
5953 if (chroot(".") < 0) {
5954 lxcfs_error("Call to chroot() failed: %s.\n", strerror(errno
));
5958 if (chdir("/") < 0) {
5959 lxcfs_error("Failed to change directory: %s.\n", strerror(errno
));
5966 static int permute_and_enter(void)
5970 if (statfs("/", &sb
) < 0) {
5971 lxcfs_error("%s\n", "Could not stat / mountpoint.");
5975 /* has_fs_type() is not reliable. When the ramfs is a tmpfs it will
5976 * likely report TMPFS_MAGIC. Hence, when it reports no we still check
5977 * /proc/1/mountinfo. */
5978 if (has_fs_type(&sb
, RAMFS_MAGIC
) || is_on_ramfs())
5979 return chroot_enter();
5981 if (pivot_enter() < 0) {
5982 lxcfs_error("%s\n", "Could not perform pivot root.");
5989 /* Prepare our new clean root. */
5990 static int permute_prepare(void)
5992 if (mkdir(ROOTDIR
, 0700) < 0 && errno
!= EEXIST
) {
5993 lxcfs_error("%s\n", "Failed to create directory for new root.");
5997 if (mount("/", ROOTDIR
, NULL
, MS_BIND
, 0) < 0) {
5998 lxcfs_error("Failed to bind-mount / for new root: %s.\n", strerror(errno
));
6002 if (mount(RUNTIME_PATH
, ROOTDIR RUNTIME_PATH
, NULL
, MS_BIND
, 0) < 0) {
6003 lxcfs_error("Failed to bind-mount /run into new root: %s.\n", strerror(errno
));
6007 if (mount(BASEDIR
, ROOTDIR BASEDIR
, NULL
, MS_REC
| MS_MOVE
, 0) < 0) {
6008 printf("Failed to move " BASEDIR
" into new root: %s.\n", strerror(errno
));
6015 /* Calls chroot() on ramfs, pivot_root() in all other cases. */
6016 static bool permute_root(void)
6018 /* Prepare new root. */
6019 if (permute_prepare() < 0)
6022 /* Pivot into new root. */
6023 if (permute_and_enter() < 0)
6029 static int preserve_mnt_ns(int pid
)
6032 size_t len
= sizeof("/proc/") + 21 + sizeof("/ns/mnt");
6035 ret
= snprintf(path
, len
, "/proc/%d/ns/mnt", pid
);
6036 if (ret
< 0 || (size_t)ret
>= len
)
6039 return open(path
, O_RDONLY
| O_CLOEXEC
);
6042 static bool cgfs_prepare_mounts(void)
6044 if (!mkdir_p(BASEDIR
, 0700)) {
6045 lxcfs_error("%s\n", "Failed to create lxcfs cgroup mountpoint.");
6049 if (!umount_if_mounted()) {
6050 lxcfs_error("%s\n", "Failed to clean up old lxcfs cgroup mountpoint.");
6054 if (unshare(CLONE_NEWNS
) < 0) {
6055 lxcfs_error("Failed to unshare mount namespace: %s.\n", strerror(errno
));
6059 cgroup_mount_ns_fd
= preserve_mnt_ns(getpid());
6060 if (cgroup_mount_ns_fd
< 0) {
6061 lxcfs_error("Failed to preserve mount namespace: %s.\n", strerror(errno
));
6065 if (mount(NULL
, "/", NULL
, MS_REC
| MS_PRIVATE
, 0) < 0) {
6066 lxcfs_error("Failed to remount / private: %s.\n", strerror(errno
));
6070 if (mount("tmpfs", BASEDIR
, "tmpfs", 0, "size=100000,mode=700") < 0) {
6071 lxcfs_error("%s\n", "Failed to mount tmpfs over lxcfs cgroup mountpoint.");
6078 static bool cgfs_mount_hierarchies(void)
6080 if (!mkdir_p(BASEDIR DEFAULT_CGROUP_MOUNTPOINT
, 0755))
6083 if (!cgroup_ops
->mount(cgroup_ops
, BASEDIR
))
6086 for (struct hierarchy
**h
= cgroup_ops
->hierarchies
; h
&& *h
; h
++) {
6087 __do_free
char *path
= must_make_path(BASEDIR
, (*h
)->mountpoint
, NULL
);
6088 (*h
)->fd
= open(path
, O_DIRECTORY
| O_CLOEXEC
| O_NOFOLLOW
);
6096 static bool cgfs_setup_controllers(void)
6098 if (!cgfs_prepare_mounts())
6101 if (!cgfs_mount_hierarchies()) {
6102 lxcfs_error("%s\n", "Failed to set up private lxcfs cgroup mounts.");
6106 if (!permute_root())
6112 static void __attribute__((constructor
)) lxcfs_init(void)
6114 __do_close_prot_errno
int init_ns
= -EBADF
;
6116 char cwd
[MAXPATHLEN
];
6118 cgroup_ops
= cgroup_init();
6120 log_exit("Failed to initialize cgroup support");
6122 /* Preserve initial namespace. */
6123 init_ns
= preserve_mnt_ns(getpid());
6125 log_exit("Failed to preserve initial mount namespace");
6127 cret
= getcwd(cwd
, MAXPATHLEN
);
6128 log_exit("%s - Could not retrieve current working directory", strerror(errno
));
6130 /* This function calls unshare(CLONE_NEWNS) our initial mount namespace
6131 * to privately mount lxcfs cgroups. */
6132 if (!cgfs_setup_controllers())
6133 log_exit("Failed to setup private cgroup mounts for lxcfs");
6135 if (setns(init_ns
, 0) < 0)
6136 log_exit("%s - Failed to switch back to initial mount namespace", strerror(errno
));
6138 if (!cret
|| chdir(cwd
) < 0)
6139 log_exit("%s - Could not change back to original working directory", strerror(errno
));
6141 if (!init_cpuview())
6142 log_exit("Failed to init CPU view");
6147 static void __attribute__((destructor
)) lxcfs_exit(void)
6149 lxcfs_debug("%s\n", "Running destructor for liblxcfs.");
6151 close_prot_errno_disarm(cgroup_mount_ns_fd
);
6152 cgroup_exit(cgroup_ops
);