3 * Copyright © 2014-2016 Canonical, Inc
4 * Author: Serge Hallyn <serge.hallyn@ubuntu.com>
6 * See COPYING file for details.
9 #define FUSE_USE_VERSION 26
24 #include <linux/sched.h>
25 #include <sys/param.h>
26 #include <sys/socket.h>
27 #include <sys/mount.h>
28 #include <sys/epoll.h>
33 #include "config.h" // for VERSION
38 LXC_TYPE_PROC_MEMINFO
,
39 LXC_TYPE_PROC_CPUINFO
,
42 LXC_TYPE_PROC_DISKSTATS
,
51 char *buf
; // unused as of yet
53 int size
; //actual data size
57 /* reserve buffer size, for cpuall in /proc/stat */
58 #define BUF_RESERVE_SIZE 256
61 * A table caching which pid is init for a pid namespace.
62 * When looking up which pid is init for $qpid, we first
63 * 1. Stat /proc/$qpid/ns/pid.
64 * 2. Check whether the ino_t is in our store.
65 * a. if not, fork a child in qpid's ns to send us
66 * ucred.pid = 1, and read the initpid. Cache
67 * initpid and creation time for /proc/initpid
68 * in a new store entry.
69 * b. if so, verify that /proc/initpid still matches
70 * what we have saved. If not, clear the store
71 * entry and go back to a. If so, return the
/* One cache entry: maps the inode of a /proc/$pid/ns/pid file to the pid of
 * init inside that pid namespace, plus the ctime of /proc/$initpid so staleness
 * can be detected (see initpid_still_valid).
 * NOTE(review): other code in this file also reads/writes an e->lastcheck
 * member (prune_initpid_store, save_initpid); its declaration is not visible
 * in this fragment — confirm against the full struct definition. */
74 struct pidns_init_store
{
75 ino_t ino
; // inode number for /proc/$pid/ns/pid
76 pid_t initpid
; // the pid of init in that ns
77 long int ctime
; // the time at which /proc/$initpid was created
78 struct pidns_init_store
*next
; // next entry in the same hash bucket (chained hashing)
82 /* lol - look at how they are allocated in the kernel */
83 #define PIDNS_HASH_SIZE 4096
84 #define HASH(x) ((x) % PIDNS_HASH_SIZE)
86 static struct pidns_init_store
*pidns_hash_table
[PIDNS_HASH_SIZE
];
87 static pthread_mutex_t pidns_store_mutex
= PTHREAD_MUTEX_INITIALIZER
;
/* Lock @l. pthread_mutex_lock() returns the error number directly (it does
 * not set errno), hence strerror(ret) rather than strerror(errno). */
88 static void lock_mutex(pthread_mutex_t
*l
)
92 if ((ret
= pthread_mutex_lock(l
)) != 0) {
93 fprintf(stderr
, "pthread_mutex_lock returned:%d %s\n", ret
, strerror(ret
));
/* Unlock @l; mirror of lock_mutex() above, same error-reporting convention
 * (pthread_mutex_unlock() returns the error number, does not set errno). */
98 static void unlock_mutex(pthread_mutex_t
*l
)
102 if ((ret
= pthread_mutex_unlock(l
)) != 0) {
103 fprintf(stderr
, "pthread_mutex_unlock returned:%d %s\n", ret
, strerror(ret
));
/* Take the global lock protecting pidns_hash_table. */
108 static void store_lock(void)
110 lock_mutex(&pidns_store_mutex
);
/* Release the global lock protecting pidns_hash_table. */
113 static void store_unlock(void)
115 unlock_mutex(&pidns_store_mutex
);
118 /* Must be called under store_lock */
/* Check whether cached entry @e still describes a live init: stat
 * /proc/<initpid> and compare its ctime against the ctime recorded when the
 * entry was created.  A mismatch (or a failed stat) means the pid was
 * recycled and the cache entry is stale. @nsfdsb is the stat of the nsfd
 * inode; it is unused in the lines visible here. */
119 static bool initpid_still_valid(struct pidns_init_store
*e
, struct stat
*nsfdsb
)
124 snprintf(fnam
, 100, "/proc/%d", e
->initpid
);
125 if (stat(fnam
, &initsb
) < 0)
128 fprintf(stderr
, "comparing ctime %ld %ld for pid %d\n",
129 e
->ctime
, initsb
.st_ctime
, e
->initpid
);
// Stale if /proc/<initpid> was (re)created at a different time than recorded.
131 if (e
->ctime
!= initsb
.st_ctime
)
136 /* Must be called under store_lock */
/* Unlink entry @e from its bucket in pidns_hash_table.  Handles the
 * head-of-bucket case first, then walks the chain looking for the
 * predecessor of @e (the walk/free code is partially elided here). */
137 static void remove_initpid(struct pidns_init_store
*e
)
139 struct pidns_init_store
*tmp
;
143 fprintf(stderr
, "remove_initpid: removing entry for %d\n", e
->initpid
);
// Fast path: e is the first entry in its bucket.
146 if (pidns_hash_table
[h
] == e
) {
147 pidns_hash_table
[h
] = e
->next
;
// Otherwise scan the chain for the node whose ->next is e.
152 tmp
= pidns_hash_table
[h
];
154 if (tmp
->next
== e
) {
164 /* Must be called under store_lock */
/* Garbage-collect the pidns init cache.  Rate-limited: runs at most once per
 * PURGE_SECS (tracked in the function-static last_prune).  Entries whose
 * lastcheck is older than 2*PURGE_SECS are unlinked from their bucket and
 * removed (the free of the removed node is in elided lines). */
165 static void prune_initpid_store(void)
167 static long int last_prune
= 0;
168 struct pidns_init_store
*e
, *prev
, *delme
;
169 long int now
, threshold
;
173 last_prune
= time(NULL
);
// Not yet time to prune again — bail out early.
177 if (now
< last_prune
+ PURGE_SECS
)
180 fprintf(stderr
, "pruning\n");
// Anything not revalidated within 2*PURGE_SECS is considered dead.
183 threshold
= now
- 2 * PURGE_SECS
;
185 for (i
= 0; i
< PIDNS_HASH_SIZE
; i
++) {
// Walk each bucket chain, keeping prev so unlinking works mid-chain.
186 for (prev
= NULL
, e
= pidns_hash_table
[i
]; e
; ) {
187 if (e
->lastcheck
< threshold
) {
189 fprintf(stderr
, "Removing cached entry for %d\n", e
->initpid
);
// Unlink: either from the middle of the chain (via prev)...
193 prev
->next
= e
->next
;
// ...or from the head of the bucket.
195 pidns_hash_table
[i
] = e
->next
;
206 /* Must be called under store_lock */
/* Insert a new cache entry recording that @pid is init for the pid namespace
 * described by @sb (stat of the nsfd inode).  Records the ctime of
 * /proc/<pid> so later validation can detect pid reuse, and pushes the entry
 * onto the front of its hash bucket.
 * NOTE(review): the malloc() result check and the assignment of e->ino /
 * e->initpid fall in elided lines — confirm they are present upstream. */
207 static void save_initpid(struct stat
*sb
, pid_t pid
)
209 struct pidns_init_store
*e
;
215 fprintf(stderr
, "save_initpid: adding entry for %d\n", pid
);
217 snprintf(fpath
, 100, "/proc/%d", pid
);
218 if (stat(fpath
, &procsb
) < 0)
221 e
= malloc(sizeof(*e
));
// Remember creation time of /proc/<pid> for staleness detection.
225 e
->ctime
= procsb
.st_ctime
;
// Push onto the head of the hash bucket.
227 e
->next
= pidns_hash_table
[h
];
228 e
->lastcheck
= time(NULL
);
229 pidns_hash_table
[h
] = e
;
233 * Given the stat(2) info for a nsfd pid inode, lookup the init_pid_store
234 * entry for the inode number and creation time. Verify that the init pid
235 * is still valid. If not, remove it. Return the entry if valid, NULL
237 * Must be called under store_lock
239 static struct pidns_init_store
*lookup_verify_initpid(struct stat
*sb
)
241 int h
= HASH(sb
->st_ino
);
242 struct pidns_init_store
*e
= pidns_hash_table
[h
];
// NOTE(review): the chain walk (advancing e, and the remove_initpid call on
// a stale hit) is in elided lines; only the match + revalidate path is
// visible here.
245 if (e
->ino
== sb
->st_ino
) {
246 if (initpid_still_valid(e
, sb
)) {
// Refresh lastcheck so prune_initpid_store keeps this entry alive.
247 e
->lastcheck
= time(NULL
);
259 static int is_dir(const char *path
)
262 int ret
= stat(path
, &statbuf
);
263 if (ret
== 0 && S_ISDIR(statbuf
.st_mode
))
268 static char *must_copy_string(const char *str
)
280 static inline void drop_trailing_newlines(char *s
)
284 for (l
=strlen(s
); l
>0 && s
[l
-1] == '\n'; l
--)
288 #define BATCH_SIZE 50
/* Grow *mem in BATCH_SIZE-byte batches so repeated appends don't realloc on
 * every call: only reallocates when the batch count for @newlen exceeds the
 * batch count for @oldlen (or when *mem is still NULL).
 * NOTE(review): the handling of the realloc() result (failure check and
 * assignment back to *mem) is in elided lines — confirm tmp is checked
 * before use. */
289 static void dorealloc(char **mem
, size_t oldlen
, size_t newlen
)
291 int newbatches
= (newlen
/ BATCH_SIZE
) + 1;
292 int oldbatches
= (oldlen
/ BATCH_SIZE
) + 1;
294 if (!*mem
|| newbatches
> oldbatches
) {
297 tmp
= realloc(*mem
, newbatches
* BATCH_SIZE
);
302 static void append_line(char **contents
, size_t *len
, char *line
, ssize_t linelen
)
304 size_t newlen
= *len
+ linelen
;
305 dorealloc(contents
, *len
, newlen
+ 1);
306 memcpy(*contents
+ *len
, line
, linelen
+1);
310 static char *slurp_file(const char *from
)
313 char *contents
= NULL
;
314 FILE *f
= fopen(from
, "r");
315 size_t len
= 0, fulllen
= 0;
321 while ((linelen
= getline(&line
, &len
, f
)) != -1) {
322 append_line(&contents
, &fulllen
, line
, linelen
);
327 drop_trailing_newlines(contents
);
332 static bool write_string(const char *fnam
, const char *string
)
337 if (!(f
= fopen(fnam
, "w")))
339 len
= strlen(string
);
340 ret
= fwrite(string
, 1, len
, f
);
342 fprintf(stderr
, "Error writing to file: %s\n", strerror(errno
));
347 fprintf(stderr
, "Error writing to file: %s\n", strerror(errno
));
360 static bool store_hierarchy(char *stridx
, char *h
)
362 if (num_hierarchies
% ALLOC_NUM
== 0) {
363 size_t n
= (num_hierarchies
/ ALLOC_NUM
) + 1;
365 char **tmp
= realloc(hierarchies
, n
* sizeof(char *));
367 fprintf(stderr
, "Out of memory\n");
373 hierarchies
[num_hierarchies
++] = must_copy_string(h
);
377 static void print_subsystems(void)
381 fprintf(stderr
, "hierarchies:\n");
382 for (i
= 0; i
< num_hierarchies
; i
++) {
384 fprintf(stderr
, " %d: %s\n", i
, hierarchies
[i
]);
/* Return true iff @needle appears as a whole element of the comma-separated
 * list @haystack (e.g. "cpu" matches "cpu,cpuacct" but not "cpuset").
 * Walks comma-delimited segments, then compares the final segment with a
 * plain strcmp.  NOTE(review): index() is the legacy BSD name for strchr()
 * (removed from POSIX.1-2008); consider strchr for portability. */
388 static bool in_comma_list(const char *needle
, const char *haystack
)
390 const char *s
= haystack
, *e
;
391 size_t nlen
= strlen(needle
);
// Check each comma-terminated segment; the length test against nlen
// (partially elided here) guards against prefix-only matches.
393 while (*s
&& (e
= index(s
, ','))) {
398 if (strncmp(needle
, s
, nlen
) == 0)
// Final segment (no trailing comma): exact comparison.
402 if (strcmp(needle
, s
) == 0)
407 /* do we need to do any massaging here? I'm not sure... */
408 static char *find_mounted_controller(const char *controller
)
412 for (i
= 0; i
< num_hierarchies
; i
++) {
415 if (strcmp(hierarchies
[i
], controller
) == 0)
416 return hierarchies
[i
];
417 if (in_comma_list(controller
, hierarchies
[i
]))
418 return hierarchies
[i
];
424 bool cgfs_set_value(const char *controller
, const char *cgroup
, const char *file
,
428 char *fnam
, *tmpc
= find_mounted_controller(controller
);
432 /* BASEDIR / tmpc / cgroup / file \0 */
433 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(cgroup
) + strlen(file
) + 4;
435 snprintf(fnam
, len
, "%s/%s/%s/%s", BASEDIR
, tmpc
, cgroup
, file
);
437 return write_string(fnam
, value
);
440 // Chown all the files in the cgroup directory. We do this when we create
441 // a cgroup on behalf of a user.
442 static void chown_all_cgroup_files(const char *dirname
, uid_t uid
, gid_t gid
)
444 struct dirent dirent
, *direntp
;
445 char path
[MAXPATHLEN
];
450 len
= strlen(dirname
);
451 if (len
>= MAXPATHLEN
) {
452 fprintf(stderr
, "chown_all_cgroup_files: pathname too long: %s\n", dirname
);
456 d
= opendir(dirname
);
458 fprintf(stderr
, "chown_all_cgroup_files: failed to open %s\n", dirname
);
462 while (readdir_r(d
, &dirent
, &direntp
) == 0 && direntp
) {
463 if (!strcmp(direntp
->d_name
, ".") || !strcmp(direntp
->d_name
, ".."))
465 ret
= snprintf(path
, MAXPATHLEN
, "%s/%s", dirname
, direntp
->d_name
);
466 if (ret
< 0 || ret
>= MAXPATHLEN
) {
467 fprintf(stderr
, "chown_all_cgroup_files: pathname too long under %s\n", dirname
);
470 if (chown(path
, uid
, gid
) < 0)
471 fprintf(stderr
, "Failed to chown file %s to %u:%u", path
, uid
, gid
);
476 int cgfs_create(const char *controller
, const char *cg
, uid_t uid
, gid_t gid
)
479 char *dirnam
, *tmpc
= find_mounted_controller(controller
);
483 /* BASEDIR / tmpc / cg \0 */
484 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(cg
) + 3;
485 dirnam
= alloca(len
);
486 snprintf(dirnam
, len
, "%s/%s/%s", BASEDIR
,tmpc
, cg
);
488 if (mkdir(dirnam
, 0755) < 0)
491 if (uid
== 0 && gid
== 0)
494 if (chown(dirnam
, uid
, gid
) < 0)
497 chown_all_cgroup_files(dirnam
, uid
, gid
);
502 static bool recursive_rmdir(const char *dirname
)
504 struct dirent dirent
, *direntp
;
507 char pathname
[MAXPATHLEN
];
509 dir
= opendir(dirname
);
512 fprintf(stderr
, "%s: failed to open %s: %s\n", __func__
, dirname
, strerror(errno
));
517 while (!readdir_r(dir
, &dirent
, &direntp
)) {
524 if (!strcmp(direntp
->d_name
, ".") ||
525 !strcmp(direntp
->d_name
, ".."))
528 rc
= snprintf(pathname
, MAXPATHLEN
, "%s/%s", dirname
, direntp
->d_name
);
529 if (rc
< 0 || rc
>= MAXPATHLEN
) {
530 fprintf(stderr
, "pathname too long\n");
534 ret
= lstat(pathname
, &mystat
);
537 fprintf(stderr
, "%s: failed to stat %s: %s\n", __func__
, pathname
, strerror(errno
));
541 if (S_ISDIR(mystat
.st_mode
)) {
542 if (!recursive_rmdir(pathname
)) {
544 fprintf(stderr
, "Error removing %s\n", pathname
);
551 if (closedir(dir
) < 0) {
552 fprintf(stderr
, "%s: failed to close directory %s: %s\n", __func__
, dirname
, strerror(errno
));
556 if (rmdir(dirname
) < 0) {
558 fprintf(stderr
, "%s: failed to delete %s: %s\n", __func__
, dirname
, strerror(errno
));
566 bool cgfs_remove(const char *controller
, const char *cg
)
569 char *dirnam
, *tmpc
= find_mounted_controller(controller
);
573 /* BASEDIR / tmpc / cg \0 */
574 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(cg
) + 3;
575 dirnam
= alloca(len
);
576 snprintf(dirnam
, len
, "%s/%s/%s", BASEDIR
,tmpc
, cg
);
577 return recursive_rmdir(dirnam
);
580 bool cgfs_chmod_file(const char *controller
, const char *file
, mode_t mode
)
583 char *pathname
, *tmpc
= find_mounted_controller(controller
);
587 /* BASEDIR / tmpc / file \0 */
588 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(file
) + 3;
589 pathname
= alloca(len
);
590 snprintf(pathname
, len
, "%s/%s/%s", BASEDIR
, tmpc
, file
);
591 if (chmod(pathname
, mode
) < 0)
/* Chown both pid-membership files of a cgroup directory ("tasks" and
 * "cgroup.procs") to @uid:@gid.  Returns non-zero / error on the first
 * failed chown (return statements fall in elided lines). */
596 static int chown_tasks_files(const char *dirname
, uid_t uid
, gid_t gid
)
// Size the buffer for the longer of the two names ("/cgroup.procs"), so the
// same buffer/len pair is safe for "%s/tasks" as well.
601 len
= strlen(dirname
) + strlen("/cgroup.procs") + 1;
603 snprintf(fname
, len
, "%s/tasks", dirname
);
604 if (chown(fname
, uid
, gid
) != 0)
606 snprintf(fname
, len
, "%s/cgroup.procs", dirname
);
607 if (chown(fname
, uid
, gid
) != 0)
612 int cgfs_chown_file(const char *controller
, const char *file
, uid_t uid
, gid_t gid
)
615 char *pathname
, *tmpc
= find_mounted_controller(controller
);
619 /* BASEDIR / tmpc / file \0 */
620 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(file
) + 3;
621 pathname
= alloca(len
);
622 snprintf(pathname
, len
, "%s/%s/%s", BASEDIR
, tmpc
, file
);
623 if (chown(pathname
, uid
, gid
) < 0)
626 if (is_dir(pathname
))
627 // like cgmanager did, we want to chown the tasks file as well
628 return chown_tasks_files(pathname
, uid
, gid
);
633 FILE *open_pids_file(const char *controller
, const char *cgroup
)
636 char *pathname
, *tmpc
= find_mounted_controller(controller
);
640 /* BASEDIR / tmpc / cgroup / "cgroup.procs" \0 */
641 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(cgroup
) + 4 + strlen("cgroup.procs");
642 pathname
= alloca(len
);
643 snprintf(pathname
, len
, "%s/%s/%s/cgroup.procs", BASEDIR
, tmpc
, cgroup
);
644 return fopen(pathname
, "w");
647 static bool cgfs_iterate_cgroup(const char *controller
, const char *cgroup
, bool directories
,
648 void ***list
, size_t typesize
,
649 void* (*iterator
)(const char*, const char*, const char*))
652 char *dirname
, *tmpc
= find_mounted_controller(controller
);
653 char pathname
[MAXPATHLEN
];
654 size_t sz
= 0, asz
= 0;
655 struct dirent dirent
, *direntp
;
663 /* BASEDIR / tmpc / cgroup \0 */
664 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(cgroup
) + 3;
665 dirname
= alloca(len
);
666 snprintf(dirname
, len
, "%s/%s/%s", BASEDIR
, tmpc
, cgroup
);
668 dir
= opendir(dirname
);
672 while (!readdir_r(dir
, &dirent
, &direntp
)) {
679 if (!strcmp(direntp
->d_name
, ".") ||
680 !strcmp(direntp
->d_name
, ".."))
683 rc
= snprintf(pathname
, MAXPATHLEN
, "%s/%s", dirname
, direntp
->d_name
);
684 if (rc
< 0 || rc
>= MAXPATHLEN
) {
685 fprintf(stderr
, "%s: pathname too long under %s\n", __func__
, dirname
);
689 ret
= lstat(pathname
, &mystat
);
691 fprintf(stderr
, "%s: failed to stat %s: %s\n", __func__
, pathname
, strerror(errno
));
694 if ((!directories
&& !S_ISREG(mystat
.st_mode
)) ||
695 (directories
&& !S_ISDIR(mystat
.st_mode
)))
702 tmp
= realloc(*list
, asz
* typesize
);
706 (*list
)[sz
] = (*iterator
)(controller
, cgroup
, direntp
->d_name
);
707 (*list
)[sz
+1] = NULL
;
710 if (closedir(dir
) < 0) {
711 fprintf(stderr
, "%s: failed closedir for %s: %s\n", __func__
, dirname
, strerror(errno
));
717 static void *make_children_list_entry(const char *controller
, const char *cgroup
, const char *dir_entry
)
721 dup
= strdup(dir_entry
);
726 bool cgfs_list_children(const char *controller
, const char *cgroup
, char ***list
)
728 return cgfs_iterate_cgroup(controller
, cgroup
, true, (void***)list
, sizeof(*list
), &make_children_list_entry
);
731 void free_key(struct cgfs_files
*k
)
739 void free_keys(struct cgfs_files
**keys
)
745 for (i
= 0; keys
[i
]; i
++) {
751 bool cgfs_get_value(const char *controller
, const char *cgroup
, const char *file
, char **value
)
754 char *fnam
, *tmpc
= find_mounted_controller(controller
);
758 /* BASEDIR / tmpc / cgroup / file \0 */
759 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(cgroup
) + strlen(file
) + 4;
761 snprintf(fnam
, len
, "%s/%s/%s/%s", BASEDIR
, tmpc
, cgroup
, file
);
763 *value
= slurp_file(fnam
);
764 return *value
!= NULL
;
767 struct cgfs_files
*cgfs_get_key(const char *controller
, const char *cgroup
, const char *file
)
770 char *fnam
, *tmpc
= find_mounted_controller(controller
);
772 struct cgfs_files
*newkey
;
778 if (file
&& *file
== '/')
781 if (file
&& index(file
, '/'))
784 /* BASEDIR / tmpc / cgroup / file \0 */
785 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(cgroup
) + 3;
787 len
+= strlen(file
) + 1;
789 snprintf(fnam
, len
, "%s/%s/%s%s%s", BASEDIR
, tmpc
, cgroup
,
790 file
? "/" : "", file
? file
: "");
792 ret
= stat(fnam
, &sb
);
797 newkey
= malloc(sizeof(struct cgfs_files
));
800 newkey
->name
= must_copy_string(file
);
801 else if (rindex(cgroup
, '/'))
802 newkey
->name
= must_copy_string(rindex(cgroup
, '/'));
804 newkey
->name
= must_copy_string(cgroup
);
805 newkey
->uid
= sb
.st_uid
;
806 newkey
->gid
= sb
.st_gid
;
807 newkey
->mode
= sb
.st_mode
;
812 static void *make_key_list_entry(const char *controller
, const char *cgroup
, const char *dir_entry
)
814 struct cgfs_files
*entry
= cgfs_get_key(controller
, cgroup
, dir_entry
);
816 fprintf(stderr
, "%s: Error getting files under %s:%s\n",
817 __func__
, controller
, cgroup
);
822 bool cgfs_list_keys(const char *controller
, const char *cgroup
, struct cgfs_files
***keys
)
824 return cgfs_iterate_cgroup(controller
, cgroup
, false, (void***)keys
, sizeof(*keys
), &make_key_list_entry
);
827 bool is_child_cgroup(const char *controller
, const char *cgroup
, const char *f
)
829 char *fnam
, *tmpc
= find_mounted_controller(controller
);
835 /* BASEDIR / tmpc / cgroup / f \0 */
836 len
= strlen(BASEDIR
) + strlen(tmpc
) + strlen(cgroup
) + strlen(f
) + 4;
838 snprintf(fnam
, len
, "%s/%s/%s/%s", BASEDIR
, tmpc
, cgroup
, f
);
840 ret
= stat(fnam
, &sb
);
841 if (ret
< 0 || !S_ISDIR(sb
.st_mode
))
846 #define SEND_CREDS_OK 0
847 #define SEND_CREDS_NOTSK 1
848 #define SEND_CREDS_FAIL 2
849 static bool recv_creds(int sock
, struct ucred
*cred
, char *v
);
850 static int wait_for_pid(pid_t pid
);
851 static int send_creds(int sock
, struct ucred
*cred
, char v
, bool pingfirst
);
852 static int send_creds_clone_wrapper(void *arg
);
855 * clone a task which switches to @task's namespace and writes '1'.
856 * over a unix sock so we can read the task's reaper's pid in our
859 * Note: glibc's fork() does not respect pidns, which can lead to failed
860 * assertions inside glibc (and thus failed forks) if the child's pid in
861 * the pidns and the parent pid outside are identical. Using clone prevents
864 static void write_task_init_pid_exit(int sock
, pid_t target
)
869 size_t stack_size
= sysconf(_SC_PAGESIZE
);
870 void *stack
= alloca(stack_size
);
872 ret
= snprintf(fnam
, sizeof(fnam
), "/proc/%d/ns/pid", (int)target
);
873 if (ret
< 0 || ret
>= sizeof(fnam
))
876 fd
= open(fnam
, O_RDONLY
);
878 perror("write_task_init_pid_exit open of ns/pid");
882 perror("write_task_init_pid_exit setns 1");
886 pid
= clone(send_creds_clone_wrapper
, stack
+ stack_size
, SIGCHLD
, &sock
);
890 if (!wait_for_pid(pid
))
896 static int send_creds_clone_wrapper(void *arg
) {
899 int sock
= *(int *)arg
;
901 /* we are the child */
906 if (send_creds(sock
, &cred
, v
, true) != SEND_CREDS_OK
)
911 static pid_t
get_init_pid_for_task(pid_t task
)
919 if (socketpair(AF_UNIX
, SOCK_DGRAM
, 0, sock
) < 0) {
920 perror("socketpair");
929 write_task_init_pid_exit(sock
[0], task
);
933 if (!recv_creds(sock
[1], &cred
, &v
))
945 static pid_t
lookup_initpid_in_store(pid_t qpid
)
949 struct pidns_init_store
*e
;
952 snprintf(fnam
, 100, "/proc/%d/ns/pid", qpid
);
954 if (stat(fnam
, &sb
) < 0)
956 e
= lookup_verify_initpid(&sb
);
961 answer
= get_init_pid_for_task(qpid
);
963 save_initpid(&sb
, answer
);
966 /* we prune at end in case we are returning
967 * the value we were about to return */
968 prune_initpid_store();
973 static int wait_for_pid(pid_t pid
)
981 ret
= waitpid(pid
, &status
, 0);
989 if (!WIFEXITED(status
) || WEXITSTATUS(status
) != 0)
996 * append pid to *src.
997 * src: a pointer to a char* in which to append the pid.
998 * sz: the number of characters printed so far, minus trailing \0.
999 * asz: the allocated size so far
1000 * pid: the pid to append
1002 static void must_strcat_pid(char **src
, size_t *sz
, size_t *asz
, pid_t pid
)
1006 int tmplen
= sprintf(tmp
, "%d\n", (int)pid
);
1008 if (!*src
|| tmplen
+ *sz
+ 1 >= *asz
) {
1011 tmp
= realloc(*src
, *asz
+ BUF_RESERVE_SIZE
);
1014 *asz
+= BUF_RESERVE_SIZE
;
1016 memcpy((*src
) +*sz
, tmp
, tmplen
+1); /* include the \0 */
1021 * Given a open file * to /proc/pid/{u,g}id_map, and an id
1022 * valid in the caller's namespace, return the id mapped into
1024 * Returns the mapped id, or -1 on error.
1027 convert_id_to_ns(FILE *idfile
, unsigned int in_id
)
1029 unsigned int nsuid
, // base id for a range in the idfile's namespace
1030 hostuid
, // base id for a range in the caller's namespace
1031 count
; // number of ids in this range
1035 fseek(idfile
, 0L, SEEK_SET
);
1036 while (fgets(line
, 400, idfile
)) {
1037 ret
= sscanf(line
, "%u %u %u\n", &nsuid
, &hostuid
, &count
);
1040 if (hostuid
+ count
< hostuid
|| nsuid
+ count
< nsuid
) {
1042 * uids wrapped around - unexpected as this is a procfile,
1045 fprintf(stderr
, "pid wrapparound at entry %u %u %u in %s\n",
1046 nsuid
, hostuid
, count
, line
);
1049 if (hostuid
<= in_id
&& hostuid
+count
> in_id
) {
1051 * now since hostuid <= in_id < hostuid+count, and
1052 * hostuid+count and nsuid+count do not wrap around,
1053 * we know that nsuid+(in_id-hostuid) which must be
1054 * less that nsuid+(count) must not wrap around
1056 return (in_id
- hostuid
) + nsuid
;
1065 * for is_privileged_over,
1066 * specify whether we require the calling uid to be root in his
1069 #define NS_ROOT_REQD true
1070 #define NS_ROOT_OPT false
1074 static bool is_privileged_over(pid_t pid
, uid_t uid
, uid_t victim
, bool req_ns_root
)
1076 char fpath
[PROCLEN
];
1078 bool answer
= false;
1081 if (victim
== -1 || uid
== -1)
1085 * If the request is one not requiring root in the namespace,
1086 * then having the same uid suffices. (i.e. uid 1000 has write
1087 * access to files owned by uid 1000
1089 if (!req_ns_root
&& uid
== victim
)
1092 ret
= snprintf(fpath
, PROCLEN
, "/proc/%d/uid_map", pid
);
1093 if (ret
< 0 || ret
>= PROCLEN
)
1095 FILE *f
= fopen(fpath
, "r");
1099 /* if caller's not root in his namespace, reject */
1100 nsuid
= convert_id_to_ns(f
, uid
);
1105 * If victim is not mapped into caller's ns, reject.
1106 * XXX I'm not sure this check is needed given that fuse
1107 * will be sending requests where the vfs has converted
1109 nsuid
= convert_id_to_ns(f
, victim
);
1120 static bool perms_include(int fmode
, mode_t req_mode
)
1124 switch (req_mode
& O_ACCMODE
) {
1132 r
= S_IROTH
| S_IWOTH
;
1137 return ((fmode
& r
) == r
);
1143 * querycg is /a/b/c/d/e
1146 static char *get_next_cgroup_dir(const char *taskcg
, const char *querycg
)
1150 if (strlen(taskcg
) <= strlen(querycg
)) {
1151 fprintf(stderr
, "%s: I was fed bad input\n", __func__
);
1155 if (strcmp(querycg
, "/") == 0)
1156 start
= strdup(taskcg
+ 1);
1158 start
= strdup(taskcg
+ strlen(querycg
) + 1);
1161 end
= strchr(start
, '/');
1167 static void stripnewline(char *x
)
1169 size_t l
= strlen(x
);
1170 if (l
&& x
[l
-1] == '\n')
1174 static char *get_pid_cgroup(pid_t pid
, const char *contrl
)
1178 char *answer
= NULL
;
1182 const char *h
= find_mounted_controller(contrl
);
1186 ret
= snprintf(fnam
, PROCLEN
, "/proc/%d/cgroup", pid
);
1187 if (ret
< 0 || ret
>= PROCLEN
)
1189 if (!(f
= fopen(fnam
, "r")))
1192 while (getline(&line
, &len
, f
) != -1) {
1196 c1
= strchr(line
, ':');
1200 c2
= strchr(c1
, ':');
1204 if (strcmp(c1
, h
) != 0)
1209 answer
= strdup(c2
);
1221 * check whether a fuse context may access a cgroup dir or file
1223 * If file is not null, it is a cgroup file to check under cg.
1224 * If file is null, then we are checking perms on cg itself.
1226 * For files we can check the mode of the list_keys result.
1227 * For cgroups, we must make assumptions based on the files under the
1228 * cgroup, because cgmanager doesn't tell us ownership/perms of cgroups
1231 static bool fc_may_access(struct fuse_context
*fc
, const char *contrl
, const char *cg
, const char *file
, mode_t mode
)
1233 struct cgfs_files
*k
= NULL
;
1236 k
= cgfs_get_key(contrl
, cg
, file
);
1240 if (is_privileged_over(fc
->pid
, fc
->uid
, k
->uid
, NS_ROOT_OPT
)) {
1241 if (perms_include(k
->mode
>> 6, mode
)) {
1246 if (fc
->gid
== k
->gid
) {
1247 if (perms_include(k
->mode
>> 3, mode
)) {
1252 ret
= perms_include(k
->mode
, mode
);
1259 #define INITSCOPE "/init.scope"
1260 static void prune_init_slice(char *cg
)
1263 size_t cg_len
= strlen(cg
), initscope_len
= strlen(INITSCOPE
);
1265 if (cg_len
< initscope_len
)
1268 point
= cg
+ cg_len
- initscope_len
;
1269 if (strcmp(point
, INITSCOPE
) == 0) {
1278 * If pid is in /a/b/c/d, he may only act on things under cg=/a/b/c/d.
1279 * If pid is in /a, he may act on /a/b, but not on /b.
1280 * if the answer is false and nextcg is not NULL, then *nextcg will point
1281 * to a string containing the next cgroup directory under cg, which must be
1282 * freed by the caller.
1284 static bool caller_is_in_ancestor(pid_t pid
, const char *contrl
, const char *cg
, char **nextcg
)
1286 bool answer
= false;
1287 char *c2
= get_pid_cgroup(pid
, contrl
);
1292 prune_init_slice(c2
);
1295 * callers pass in '/' for root cgroup, otherwise they pass
1296 * in a cgroup without leading '/'
1298 linecmp
= *cg
== '/' ? c2
: c2
+1;
1299 if (strncmp(linecmp
, cg
, strlen(linecmp
)) != 0) {
1301 *nextcg
= get_next_cgroup_dir(linecmp
, cg
);
1313 * If pid is in /a/b/c, he may see that /a exists, but not /b or /a/c.
1315 static bool caller_may_see_dir(pid_t pid
, const char *contrl
, const char *cg
)
1317 bool answer
= false;
1319 size_t target_len
, task_len
;
1321 if (strcmp(cg
, "/") == 0)
1324 c2
= get_pid_cgroup(pid
, contrl
);
1327 prune_init_slice(c2
);
1330 target_len
= strlen(cg
);
1331 task_len
= strlen(task_cg
);
1332 if (task_len
== 0) {
1333 /* Task is in the root cg, it can see everything. This case is
1334 * not handled by the strcmps below, since they test for the
1335 * last /, but that is the first / that we've chopped off
1341 if (strcmp(cg
, task_cg
) == 0) {
1345 if (target_len
< task_len
) {
1346 /* looking up a parent dir */
1347 if (strncmp(task_cg
, cg
, target_len
) == 0 && task_cg
[target_len
] == '/')
1351 if (target_len
> task_len
) {
1352 /* looking up a child dir */
1353 if (strncmp(task_cg
, cg
, task_len
) == 0 && cg
[task_len
] == '/')
1364 * given /cgroup/freezer/a/b, return "freezer".
1365 * the returned char* should NOT be freed.
1367 static char *pick_controller_from_path(struct fuse_context
*fc
, const char *path
)
1370 char *contr
, *slash
;
1372 if (strlen(path
) < 9)
1374 if (*(path
+7) != '/')
1377 contr
= strdupa(p1
);
1380 slash
= strstr(contr
, "/");
1385 for (i
= 0; i
< num_hierarchies
; i
++) {
1386 if (hierarchies
[i
] && strcmp(hierarchies
[i
], contr
) == 0)
1387 return hierarchies
[i
];
1393 * Find the start of cgroup in /cgroup/controller/the/cgroup/path
1394 * Note that the returned value may include files (keynames) etc
1396 static const char *find_cgroup_in_path(const char *path
)
1400 if (strlen(path
) < 9)
1402 p1
= strstr(path
+8, "/");
1409 * split the last path element from the path in @cg.
1410 * @dir is newly allocated and should be freed, @last not
1412 static void get_cgdir_and_path(const char *cg
, char **dir
, char **last
)
1419 *last
= strrchr(cg
, '/');
1424 p
= strrchr(*dir
, '/');
1429 * FUSE ops for /cgroup
1432 int cg_getattr(const char *path
, struct stat
*sb
)
1434 struct timespec now
;
1435 struct fuse_context
*fc
= fuse_get_context();
1436 char * cgdir
= NULL
;
1437 char *last
= NULL
, *path1
, *path2
;
1438 struct cgfs_files
*k
= NULL
;
1440 const char *controller
= NULL
;
1447 memset(sb
, 0, sizeof(struct stat
));
1449 if (clock_gettime(CLOCK_REALTIME
, &now
) < 0)
1452 sb
->st_uid
= sb
->st_gid
= 0;
1453 sb
->st_atim
= sb
->st_mtim
= sb
->st_ctim
= now
;
1456 if (strcmp(path
, "/cgroup") == 0) {
1457 sb
->st_mode
= S_IFDIR
| 00755;
1462 controller
= pick_controller_from_path(fc
, path
);
1465 cgroup
= find_cgroup_in_path(path
);
1467 /* this is just /cgroup/controller, return it as a dir */
1468 sb
->st_mode
= S_IFDIR
| 00755;
1473 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
1483 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
1486 /* check that cgcopy is either a child cgroup of cgdir, or listed in its keys.
1487 * Then check that caller's cgroup is under path if last is a child
1488 * cgroup, or cgdir if last is a file */
1490 if (is_child_cgroup(controller
, path1
, path2
)) {
1491 if (!caller_may_see_dir(initpid
, controller
, cgroup
)) {
1495 if (!caller_is_in_ancestor(initpid
, controller
, cgroup
, NULL
)) {
1496 /* this is just /cgroup/controller, return it as a dir */
1497 sb
->st_mode
= S_IFDIR
| 00555;
1502 if (!fc_may_access(fc
, controller
, cgroup
, NULL
, O_RDONLY
)) {
1507 // get uid, gid, from '/tasks' file and make up a mode
1508 // That is a hack, until cgmanager gains a GetCgroupPerms fn.
1509 sb
->st_mode
= S_IFDIR
| 00755;
1510 k
= cgfs_get_key(controller
, cgroup
, NULL
);
1512 sb
->st_uid
= sb
->st_gid
= 0;
1514 sb
->st_uid
= k
->uid
;
1515 sb
->st_gid
= k
->gid
;
1523 if ((k
= cgfs_get_key(controller
, path1
, path2
)) != NULL
) {
1524 sb
->st_mode
= S_IFREG
| k
->mode
;
1526 sb
->st_uid
= k
->uid
;
1527 sb
->st_gid
= k
->gid
;
1530 if (!caller_is_in_ancestor(initpid
, controller
, path1
, NULL
)) {
1534 if (!fc_may_access(fc
, controller
, path1
, path2
, O_RDONLY
)) {
1547 int cg_opendir(const char *path
, struct fuse_file_info
*fi
)
1549 struct fuse_context
*fc
= fuse_get_context();
1551 struct file_info
*dir_info
;
1552 char *controller
= NULL
;
1557 if (strcmp(path
, "/cgroup") == 0) {
1561 // return list of keys for the controller, and list of child cgroups
1562 controller
= pick_controller_from_path(fc
, path
);
1566 cgroup
= find_cgroup_in_path(path
);
1568 /* this is just /cgroup/controller, return its contents */
1573 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
1577 if (!caller_may_see_dir(initpid
, controller
, cgroup
))
1579 if (!fc_may_access(fc
, controller
, cgroup
, NULL
, O_RDONLY
))
1583 /* we'll free this at cg_releasedir */
1584 dir_info
= malloc(sizeof(*dir_info
));
1587 dir_info
->controller
= must_copy_string(controller
);
1588 dir_info
->cgroup
= must_copy_string(cgroup
);
1589 dir_info
->type
= LXC_TYPE_CGDIR
;
1590 dir_info
->buf
= NULL
;
1591 dir_info
->file
= NULL
;
1592 dir_info
->buflen
= 0;
1594 fi
->fh
= (unsigned long)dir_info
;
1598 int cg_readdir(const char *path
, void *buf
, fuse_fill_dir_t filler
, off_t offset
,
1599 struct fuse_file_info
*fi
)
1601 struct file_info
*d
= (struct file_info
*)fi
->fh
;
1602 struct cgfs_files
**list
= NULL
;
1604 char *nextcg
= NULL
;
1605 struct fuse_context
*fc
= fuse_get_context();
1606 char **clist
= NULL
;
1608 if (d
->type
!= LXC_TYPE_CGDIR
) {
1609 fprintf(stderr
, "Internal error: file cache info used in readdir\n");
1612 if (!d
->cgroup
&& !d
->controller
) {
1613 // ls /var/lib/lxcfs/cgroup - just show list of controllers
1616 for (i
= 0; i
< num_hierarchies
; i
++) {
1617 if (hierarchies
[i
] && filler(buf
, hierarchies
[i
], NULL
, 0) != 0) {
1624 if (!cgfs_list_keys(d
->controller
, d
->cgroup
, &list
)) {
1625 // not a valid cgroup
1630 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
1633 if (!caller_is_in_ancestor(initpid
, d
->controller
, d
->cgroup
, &nextcg
)) {
1635 ret
= filler(buf
, nextcg
, NULL
, 0);
1646 for (i
= 0; list
[i
]; i
++) {
1647 if (filler(buf
, list
[i
]->name
, NULL
, 0) != 0) {
1653 // now get the list of child cgroups
1655 if (!cgfs_list_children(d
->controller
, d
->cgroup
, &clist
)) {
1660 for (i
= 0; clist
[i
]; i
++) {
1661 if (filler(buf
, clist
[i
], NULL
, 0) != 0) {
1672 for (i
= 0; clist
[i
]; i
++)
1679 static void do_release_file_info(struct fuse_file_info
*fi
)
1681 struct file_info
*f
= (struct file_info
*)fi
->fh
;
1688 free(f
->controller
);
1689 f
->controller
= NULL
;
1699 int cg_releasedir(const char *path
, struct fuse_file_info
*fi
)
1701 do_release_file_info(fi
);
1705 int cg_open(const char *path
, struct fuse_file_info
*fi
)
1708 char *last
= NULL
, *path1
, *path2
, * cgdir
= NULL
, *controller
;
1709 struct cgfs_files
*k
= NULL
;
1710 struct file_info
*file_info
;
1711 struct fuse_context
*fc
= fuse_get_context();
1717 controller
= pick_controller_from_path(fc
, path
);
1720 cgroup
= find_cgroup_in_path(path
);
1724 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
1733 k
= cgfs_get_key(controller
, path1
, path2
);
1740 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
1743 if (!caller_may_see_dir(initpid
, controller
, path1
)) {
1747 if (!fc_may_access(fc
, controller
, path1
, path2
, fi
->flags
)) {
1752 /* we'll free this at cg_release */
1753 file_info
= malloc(sizeof(*file_info
));
1758 file_info
->controller
= must_copy_string(controller
);
1759 file_info
->cgroup
= must_copy_string(path1
);
1760 file_info
->file
= must_copy_string(path2
);
1761 file_info
->type
= LXC_TYPE_CGFILE
;
1762 file_info
->buf
= NULL
;
1763 file_info
->buflen
= 0;
1765 fi
->fh
= (unsigned long)file_info
;
1773 int cg_access(const char *path
, int mode
)
1776 char *last
= NULL
, *path1
, *path2
, * cgdir
= NULL
, *controller
;
1777 struct cgfs_files
*k
= NULL
;
1778 struct fuse_context
*fc
= fuse_get_context();
1784 controller
= pick_controller_from_path(fc
, path
);
1787 cgroup
= find_cgroup_in_path(path
);
1789 // access("/sys/fs/cgroup/systemd", mode) - rx allowed, w not
1790 if ((mode
& W_OK
) == 0)
1795 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
1804 k
= cgfs_get_key(controller
, path1
, path2
);
1806 if ((mode
& W_OK
) == 0)
1814 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
1817 if (!caller_may_see_dir(initpid
, controller
, path1
)) {
1821 if (!fc_may_access(fc
, controller
, path1
, path2
, mode
)) {
1833 int cg_release(const char *path
, struct fuse_file_info
*fi
)
1835 do_release_file_info(fi
);
1839 #define POLLIN_SET ( EPOLLIN | EPOLLHUP | EPOLLRDHUP )
1841 static bool wait_for_sock(int sock
, int timeout
)
1843 struct epoll_event ev
;
1844 int epfd
, ret
, now
, starttime
, deltatime
, saved_errno
;
1846 if ((starttime
= time(NULL
)) < 0)
1849 if ((epfd
= epoll_create(1)) < 0) {
1850 fprintf(stderr
, "Failed to create epoll socket: %m\n");
1854 ev
.events
= POLLIN_SET
;
1856 if (epoll_ctl(epfd
, EPOLL_CTL_ADD
, sock
, &ev
) < 0) {
1857 fprintf(stderr
, "Failed adding socket to epoll: %m\n");
1863 if ((now
= time(NULL
)) < 0) {
1868 deltatime
= (starttime
+ timeout
) - now
;
1869 if (deltatime
< 0) { // timeout
1874 ret
= epoll_wait(epfd
, &ev
, 1, 1000*deltatime
+ 1);
1875 if (ret
< 0 && errno
== EINTR
)
1877 saved_errno
= errno
;
1881 errno
= saved_errno
;
/*
 * Receive up to @len bytes from @sockfd into @buf, waiting at most two
 * seconds for the socket to become readable.  Returns the number of
 * bytes received, or -1 on timeout (or on recv() failure).
 */
static int msgrecv(int sockfd, void *buf, size_t len)
{
	if (wait_for_sock(sockfd, 2))
		return recv(sockfd, buf, len, MSG_DONTWAIT);

	return -1;
}
1894 static int send_creds(int sock
, struct ucred
*cred
, char v
, bool pingfirst
)
1896 struct msghdr msg
= { 0 };
1898 struct cmsghdr
*cmsg
;
1899 char cmsgbuf
[CMSG_SPACE(sizeof(*cred
))];
1904 if (msgrecv(sock
, buf
, 1) != 1) {
1905 fprintf(stderr
, "%s: Error getting reply from server over socketpair\n",
1907 return SEND_CREDS_FAIL
;
1911 msg
.msg_control
= cmsgbuf
;
1912 msg
.msg_controllen
= sizeof(cmsgbuf
);
1914 cmsg
= CMSG_FIRSTHDR(&msg
);
1915 cmsg
->cmsg_len
= CMSG_LEN(sizeof(struct ucred
));
1916 cmsg
->cmsg_level
= SOL_SOCKET
;
1917 cmsg
->cmsg_type
= SCM_CREDENTIALS
;
1918 memcpy(CMSG_DATA(cmsg
), cred
, sizeof(*cred
));
1920 msg
.msg_name
= NULL
;
1921 msg
.msg_namelen
= 0;
1925 iov
.iov_len
= sizeof(buf
);
1929 if (sendmsg(sock
, &msg
, 0) < 0) {
1930 fprintf(stderr
, "%s: failed at sendmsg: %s\n", __func__
,
1933 return SEND_CREDS_NOTSK
;
1934 return SEND_CREDS_FAIL
;
1937 return SEND_CREDS_OK
;
1940 static bool recv_creds(int sock
, struct ucred
*cred
, char *v
)
1942 struct msghdr msg
= { 0 };
1944 struct cmsghdr
*cmsg
;
1945 char cmsgbuf
[CMSG_SPACE(sizeof(*cred
))];
1956 if (setsockopt(sock
, SOL_SOCKET
, SO_PASSCRED
, &optval
, sizeof(optval
)) == -1) {
1957 fprintf(stderr
, "Failed to set passcred: %s\n", strerror(errno
));
1961 if (write(sock
, buf
, 1) != 1) {
1962 fprintf(stderr
, "Failed to start write on scm fd: %s\n", strerror(errno
));
1966 msg
.msg_name
= NULL
;
1967 msg
.msg_namelen
= 0;
1968 msg
.msg_control
= cmsgbuf
;
1969 msg
.msg_controllen
= sizeof(cmsgbuf
);
1972 iov
.iov_len
= sizeof(buf
);
1976 if (!wait_for_sock(sock
, 2)) {
1977 fprintf(stderr
, "Timed out waiting for scm_cred: %s\n",
1981 ret
= recvmsg(sock
, &msg
, MSG_DONTWAIT
);
1983 fprintf(stderr
, "Failed to receive scm_cred: %s\n",
1988 cmsg
= CMSG_FIRSTHDR(&msg
);
1990 if (cmsg
&& cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
)) &&
1991 cmsg
->cmsg_level
== SOL_SOCKET
&&
1992 cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1993 memcpy(cred
, CMSG_DATA(cmsg
), sizeof(*cred
));
2000 struct pid_ns_clone_args
{
2004 int (*wrapped
) (int, pid_t
); // pid_from_ns or pid_to_ns
2008 * pid_ns_clone_wrapper - wraps pid_to_ns or pid_from_ns for usage
2009 * with clone(). This simply writes '1' as ACK back to the parent
2010 * before calling the actual wrapped function.
2012 static int pid_ns_clone_wrapper(void *arg
) {
2013 struct pid_ns_clone_args
* args
= (struct pid_ns_clone_args
*) arg
;
2016 close(args
->cpipe
[0]);
2017 if (write(args
->cpipe
[1], &b
, sizeof(char)) < 0) {
2018 fprintf(stderr
, "%s (child): error on write: %s\n",
2019 __func__
, strerror(errno
));
2021 close(args
->cpipe
[1]);
2022 return args
->wrapped(args
->sock
, args
->tpid
);
2026 * pid_to_ns - reads pids from a ucred over a socket, then writes the
2027 * int value back over the socket. This shifts the pid from the
2028 * sender's pidns into tpid's pidns.
2030 static int pid_to_ns(int sock
, pid_t tpid
)
2035 while (recv_creds(sock
, &cred
, &v
)) {
2038 if (write(sock
, &cred
.pid
, sizeof(pid_t
)) != sizeof(pid_t
))
2046 * pid_to_ns_wrapper: when you setns into a pidns, you yourself remain
2047 * in your old pidns. Only children which you clone will be in the target
2048 * pidns. So the pid_to_ns_wrapper does the setns, then clones a child to
2049 * actually convert pids.
2051 * Note: glibc's fork() does not respect pidns, which can lead to failed
2052 * assertions inside glibc (and thus failed forks) if the child's pid in
2053 * the pidns and the parent pid outside are identical. Using clone prevents
2056 static void pid_to_ns_wrapper(int sock
, pid_t tpid
)
2058 int newnsfd
= -1, ret
, cpipe
[2];
2063 ret
= snprintf(fnam
, sizeof(fnam
), "/proc/%d/ns/pid", tpid
);
2064 if (ret
< 0 || ret
>= sizeof(fnam
))
2066 newnsfd
= open(fnam
, O_RDONLY
);
2069 if (setns(newnsfd
, 0) < 0)
2073 if (pipe(cpipe
) < 0)
2076 struct pid_ns_clone_args args
= {
2080 .wrapped
= &pid_to_ns
2082 size_t stack_size
= sysconf(_SC_PAGESIZE
);
2083 void *stack
= alloca(stack_size
);
2085 cpid
= clone(pid_ns_clone_wrapper
, stack
+ stack_size
, SIGCHLD
, &args
);
2089 // give the child 1 second to be done forking and
2091 if (!wait_for_sock(cpipe
[0], 1))
2093 ret
= read(cpipe
[0], &v
, 1);
2094 if (ret
!= sizeof(char) || v
!= '1')
2097 if (!wait_for_pid(cpid
))
2103 * To read cgroup files with a particular pid, we will setns into the child
2104 * pidns, open a pipe, fork a child - which will be the first to really be in
2105 * the child ns - which does the cgfs_get_value and writes the data to the pipe.
2107 bool do_read_pids(pid_t tpid
, const char *contrl
, const char *cg
, const char *file
, char **d
)
2109 int sock
[2] = {-1, -1};
2110 char *tmpdata
= NULL
;
2112 pid_t qpid
, cpid
= -1;
2113 bool answer
= false;
2116 size_t sz
= 0, asz
= 0;
2118 if (!cgfs_get_value(contrl
, cg
, file
, &tmpdata
))
2122 * Now we read the pids from returned data one by one, pass
2123 * them into a child in the target namespace, read back the
2124 * translated pids, and put them into our to-return data
2127 if (socketpair(AF_UNIX
, SOCK_DGRAM
, 0, sock
) < 0) {
2128 perror("socketpair");
2137 if (!cpid
) // child - exits when done
2138 pid_to_ns_wrapper(sock
[1], tpid
);
2140 char *ptr
= tmpdata
;
2143 while (sscanf(ptr
, "%d\n", &qpid
) == 1) {
2145 ret
= send_creds(sock
[0], &cred
, v
, true);
2147 if (ret
== SEND_CREDS_NOTSK
)
2149 if (ret
== SEND_CREDS_FAIL
)
2152 // read converted results
2153 if (!wait_for_sock(sock
[0], 2)) {
2154 fprintf(stderr
, "%s: timed out waiting for pid from child: %s\n",
2155 __func__
, strerror(errno
));
2158 if (read(sock
[0], &qpid
, sizeof(qpid
)) != sizeof(qpid
)) {
2159 fprintf(stderr
, "%s: error reading pid from child: %s\n",
2160 __func__
, strerror(errno
));
2163 must_strcat_pid(d
, &sz
, &asz
, qpid
);
2165 ptr
= strchr(ptr
, '\n');
2171 cred
.pid
= getpid();
2173 if (send_creds(sock
[0], &cred
, v
, true) != SEND_CREDS_OK
) {
2174 // failed to ask child to exit
2175 fprintf(stderr
, "%s: failed to ask child to exit: %s\n",
2176 __func__
, strerror(errno
));
2186 if (sock
[0] != -1) {
2193 int cg_read(const char *path
, char *buf
, size_t size
, off_t offset
,
2194 struct fuse_file_info
*fi
)
2196 struct fuse_context
*fc
= fuse_get_context();
2197 struct file_info
*f
= (struct file_info
*)fi
->fh
;
2198 struct cgfs_files
*k
= NULL
;
2203 if (f
->type
!= LXC_TYPE_CGFILE
) {
2204 fprintf(stderr
, "Internal error: directory cache info used in cg_read\n");
2217 if ((k
= cgfs_get_key(f
->controller
, f
->cgroup
, f
->file
)) == NULL
) {
2223 if (!fc_may_access(fc
, f
->controller
, f
->cgroup
, f
->file
, O_RDONLY
)) {
2228 if (strcmp(f
->file
, "tasks") == 0 ||
2229 strcmp(f
->file
, "/tasks") == 0 ||
2230 strcmp(f
->file
, "/cgroup.procs") == 0 ||
2231 strcmp(f
->file
, "cgroup.procs") == 0)
2232 // special case - we have to translate the pids
2233 r
= do_read_pids(fc
->pid
, f
->controller
, f
->cgroup
, f
->file
, &data
);
2235 r
= cgfs_get_value(f
->controller
, f
->cgroup
, f
->file
, &data
);
2249 memcpy(buf
, data
, s
);
2250 if (s
> 0 && s
< size
&& data
[s
-1] != '\n')
2260 static int pid_from_ns(int sock
, pid_t tpid
)
2270 if (!wait_for_sock(sock
, 2)) {
2271 fprintf(stderr
, "%s: timeout reading from parent\n", __func__
);
2274 if ((ret
= read(sock
, &vpid
, sizeof(pid_t
))) != sizeof(pid_t
)) {
2275 fprintf(stderr
, "%s: bad read from parent: %s\n",
2276 __func__
, strerror(errno
));
2279 if (vpid
== -1) // done
2283 if (send_creds(sock
, &cred
, v
, true) != SEND_CREDS_OK
) {
2285 cred
.pid
= getpid();
2286 if (send_creds(sock
, &cred
, v
, false) != SEND_CREDS_OK
)
2293 static void pid_from_ns_wrapper(int sock
, pid_t tpid
)
2295 int newnsfd
= -1, ret
, cpipe
[2];
2300 ret
= snprintf(fnam
, sizeof(fnam
), "/proc/%d/ns/pid", tpid
);
2301 if (ret
< 0 || ret
>= sizeof(fnam
))
2303 newnsfd
= open(fnam
, O_RDONLY
);
2306 if (setns(newnsfd
, 0) < 0)
2310 if (pipe(cpipe
) < 0)
2313 struct pid_ns_clone_args args
= {
2317 .wrapped
= &pid_from_ns
2319 size_t stack_size
= sysconf(_SC_PAGESIZE
);
2320 void *stack
= alloca(stack_size
);
2322 cpid
= clone(pid_ns_clone_wrapper
, stack
+ stack_size
, SIGCHLD
, &args
);
2326 // give the child 1 second to be done forking and
2328 if (!wait_for_sock(cpipe
[0], 1))
2330 ret
= read(cpipe
[0], &v
, 1);
2331 if (ret
!= sizeof(char) || v
!= '1')
2334 if (!wait_for_pid(cpid
))
2340 * Given host @uid, return the uid to which it maps in
2341 * @pid's user namespace, or -1 if none.
2343 bool hostuid_to_ns(uid_t uid
, pid_t pid
, uid_t
*answer
)
2348 sprintf(line
, "/proc/%d/uid_map", pid
);
2349 if ((f
= fopen(line
, "r")) == NULL
) {
2353 *answer
= convert_id_to_ns(f
, uid
);
2362 * get_pid_creds: get the real uid and gid of @pid from
2364 * (XXX should we use euid here?)
2366 void get_pid_creds(pid_t pid
, uid_t
*uid
, gid_t
*gid
)
2375 sprintf(line
, "/proc/%d/status", pid
);
2376 if ((f
= fopen(line
, "r")) == NULL
) {
2377 fprintf(stderr
, "Error opening %s: %s\n", line
, strerror(errno
));
2380 while (fgets(line
, 400, f
)) {
2381 if (strncmp(line
, "Uid:", 4) == 0) {
2382 if (sscanf(line
+4, "%u", &u
) != 1) {
2383 fprintf(stderr
, "bad uid line for pid %u\n", pid
);
2388 } else if (strncmp(line
, "Gid:", 4) == 0) {
2389 if (sscanf(line
+4, "%u", &g
) != 1) {
2390 fprintf(stderr
, "bad gid line for pid %u\n", pid
);
2401 * May the requestor @r move victim @v to a new cgroup?
2402 * This is allowed if
2403 * . they are the same task
2404 * . they are owned by the same uid
2405 * . @r is root on the host, or
2406 * . @v's uid is mapped into @r's where @r is root.
2408 bool may_move_pid(pid_t r
, uid_t r_uid
, pid_t v
)
2410 uid_t v_uid
, tmpuid
;
2417 get_pid_creds(v
, &v_uid
, &v_gid
);
2420 if (hostuid_to_ns(r_uid
, r
, &tmpuid
) && tmpuid
== 0
2421 && hostuid_to_ns(v_uid
, r
, &tmpuid
))
2426 static bool do_write_pids(pid_t tpid
, uid_t tuid
, const char *contrl
, const char *cg
,
2427 const char *file
, const char *buf
)
2429 int sock
[2] = {-1, -1};
2430 pid_t qpid
, cpid
= -1;
2431 FILE *pids_file
= NULL
;
2432 bool answer
= false, fail
= false;
2434 pids_file
= open_pids_file(contrl
, cg
);
2439 * write the pids to a socket, have helper in writer's pidns
2440 * call movepid for us
2442 if (socketpair(AF_UNIX
, SOCK_DGRAM
, 0, sock
) < 0) {
2443 perror("socketpair");
2451 if (!cpid
) { // child
2453 pid_from_ns_wrapper(sock
[1], tpid
);
2456 const char *ptr
= buf
;
2457 while (sscanf(ptr
, "%d", &qpid
) == 1) {
2461 if (write(sock
[0], &qpid
, sizeof(qpid
)) != sizeof(qpid
)) {
2462 fprintf(stderr
, "%s: error writing pid to child: %s\n",
2463 __func__
, strerror(errno
));
2467 if (recv_creds(sock
[0], &cred
, &v
)) {
2469 if (!may_move_pid(tpid
, tuid
, cred
.pid
)) {
2473 if (fprintf(pids_file
, "%d", (int) cred
.pid
) < 0)
2478 ptr
= strchr(ptr
, '\n');
2484 /* All good, write the value */
2486 if (write(sock
[0], &qpid
,sizeof(qpid
)) != sizeof(qpid
))
2487 fprintf(stderr
, "Warning: failed to ask child to exit\n");
2495 if (sock
[0] != -1) {
2500 if (fclose(pids_file
) != 0)
2506 int cg_write(const char *path
, const char *buf
, size_t size
, off_t offset
,
2507 struct fuse_file_info
*fi
)
2509 struct fuse_context
*fc
= fuse_get_context();
2510 char *localbuf
= NULL
;
2511 struct cgfs_files
*k
= NULL
;
2512 struct file_info
*f
= (struct file_info
*)fi
->fh
;
2515 if (f
->type
!= LXC_TYPE_CGFILE
) {
2516 fprintf(stderr
, "Internal error: directory cache info used in cg_write\n");
2526 localbuf
= alloca(size
+1);
2527 localbuf
[size
] = '\0';
2528 memcpy(localbuf
, buf
, size
);
2530 if ((k
= cgfs_get_key(f
->controller
, f
->cgroup
, f
->file
)) == NULL
) {
2535 if (!fc_may_access(fc
, f
->controller
, f
->cgroup
, f
->file
, O_WRONLY
)) {
2540 if (strcmp(f
->file
, "tasks") == 0 ||
2541 strcmp(f
->file
, "/tasks") == 0 ||
2542 strcmp(f
->file
, "/cgroup.procs") == 0 ||
2543 strcmp(f
->file
, "cgroup.procs") == 0)
2544 // special case - we have to translate the pids
2545 r
= do_write_pids(fc
->pid
, fc
->uid
, f
->controller
, f
->cgroup
, f
->file
, localbuf
);
2547 r
= cgfs_set_value(f
->controller
, f
->cgroup
, f
->file
, localbuf
);
2557 int cg_chown(const char *path
, uid_t uid
, gid_t gid
)
2559 struct fuse_context
*fc
= fuse_get_context();
2560 char *cgdir
= NULL
, *last
= NULL
, *path1
, *path2
, *controller
;
2561 struct cgfs_files
*k
= NULL
;
2568 if (strcmp(path
, "/cgroup") == 0)
2571 controller
= pick_controller_from_path(fc
, path
);
2574 cgroup
= find_cgroup_in_path(path
);
2576 /* this is just /cgroup/controller */
2579 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
2589 if (is_child_cgroup(controller
, path1
, path2
)) {
2590 // get uid, gid, from '/tasks' file and make up a mode
2591 // That is a hack, until cgmanager gains a GetCgroupPerms fn.
2592 k
= cgfs_get_key(controller
, cgroup
, "tasks");
2595 k
= cgfs_get_key(controller
, path1
, path2
);
2603 * This being a fuse request, the uid and gid must be valid
2604 * in the caller's namespace. So we can just check to make
2605 * sure that the caller is root in his uid, and privileged
2606 * over the file's current owner.
2608 if (!is_privileged_over(fc
->pid
, fc
->uid
, k
->uid
, NS_ROOT_REQD
)) {
2613 ret
= cgfs_chown_file(controller
, cgroup
, uid
, gid
);
2622 int cg_chmod(const char *path
, mode_t mode
)
2624 struct fuse_context
*fc
= fuse_get_context();
2625 char * cgdir
= NULL
, *last
= NULL
, *path1
, *path2
, *controller
;
2626 struct cgfs_files
*k
= NULL
;
2633 if (strcmp(path
, "/cgroup") == 0)
2636 controller
= pick_controller_from_path(fc
, path
);
2639 cgroup
= find_cgroup_in_path(path
);
2641 /* this is just /cgroup/controller */
2644 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
2654 if (is_child_cgroup(controller
, path1
, path2
)) {
2655 // get uid, gid, from '/tasks' file and make up a mode
2656 // That is a hack, until cgmanager gains a GetCgroupPerms fn.
2657 k
= cgfs_get_key(controller
, cgroup
, "tasks");
2660 k
= cgfs_get_key(controller
, path1
, path2
);
2668 * This being a fuse request, the uid and gid must be valid
2669 * in the caller's namespace. So we can just check to make
2670 * sure that the caller is root in his uid, and privileged
2671 * over the file's current owner.
2673 if (!is_privileged_over(fc
->pid
, fc
->uid
, k
->uid
, NS_ROOT_OPT
)) {
2678 if (!cgfs_chmod_file(controller
, cgroup
, mode
)) {
2690 int cg_mkdir(const char *path
, mode_t mode
)
2692 struct fuse_context
*fc
= fuse_get_context();
2693 char *last
= NULL
, *path1
, *cgdir
= NULL
, *controller
, *next
= NULL
;
2701 controller
= pick_controller_from_path(fc
, path
);
2705 cgroup
= find_cgroup_in_path(path
);
2709 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
2715 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
2718 if (!caller_is_in_ancestor(initpid
, controller
, path1
, &next
)) {
2721 else if (last
&& strcmp(next
, last
) == 0)
2728 if (!fc_may_access(fc
, controller
, path1
, NULL
, O_RDWR
)) {
2732 if (!caller_is_in_ancestor(initpid
, controller
, path1
, NULL
)) {
2737 ret
= cgfs_create(controller
, cgroup
, fc
->uid
, fc
->gid
);
2745 int cg_rmdir(const char *path
)
2747 struct fuse_context
*fc
= fuse_get_context();
2748 char *last
= NULL
, *cgdir
= NULL
, *controller
, *next
= NULL
;
2755 controller
= pick_controller_from_path(fc
, path
);
2759 cgroup
= find_cgroup_in_path(path
);
2763 get_cgdir_and_path(cgroup
, &cgdir
, &last
);
2769 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
2772 if (!caller_is_in_ancestor(initpid
, controller
, cgroup
, &next
)) {
2773 if (!last
|| strcmp(next
, last
) == 0)
2780 if (!fc_may_access(fc
, controller
, cgdir
, NULL
, O_WRONLY
)) {
2784 if (!caller_is_in_ancestor(initpid
, controller
, cgroup
, NULL
)) {
2789 if (!cgfs_remove(controller
, cgroup
)) {
/* Return true iff @line begins with the prefix @pref. */
static bool startswith(const char *line, const char *pref)
{
	return strncmp(line, pref, strlen(pref)) == 0;
}
2809 static void get_mem_cached(char *memstat
, unsigned long *v
)
2815 if (startswith(memstat
, "total_cache")) {
2816 sscanf(memstat
+ 11, "%lu", v
);
2820 eol
= strchr(memstat
, '\n');
2827 static void get_blkio_io_value(char *str
, unsigned major
, unsigned minor
, char *iotype
, unsigned long *v
)
2833 snprintf(key
, 32, "%u:%u %s", major
, minor
, iotype
);
2835 size_t len
= strlen(key
);
2839 if (startswith(str
, key
)) {
2840 sscanf(str
+ len
, "%lu", v
);
2843 eol
= strchr(str
, '\n');
2850 static int read_file(const char *path
, char *buf
, size_t size
,
2851 struct file_info
*d
)
2853 size_t linelen
= 0, total_len
= 0, rv
= 0;
2855 char *cache
= d
->buf
;
2856 size_t cache_size
= d
->buflen
;
2857 FILE *f
= fopen(path
, "r");
2861 while (getline(&line
, &linelen
, f
) != -1) {
2862 ssize_t l
= snprintf(cache
, cache_size
, "%s", line
);
2864 perror("Error writing to cache");
2868 if (l
>= cache_size
) {
2869 fprintf(stderr
, "Internal error: truncated write to cache\n");
2878 d
->size
= total_len
;
2879 if (total_len
> size
)
2882 /* read from off 0 */
2883 memcpy(buf
, d
->buf
, total_len
);
2892 * FUSE ops for /proc
/*
 * Read memory.limit_in_bytes for @cgroup.
 * Returns the parsed value, or (unsigned long)-1 when the key cannot be
 * read (presumably: no memory controller entry for this cgroup — the
 * caller in get_min_memlimit treats -1 as "no limit at this level").
 */
static unsigned long get_memlimit(const char *cgroup)
{
	unsigned long memlimit = -1;
	char *memlimit_str = NULL;

	if (cgfs_get_value("memory", cgroup, "memory.limit_in_bytes", &memlimit_str))
		memlimit = strtoul(memlimit_str, NULL, 10);

	free(memlimit_str);
	return memlimit;
}
/*
 * Walk from @cgroup up to the cgroup root ("/"), querying the memory
 * limit at every level, and return the smallest limit seen.  Levels
 * reporting (unsigned long)-1 (unreadable limit) are ignored.
 */
static unsigned long get_min_memlimit(const char *cgroup)
{
	char *path = strdupa(cgroup);
	unsigned long limit = 0, min_limit;

	min_limit = get_memlimit(path);

	while (strcmp(path, "/") != 0) {
		path = dirname(path);
		limit = get_memlimit(path);
		if (limit != -1 && limit < min_limit)
			min_limit = limit;
	}

	return min_limit;
}
2925 static int proc_meminfo_read(char *buf
, size_t size
, off_t offset
,
2926 struct fuse_file_info
*fi
)
2928 struct fuse_context
*fc
= fuse_get_context();
2929 struct file_info
*d
= (struct file_info
*)fi
->fh
;
2931 char *memusage_str
= NULL
, *memstat_str
= NULL
,
2932 *memswlimit_str
= NULL
, *memswusage_str
= NULL
,
2933 *memswlimit_default_str
= NULL
, *memswusage_default_str
= NULL
;
2934 unsigned long memlimit
= 0, memusage
= 0, memswlimit
= 0, memswusage
= 0,
2935 cached
= 0, hosttotal
= 0;
2937 size_t linelen
= 0, total_len
= 0, rv
= 0;
2938 char *cache
= d
->buf
;
2939 size_t cache_size
= d
->buflen
;
2943 if (offset
> d
->size
)
2947 int left
= d
->size
- offset
;
2948 total_len
= left
> size
? size
: left
;
2949 memcpy(buf
, cache
+ offset
, total_len
);
2953 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
2956 cg
= get_pid_cgroup(initpid
, "memory");
2958 return read_file("/proc/meminfo", buf
, size
, d
);
2959 prune_init_slice(cg
);
2961 memlimit
= get_min_memlimit(cg
);
2962 if (!cgfs_get_value("memory", cg
, "memory.usage_in_bytes", &memusage_str
))
2964 if (!cgfs_get_value("memory", cg
, "memory.stat", &memstat_str
))
2967 // Following values are allowed to fail, because swapaccount might be turned
2968 // off for current kernel
2969 if(cgfs_get_value("memory", cg
, "memory.memsw.limit_in_bytes", &memswlimit_str
) &&
2970 cgfs_get_value("memory", cg
, "memory.memsw.usage_in_bytes", &memswusage_str
))
2972 /* If swapaccounting is turned on, then default value is assumed to be that of cgroup / */
2973 if (!cgfs_get_value("memory", "/", "memory.memsw.limit_in_bytes", &memswlimit_default_str
))
2975 if (!cgfs_get_value("memory", "/", "memory.memsw.usage_in_bytes", &memswusage_default_str
))
2978 memswlimit
= strtoul(memswlimit_str
, NULL
, 10);
2979 memswusage
= strtoul(memswusage_str
, NULL
, 10);
2981 if (!strcmp(memswlimit_str
, memswlimit_default_str
))
2983 if (!strcmp(memswusage_str
, memswusage_default_str
))
2986 memswlimit
= memswlimit
/ 1024;
2987 memswusage
= memswusage
/ 1024;
2990 memusage
= strtoul(memusage_str
, NULL
, 10);
2994 get_mem_cached(memstat_str
, &cached
);
2996 f
= fopen("/proc/meminfo", "r");
3000 while (getline(&line
, &linelen
, f
) != -1) {
3002 char *printme
, lbuf
[100];
3004 memset(lbuf
, 0, 100);
3005 if (startswith(line
, "MemTotal:")) {
3006 sscanf(line
+14, "%lu", &hosttotal
);
3007 if (hosttotal
< memlimit
)
3008 memlimit
= hosttotal
;
3009 snprintf(lbuf
, 100, "MemTotal: %8lu kB\n", memlimit
);
3011 } else if (startswith(line
, "MemFree:")) {
3012 snprintf(lbuf
, 100, "MemFree: %8lu kB\n", memlimit
- memusage
);
3014 } else if (startswith(line
, "MemAvailable:")) {
3015 snprintf(lbuf
, 100, "MemAvailable: %8lu kB\n", memlimit
- memusage
);
3017 } else if (startswith(line
, "SwapTotal:") && memswlimit
> 0) {
3018 snprintf(lbuf
, 100, "SwapTotal: %8lu kB\n", memswlimit
- memlimit
);
3020 } else if (startswith(line
, "SwapFree:") && memswlimit
> 0 && memswusage
> 0) {
3021 snprintf(lbuf
, 100, "SwapFree: %8lu kB\n",
3022 (memswlimit
- memlimit
) - (memswusage
- memusage
));
3024 } else if (startswith(line
, "Slab:")) {
3025 snprintf(lbuf
, 100, "Slab: %8lu kB\n", 0UL);
3027 } else if (startswith(line
, "Buffers:")) {
3028 snprintf(lbuf
, 100, "Buffers: %8lu kB\n", 0UL);
3030 } else if (startswith(line
, "Cached:")) {
3031 snprintf(lbuf
, 100, "Cached: %8lu kB\n", cached
);
3033 } else if (startswith(line
, "SwapCached:")) {
3034 snprintf(lbuf
, 100, "SwapCached: %8lu kB\n", 0UL);
3039 l
= snprintf(cache
, cache_size
, "%s", printme
);
3041 perror("Error writing to cache");
3046 if (l
>= cache_size
) {
3047 fprintf(stderr
, "Internal error: truncated write to cache\n");
3058 d
->size
= total_len
;
3059 if (total_len
> size
) total_len
= size
;
3060 memcpy(buf
, d
->buf
, total_len
);
3069 free(memswlimit_str
);
3070 free(memswusage_str
);
3072 free(memswlimit_default_str
);
3073 free(memswusage_default_str
);
3078 * Read the cpuset.cpus for cg
3079 * Return the answer in a newly allocated string which must be freed
/*
 * Read the cpuset.cpus value for cgroup @cg.
 * Returns a newly allocated string which the caller must free, or NULL
 * if the key could not be read.
 */
static char *get_cpuset(const char *cg)
{
	char *answer = NULL;

	if (cgfs_get_value("cpuset", cg, "cpuset.cpus", &answer))
		return answer;

	return NULL;
}
bool cpu_in_cpuset(int cpu, const char *cpuset);

/*
 * Return true iff @line is a "processor : N" /proc/cpuinfo line whose
 * cpu number N is contained in @cpuset (as judged by cpu_in_cpuset).
 */
static bool cpuline_in_cpuset(const char *line, const char *cpuset)
{
	int cpu;

	return sscanf(line, "processor : %d", &cpu) == 1 &&
	       cpu_in_cpuset(cpu, cpuset);
}
3102 * check whether this is a '^processor' line in /proc/cpuinfo
/* Return true iff @line looks like a "processor : N" line from /proc/cpuinfo. */
static bool is_processor_line(const char *line)
{
	int cpu;

	return sscanf(line, "processor : %d", &cpu) == 1;
}
3113 static int proc_cpuinfo_read(char *buf
, size_t size
, off_t offset
,
3114 struct fuse_file_info
*fi
)
3116 struct fuse_context
*fc
= fuse_get_context();
3117 struct file_info
*d
= (struct file_info
*)fi
->fh
;
3119 char *cpuset
= NULL
;
3121 size_t linelen
= 0, total_len
= 0, rv
= 0;
3122 bool am_printing
= false, firstline
= true, is_s390x
= false;
3123 int curcpu
= -1, cpu
;
3124 char *cache
= d
->buf
;
3125 size_t cache_size
= d
->buflen
;
3129 if (offset
> d
->size
)
3133 int left
= d
->size
- offset
;
3134 total_len
= left
> size
? size
: left
;
3135 memcpy(buf
, cache
+ offset
, total_len
);
3139 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
3142 cg
= get_pid_cgroup(initpid
, "cpuset");
3144 return read_file("proc/cpuinfo", buf
, size
, d
);
3145 prune_init_slice(cg
);
3147 cpuset
= get_cpuset(cg
);
3151 f
= fopen("/proc/cpuinfo", "r");
3155 while (getline(&line
, &linelen
, f
) != -1) {
3159 if (strstr(line
, "IBM/S390") != NULL
) {
3165 if (strncmp(line
, "# processors:", 12) == 0)
3167 if (is_processor_line(line
)) {
3168 am_printing
= cpuline_in_cpuset(line
, cpuset
);
3171 l
= snprintf(cache
, cache_size
, "processor : %d\n", curcpu
);
3173 perror("Error writing to cache");
3177 if (l
>= cache_size
) {
3178 fprintf(stderr
, "Internal error: truncated write to cache\n");
3187 } else if (is_s390x
&& sscanf(line
, "processor %d:", &cpu
) == 1) {
3189 if (!cpu_in_cpuset(cpu
, cpuset
))
3192 p
= strchr(line
, ':');
3196 l
= snprintf(cache
, cache_size
, "processor %d:%s", curcpu
, p
);
3198 perror("Error writing to cache");
3202 if (l
>= cache_size
) {
3203 fprintf(stderr
, "Internal error: truncated write to cache\n");
3214 l
= snprintf(cache
, cache_size
, "%s", line
);
3216 perror("Error writing to cache");
3220 if (l
>= cache_size
) {
3221 fprintf(stderr
, "Internal error: truncated write to cache\n");
3232 char *origcache
= d
->buf
;
3235 d
->buf
= malloc(d
->buflen
);
3238 cache_size
= d
->buflen
;
3240 l
= snprintf(cache
, cache_size
, "vendor_id : IBM/S390\n");
3241 if (l
< 0 || l
>= cache_size
) {
3248 l
= snprintf(cache
, cache_size
, "# processors : %d\n", curcpu
+ 1);
3249 if (l
< 0 || l
>= cache_size
) {
3256 l
= snprintf(cache
, cache_size
, "%s", origcache
);
3258 if (l
< 0 || l
>= cache_size
)
3264 d
->size
= total_len
;
3265 if (total_len
> size
) total_len
= size
;
3267 /* read from off 0 */
3268 memcpy(buf
, d
->buf
, total_len
);
3279 static int proc_stat_read(char *buf
, size_t size
, off_t offset
,
3280 struct fuse_file_info
*fi
)
3282 struct fuse_context
*fc
= fuse_get_context();
3283 struct file_info
*d
= (struct file_info
*)fi
->fh
;
3285 char *cpuset
= NULL
;
3287 size_t linelen
= 0, total_len
= 0, rv
= 0;
3288 int curcpu
= -1; /* cpu numbering starts at 0 */
3289 unsigned long user
= 0, nice
= 0, system
= 0, idle
= 0, iowait
= 0, irq
= 0, softirq
= 0, steal
= 0, guest
= 0;
3290 unsigned long user_sum
= 0, nice_sum
= 0, system_sum
= 0, idle_sum
= 0, iowait_sum
= 0,
3291 irq_sum
= 0, softirq_sum
= 0, steal_sum
= 0, guest_sum
= 0;
3292 #define CPUALL_MAX_SIZE BUF_RESERVE_SIZE
3293 char cpuall
[CPUALL_MAX_SIZE
];
3294 /* reserve for cpu all */
3295 char *cache
= d
->buf
+ CPUALL_MAX_SIZE
;
3296 size_t cache_size
= d
->buflen
- CPUALL_MAX_SIZE
;
3300 if (offset
> d
->size
)
3304 int left
= d
->size
- offset
;
3305 total_len
= left
> size
? size
: left
;
3306 memcpy(buf
, d
->buf
+ offset
, total_len
);
3310 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
3313 cg
= get_pid_cgroup(initpid
, "cpuset");
3315 return read_file("/proc/stat", buf
, size
, d
);
3316 prune_init_slice(cg
);
3318 cpuset
= get_cpuset(cg
);
3322 f
= fopen("/proc/stat", "r");
3327 if (getline(&line
, &linelen
, f
) < 0) {
3328 fprintf(stderr
, "proc_stat_read read first line failed\n");
3332 while (getline(&line
, &linelen
, f
) != -1) {
3335 char cpu_char
[10]; /* That's a lot of cores */
3338 if (sscanf(line
, "cpu%9[^ ]", cpu_char
) != 1) {
3339 /* not a ^cpuN line containing a number N, just print it */
3340 l
= snprintf(cache
, cache_size
, "%s", line
);
3342 perror("Error writing to cache");
3346 if (l
>= cache_size
) {
3347 fprintf(stderr
, "Internal error: truncated write to cache\n");
3357 if (sscanf(cpu_char
, "%d", &cpu
) != 1)
3359 if (!cpu_in_cpuset(cpu
, cpuset
))
3363 c
= strchr(line
, ' ');
3366 l
= snprintf(cache
, cache_size
, "cpu%d%s", curcpu
, c
);
3368 perror("Error writing to cache");
3373 if (l
>= cache_size
) {
3374 fprintf(stderr
, "Internal error: truncated write to cache\n");
3383 if (sscanf(line
, "%*s %lu %lu %lu %lu %lu %lu %lu %lu %lu", &user
, &nice
, &system
, &idle
, &iowait
, &irq
,
3384 &softirq
, &steal
, &guest
) != 9)
3388 system_sum
+= system
;
3390 iowait_sum
+= iowait
;
3392 softirq_sum
+= softirq
;
3399 int cpuall_len
= snprintf(cpuall
, CPUALL_MAX_SIZE
, "%s %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
3400 "cpu ", user_sum
, nice_sum
, system_sum
, idle_sum
, iowait_sum
, irq_sum
, softirq_sum
, steal_sum
, guest_sum
);
3401 if (cpuall_len
> 0 && cpuall_len
< CPUALL_MAX_SIZE
){
3402 memcpy(cache
, cpuall
, cpuall_len
);
3403 cache
+= cpuall_len
;
3405 /* shouldn't happen */
3406 fprintf(stderr
, "proc_stat_read copy cpuall failed, cpuall_len=%d\n", cpuall_len
);
3410 memmove(cache
, d
->buf
+ CPUALL_MAX_SIZE
, total_len
);
3411 total_len
+= cpuall_len
;
3413 d
->size
= total_len
;
3414 if (total_len
> size
) total_len
= size
;
3416 memcpy(buf
, d
->buf
, total_len
);
3428 static long int getreaperage(pid_t pid
)
3435 qpid
= lookup_initpid_in_store(pid
);
3439 ret
= snprintf(fnam
, 100, "/proc/%d", qpid
);
3440 if (ret
< 0 || ret
>= 100)
3443 if (lstat(fnam
, &sb
) < 0)
3446 return time(NULL
) - sb
.st_ctime
;
3449 static unsigned long get_reaper_busy(pid_t task
)
3451 pid_t initpid
= lookup_initpid_in_store(task
);
3452 char *cgroup
= NULL
, *usage_str
= NULL
;
3453 unsigned long usage
= 0;
3458 cgroup
= get_pid_cgroup(initpid
, "cpuacct");
3461 prune_init_slice(cgroup
);
3462 if (!cgfs_get_value("cpuacct", cgroup
, "cpuacct.usage", &usage_str
))
3464 usage
= strtoul(usage_str
, NULL
, 10);
3465 usage
/= 1000000000;
3476 char *name
, *cwd
= get_current_dir_name();
3482 len
= strlen(cwd
) + strlen("/iwashere") + 1;
3484 snprintf(name
, len
, "%s/iwashere", cwd
);
3486 fd
= creat(name
, 0755);
3493 * We read /proc/uptime and reuse its second field.
3494 * For the first field, we use the mtime for the reaper for
3495 * the calling pid as returned by getreaperage
3497 static int proc_uptime_read(char *buf
, size_t size
, off_t offset
,
3498 struct fuse_file_info
*fi
)
3500 struct fuse_context
*fc
= fuse_get_context();
3501 struct file_info
*d
= (struct file_info
*)fi
->fh
;
3502 long int reaperage
= getreaperage(fc
->pid
);
3503 unsigned long int busytime
= get_reaper_busy(fc
->pid
), idletime
;
3504 char *cache
= d
->buf
;
3505 ssize_t total_len
= 0;
3512 if (offset
> d
->size
)
3516 int left
= d
->size
- offset
;
3517 total_len
= left
> size
? size
: left
;
3518 memcpy(buf
, cache
+ offset
, total_len
);
3522 idletime
= reaperage
- busytime
;
3523 if (idletime
> reaperage
)
3524 idletime
= reaperage
;
3526 total_len
= snprintf(d
->buf
, d
->size
, "%ld.0 %lu.0\n", reaperage
, idletime
);
3528 perror("Error writing to cache");
3532 d
->size
= (int)total_len
;
3535 if (total_len
> size
) total_len
= size
;
3537 memcpy(buf
, d
->buf
, total_len
);
3541 static int proc_diskstats_read(char *buf
, size_t size
, off_t offset
,
3542 struct fuse_file_info
*fi
)
3545 struct fuse_context
*fc
= fuse_get_context();
3546 struct file_info
*d
= (struct file_info
*)fi
->fh
;
3548 char *io_serviced_str
= NULL
, *io_merged_str
= NULL
, *io_service_bytes_str
= NULL
,
3549 *io_wait_time_str
= NULL
, *io_service_time_str
= NULL
;
3550 unsigned long read
= 0, write
= 0;
3551 unsigned long read_merged
= 0, write_merged
= 0;
3552 unsigned long read_sectors
= 0, write_sectors
= 0;
3553 unsigned long read_ticks
= 0, write_ticks
= 0;
3554 unsigned long ios_pgr
= 0, tot_ticks
= 0, rq_ticks
= 0;
3555 unsigned long rd_svctm
= 0, wr_svctm
= 0, rd_wait
= 0, wr_wait
= 0;
3556 char *cache
= d
->buf
;
3557 size_t cache_size
= d
->buflen
;
3559 size_t linelen
= 0, total_len
= 0, rv
= 0;
3560 unsigned int major
= 0, minor
= 0;
3565 if (offset
> d
->size
)
3569 int left
= d
->size
- offset
;
3570 total_len
= left
> size
? size
: left
;
3571 memcpy(buf
, cache
+ offset
, total_len
);
3575 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
3578 cg
= get_pid_cgroup(initpid
, "blkio");
3580 return read_file("/proc/diskstats", buf
, size
, d
);
3581 prune_init_slice(cg
);
3583 if (!cgfs_get_value("blkio", cg
, "blkio.io_serviced_recursive", &io_serviced_str
))
3585 if (!cgfs_get_value("blkio", cg
, "blkio.io_merged_recursive", &io_merged_str
))
3587 if (!cgfs_get_value("blkio", cg
, "blkio.io_service_bytes_recursive", &io_service_bytes_str
))
3589 if (!cgfs_get_value("blkio", cg
, "blkio.io_wait_time_recursive", &io_wait_time_str
))
3591 if (!cgfs_get_value("blkio", cg
, "blkio.io_service_time_recursive", &io_service_time_str
))
3595 f
= fopen("/proc/diskstats", "r");
3599 while (getline(&line
, &linelen
, f
) != -1) {
3603 i
= sscanf(line
, "%u %u %71s", &major
, &minor
, dev_name
);
3607 get_blkio_io_value(io_serviced_str
, major
, minor
, "Read", &read
);
3608 get_blkio_io_value(io_serviced_str
, major
, minor
, "Write", &write
);
3609 get_blkio_io_value(io_merged_str
, major
, minor
, "Read", &read_merged
);
3610 get_blkio_io_value(io_merged_str
, major
, minor
, "Write", &write_merged
);
3611 get_blkio_io_value(io_service_bytes_str
, major
, minor
, "Read", &read_sectors
);
3612 read_sectors
= read_sectors
/512;
3613 get_blkio_io_value(io_service_bytes_str
, major
, minor
, "Write", &write_sectors
);
3614 write_sectors
= write_sectors
/512;
3616 get_blkio_io_value(io_service_time_str
, major
, minor
, "Read", &rd_svctm
);
3617 rd_svctm
= rd_svctm
/1000000;
3618 get_blkio_io_value(io_wait_time_str
, major
, minor
, "Read", &rd_wait
);
3619 rd_wait
= rd_wait
/1000000;
3620 read_ticks
= rd_svctm
+ rd_wait
;
3622 get_blkio_io_value(io_service_time_str
, major
, minor
, "Write", &wr_svctm
);
3623 wr_svctm
= wr_svctm
/1000000;
3624 get_blkio_io_value(io_wait_time_str
, major
, minor
, "Write", &wr_wait
);
3625 wr_wait
= wr_wait
/1000000;
3626 write_ticks
= wr_svctm
+ wr_wait
;
3628 get_blkio_io_value(io_service_time_str
, major
, minor
, "Total", &tot_ticks
);
3629 tot_ticks
= tot_ticks
/1000000;
3631 memset(lbuf
, 0, 256);
3632 if (read
|| write
|| read_merged
|| write_merged
|| read_sectors
|| write_sectors
|| read_ticks
|| write_ticks
)
3633 snprintf(lbuf
, 256, "%u %u %s %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
3634 major
, minor
, dev_name
, read
, read_merged
, read_sectors
, read_ticks
,
3635 write
, write_merged
, write_sectors
, write_ticks
, ios_pgr
, tot_ticks
, rq_ticks
);
3639 l
= snprintf(cache
, cache_size
, "%s", lbuf
);
3641 perror("Error writing to fuse buf");
3645 if (l
>= cache_size
) {
3646 fprintf(stderr
, "Internal error: truncated write to cache\n");
3656 d
->size
= total_len
;
3657 if (total_len
> size
) total_len
= size
;
3658 memcpy(buf
, d
->buf
, total_len
);
3666 free(io_serviced_str
);
3667 free(io_merged_str
);
3668 free(io_service_bytes_str
);
3669 free(io_wait_time_str
);
3670 free(io_service_time_str
);
3674 static int proc_swaps_read(char *buf
, size_t size
, off_t offset
,
3675 struct fuse_file_info
*fi
)
3677 struct fuse_context
*fc
= fuse_get_context();
3678 struct file_info
*d
= (struct file_info
*)fi
->fh
;
3680 char *memswlimit_str
= NULL
, *memlimit_str
= NULL
, *memusage_str
= NULL
, *memswusage_str
= NULL
,
3681 *memswlimit_default_str
= NULL
, *memswusage_default_str
= NULL
;
3682 unsigned long memswlimit
= 0, memlimit
= 0, memusage
= 0, memswusage
= 0, swap_total
= 0, swap_free
= 0;
3683 ssize_t total_len
= 0, rv
= 0;
3685 char *cache
= d
->buf
;
3688 if (offset
> d
->size
)
3692 int left
= d
->size
- offset
;
3693 total_len
= left
> size
? size
: left
;
3694 memcpy(buf
, cache
+ offset
, total_len
);
3698 pid_t initpid
= lookup_initpid_in_store(fc
->pid
);
3701 cg
= get_pid_cgroup(initpid
, "memory");
3703 return read_file("/proc/swaps", buf
, size
, d
);
3704 prune_init_slice(cg
);
3706 if (!cgfs_get_value("memory", cg
, "memory.limit_in_bytes", &memlimit_str
))
3709 if (!cgfs_get_value("memory", cg
, "memory.usage_in_bytes", &memusage_str
))
3712 memlimit
= strtoul(memlimit_str
, NULL
, 10);
3713 memusage
= strtoul(memusage_str
, NULL
, 10);
3715 if (cgfs_get_value("memory", cg
, "memory.memsw.usage_in_bytes", &memswusage_str
) &&
3716 cgfs_get_value("memory", cg
, "memory.memsw.limit_in_bytes", &memswlimit_str
)) {
3718 /* If swap accounting is turned on, then default value is assumed to be that of cgroup / */
3719 if (!cgfs_get_value("memory", "/", "memory.memsw.limit_in_bytes", &memswlimit_default_str
))
3721 if (!cgfs_get_value("memory", "/", "memory.memsw.usage_in_bytes", &memswusage_default_str
))
3724 memswlimit
= strtoul(memswlimit_str
, NULL
, 10);
3725 memswusage
= strtoul(memswusage_str
, NULL
, 10);
3727 if (!strcmp(memswlimit_str
, memswlimit_default_str
))
3729 if (!strcmp(memswusage_str
, memswusage_default_str
))
3732 swap_total
= (memswlimit
- memlimit
) / 1024;
3733 swap_free
= (memswusage
- memusage
) / 1024;
3736 total_len
= snprintf(d
->buf
, d
->size
, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
3738 /* When no mem + swap limit is specified or swapaccount=0*/
3742 FILE *f
= fopen("/proc/meminfo", "r");
3747 while (getline(&line
, &linelen
, f
) != -1) {
3748 if (startswith(line
, "SwapTotal:")) {
3749 sscanf(line
, "SwapTotal: %8lu kB", &swap_total
);
3750 } else if (startswith(line
, "SwapFree:")) {
3751 sscanf(line
, "SwapFree: %8lu kB", &swap_free
);
3759 if (swap_total
> 0) {
3760 l
= snprintf(d
->buf
+ total_len
, d
->size
- total_len
,
3761 "none%*svirtual\t\t%lu\t%lu\t0\n", 36, " ",
3762 swap_total
, swap_free
);
3766 if (total_len
< 0 || l
< 0) {
3767 perror("Error writing to cache");
3773 d
->size
= (int)total_len
;
3775 if (total_len
> size
) total_len
= size
;
3776 memcpy(buf
, d
->buf
, total_len
);
3781 free(memswlimit_str
);
3784 free(memswusage_str
);
3785 free(memswusage_default_str
);
3786 free(memswlimit_default_str
);
/*
 * Return the total number of bytes readable from @which, or 0 if the
 * file cannot be opened.  Used to pre-size the render buffer for a
 * proc file (procfs files report st_size == 0, so we must read them).
 */
static off_t
get_procfile_size(const char *which)
{
	FILE *fp;
	char chunk[4096];
	size_t got;
	off_t total = 0;

	fp = fopen(which, "r");
	if (!fp)
		return 0;

	/* Sum raw bytes until EOF; equivalent to summing getline() lengths. */
	while ((got = fread(chunk, 1, sizeof(chunk), fp)) > 0)
		total += got;

	fclose(fp);
	return total;
}
/*
 * FUSE getattr handler for the emulated /proc tree.
 *
 * The directory itself is r-xr-xr-x; the six emulated files are
 * read-only regular files owned by root, all stamped with the
 * current wall-clock time.  Returns 0, -EINVAL if the clock cannot
 * be read, or -ENOENT for paths we do not emulate.
 */
int proc_getattr(const char *path, struct stat *sb)
{
	static const char *entries[] = {
		"/proc/meminfo",
		"/proc/cpuinfo",
		"/proc/uptime",
		"/proc/stat",
		"/proc/diskstats",
		"/proc/swaps",
	};
	struct timespec now;
	size_t i;

	memset(sb, 0, sizeof(struct stat));
	if (clock_gettime(CLOCK_REALTIME, &now) < 0)
		return -EINVAL;

	/* Everything we export is owned by root and "modified" right now. */
	sb->st_uid = sb->st_gid = 0;
	sb->st_atim = sb->st_mtim = sb->st_ctim = now;

	if (strcmp(path, "/proc") == 0) {
		sb->st_mode = S_IFDIR | 00555;
		sb->st_nlink = 2;
		return 0;
	}

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		if (strcmp(path, entries[i]) == 0) {
			sb->st_size = 0;
			sb->st_mode = S_IFREG | 00444;
			sb->st_nlink = 1;
			return 0;
		}
	}

	return -ENOENT;
}
3836 int proc_readdir(const char *path
, void *buf
, fuse_fill_dir_t filler
, off_t offset
,
3837 struct fuse_file_info
*fi
)
3839 if (filler(buf
, "cpuinfo", NULL
, 0) != 0 ||
3840 filler(buf
, "meminfo", NULL
, 0) != 0 ||
3841 filler(buf
, "stat", NULL
, 0) != 0 ||
3842 filler(buf
, "uptime", NULL
, 0) != 0 ||
3843 filler(buf
, "diskstats", NULL
, 0) != 0 ||
3844 filler(buf
, "swaps", NULL
, 0) != 0)
3849 int proc_open(const char *path
, struct fuse_file_info
*fi
)
3852 struct file_info
*info
;
3854 if (strcmp(path
, "/proc/meminfo") == 0)
3855 type
= LXC_TYPE_PROC_MEMINFO
;
3856 else if (strcmp(path
, "/proc/cpuinfo") == 0)
3857 type
= LXC_TYPE_PROC_CPUINFO
;
3858 else if (strcmp(path
, "/proc/uptime") == 0)
3859 type
= LXC_TYPE_PROC_UPTIME
;
3860 else if (strcmp(path
, "/proc/stat") == 0)
3861 type
= LXC_TYPE_PROC_STAT
;
3862 else if (strcmp(path
, "/proc/diskstats") == 0)
3863 type
= LXC_TYPE_PROC_DISKSTATS
;
3864 else if (strcmp(path
, "/proc/swaps") == 0)
3865 type
= LXC_TYPE_PROC_SWAPS
;
3869 info
= malloc(sizeof(*info
));
3873 memset(info
, 0, sizeof(*info
));
3876 info
->buflen
= get_procfile_size(path
) + BUF_RESERVE_SIZE
;
3878 info
->buf
= malloc(info
->buflen
);
3879 } while (!info
->buf
);
3880 memset(info
->buf
, 0, info
->buflen
);
3881 /* set actual size to buffer size */
3882 info
->size
= info
->buflen
;
3884 fi
->fh
= (unsigned long)info
;
/*
 * FUSE access handler for the emulated /proc files.
 * Everything exported here is read-only: any request for more than
 * read permission is refused with -EACCES.
 */
int proc_access(const char *path, int mask)
{
	/* these are all read-only */
	return (mask & ~R_OK) != 0 ? -EACCES : 0;
}
/*
 * FUSE release handler: free the per-open file_info that proc_open()
 * stored in fi->fh.  Always succeeds.
 */
int proc_release(const char *path, struct fuse_file_info *fi)
{
	do_release_file_info(fi);
	return 0;
}
3902 int proc_read(const char *path
, char *buf
, size_t size
, off_t offset
,
3903 struct fuse_file_info
*fi
)
3905 struct file_info
*f
= (struct file_info
*) fi
->fh
;
3908 case LXC_TYPE_PROC_MEMINFO
:
3909 return proc_meminfo_read(buf
, size
, offset
, fi
);
3910 case LXC_TYPE_PROC_CPUINFO
:
3911 return proc_cpuinfo_read(buf
, size
, offset
, fi
);
3912 case LXC_TYPE_PROC_UPTIME
:
3913 return proc_uptime_read(buf
, size
, offset
, fi
);
3914 case LXC_TYPE_PROC_STAT
:
3915 return proc_stat_read(buf
, size
, offset
, fi
);
3916 case LXC_TYPE_PROC_DISKSTATS
:
3917 return proc_diskstats_read(buf
, size
, offset
, fi
);
3918 case LXC_TYPE_PROC_SWAPS
:
3919 return proc_swaps_read(buf
, size
, offset
, fi
);
/*
 * Constructor: parse /proc/self/cgroup at startup and register each
 * hierarchy's controller list via store_hierarchy().
 *
 * Each line has the form "N:controllers:path"; we split out the
 * middle field.  Stops early if a line is malformed or a hierarchy
 * cannot be stored.
 */
static void __attribute__((constructor)) collect_subsystems(void)
{
	FILE *fp;
	char *cgline = NULL;
	size_t cap = 0;

	if ((fp = fopen("/proc/self/cgroup", "r")) == NULL) {
		fprintf(stderr, "Error opening /proc/self/cgroup: %s\n", strerror(errno));
		return;
	}

	while (getline(&cgline, &cap, fp) != -1) {
		char *ctrl, *tail;

		/* Split "N:controllers:path" at the first ':' ... */
		ctrl = strchr(cgline, ':');
		if (!ctrl)
			goto out;
		*(ctrl++) = '\0';

		/* ... and chop the path off at the last ':'. */
		tail = strrchr(ctrl, ':');
		if (!tail)
			goto out;
		*tail = '\0';

		/* With cgroupv2 /proc/self/cgroup can contain entries of the
		 * form: 0::/ This will cause lxcfs to fail the cgroup mounts
		 * because it parses out the empty string "" and later on passes
		 * it to mount(). Let's skip such entries.
		 */
		if (!strcmp(ctrl, ""))
			continue;

		if (!store_hierarchy(cgline, ctrl))
			goto out;
	}

out:
	free(cgline);
	fclose(fp);
}
3967 static void __attribute__((destructor
)) free_subsystems(void)
3971 for (i
= 0; i
< num_hierarchies
; i
++)
3973 free(hierarchies
[i
]);