1 /* SPDX-License-Identifier: LGPL-2.1+ */
4 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
5 * cgroup backend. The original cgfs.c was designed to be as flexible
6 * as possible. It would try to find cgroup filesystems no matter where
7 * or how you had them mounted, and deduce the most usable mount for
10 * This new implementation assumes that cgroup filesystems are mounted
11 * under /sys/fs/cgroup/clist where clist is either the controller, or
12 * a comma-separated list of controllers.
22 #include <linux/kdev_t.h>
23 #include <linux/types.h>
30 #include <sys/types.h>
35 #include "cgroup2_devices.h"
36 #include "cgroup_utils.h"
43 #include "memory_utils.h"
44 #include "storage/storage.h"
48 #include "include/strlcpy.h"
52 #include "include/strlcat.h"
55 lxc_log_define(cgfsng
, cgroup
);
/* Free a NULL-terminated array of strings, including the array itself.
 * A NULL argument is a no-op.
 */
static void free_string_list(char **clist)
{
	if (!clist)
		return;

	for (char **it = clist; *it; it++)
		free(*it);

	free(clist);
}
70 /* Given a pointer to a null-terminated array of pointers, realloc to add one
71 * entry, and point the new entry to NULL. Do not fail. Return the index to the
72 * second-to-last entry - that is, the one which is now available for use
73 * (keeping the list null-terminated).
75 static int append_null_to_list(void ***list
)
80 for (; (*list
)[newentry
]; newentry
++)
83 *list
= must_realloc(*list
, (newentry
+ 2) * sizeof(void **));
84 (*list
)[newentry
+ 1] = NULL
;
/* Given a NULL-terminated array of strings, check whether @entry is one of
 * them. A NULL @list contains nothing.
 */
static bool string_in_list(char **list, const char *entry)
{
	if (!list)
		return false;

	for (char **it = list; *it; it++)
		if (strcmp(*it, entry) == 0)
			return true;

	return false;
}
/* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
 * "name=systemd". Do not fail (must_realloc aborts on OOM); caller frees.
 */
static char *cg_legacy_must_prefix_named(char *entry)
{
	char *prefixed;
	size_t len;

	len = strlen(entry);
	/* Derive the buffer size and terminator offset from the prefix length
	 * itself rather than the magic constants 6 and 5, so the prefix and
	 * the arithmetic cannot silently drift apart.
	 */
	prefixed = must_realloc(NULL, len + STRLITERALLEN("name=") + 1);

	memcpy(prefixed, "name=", STRLITERALLEN("name="));
	memcpy(prefixed + STRLITERALLEN("name="), entry, len);
	prefixed[len + STRLITERALLEN("name=")] = '\0';

	return prefixed;
}
123 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
126 * We also handle named subsystems here. Any controller which is not a kernel
127 * subsystem, we prefix "name=". Any which is both a kernel and named subsystem,
128 * we refuse to use because we're not sure which we have here.
129 * (TODO: We could work around this in some cases by just remounting to be
130 * unambiguous, or by comparing mountpoint contents with current cgroup.)
132 * The last entry will always be NULL.
134 static void must_append_controller(char **klist
, char **nlist
, char ***clist
,
140 if (string_in_list(klist
, entry
) && string_in_list(nlist
, entry
)) {
141 ERROR("Refusing to use ambiguous controller \"%s\"", entry
);
142 ERROR("It is both a named and kernel subsystem");
146 newentry
= append_null_to_list((void ***)clist
);
148 if (strncmp(entry
, "name=", 5) == 0)
149 copy
= must_copy_string(entry
);
150 else if (string_in_list(klist
, entry
))
151 copy
= must_copy_string(entry
);
153 copy
= cg_legacy_must_prefix_named(entry
);
155 (*clist
)[newentry
] = copy
;
158 static inline bool pure_unified_layout(const struct cgroup_ops
*ops
)
160 return ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
;
163 /* Given a handler's cgroup data, return the struct hierarchy for the controller
164 * @c, or NULL if there is none.
166 struct hierarchy
*get_hierarchy(struct cgroup_ops
*ops
, const char *controller
)
172 if (!ops
->hierarchies
) {
173 TRACE("There are no useable cgroup controllers");
177 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
179 /* This is the empty unified hierarchy. */
180 if (ops
->hierarchies
[i
]->controllers
&&
181 !ops
->hierarchies
[i
]->controllers
[0])
182 return ops
->hierarchies
[i
];
184 } else if (pure_unified_layout(ops
) &&
185 strcmp(controller
, "devices") == 0) {
186 if (ops
->unified
->bpf_device_controller
)
191 if (string_in_list(ops
->hierarchies
[i
]->controllers
, controller
))
192 return ops
->hierarchies
[i
];
196 WARN("There is no useable %s controller", controller
);
198 WARN("There is no empty unified cgroup hierarchy");
#define BATCH_SIZE 50
/* Ensure *@mem can hold @newlen bytes plus space up to the next BATCH_SIZE
 * boundary, growing in batches to amortize realloc() calls. @oldlen is the
 * previously requested size. Do not fail (must_realloc aborts on OOM).
 */
static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
{
	/* size_t, not int: the lengths are size_t, and an int batch count
	 * would silently truncate for very large inputs.
	 */
	size_t newbatches = (newlen / BATCH_SIZE) + 1;
	size_t oldbatches = (oldlen / BATCH_SIZE) + 1;

	if (!*mem || newbatches > oldbatches)
		*mem = must_realloc(*mem, newbatches * BATCH_SIZE);
}
/* Append the line @new of length @newlen (plus its terminating byte) to
 * *@dest, which currently holds @oldlen bytes, growing the buffer as needed.
 */
static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
{
	size_t total = oldlen + newlen;

	batch_realloc(dest, oldlen, total + 1);
	memcpy(*dest + oldlen, new, newlen + 1);
}
223 /* Slurp in a whole file */
224 static char *read_file(const char *fnam
)
226 __do_free
char *line
= NULL
;
227 __do_fclose
FILE *f
= NULL
;
230 size_t len
= 0, fulllen
= 0;
232 f
= fopen(fnam
, "r");
235 while ((linelen
= getline(&line
, &len
, f
)) != -1) {
236 append_line(&buf
, fulllen
, line
, linelen
);
/* Taken over modified from the kernel sources. */
#define NBITS 32 /* bits in uint32_t */
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)

/* Set bit @bit in the bit array @bitarr. */
static void set_bit(unsigned bit, uint32_t *bitarr)
{
	/* 1U, not 1: left-shifting the signed constant 1 by 31 is undefined
	 * behavior; the unsigned shift is well-defined for all 0..31.
	 */
	bitarr[bit / NBITS] |= (1U << (bit % NBITS));
}

/* Clear bit @bit in the bit array @bitarr. */
static void clear_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] &= ~(1U << (bit % NBITS));
}

/* Return whether bit @bit is set in the bit array @bitarr. */
static bool is_set(unsigned bit, uint32_t *bitarr)
{
	return (bitarr[bit / NBITS] & (1U << (bit % NBITS))) != 0;
}
262 /* Create cpumask from cpulist aka turn:
270 static uint32_t *lxc_cpumask(char *buf
, size_t nbits
)
276 arrlen
= BITS_TO_LONGS(nbits
);
277 bitarr
= calloc(arrlen
, sizeof(uint32_t));
281 lxc_iterate_parts(token
, buf
, ",") {
286 start
= strtoul(token
, NULL
, 0);
288 range
= strchr(token
, '-');
290 end
= strtoul(range
+ 1, NULL
, 0);
292 if (!(start
<= end
)) {
303 set_bit(start
++, bitarr
);
309 /* Turn cpumask into simple, comma-separated cpulist. */
310 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr
, size_t nbits
)
315 char **cpulist
= NULL
;
316 char numstr
[INTTYPE_TO_STRLEN(size_t)] = {0};
318 for (i
= 0; i
<= nbits
; i
++) {
319 if (!is_set(i
, bitarr
))
322 ret
= snprintf(numstr
, sizeof(numstr
), "%zu", i
);
323 if (ret
< 0 || (size_t)ret
>= sizeof(numstr
)) {
324 lxc_free_array((void **)cpulist
, free
);
328 ret
= lxc_append_string(&cpulist
, numstr
);
330 lxc_free_array((void **)cpulist
, free
);
338 tmp
= lxc_string_join(",", (const char **)cpulist
, false);
339 lxc_free_array((void **)cpulist
, free
);
344 static ssize_t
get_max_cpus(char *cpulist
)
347 char *maxcpus
= cpulist
;
350 c1
= strrchr(maxcpus
, ',');
354 c2
= strrchr(maxcpus
, '-');
368 cpus
= strtoul(c1
, NULL
, 0);
375 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
376 #define __OFFLINE_CPUS "/sys/devices/system/cpu/offline"
377 static bool cg_legacy_filter_and_set_cpus(char *path
, bool am_initialized
)
379 __do_free
char *cpulist
= NULL
, *fpath
= NULL
, *isolcpus
= NULL
,
380 *offlinecpus
= NULL
, *posscpus
= NULL
;
381 __do_free
uint32_t *isolmask
= NULL
, *offlinemask
= NULL
,
386 ssize_t maxisol
= 0, maxoffline
= 0, maxposs
= 0;
387 bool bret
= false, flipped_bit
= false;
389 lastslash
= strrchr(path
, '/');
391 ERROR("Failed to detect \"/\" in \"%s\"", path
);
395 fpath
= must_make_path(path
, "cpuset.cpus", NULL
);
397 posscpus
= read_file(fpath
);
399 SYSERROR("Failed to read file \"%s\"", fpath
);
403 /* Get maximum number of cpus found in possible cpuset. */
404 maxposs
= get_max_cpus(posscpus
);
405 if (maxposs
< 0 || maxposs
>= INT_MAX
- 1)
408 if (file_exists(__ISOL_CPUS
)) {
409 isolcpus
= read_file(__ISOL_CPUS
);
411 SYSERROR("Failed to read file \"%s\"", __ISOL_CPUS
);
415 if (isdigit(isolcpus
[0])) {
416 /* Get maximum number of cpus found in isolated cpuset. */
417 maxisol
= get_max_cpus(isolcpus
);
418 if (maxisol
< 0 || maxisol
>= INT_MAX
- 1)
422 if (maxposs
< maxisol
)
426 TRACE("The path \""__ISOL_CPUS
"\" to read isolated cpus from does not exist");
429 if (file_exists(__OFFLINE_CPUS
)) {
430 offlinecpus
= read_file(__OFFLINE_CPUS
);
432 SYSERROR("Failed to read file \"%s\"", __OFFLINE_CPUS
);
436 if (isdigit(offlinecpus
[0])) {
437 /* Get maximum number of cpus found in offline cpuset. */
438 maxoffline
= get_max_cpus(offlinecpus
);
439 if (maxoffline
< 0 || maxoffline
>= INT_MAX
- 1)
443 if (maxposs
< maxoffline
)
444 maxposs
= maxoffline
;
447 TRACE("The path \""__OFFLINE_CPUS
"\" to read offline cpus from does not exist");
450 if ((maxisol
== 0) && (maxoffline
== 0)) {
451 cpulist
= move_ptr(posscpus
);
455 possmask
= lxc_cpumask(posscpus
, maxposs
);
457 ERROR("Failed to create cpumask for possible cpus");
462 isolmask
= lxc_cpumask(isolcpus
, maxposs
);
464 ERROR("Failed to create cpumask for isolated cpus");
469 if (maxoffline
> 0) {
470 offlinemask
= lxc_cpumask(offlinecpus
, maxposs
);
472 ERROR("Failed to create cpumask for offline cpus");
477 for (i
= 0; i
<= maxposs
; i
++) {
478 if ((isolmask
&& !is_set(i
, isolmask
)) ||
479 (offlinemask
&& !is_set(i
, offlinemask
)) ||
480 !is_set(i
, possmask
))
484 clear_bit(i
, possmask
);
488 cpulist
= lxc_cpumask_to_cpulist(possmask
, maxposs
);
489 TRACE("No isolated or offline cpus present in cpuset");
491 cpulist
= move_ptr(posscpus
);
492 TRACE("Removed isolated or offline cpus from cpuset");
495 ERROR("Failed to create cpu list");
500 if (!am_initialized
) {
501 ret
= lxc_write_openat(path
, "cpuset.cpus", cpulist
, strlen(cpulist
));
503 return log_error_errno(false,
504 errno
, "Failed to write cpu list to \"%s/cpuset.cpus\"",
507 TRACE("Copied cpu settings of parent cgroup");
513 /* Copy contents of parent(@path)/@file to @path/@file */
514 static bool copy_parent_file(char *path
, char *file
)
516 __do_free
char *parent_path
= NULL
, *value
= NULL
;
518 char *lastslash
= NULL
;
521 lastslash
= strrchr(path
, '/');
523 return log_error_errno(false, ENOENT
,
524 "Failed to detect \"/\" in \"%s\"", path
);
527 parent_path
= must_make_path(path
, file
, NULL
);
530 len
= lxc_read_from_file(parent_path
, NULL
, 0);
532 return log_error_errno(false, errno
,
533 "Failed to determine buffer size");
535 value
= must_realloc(NULL
, len
+ 1);
537 ret
= lxc_read_from_file(parent_path
, value
, len
);
539 return log_error_errno(false, errno
,
540 "Failed to read from parent file \"%s\"",
543 ret
= lxc_write_openat(path
, file
, value
, len
);
544 if (ret
< 0 && errno
!= EACCES
)
545 return log_error_errno(false,
546 errno
, "Failed to write \"%s\" to file \"%s/%s\"",
551 static bool is_unified_hierarchy(const struct hierarchy
*h
)
553 return h
->version
== CGROUP2_SUPER_MAGIC
;
556 /* Initialize the cpuset hierarchy in first directory of @gname and set
557 * cgroup.clone_children so that children inherit settings. Since the
558 * h->base_path is populated by init or ourselves, we know it is already
561 * returns -1 on error, 0 when we didn't created a cgroup, 1 if we created a
564 static int cg_legacy_handle_cpuset_hierarchy(struct hierarchy
*h
, char *cgname
)
567 __do_free
char *cgpath
= NULL
;
568 __do_close_prot_errno
int cgroup_fd
= -EBADF
;
573 if (is_unified_hierarchy(h
))
576 if (!string_in_list(h
->controllers
, "cpuset"))
581 slash
= strchr(cgname
, '/');
585 cgpath
= must_make_path(h
->mountpoint
, h
->container_base_path
, cgname
, NULL
);
590 ret
= mkdir(cgpath
, 0755);
593 return log_error_errno(-1, errno
, "Failed to create directory \"%s\"", cgpath
);
598 cgroup_fd
= lxc_open_dirfd(cgpath
);
602 ret
= lxc_readat(cgroup_fd
, "cgroup.clone_children", &v
, 1);
604 return log_error_errno(-1, errno
, "Failed to read file \"%s/cgroup.clone_children\"", cgpath
);
606 /* Make sure any isolated cpus are removed from cpuset.cpus. */
607 if (!cg_legacy_filter_and_set_cpus(cgpath
, v
== '1'))
608 return log_error_errno(-1, errno
, "Failed to remove isolated cpus");
610 /* Already set for us by someone else. */
612 TRACE("\"cgroup.clone_children\" was already set to \"1\"");
614 /* copy parent's settings */
615 if (!copy_parent_file(cgpath
, "cpuset.mems"))
616 return log_error_errno(-1, errno
, "Failed to copy \"cpuset.mems\" settings");
618 /* Set clone_children so children inherit our settings */
619 ret
= lxc_writeat(cgroup_fd
, "cgroup.clone_children", "1", 1);
621 return log_error_errno(-1, errno
, "Failed to write 1 to \"%s/cgroup.clone_children\"", cgpath
);
/* Given two NULL-terminated lists of strings, return true if any string
 * occurs in both lists.
 */
static bool controller_lists_intersect(char **l1, char **l2)
{
	if (!l1 || !l2)
		return false;

	for (char **it = l1; *it; it++)
		if (string_in_list(l2, *it))
			return true;

	return false;
}
644 /* For a null-terminated list of controllers @clist, return true if any of those
645 * controllers is already listed the null-terminated list of hierarchies @hlist.
646 * Realistically, if one is present, all must be present.
648 static bool controller_list_is_dup(struct hierarchy
**hlist
, char **clist
)
655 for (i
= 0; hlist
[i
]; i
++)
656 if (controller_lists_intersect(hlist
[i
]->controllers
, clist
))
662 /* Return true if the controller @entry is found in the null-terminated list of
663 * hierarchies @hlist.
665 static bool controller_found(struct hierarchy
**hlist
, char *entry
)
672 for (i
= 0; hlist
[i
]; i
++)
673 if (string_in_list(hlist
[i
]->controllers
, entry
))
679 /* Return true if all of the controllers which we require have been found. The
680 * required list is freezer and anything in lxc.cgroup.use.
682 static bool all_controllers_found(struct cgroup_ops
*ops
)
685 struct hierarchy
**hlist
= ops
->hierarchies
;
687 if (!ops
->cgroup_use
)
690 for (cur
= ops
->cgroup_use
; cur
&& *cur
; cur
++)
691 if (!controller_found(hlist
, *cur
)) {
692 ERROR("No %s controller mountpoint found", *cur
);
699 /* Get the controllers from a mountinfo line There are other ways we could get
700 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
701 * could parse the mount options. But we simply assume that the mountpoint must
702 * be /sys/fs/cgroup/controller-list
704 static char **cg_hybrid_get_controllers(char **klist
, char **nlist
, char *line
,
707 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
708 * for legacy hierarchies.
712 char *p
= line
, *sep
= ",";
715 for (i
= 0; i
< 4; i
++) {
722 /* Note, if we change how mountinfo works, then our caller will need to
723 * verify /sys/fs/cgroup/ in this field.
725 if (strncmp(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15) != 0) {
726 ERROR("Found hierarchy not under " DEFAULT_CGROUP_MOUNTPOINT
": \"%s\"", p
);
733 ERROR("Corrupt mountinfo");
738 if (type
== CGROUP_SUPER_MAGIC
) {
739 __do_free
char *dup
= NULL
;
741 /* strdup() here for v1 hierarchies. Otherwise
742 * lxc_iterate_parts() will destroy mountpoints such as
743 * "/sys/fs/cgroup/cpu,cpuacct".
745 dup
= must_copy_string(p
);
749 lxc_iterate_parts (tok
, dup
, sep
)
750 must_append_controller(klist
, nlist
, &aret
, tok
);
757 static char **cg_unified_make_empty_controller(void)
762 newentry
= append_null_to_list((void ***)&aret
);
763 aret
[newentry
] = NULL
;
767 static char **cg_unified_get_controllers(const char *file
)
769 __do_free
char *buf
= NULL
;
774 buf
= read_file(file
);
778 lxc_iterate_parts(tok
, buf
, sep
) {
782 newentry
= append_null_to_list((void ***)&aret
);
783 copy
= must_copy_string(tok
);
784 aret
[newentry
] = copy
;
790 static struct hierarchy
*add_hierarchy(struct hierarchy
***h
, char **clist
, char *mountpoint
,
791 char *container_base_path
, int type
)
793 struct hierarchy
*new;
796 new = must_realloc(NULL
, sizeof(*new));
797 new->controllers
= clist
;
798 new->mountpoint
= mountpoint
;
799 new->container_base_path
= container_base_path
;
800 new->container_full_path
= NULL
;
801 new->monitor_full_path
= NULL
;
803 new->cgroup2_chown
= NULL
;
805 newentry
= append_null_to_list((void ***)h
);
806 (*h
)[newentry
] = new;
810 /* Get a copy of the mountpoint from @line, which is a line from
811 * /proc/self/mountinfo.
813 static char *cg_hybrid_get_mountpoint(char *line
)
818 char *p
= line
, *sret
= NULL
;
820 for (i
= 0; i
< 4; i
++) {
827 if (strncmp(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15) != 0)
830 p2
= strchr(p
+ 15, ' ');
836 sret
= must_realloc(NULL
, len
+ 1);
837 memcpy(sret
, p
, len
);
/* Given a multi-line string, return a NUL-terminated copy of the current
 * line, or NULL when no '\n' terminates it. Caller frees.
 */
static char *copy_to_eol(char *p)
{
	char *newline = strchr(p, '\n');
	char *sret;
	size_t len;

	if (!newline)
		return NULL;

	len = newline - p;
	sret = must_realloc(NULL, len + 1);
	memcpy(sret, p, len);
	sret[len] = '\0';

	return sret;
}
858 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
859 * /proc/self/cgroup file. Check whether controller c is present.
861 static bool controller_in_clist(char *cgline
, char *c
)
863 __do_free
char *tmp
= NULL
;
867 eol
= strchr(cgline
, ':');
872 tmp
= must_realloc(NULL
, len
+ 1);
873 memcpy(tmp
, cgline
, len
);
876 lxc_iterate_parts(tok
, tmp
, ",")
877 if (strcmp(tok
, c
) == 0)
883 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
886 static char *cg_hybrid_get_current_cgroup(char *basecginfo
, char *controller
,
889 char *p
= basecginfo
;
892 bool is_cgv2_base_cgroup
= false;
894 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
895 if ((type
== CGROUP2_SUPER_MAGIC
) && (*p
== '0'))
896 is_cgv2_base_cgroup
= true;
903 if (is_cgv2_base_cgroup
|| (controller
&& controller_in_clist(p
, controller
))) {
908 return copy_to_eol(p
);
/* Append a copy of @entry to the NULL-terminated string array *@list,
 * growing it by one slot. Do not fail.
 */
static void must_append_string(char ***list, char *entry)
{
	int slot;

	slot = append_null_to_list((void ***)list);
	(*list)[slot] = must_copy_string(entry);
}
928 static int get_existing_subsystems(char ***klist
, char ***nlist
)
930 __do_free
char *line
= NULL
;
931 __do_fclose
FILE *f
= NULL
;
934 f
= fopen("/proc/self/cgroup", "r");
938 while (getline(&line
, &len
, f
) != -1) {
940 p
= strchr(line
, ':');
949 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
950 * contains an entry of the form:
954 * In this case we use "cgroup2" as controller name.
957 must_append_string(klist
, "cgroup2");
961 lxc_iterate_parts(tok
, p
, ",") {
962 if (strncmp(tok
, "name=", 5) == 0)
963 must_append_string(nlist
, tok
);
965 must_append_string(klist
, tok
);
/* Strip trailing newlines from @s in place. */
static void trim(char *s)
{
	size_t len;

	len = strlen(s);
	/* len > 0, not len > 1: a string consisting solely of a newline must
	 * also be trimmed (to the empty string), otherwise a lone "\n" line
	 * survives untouched.
	 */
	while ((len > 0) && (s[len - 1] == '\n'))
		s[--len] = '\0';
}
981 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops
*ops
)
984 struct hierarchy
**it
;
986 if (!ops
->hierarchies
) {
987 TRACE(" No hierarchies found");
991 TRACE(" Hierarchies:");
992 for (i
= 0, it
= ops
->hierarchies
; it
&& *it
; it
++, i
++) {
996 TRACE(" %d: base_cgroup: %s", i
, (*it
)->container_base_path
? (*it
)->container_base_path
: "(null)");
997 TRACE(" mountpoint: %s", (*it
)->mountpoint
? (*it
)->mountpoint
: "(null)");
998 TRACE(" controllers:");
999 for (j
= 0, cit
= (*it
)->controllers
; cit
&& *cit
; cit
++, j
++)
1000 TRACE(" %d: %s", j
, *cit
);
1004 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo
, char **klist
,
1010 TRACE("basecginfo is:");
1011 TRACE("%s", basecginfo
);
1013 for (k
= 0, it
= klist
; it
&& *it
; it
++, k
++)
1014 TRACE("kernel subsystem %d: %s", k
, *it
);
1016 for (k
= 0, it
= nlist
; it
&& *it
; it
++, k
++)
1017 TRACE("named subsystem %d: %s", k
, *it
);
1020 static int cgroup_rmdir(struct hierarchy
**hierarchies
,
1021 const char *container_cgroup
)
1025 if (!container_cgroup
|| !hierarchies
)
1028 for (i
= 0; hierarchies
[i
]; i
++) {
1030 struct hierarchy
*h
= hierarchies
[i
];
1032 if (!h
->container_full_path
)
1035 ret
= recursive_destroy(h
->container_full_path
);
1037 WARN("Failed to destroy \"%s\"", h
->container_full_path
);
1039 free(h
->container_full_path
);
1040 h
->container_full_path
= NULL
;
1046 struct generic_userns_exec_data
{
1047 struct hierarchy
**hierarchies
;
1048 const char *container_cgroup
;
1049 struct lxc_conf
*conf
;
1050 uid_t origuid
; /* target uid in parent namespace */
1054 static int cgroup_rmdir_wrapper(void *data
)
1057 struct generic_userns_exec_data
*arg
= data
;
1058 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
1059 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
1061 ret
= setresgid(nsgid
, nsgid
, nsgid
);
1063 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid
,
1064 (int)nsgid
, (int)nsgid
);
1068 ret
= setresuid(nsuid
, nsuid
, nsuid
);
1070 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid
,
1071 (int)nsuid
, (int)nsuid
);
1075 ret
= setgroups(0, NULL
);
1076 if (ret
< 0 && errno
!= EPERM
) {
1077 SYSERROR("Failed to setgroups(0, NULL)");
1081 return cgroup_rmdir(arg
->hierarchies
, arg
->container_cgroup
);
1084 __cgfsng_ops
static void cgfsng_payload_destroy(struct cgroup_ops
*ops
,
1085 struct lxc_handler
*handler
)
1088 struct generic_userns_exec_data wrap
;
1091 log_error_errno(return, ENOENT
, "Called with uninitialized cgroup operations");
1093 if (!ops
->hierarchies
)
1097 log_error_errno(return, EINVAL
, "Called with uninitialized handler");
1100 log_error_errno(return, EINVAL
, "Called with uninitialized conf");
1103 wrap
.container_cgroup
= ops
->container_cgroup
;
1104 wrap
.hierarchies
= ops
->hierarchies
;
1105 wrap
.conf
= handler
->conf
;
1107 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
1108 ret
= bpf_program_cgroup_detach(handler
->conf
->cgroup2_devices
);
1110 WARN("Failed to detach bpf program from cgroup");
1113 if (handler
->conf
&& !lxc_list_empty(&handler
->conf
->id_map
))
1114 ret
= userns_exec_1(handler
->conf
, cgroup_rmdir_wrapper
, &wrap
,
1115 "cgroup_rmdir_wrapper");
1117 ret
= cgroup_rmdir(ops
->hierarchies
, ops
->container_cgroup
);
1119 WARN("Failed to destroy cgroups");
1124 __cgfsng_ops
static void cgfsng_monitor_destroy(struct cgroup_ops
*ops
,
1125 struct lxc_handler
*handler
)
1128 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1129 struct lxc_conf
*conf
;
1132 log_error_errno(return, ENOENT
, "Called with uninitialized cgroup operations");
1134 if (!ops
->hierarchies
)
1138 log_error_errno(return, EINVAL
, "Called with uninitialized handler");
1141 log_error_errno(return, EINVAL
, "Called with uninitialized conf");
1143 conf
= handler
->conf
;
1145 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", handler
->monitor_pid
);
1146 if (len
< 0 || (size_t)len
>= sizeof(pidstr
))
1149 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1150 __do_free
char *pivot_path
= NULL
;
1151 char pivot_cgroup
[] = CGROUP_PIVOT
;
1152 struct hierarchy
*h
= ops
->hierarchies
[i
];
1155 if (!h
->monitor_full_path
)
1158 if (conf
&& conf
->cgroup_meta
.dir
)
1159 pivot_path
= must_make_path(h
->mountpoint
,
1160 h
->container_base_path
,
1161 conf
->cgroup_meta
.dir
,
1162 CGROUP_PIVOT
, NULL
);
1164 pivot_path
= must_make_path(h
->mountpoint
,
1165 h
->container_base_path
,
1166 CGROUP_PIVOT
, NULL
);
1169 * Make sure not to pass in the ro string literal CGROUP_PIVOT
1172 if (cg_legacy_handle_cpuset_hierarchy(h
, pivot_cgroup
) < 0)
1173 log_warn_errno(continue, errno
, "Failed to handle legacy cpuset controller");
1175 ret
= mkdir_p(pivot_path
, 0755);
1176 if (ret
< 0 && errno
!= EEXIST
)
1177 log_warn_errno(continue, errno
,
1178 "Failed to create cgroup \"%s\"\n",
1182 * Move ourselves into the pivot cgroup to delete our own
1185 ret
= lxc_write_openat(pivot_path
, "cgroup.procs", pidstr
, len
);
1187 log_warn_errno(continue, errno
,
1188 "Failed to move monitor %s to \"%s\"\n",
1189 pidstr
, pivot_path
);
1191 ret
= recursive_destroy(h
->monitor_full_path
);
1193 WARN("Failed to destroy \"%s\"", h
->monitor_full_path
);
1197 static int mkdir_eexist_on_last(const char *dir
, mode_t mode
)
1199 const char *tmp
= dir
;
1200 const char *orig
= dir
;
1203 orig_len
= strlen(dir
);
1205 __do_free
char *makeme
= NULL
;
1209 dir
= tmp
+ strspn(tmp
, "/");
1210 tmp
= dir
+ strcspn(dir
, "/");
1213 cur_len
= dir
- orig
;
1214 makeme
= strndup(orig
, cur_len
);
1218 ret
= mkdir(makeme
, mode
);
1220 if ((errno
!= EEXIST
) || (orig_len
== cur_len
)) {
1221 SYSERROR("Failed to create directory \"%s\"", makeme
);
1225 } while (tmp
!= dir
);
1230 static bool create_cgroup_tree(struct hierarchy
*h
, const char *cgroup_tree
,
1231 char *cgroup_leaf
, bool payload
)
1233 __do_free
char *path
= NULL
;
1234 int ret
, ret_cpuset
;
1236 path
= must_make_path(h
->mountpoint
, h
->container_base_path
, cgroup_leaf
, NULL
);
1237 if (dir_exists(path
))
1238 return log_warn_errno(false, errno
, "The %s cgroup already existed", path
);
1240 ret_cpuset
= cg_legacy_handle_cpuset_hierarchy(h
, cgroup_leaf
);
1242 return log_error_errno(false, errno
, "Failed to handle legacy cpuset controller");
1244 ret
= mkdir_eexist_on_last(path
, 0755);
1247 * This is the cpuset controller and
1248 * cg_legacy_handle_cpuset_hierarchy() has created our target
1249 * directory for us to ensure correct initialization.
1251 if (ret_cpuset
!= 1 || cgroup_tree
)
1252 return log_error_errno(false, errno
, "Failed to create %s cgroup", path
);
1256 h
->container_full_path
= move_ptr(path
);
1258 h
->monitor_full_path
= move_ptr(path
);
1263 static void cgroup_remove_leaf(struct hierarchy
*h
, bool payload
)
1265 __do_free
char *full_path
= NULL
;
1268 full_path
= h
->container_full_path
;
1270 full_path
= h
->monitor_full_path
;
1272 if (rmdir(full_path
))
1273 SYSWARN("Failed to rmdir(\"%s\") cgroup", full_path
);
1276 h
->container_full_path
= NULL
;
1278 h
->monitor_full_path
= NULL
;
1281 __cgfsng_ops
static inline bool cgfsng_monitor_create(struct cgroup_ops
*ops
,
1282 struct lxc_handler
*handler
)
1284 __do_free
char *monitor_cgroup
= NULL
;
1285 const char *cgroup_tree
;
1290 struct lxc_conf
*conf
;
1293 return ret_set_errno(false, ENOENT
);
1295 if (!ops
->hierarchies
)
1298 if (ops
->monitor_cgroup
)
1299 return ret_set_errno(false, EEXIST
);
1301 if (!handler
|| !handler
->conf
)
1302 return ret_set_errno(false, EINVAL
);
1304 conf
= handler
->conf
;
1305 cgroup_tree
= conf
->cgroup_meta
.dir
;
1308 monitor_cgroup
= must_concat(&len
, conf
->cgroup_meta
.dir
, "/",
1309 DEFAULT_MONITOR_CGROUP_PREFIX
,
1311 CGROUP_CREATE_RETRY
, NULL
);
1313 monitor_cgroup
= must_concat(&len
, DEFAULT_MONITOR_CGROUP_PREFIX
,
1315 CGROUP_CREATE_RETRY
, NULL
);
1316 if (!monitor_cgroup
)
1317 return ret_set_errno(false, ENOMEM
);
1319 suffix
= monitor_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1323 sprintf(suffix
, "-%d", idx
);
1325 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1326 if (create_cgroup_tree(ops
->hierarchies
[i
], cgroup_tree
, monitor_cgroup
, false))
1329 ERROR("Failed to create cgroup \"%s\"", ops
->hierarchies
[i
]->monitor_full_path
?: "(null)");
1330 for (int j
= 0; j
< i
; j
++)
1331 cgroup_remove_leaf(ops
->hierarchies
[j
], false);
1336 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000);
1339 return ret_set_errno(false, ERANGE
);
1341 ops
->monitor_cgroup
= move_ptr(monitor_cgroup
);
1342 return log_info(true, "The monitor process uses \"%s\" as cgroup", ops
->monitor_cgroup
);
1346 * Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1347 * next cgroup_pattern-1, -2, ..., -999.
1349 __cgfsng_ops
static inline bool cgfsng_payload_create(struct cgroup_ops
*ops
,
1350 struct lxc_handler
*handler
)
1352 __do_free
char *container_cgroup
= NULL
;
1353 const char *cgroup_tree
;
1358 struct lxc_conf
*conf
;
1361 return ret_set_errno(false, ENOENT
);
1363 if (!ops
->hierarchies
)
1366 if (ops
->container_cgroup
)
1367 return ret_set_errno(false, EEXIST
);
1369 if (!handler
|| !handler
->conf
)
1370 return ret_set_errno(false, EINVAL
);
1372 conf
= handler
->conf
;
1373 cgroup_tree
= conf
->cgroup_meta
.dir
;
1376 container_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1377 DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1379 CGROUP_CREATE_RETRY
, NULL
);
1381 container_cgroup
= must_concat(&len
, DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1383 CGROUP_CREATE_RETRY
, NULL
);
1384 if (!container_cgroup
)
1385 return ret_set_errno(false, ENOMEM
);
1387 suffix
= container_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1391 sprintf(suffix
, "-%d", idx
);
1393 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1394 if (create_cgroup_tree(ops
->hierarchies
[i
], cgroup_tree
, container_cgroup
, true))
1397 ERROR("Failed to create cgroup \"%s\"", ops
->hierarchies
[i
]->container_full_path
?: "(null)");
1398 for (int j
= 0; j
< i
; j
++)
1399 cgroup_remove_leaf(ops
->hierarchies
[j
], true);
1404 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000);
1407 return ret_set_errno(false, ERANGE
);
1409 if (ops
->unified
&& ops
->unified
->container_full_path
) {
1412 ret
= open(ops
->unified
->container_full_path
,
1413 O_DIRECTORY
| O_RDONLY
| O_CLOEXEC
);
1415 return log_error_errno(false,
1416 errno
, "Failed to open file descriptor for unified hierarchy");
1417 ops
->unified_fd
= ret
;
1420 ops
->container_cgroup
= move_ptr(container_cgroup
);
1421 INFO("The container process uses \"%s\" as cgroup", ops
->container_cgroup
);
1425 __cgfsng_ops
static bool cgfsng_monitor_enter(struct cgroup_ops
*ops
,
1426 struct lxc_handler
*handler
)
1428 int monitor_len
, transient_len
;
1429 char monitor
[INTTYPE_TO_STRLEN(pid_t
)],
1430 transient
[INTTYPE_TO_STRLEN(pid_t
)];
1433 return ret_set_errno(false, ENOENT
);
1435 if (!ops
->hierarchies
)
1438 if (!ops
->monitor_cgroup
)
1439 return ret_set_errno(false, ENOENT
);
1441 if (!handler
|| !handler
->conf
)
1442 return ret_set_errno(false, EINVAL
);
1444 monitor_len
= snprintf(monitor
, sizeof(monitor
), "%d", handler
->monitor_pid
);
1445 if (handler
->transient_pid
> 0)
1446 transient_len
= snprintf(transient
, sizeof(transient
), "%d",
1447 handler
->transient_pid
);
1449 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1450 __do_free
char *path
= NULL
;
1453 path
= must_make_path(ops
->hierarchies
[i
]->monitor_full_path
,
1454 "cgroup.procs", NULL
);
1455 ret
= lxc_writeat(-1, path
, monitor
, monitor_len
);
1457 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", path
);
1459 if (handler
->transient_pid
< 0)
1462 ret
= lxc_writeat(-1, path
, transient
, transient_len
);
1464 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", path
);
1466 handler
->transient_pid
= -1;
1471 __cgfsng_ops
static bool cgfsng_payload_enter(struct cgroup_ops
*ops
,
1472 struct lxc_handler
*handler
)
1475 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1478 return ret_set_errno(false, ENOENT
);
1480 if (!ops
->hierarchies
)
1483 if (!ops
->container_cgroup
)
1484 return ret_set_errno(false, ENOENT
);
1486 if (!handler
|| !handler
->conf
)
1487 return ret_set_errno(false, EINVAL
);
1489 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", handler
->pid
);
1491 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1492 __do_free
char *path
= NULL
;
1495 path
= must_make_path(ops
->hierarchies
[i
]->container_full_path
,
1496 "cgroup.procs", NULL
);
1497 ret
= lxc_writeat(-1, path
, pidstr
, len
);
1499 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", path
);
/* chown() then chmod() @path, logging each failure. Returns 0 on success,
 * -1 as soon as either call fails.
 */
static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
		   mode_t chmod_mode)
{
	if (chown(path, chown_uid, chown_gid) < 0) {
		SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
		return -1;
	}

	if (chmod(path, chmod_mode) < 0) {
		SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
		return -1;
	}

	return 0;
}
1525 /* chgrp the container cgroups to container group. We leave
1526 * the container owner as cgroup owner. So we must make the
1527 * directories 775 so that the container can create sub-cgroups.
1529 * Also chown the tasks and cgroup.procs files. Those may not
1530 * exist depending on kernel version.
1532 static int chown_cgroup_wrapper(void *data
)
1536 struct generic_userns_exec_data
*arg
= data
;
1537 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
1538 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
1540 ret
= setresgid(nsgid
, nsgid
, nsgid
);
1542 return log_error_errno(-1, errno
,
1543 "Failed to setresgid(%d, %d, %d)",
1544 (int)nsgid
, (int)nsgid
, (int)nsgid
);
1546 ret
= setresuid(nsuid
, nsuid
, nsuid
);
1548 return log_error_errno(-1, errno
,
1549 "Failed to setresuid(%d, %d, %d)",
1550 (int)nsuid
, (int)nsuid
, (int)nsuid
);
1552 ret
= setgroups(0, NULL
);
1553 if (ret
< 0 && errno
!= EPERM
)
1554 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
1556 destuid
= get_ns_uid(arg
->origuid
);
1557 if (destuid
== LXC_INVALID_UID
)
1560 for (int i
= 0; arg
->hierarchies
[i
]; i
++) {
1561 __do_free
char *fullpath
= NULL
;
1562 char *path
= arg
->hierarchies
[i
]->container_full_path
;
1564 ret
= chowmod(path
, destuid
, nsgid
, 0775);
1566 log_info_errno(continue,
1567 errno
, "Failed to change %s to uid %d and gid %d and mode 0755",
1568 path
, destuid
, nsgid
);
1570 /* Failures to chown() these are inconvenient but not
1571 * detrimental. We leave these owned by the container launcher,
1572 * so that container root can write to the files to attach. We
1573 * chmod() them 664 so that container systemd can write to the
1574 * files (which systemd in wily insists on doing).
1577 if (arg
->hierarchies
[i
]->version
== CGROUP_SUPER_MAGIC
) {
1578 fullpath
= must_make_path(path
, "tasks", NULL
);
1579 ret
= chowmod(fullpath
, destuid
, nsgid
, 0664);
1581 SYSINFO("Failed to change %s to uid %d and gid %d and mode 0664",
1582 fullpath
, destuid
, nsgid
);
1585 fullpath
= must_make_path(path
, "cgroup.procs", NULL
);
1586 ret
= chowmod(fullpath
, destuid
, nsgid
, 0664);
1588 SYSINFO("Failed to change %s to uid %d and gid %d and mode 0664",
1589 fullpath
, destuid
, nsgid
);
1591 if (arg
->hierarchies
[i
]->version
!= CGROUP2_SUPER_MAGIC
)
1594 for (char **p
= arg
->hierarchies
[i
]->cgroup2_chown
; p
&& *p
; p
++) {
1595 fullpath
= must_make_path(path
, *p
, NULL
);
1596 ret
= chowmod(fullpath
, destuid
, nsgid
, 0664);
1598 SYSINFO("Failed to change %s to uid %d and gid %d and mode 0664",
1599 fullpath
, destuid
, nsgid
);
1606 __cgfsng_ops
static bool cgfsng_chown(struct cgroup_ops
*ops
,
1607 struct lxc_conf
*conf
)
1609 struct generic_userns_exec_data wrap
;
1612 return ret_set_errno(false, ENOENT
);
1614 if (!ops
->hierarchies
)
1617 if (!ops
->container_cgroup
)
1618 return ret_set_errno(false, ENOENT
);
1621 return ret_set_errno(false, EINVAL
);
1623 if (lxc_list_empty(&conf
->id_map
))
1626 wrap
.origuid
= geteuid();
1628 wrap
.hierarchies
= ops
->hierarchies
;
1631 if (userns_exec_1(conf
, chown_cgroup_wrapper
, &wrap
, "chown_cgroup_wrapper") < 0)
1632 return log_error_errno(false, errno
, "Error requesting cgroup chown in new user namespace");
/*
 * cg_mount_needs_subdirs(): per the original comment below, the
 * LXC_AUTO_CGROUP_FULL_* mount types mount the whole cgroup tree, so no
 * per-container subdirectories need creating; only the lesser mount
 * types do.
 * NOTE(review): the two return statements of this predicate were lost
 * in extraction - confirm against the complete file.
 */
1637 /* cgroup-full:* is done, no need to create subdirs */
1638 static bool cg_mount_needs_subdirs(int type
)
1640 if (type
>= LXC_AUTO_CGROUP_FULL_RO
)
1646 /* After $rootfs/sys/fs/container/controller/the/cg/path has been created,
1647 * remount controller ro if needed and bindmount the cgroupfs onto
1648 * control/the/cg/path.
1650 static int cg_legacy_mount_controllers(int type
, struct hierarchy
*h
,
1651 char *controllerpath
, char *cgpath
,
1652 const char *container_cgroup
)
1654 __do_free
char *sourcepath
= NULL
;
1655 int ret
, remount_flags
;
1656 int flags
= MS_BIND
;
1658 if (type
== LXC_AUTO_CGROUP_RO
|| type
== LXC_AUTO_CGROUP_MIXED
) {
1659 ret
= mount(controllerpath
, controllerpath
, "cgroup", MS_BIND
, NULL
);
1661 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1662 controllerpath
, controllerpath
);
1666 remount_flags
= add_required_remount_flags(controllerpath
,
1668 flags
| MS_REMOUNT
);
1669 ret
= mount(controllerpath
, controllerpath
, "cgroup",
1670 remount_flags
| MS_REMOUNT
| MS_BIND
| MS_RDONLY
,
1673 SYSERROR("Failed to remount \"%s\" ro", controllerpath
);
1677 INFO("Remounted %s read-only", controllerpath
);
1680 sourcepath
= must_make_path(h
->mountpoint
, h
->container_base_path
,
1681 container_cgroup
, NULL
);
1682 if (type
== LXC_AUTO_CGROUP_RO
)
1685 ret
= mount(sourcepath
, cgpath
, "cgroup", flags
, NULL
);
1687 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h
->controllers
[0], cgpath
);
1690 INFO("Mounted \"%s\" onto \"%s\"", h
->controllers
[0], cgpath
);
1692 if (flags
& MS_RDONLY
) {
1693 remount_flags
= add_required_remount_flags(sourcepath
, cgpath
,
1694 flags
| MS_REMOUNT
);
1695 ret
= mount(sourcepath
, cgpath
, "cgroup", remount_flags
, NULL
);
1697 SYSERROR("Failed to remount \"%s\" ro", cgpath
);
1700 INFO("Remounted %s read-only", cgpath
);
1703 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath
);
1707 /* __cg_mount_direct
1709 * Mount cgroup hierarchies directly without using bind-mounts. The main
1710 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1711 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1713 static int __cg_mount_direct(int type
, struct hierarchy
*h
,
1714 const char *controllerpath
)
1716 __do_free
char *controllers
= NULL
;
1717 char *fstype
= "cgroup2";
1718 unsigned long flags
= 0;
1724 flags
|= MS_RELATIME
;
1726 if (type
== LXC_AUTO_CGROUP_RO
|| type
== LXC_AUTO_CGROUP_FULL_RO
)
1729 if (h
->version
!= CGROUP2_SUPER_MAGIC
) {
1730 controllers
= lxc_string_join(",", (const char **)h
->controllers
, false);
1736 ret
= mount("cgroup", controllerpath
, fstype
, flags
, controllers
);
1738 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath
, fstype
);
1742 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath
, fstype
);
/*
 * cg_mount_in_cgroup_namespace(): per the __cg_mount_direct() header
 * comment above, cgroup hierarchies inside a cgroup namespace are
 * mounted directly without bind-mounts; this wrapper simply forwards
 * to __cg_mount_direct().
 */
1746 static inline int cg_mount_in_cgroup_namespace(int type
, struct hierarchy
*h
,
1747 const char *controllerpath
)
1749 return __cg_mount_direct(type
, h
, controllerpath
);
/*
 * cg_mount_cgroup_full(): handle the LXC_AUTO_CGROUP_FULL_RO ..
 * LXC_AUTO_CGROUP_FULL_MIXED range of mount types by mounting the
 * hierarchy directly via __cg_mount_direct(); the range check below
 * filters out all other types.
 * NOTE(review): the return statement for the out-of-range case was
 * lost in extraction - confirm against the complete file.
 */
1752 static inline int cg_mount_cgroup_full(int type
, struct hierarchy
*h
,
1753 const char *controllerpath
)
1755 if (type
< LXC_AUTO_CGROUP_FULL_RO
|| type
> LXC_AUTO_CGROUP_FULL_MIXED
)
1758 return __cg_mount_direct(type
, h
, controllerpath
);
1761 __cgfsng_ops
static bool cgfsng_mount(struct cgroup_ops
*ops
,
1762 struct lxc_handler
*handler
,
1763 const char *root
, int type
)
1765 __do_free
char *cgroup_root
= NULL
;
1767 bool has_cgns
= false, retval
= false, wants_force_mount
= false;
1770 return ret_set_errno(false, ENOENT
);
1772 if (!ops
->hierarchies
)
1775 if (!handler
|| !handler
->conf
)
1776 return ret_set_errno(false, EINVAL
);
1778 if ((type
& LXC_AUTO_CGROUP_MASK
) == 0)
1781 if (type
& LXC_AUTO_CGROUP_FORCE
) {
1782 type
&= ~LXC_AUTO_CGROUP_FORCE
;
1783 wants_force_mount
= true;
1786 if (!wants_force_mount
){
1787 if (!lxc_list_empty(&handler
->conf
->keepcaps
))
1788 wants_force_mount
= !in_caplist(CAP_SYS_ADMIN
, &handler
->conf
->keepcaps
);
1790 wants_force_mount
= in_caplist(CAP_SYS_ADMIN
, &handler
->conf
->caps
);
1793 has_cgns
= cgns_supported();
1794 if (has_cgns
&& !wants_force_mount
)
1797 if (type
== LXC_AUTO_CGROUP_NOSPEC
)
1798 type
= LXC_AUTO_CGROUP_MIXED
;
1799 else if (type
== LXC_AUTO_CGROUP_FULL_NOSPEC
)
1800 type
= LXC_AUTO_CGROUP_FULL_MIXED
;
1802 cgroup_root
= must_make_path(root
, DEFAULT_CGROUP_MOUNTPOINT
, NULL
);
1803 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
) {
1804 if (has_cgns
&& wants_force_mount
) {
1805 /* If cgroup namespaces are supported but the container
1806 * will not have CAP_SYS_ADMIN after it has started we
1807 * need to mount the cgroups manually.
1809 return cg_mount_in_cgroup_namespace(type
, ops
->unified
,
1813 return cg_mount_cgroup_full(type
, ops
->unified
, cgroup_root
) == 0;
1817 ret
= safe_mount(NULL
, cgroup_root
, "tmpfs",
1818 MS_NOSUID
| MS_NODEV
| MS_NOEXEC
| MS_RELATIME
,
1819 "size=10240k,mode=755", root
);
1823 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1824 __do_free
char *controllerpath
= NULL
, *path2
= NULL
;
1825 struct hierarchy
*h
= ops
->hierarchies
[i
];
1826 char *controller
= strrchr(h
->mountpoint
, '/');
1832 controllerpath
= must_make_path(cgroup_root
, controller
, NULL
);
1833 if (dir_exists(controllerpath
))
1836 ret
= mkdir(controllerpath
, 0755);
1838 log_error_errno(goto on_error
, errno
,
1839 "Error creating cgroup path: %s",
1842 if (has_cgns
&& wants_force_mount
) {
1843 /* If cgroup namespaces are supported but the container
1844 * will not have CAP_SYS_ADMIN after it has started we
1845 * need to mount the cgroups manually.
1847 ret
= cg_mount_in_cgroup_namespace(type
, h
, controllerpath
);
1854 ret
= cg_mount_cgroup_full(type
, h
, controllerpath
);
1858 if (!cg_mount_needs_subdirs(type
))
1861 path2
= must_make_path(controllerpath
, h
->container_base_path
,
1862 ops
->container_cgroup
, NULL
);
1863 ret
= mkdir_p(path2
, 0755);
1867 ret
= cg_legacy_mount_controllers(type
, h
, controllerpath
,
1868 path2
, ops
->container_cgroup
);
1878 static int recursive_count_nrtasks(char *dirname
)
1880 __do_free
char *path
= NULL
;
1881 __do_closedir
DIR *dir
= NULL
;
1882 struct dirent
*direntp
;
1885 dir
= opendir(dirname
);
1889 while ((direntp
= readdir(dir
))) {
1892 if (!strcmp(direntp
->d_name
, ".") ||
1893 !strcmp(direntp
->d_name
, ".."))
1896 path
= must_make_path(dirname
, direntp
->d_name
, NULL
);
1898 if (lstat(path
, &mystat
))
1901 if (!S_ISDIR(mystat
.st_mode
))
1904 count
+= recursive_count_nrtasks(path
);
1907 path
= must_make_path(dirname
, "cgroup.procs", NULL
);
1908 ret
= lxc_count_file_lines(path
);
/*
 * cgfsng_nrtasks(): count the tasks in the container's cgroup by
 * handing the first hierarchy's container path to
 * recursive_count_nrtasks(), which sums cgroup.procs line counts.
 * Returns -1 with errno set (ENOENT/EINVAL) when the cgroup state is
 * not set up.
 * NOTE(review): the "if (!ops)" guard preceding the ENOENT return was
 * lost in extraction - confirm against the complete file.
 */
1915 __cgfsng_ops
static int cgfsng_nrtasks(struct cgroup_ops
*ops
)
1917 __do_free
char *path
= NULL
;
1920 return ret_set_errno(-1, ENOENT
);
/* Without a recorded container cgroup or hierarchies there is nothing to count. */
1922 if (!ops
->container_cgroup
|| !ops
->hierarchies
)
1923 return ret_set_errno(-1, EINVAL
);
1925 path
= must_make_path(ops
->hierarchies
[0]->container_full_path
, NULL
);
1926 return recursive_count_nrtasks(path
);
1929 /* Only root needs to escape to the cgroup of its init. */
1930 __cgfsng_ops
static bool cgfsng_escape(const struct cgroup_ops
*ops
,
1931 struct lxc_conf
*conf
)
1934 return ret_set_errno(false, ENOENT
);
1936 if (!ops
->hierarchies
)
1940 return ret_set_errno(false, EINVAL
);
1942 if (conf
->cgroup_meta
.relative
|| geteuid())
1945 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1946 __do_free
char *fullpath
= NULL
;
1950 must_make_path(ops
->hierarchies
[i
]->mountpoint
,
1951 ops
->hierarchies
[i
]->container_base_path
,
1952 "cgroup.procs", NULL
);
1953 ret
= lxc_write_to_file(fullpath
, "0", 2, false, 0666);
1955 return log_error_errno(false,
1956 errno
, "Failed to escape to cgroup \"%s\"",
1963 __cgfsng_ops
static int cgfsng_num_hierarchies(struct cgroup_ops
*ops
)
1968 return ret_set_errno(-1, ENOENT
);
1970 if (!ops
->hierarchies
)
1973 for (; ops
->hierarchies
[i
]; i
++)
1979 __cgfsng_ops
static bool cgfsng_get_hierarchies(struct cgroup_ops
*ops
, int n
,
1985 return ret_set_errno(false, ENOENT
);
1987 if (!ops
->hierarchies
)
1990 /* sanity check n */
1991 for (i
= 0; i
< n
; i
++)
1992 if (!ops
->hierarchies
[i
])
1993 return ret_set_errno(false, ENOENT
);
1995 *out
= ops
->hierarchies
[i
]->controllers
;
2000 static bool cg_legacy_freeze(struct cgroup_ops
*ops
)
2002 struct hierarchy
*h
;
2004 h
= get_hierarchy(ops
, "freezer");
2006 return ret_set_errno(-1, ENOENT
);
2008 return lxc_write_openat(h
->container_full_path
, "freezer.state",
2009 "FROZEN", STRLITERALLEN("FROZEN"));
2012 static int freezer_cgroup_events_cb(int fd
, uint32_t events
, void *cbdata
,
2013 struct lxc_epoll_descr
*descr
)
2015 __do_close_prot_errno
int duped_fd
= -EBADF
;
2016 __do_free
char *line
= NULL
;
2017 __do_fclose
FILE *f
= NULL
;
2018 int state
= PTR_TO_INT(cbdata
);
2020 const char *state_string
;
2024 return LXC_MAINLOOP_ERROR
;
2026 if (lseek(duped_fd
, 0, SEEK_SET
) < (off_t
)-1)
2027 return LXC_MAINLOOP_ERROR
;
2029 f
= fdopen(duped_fd
, "re");
2031 return LXC_MAINLOOP_ERROR
;
2035 state_string
= "frozen 1";
2037 state_string
= "frozen 0";
2039 while (getline(&line
, &len
, f
) != -1)
2040 if (strncmp(line
, state_string
, STRLITERALLEN("frozen") + 2) == 0)
2041 return LXC_MAINLOOP_CLOSE
;
2043 return LXC_MAINLOOP_CONTINUE
;
2046 static int cg_unified_freeze(struct cgroup_ops
*ops
, int timeout
)
2048 __do_close_prot_errno
int fd
= -EBADF
;
2049 __do_lxc_mainloop_close
struct lxc_epoll_descr
*descr_ptr
= NULL
;
2051 struct lxc_epoll_descr descr
;
2052 struct hierarchy
*h
;
2056 return ret_set_errno(-1, ENOENT
);
2058 if (!h
->container_full_path
)
2059 return ret_set_errno(-1, EEXIST
);
2062 __do_free
char *events_file
= NULL
;
2064 events_file
= must_make_path(h
->container_full_path
, "cgroup.events", NULL
);
2065 fd
= open(events_file
, O_RDONLY
| O_CLOEXEC
);
2067 return log_error_errno(-1, errno
, "Failed to open cgroup.events file");
2069 ret
= lxc_mainloop_open(&descr
);
2071 return log_error_errno(-1, errno
, "Failed to create epoll instance to wait for container freeze");
2073 /* automatically cleaned up now */
2076 ret
= lxc_mainloop_add_handler(&descr
, fd
, freezer_cgroup_events_cb
, INT_TO_PTR((int){1}));
2078 return log_error_errno(-1, errno
, "Failed to add cgroup.events fd handler to mainloop");
2081 ret
= lxc_write_openat(h
->container_full_path
, "cgroup.freeze", "1", 1);
2083 return log_error_errno(-1, errno
, "Failed to open cgroup.freeze file");
2085 if (timeout
!= 0 && lxc_mainloop(&descr
, timeout
))
2086 return log_error_errno(-1, errno
, "Failed to wait for container to be frozen");
/*
 * cgfsng_freeze(): public freeze entry point. Dispatches to the
 * cgroup v1 freezer path (cg_legacy_freeze(), which takes no timeout)
 * or, on a pure unified layout, to cg_unified_freeze() which can wait
 * up to @timeout for the freeze to take effect.
 */
2091 __cgfsng_ops
static int cgfsng_freeze(struct cgroup_ops
*ops
, int timeout
)
/* No hierarchies at all means there is nothing we could freeze. */
2093 if (!ops
->hierarchies
)
2094 return ret_set_errno(-1, ENOENT
);
/* Anything other than the pure unified layout goes through v1. */
2096 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
2097 return cg_legacy_freeze(ops
);
2099 return cg_unified_freeze(ops
, timeout
);
/*
 * cg_legacy_unfreeze(): thaw a container on a legacy (cgroup v1)
 * layout by writing "THAWED" to the freezer controller's freezer.state
 * file. Returns -1 with errno ENOENT when no freezer hierarchy exists,
 * otherwise the result of lxc_write_openat().
 * NOTE(review): the guard between get_hierarchy() and the ENOENT
 * return (presumably "if (!h)") was lost in extraction - confirm
 * against the complete file.
 */
2102 static int cg_legacy_unfreeze(struct cgroup_ops
*ops
)
2104 struct hierarchy
*h
;
/* Locate the mounted freezer hierarchy for this container. */
2106 h
= get_hierarchy(ops
, "freezer");
2108 return ret_set_errno(-1, ENOENT
);
2110 return lxc_write_openat(h
->container_full_path
, "freezer.state",
2111 "THAWED", STRLITERALLEN("THAWED"));
2114 static int cg_unified_unfreeze(struct cgroup_ops
*ops
, int timeout
)
2116 __do_close_prot_errno
int fd
= -EBADF
;
2117 __do_lxc_mainloop_close
struct lxc_epoll_descr
*descr_ptr
= NULL
;
2119 struct lxc_epoll_descr descr
;
2120 struct hierarchy
*h
;
2124 return ret_set_errno(-1, ENOENT
);
2126 if (!h
->container_full_path
)
2127 return ret_set_errno(-1, EEXIST
);
2130 __do_free
char *events_file
= NULL
;
2132 events_file
= must_make_path(h
->container_full_path
, "cgroup.events", NULL
);
2133 fd
= open(events_file
, O_RDONLY
| O_CLOEXEC
);
2135 return log_error_errno(-1, errno
, "Failed to open cgroup.events file");
2137 ret
= lxc_mainloop_open(&descr
);
2139 return log_error_errno(-1, errno
, "Failed to create epoll instance to wait for container unfreeze");
2141 /* automatically cleaned up now */
2144 ret
= lxc_mainloop_add_handler(&descr
, fd
, freezer_cgroup_events_cb
, INT_TO_PTR((int){0}));
2146 return log_error_errno(-1, errno
, "Failed to add cgroup.events fd handler to mainloop");
2149 ret
= lxc_write_openat(h
->container_full_path
, "cgroup.freeze", "0", 1);
2151 return log_error_errno(-1, errno
, "Failed to open cgroup.freeze file");
2153 if (timeout
!= 0 && lxc_mainloop(&descr
, timeout
))
2154 return log_error_errno(-1, errno
, "Failed to wait for container to be unfrozen");
/*
 * cgfsng_unfreeze(): public unfreeze entry point, mirroring
 * cgfsng_freeze(). Dispatches to the cgroup v1 path
 * (cg_legacy_unfreeze(), no timeout) or, on a pure unified layout, to
 * cg_unified_unfreeze() which can wait up to @timeout.
 */
2159 __cgfsng_ops
static int cgfsng_unfreeze(struct cgroup_ops
*ops
, int timeout
)
/* No hierarchies at all means there is nothing we could thaw. */
2161 if (!ops
->hierarchies
)
2162 return ret_set_errno(-1, ENOENT
);
/* Anything other than the pure unified layout goes through v1. */
2164 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
2165 return cg_legacy_unfreeze(ops
);
2167 return cg_unified_unfreeze(ops
, timeout
);
2170 __cgfsng_ops
static const char *cgfsng_get_cgroup(struct cgroup_ops
*ops
,
2171 const char *controller
)
2173 struct hierarchy
*h
;
2175 h
= get_hierarchy(ops
, controller
);
2177 return log_warn_errno(NULL
,
2178 ENOENT
, "Failed to find hierarchy for controller \"%s\"",
2179 controller
? controller
: "(null)");
2181 return h
->container_full_path
2182 ? h
->container_full_path
+ strlen(h
->mountpoint
)
2186 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2187 * which must be freed by the caller.
/*
 * Joins the hierarchy's mountpoint, the monitor-relative cgroup path
 * and @filename into one heap-allocated path. must_make_path() follows
 * this file's must_*() do-not-fail convention, so the result is
 * presumably never NULL - confirm against its definition.
 * NOTE(review): the "const char *inpath" parameter line was lost in
 * extraction; it is referenced in the must_make_path() call below.
 */
2189 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy
*h
,
2191 const char *filename
)
2193 return must_make_path(h
->mountpoint
, inpath
, filename
, NULL
);
2196 static int cgroup_attach_leaf(int unified_fd
, int64_t pid
)
2200 char pidstr
[INTTYPE_TO_STRLEN(int64_t) + 1];
2201 char attach_cgroup
[STRLITERALLEN("lxc-1000/cgroup.procs") + 1];
2204 /* Create leaf cgroup. */
2205 ret
= mkdirat(unified_fd
, "lxc", 0755);
2206 if (ret
< 0 && errno
!= EEXIST
)
2207 return log_error_errno(-1, errno
, "Failed to create leaf cgroup \"lxc\"");
2209 pidstr_len
= sprintf(pidstr
, INT64_FMT
, pid
);
2210 ret
= lxc_writeat(unified_fd
, "lxc/cgroup.procs", pidstr
, pidstr_len
);
2212 ret
= lxc_writeat(unified_fd
, "cgroup.procs", pidstr
, pidstr_len
);
2216 /* this is a non-leaf node */
2218 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2223 sprintf(attach_cgroup
, "lxc-%d/cgroup.procs", idx
);
2224 slash
= &attach_cgroup
[ret
] - STRLITERALLEN("/cgroup.procs");
2227 ret
= mkdirat(unified_fd
, attach_cgroup
, 0755);
2228 if (ret
< 0 && errno
!= EEXIST
)
2229 return log_error_errno(-1, errno
, "Failed to create cgroup %s", attach_cgroup
);
2233 ret
= lxc_writeat(unified_fd
, attach_cgroup
, pidstr
, pidstr_len
);
2237 /* this is a non-leaf node */
2239 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2242 } while (idx
< 1000);
2244 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2247 int cgroup_attach(const char *name
, const char *lxcpath
, int64_t pid
)
2249 __do_close_prot_errno
int unified_fd
= -EBADF
;
2251 unified_fd
= lxc_cmd_get_cgroup2_fd(name
, lxcpath
);
2255 return cgroup_attach_leaf(unified_fd
, pid
);
2258 /* Technically, we're always at a delegation boundary here (This is especially
2259 * true when cgroup namespaces are available.). The reasoning is that in order
2260 * for us to have been able to start a container in the first place the root
2261 * cgroup must have been a leaf node. Now, either the container's init system
2262 * has populated the cgroup and kept it as a leaf node or it has created
2263 * subtrees. In the former case we will simply attach to the leaf node we
2264 * created when we started the container in the latter case we create our own
2265 * cgroup for the attaching process.
2267 static int __cg_unified_attach(const struct hierarchy
*h
, const char *name
,
2268 const char *lxcpath
, pid_t pid
,
2269 const char *controller
)
2271 __do_close_prot_errno
int unified_fd
= -EBADF
;
2274 ret
= cgroup_attach(name
, lxcpath
, pid
);
2276 __do_free
char *path
= NULL
, *cgroup
= NULL
;
2278 cgroup
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2283 path
= must_make_path(h
->mountpoint
, cgroup
, NULL
);
2284 unified_fd
= open(path
, O_DIRECTORY
| O_RDONLY
| O_CLOEXEC
);
2289 return cgroup_attach_leaf(unified_fd
, pid
);
2292 __cgfsng_ops
static bool cgfsng_attach(struct cgroup_ops
*ops
, const char *name
,
2293 const char *lxcpath
, pid_t pid
)
2296 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
2299 return ret_set_errno(false, ENOENT
);
2301 if (!ops
->hierarchies
)
2304 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", pid
);
2305 if (len
< 0 || (size_t)len
>= sizeof(pidstr
))
2308 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
2309 __do_free
char *fullpath
= NULL
, *path
= NULL
;
2310 struct hierarchy
*h
= ops
->hierarchies
[i
];
2312 if (h
->version
== CGROUP2_SUPER_MAGIC
) {
2313 ret
= __cg_unified_attach(h
, name
, lxcpath
, pid
,
2321 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, h
->controllers
[0]);
2326 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, "cgroup.procs");
2327 ret
= lxc_write_to_file(fullpath
, pidstr
, len
, false, 0666);
2329 return log_error_errno(false, errno
,
2330 "Failed to attach %d to %s",
2331 (int)pid
, fullpath
);
2337 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2338 * don't have a cgroup_data set up, so we ask the running container through the
2339 * commands API for the cgroup path.
2341 __cgfsng_ops
static int cgfsng_get(struct cgroup_ops
*ops
, const char *filename
,
2342 char *value
, size_t len
, const char *name
,
2343 const char *lxcpath
)
2345 __do_free
char *path
= NULL
;
2346 __do_free
char *controller
= NULL
;
2348 struct hierarchy
*h
;
2352 return ret_set_errno(-1, ENOENT
);
2354 controller
= must_copy_string(filename
);
2355 p
= strchr(controller
, '.');
2359 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2364 h
= get_hierarchy(ops
, controller
);
2366 __do_free
char *fullpath
= NULL
;
2368 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, filename
);
2369 ret
= lxc_read_from_file(fullpath
, value
, len
);
2375 static int device_cgroup_parse_access(struct device_item
*device
, const char *val
)
2377 for (int count
= 0; count
< 3; count
++, val
++) {
2380 device
->access
[count
] = *val
;
2383 device
->access
[count
] = *val
;
2386 device
->access
[count
] = *val
;
2393 return ret_errno(EINVAL
);
2400 static int device_cgroup_rule_parse(struct device_item
*device
, const char *key
,
2406 if (strcmp("devices.allow", key
) == 0)
2411 if (strcmp(val
, "a") == 0) {
2416 device
->global_rule
= device
->allow
2417 ? LXC_BPF_DEVICE_CGROUP_BLACKLIST
2418 : LXC_BPF_DEVICE_CGROUP_WHITELIST
;
2422 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
;
2431 device
->type
= *val
;
2444 } else if (isdigit(*val
)) {
2445 memset(temp
, 0, sizeof(temp
));
2446 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2452 ret
= lxc_safe_int(temp
, &device
->major
);
2466 } else if (isdigit(*val
)) {
2467 memset(temp
, 0, sizeof(temp
));
2468 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2474 ret
= lxc_safe_int(temp
, &device
->minor
);
2483 return device_cgroup_parse_access(device
, ++val
);
2486 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2487 * don't have a cgroup_data set up, so we ask the running container through the
2488 * commands API for the cgroup path.
2490 __cgfsng_ops
static int cgfsng_set(struct cgroup_ops
*ops
,
2491 const char *key
, const char *value
,
2492 const char *name
, const char *lxcpath
)
2494 __do_free
char *path
= NULL
;
2495 __do_free
char *controller
= NULL
;
2497 struct hierarchy
*h
;
2501 return ret_set_errno(-1, ENOENT
);
2503 controller
= must_copy_string(key
);
2504 p
= strchr(controller
, '.');
2508 if (pure_unified_layout(ops
) && strcmp(controller
, "devices") == 0) {
2509 struct device_item device
= {0};
2511 ret
= device_cgroup_rule_parse(&device
, key
, value
);
2513 return log_error_errno(-1, EINVAL
, "Failed to parse device string %s=%s",
2516 ret
= lxc_cmd_add_bpf_device_cgroup(name
, lxcpath
, &device
);
2523 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2528 h
= get_hierarchy(ops
, controller
);
2530 __do_free
char *fullpath
= NULL
;
2532 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, key
);
2533 ret
= lxc_write_to_file(fullpath
, value
, strlen(value
), false, 0666);
2539 /* take devices cgroup line
2541 * and convert it to a valid
2542 * type major:minor mode
2543 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2546 static int device_cgroup_rule_parse_devpath(struct device_item
*device
,
2547 const char *devpath
)
2549 __do_free
char *path
= NULL
;
2555 path
= must_copy_string(devpath
);
2558 * Read path followed by mode. Ignore any trailing text.
2559 * A ' # comment' would be legal. Technically other text is not
2560 * legal, we could check for that if we cared to.
2562 for (n_parts
= 1, p
= path
; *p
; p
++) {
2578 return ret_set_errno(-1, EINVAL
);
2581 if (device_cgroup_parse_access(device
, mode
) < 0)
2585 return ret_set_errno(-1, EINVAL
);
2587 ret
= stat(path
, &sb
);
2589 return ret_set_errno(-1, errno
);
2591 mode_t m
= sb
.st_mode
& S_IFMT
;
2600 return log_error_errno(-1, EINVAL
,
2601 "Unsupported device type %i for \"%s\"",
2605 device
->major
= MAJOR(sb
.st_rdev
);
2606 device
->minor
= MINOR(sb
.st_rdev
);
2608 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
;
2613 static int convert_devpath(const char *invalue
, char *dest
)
2615 struct device_item device
= {0};
2618 ret
= device_cgroup_rule_parse_devpath(&device
, invalue
);
2622 ret
= snprintf(dest
, 50, "%c %d:%d %s", device
.type
, device
.major
,
2623 device
.minor
, device
.access
);
2624 if (ret
< 0 || ret
>= 50)
2625 return log_error_errno(-1,
2626 ENAMETOOLONG
, "Error on configuration value \"%c %d:%d %s\" (max 50 chars)",
2627 device
.type
, device
.major
, device
.minor
,
2633 /* Called from setup_limits - here we have the container's cgroup_data because
2634 * we created the cgroups.
2636 static int cg_legacy_set_data(struct cgroup_ops
*ops
, const char *filename
,
2639 __do_free
char *controller
= NULL
;
2641 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2642 char converted_value
[50];
2643 struct hierarchy
*h
;
2645 controller
= must_copy_string(filename
);
2646 p
= strchr(controller
, '.');
2650 if (strcmp("devices.allow", filename
) == 0 && value
[0] == '/') {
2653 ret
= convert_devpath(value
, converted_value
);
2656 value
= converted_value
;
2659 h
= get_hierarchy(ops
, controller
);
2661 ERROR("Failed to setup limits for the \"%s\" controller. "
2662 "The controller seems to be unused by \"cgfsng\" cgroup "
2663 "driver or not enabled on the cgroup hierarchy",
2669 return lxc_write_openat(h
->container_full_path
, filename
, value
, strlen(value
));
2672 __cgfsng_ops
static bool cgfsng_setup_limits_legacy(struct cgroup_ops
*ops
,
2673 struct lxc_conf
*conf
,
2676 __do_free
struct lxc_list
*sorted_cgroup_settings
= NULL
;
2677 struct lxc_list
*cgroup_settings
= &conf
->cgroup
;
2678 struct lxc_list
*iterator
, *next
;
2679 struct lxc_cgroup
*cg
;
2683 return ret_set_errno(false, ENOENT
);
2686 return ret_set_errno(false, EINVAL
);
2688 cgroup_settings
= &conf
->cgroup
;
2689 if (lxc_list_empty(cgroup_settings
))
2692 if (!ops
->hierarchies
)
2693 return ret_set_errno(false, EINVAL
);
2695 sorted_cgroup_settings
= sort_cgroup_settings(cgroup_settings
);
2696 if (!sorted_cgroup_settings
)
2699 lxc_list_for_each(iterator
, sorted_cgroup_settings
) {
2700 cg
= iterator
->elem
;
2702 if (do_devices
== !strncmp("devices", cg
->subsystem
, 7)) {
2703 if (cg_legacy_set_data(ops
, cg
->subsystem
, cg
->value
)) {
2704 if (do_devices
&& (errno
== EACCES
|| errno
== EPERM
))
2705 log_warn_errno(continue,
2706 errno
, "Failed to set \"%s\" to \"%s\"",
2707 cg
->subsystem
, cg
->value
);
2708 log_warn_errno(goto out
, errno
,
2709 "Failed to set \"%s\" to \"%s\"",
2710 cg
->subsystem
, cg
->value
);
2712 DEBUG("Set controller \"%s\" set to \"%s\"",
2713 cg
->subsystem
, cg
->value
);
2718 INFO("Limits for the legacy cgroup hierarchies have been setup");
2720 lxc_list_for_each_safe(iterator
, sorted_cgroup_settings
, next
) {
2721 lxc_list_del(iterator
);
2729 * Some of the parsing logic comes from the original cgroup device v1
2730 * implementation in the kernel.
2732 static int bpf_device_cgroup_prepare(struct cgroup_ops
*ops
,
2733 struct lxc_conf
*conf
, const char *key
,
2736 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
2737 struct device_item device_item
= {0};
2740 if (strcmp("devices.allow", key
) == 0 && *val
== '/')
2741 ret
= device_cgroup_rule_parse_devpath(&device_item
, val
);
2743 ret
= device_cgroup_rule_parse(&device_item
, key
, val
);
2745 return log_error_errno(-1, EINVAL
,
2746 "Failed to parse device string %s=%s",
2749 ret
= bpf_list_add_device(conf
, &device_item
);
2756 __cgfsng_ops
static bool cgfsng_setup_limits(struct cgroup_ops
*ops
,
2757 struct lxc_handler
*handler
)
2759 struct lxc_list
*cgroup_settings
, *iterator
;
2760 struct hierarchy
*h
;
2761 struct lxc_conf
*conf
;
2764 return ret_set_errno(false, ENOENT
);
2766 if (!ops
->hierarchies
)
2769 if (!ops
->container_cgroup
)
2770 return ret_set_errno(false, EINVAL
);
2772 if (!handler
|| !handler
->conf
)
2773 return ret_set_errno(false, EINVAL
);
2774 conf
= handler
->conf
;
2776 if (lxc_list_empty(&conf
->cgroup2
))
2778 cgroup_settings
= &conf
->cgroup2
;
2784 lxc_list_for_each (iterator
, cgroup_settings
) {
2785 struct lxc_cgroup
*cg
= iterator
->elem
;
2788 if (strncmp("devices", cg
->subsystem
, 7) == 0) {
2789 ret
= bpf_device_cgroup_prepare(ops
, conf
, cg
->subsystem
,
2792 ret
= lxc_write_openat(h
->container_full_path
,
2793 cg
->subsystem
, cg
->value
,
2796 return log_error_errno(false,
2797 errno
, "Failed to set \"%s\" to \"%s\"",
2798 cg
->subsystem
, cg
->value
);
2800 TRACE("Set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2803 return log_info(true, "Limits for the unified cgroup hierarchy have been setup");
2806 __cgfsng_ops
bool cgfsng_devices_activate(struct cgroup_ops
*ops
,
2807 struct lxc_handler
*handler
)
2809 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
2810 __do_bpf_program_free
struct bpf_program
*devices
= NULL
;
2812 struct lxc_conf
*conf
;
2813 struct hierarchy
*unified
;
2814 struct lxc_list
*it
;
2815 struct bpf_program
*devices_old
;
2818 return ret_set_errno(false, ENOENT
);
2820 if (!ops
->hierarchies
)
2823 if (!ops
->container_cgroup
)
2824 return ret_set_errno(false, EEXIST
);
2826 if (!handler
|| !handler
->conf
)
2827 return ret_set_errno(false, EINVAL
);
2828 conf
= handler
->conf
;
2830 unified
= ops
->unified
;
2831 if (!unified
|| !unified
->bpf_device_controller
||
2832 !unified
->container_full_path
|| lxc_list_empty(&conf
->devices
))
2835 devices
= bpf_program_new(BPF_PROG_TYPE_CGROUP_DEVICE
);
2837 return log_error_errno(false, ENOMEM
,
2838 "Failed to create new bpf program");
2840 ret
= bpf_program_init(devices
);
2842 return log_error_errno(false, ENOMEM
,
2843 "Failed to initialize bpf program");
2845 lxc_list_for_each(it
, &conf
->devices
) {
2846 struct device_item
*cur
= it
->elem
;
2848 ret
= bpf_program_append_device(devices
, cur
);
2850 return log_error_errno(false,
2851 ENOMEM
, "Failed to add new rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
2852 cur
->type
, cur
->major
,
2853 cur
->minor
, cur
->access
,
2854 cur
->allow
, cur
->global_rule
);
2855 TRACE("Added rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
2856 cur
->type
, cur
->major
, cur
->minor
, cur
->access
,
2857 cur
->allow
, cur
->global_rule
);
2860 ret
= bpf_program_finalize(devices
);
2862 return log_error_errno(false, ENOMEM
,
2863 "Failed to finalize bpf program");
2865 ret
= bpf_program_cgroup_attach(devices
, BPF_CGROUP_DEVICE
,
2866 unified
->container_full_path
,
2869 return log_error_errno(false, ENOMEM
,
2870 "Failed to attach bpf program");
2872 /* Replace old bpf program. */
2873 devices_old
= move_ptr(conf
->cgroup2_devices
);
2874 conf
->cgroup2_devices
= move_ptr(devices
);
2875 devices
= move_ptr(devices_old
);
2880 bool __cgfsng_delegate_controllers(struct cgroup_ops
*ops
, const char *cgroup
)
2882 __do_free
char *add_controllers
= NULL
, *base_path
= NULL
;
2883 struct hierarchy
*unified
= ops
->unified
;
2886 size_t full_len
= 0;
2887 char **parts
= NULL
;
2890 if (!ops
->hierarchies
|| !pure_unified_layout(ops
) ||
2891 !unified
->controllers
[0])
2894 /* For now we simply enable all controllers that we have detected by
2895 * creating a string like "+memory +pids +cpu +io".
2896 * TODO: In the near future we might want to support "-<controller>"
2897 * etc. but whether supporting semantics like this make sense will need
2900 for (it
= unified
->controllers
; it
&& *it
; it
++) {
2901 full_len
+= strlen(*it
) + 2;
2902 add_controllers
= must_realloc(add_controllers
, full_len
+ 1);
2904 if (unified
->controllers
[0] == *it
)
2905 add_controllers
[0] = '\0';
2907 (void)strlcat(add_controllers
, "+", full_len
+ 1);
2908 (void)strlcat(add_controllers
, *it
, full_len
+ 1);
2910 if ((it
+ 1) && *(it
+ 1))
2911 (void)strlcat(add_controllers
, " ", full_len
+ 1);
2914 parts
= lxc_string_split(cgroup
, '/');
2918 parts_len
= lxc_array_len((void **)parts
);
2922 base_path
= must_make_path(unified
->mountpoint
, unified
->container_base_path
, NULL
);
2923 for (ssize_t i
= -1; i
< parts_len
; i
++) {
2925 __do_free
char *target
= NULL
;
2928 base_path
= must_append_path(base_path
, parts
[i
], NULL
);
2929 target
= must_make_path(base_path
, "cgroup.subtree_control", NULL
);
2930 ret
= lxc_writeat(-1, target
, add_controllers
, full_len
);
2932 log_error_errno(goto on_error
,
2933 errno
, "Could not enable \"%s\" controllers in the unified cgroup \"%s\"",
2934 add_controllers
, target
);
2935 TRACE("Enable \"%s\" controllers in the unified cgroup \"%s\"", add_controllers
, target
);
2941 lxc_free_array((void **)parts
, free
);
/*
 * cgfsng_monitor_delegate_controllers(): enable the detected
 * controllers for the monitor cgroup by delegating to
 * __cgfsng_delegate_controllers() with ops->monitor_cgroup.
 * NOTE(review): the guard condition preceding the ENOENT return
 * (presumably "if (!ops)") was lost in extraction - confirm.
 */
2945 __cgfsng_ops
bool cgfsng_monitor_delegate_controllers(struct cgroup_ops
*ops
)
2948 return ret_set_errno(false, ENOENT
);
2950 return __cgfsng_delegate_controllers(ops
, ops
->monitor_cgroup
);
2953 __cgfsng_ops
bool cgfsng_payload_delegate_controllers(struct cgroup_ops
*ops
)
2956 return ret_set_errno(false, ENOENT
);
2958 return __cgfsng_delegate_controllers(ops
, ops
->container_cgroup
);
2961 static bool cgroup_use_wants_controllers(const struct cgroup_ops
*ops
,
2964 if (!ops
->cgroup_use
)
2967 for (char **cur_ctrl
= controllers
; cur_ctrl
&& *cur_ctrl
; cur_ctrl
++) {
2970 for (char **cur_use
= ops
->cgroup_use
; cur_use
&& *cur_use
; cur_use
++) {
2971 if (strcmp(*cur_use
, *cur_ctrl
) != 0)
2987 static void cg_unified_delegate(char ***delegate
)
2989 __do_free
char *buf
= NULL
;
2990 char *standard
[] = {"cgroup.subtree_control", "cgroup.threads", NULL
};
2994 buf
= read_file("/sys/kernel/cgroup/delegate");
2996 for (char **p
= standard
; p
&& *p
; p
++) {
2997 idx
= append_null_to_list((void ***)delegate
);
2998 (*delegate
)[idx
] = must_copy_string(*p
);
3000 log_warn_errno(return, errno
, "Failed to read /sys/kernel/cgroup/delegate");
3003 lxc_iterate_parts (token
, buf
, " \t\n") {
3005 * We always need to chown this for both cgroup and
3008 if (strcmp(token
, "cgroup.procs") == 0)
3011 idx
= append_null_to_list((void ***)delegate
);
3012 (*delegate
)[idx
] = must_copy_string(token
);
3016 /* At startup, parse_hierarchies finds all the info we need about cgroup
3017 * mountpoints and current cgroups, and stores it in @d.
3019 static int cg_hybrid_init(struct cgroup_ops
*ops
, bool relative
, bool unprivileged
)
3021 __do_free
char *basecginfo
= NULL
;
3022 __do_free
char *line
= NULL
;
3023 __do_fclose
FILE *f
= NULL
;
3026 char **klist
= NULL
, **nlist
= NULL
;
3028 /* Root spawned containers escape the current cgroup, so use init's
3029 * cgroups as our base in that case.
3031 if (!relative
&& (geteuid() == 0))
3032 basecginfo
= read_file("/proc/1/cgroup");
3034 basecginfo
= read_file("/proc/self/cgroup");
3036 return ret_set_errno(-1, ENOMEM
);
3038 ret
= get_existing_subsystems(&klist
, &nlist
);
3040 return log_error_errno(-1, errno
, "Failed to retrieve available legacy cgroup controllers");
3042 f
= fopen("/proc/self/mountinfo", "r");
3044 return log_error_errno(-1, errno
, "Failed to open \"/proc/self/mountinfo\"");
3046 lxc_cgfsng_print_basecg_debuginfo(basecginfo
, klist
, nlist
);
3048 while (getline(&line
, &len
, f
) != -1) {
3051 struct hierarchy
*new;
3052 char *base_cgroup
= NULL
, *mountpoint
= NULL
;
3053 char **controller_list
= NULL
;
3055 type
= get_cgroup_version(line
);
3059 if (type
== CGROUP2_SUPER_MAGIC
&& ops
->unified
)
3062 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNKNOWN
) {
3063 if (type
== CGROUP2_SUPER_MAGIC
)
3064 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
3065 else if (type
== CGROUP_SUPER_MAGIC
)
3066 ops
->cgroup_layout
= CGROUP_LAYOUT_LEGACY
;
3067 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
) {
3068 if (type
== CGROUP_SUPER_MAGIC
)
3069 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
3070 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_LEGACY
) {
3071 if (type
== CGROUP2_SUPER_MAGIC
)
3072 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
3075 controller_list
= cg_hybrid_get_controllers(klist
, nlist
, line
, type
);
3076 if (!controller_list
&& type
== CGROUP_SUPER_MAGIC
)
3079 if (type
== CGROUP_SUPER_MAGIC
)
3080 if (controller_list_is_dup(ops
->hierarchies
, controller_list
))
3081 log_trace_errno(goto next
, EEXIST
, "Skipping duplicating controller");
3083 mountpoint
= cg_hybrid_get_mountpoint(line
);
3085 log_error_errno(goto next
, EINVAL
, "Failed parsing mountpoint from \"%s\"", line
);
3087 if (type
== CGROUP_SUPER_MAGIC
)
3088 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, controller_list
[0], CGROUP_SUPER_MAGIC
);
3090 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, NULL
, CGROUP2_SUPER_MAGIC
);
3092 log_error_errno(goto next
, EINVAL
, "Failed to find current cgroup");
3095 prune_init_scope(base_cgroup
);
3096 if (type
== CGROUP2_SUPER_MAGIC
)
3097 writeable
= test_writeable_v2(mountpoint
, base_cgroup
);
3099 writeable
= test_writeable_v1(mountpoint
, base_cgroup
);
3101 log_trace_errno(goto next
, EROFS
, "The %s group is not writeable", base_cgroup
);
3103 if (type
== CGROUP2_SUPER_MAGIC
) {
3104 char *cgv2_ctrl_path
;
3106 cgv2_ctrl_path
= must_make_path(mountpoint
, base_cgroup
,
3107 "cgroup.controllers",
3110 controller_list
= cg_unified_get_controllers(cgv2_ctrl_path
);
3111 free(cgv2_ctrl_path
);
3112 if (!controller_list
) {
3113 controller_list
= cg_unified_make_empty_controller();
3114 TRACE("No controllers are enabled for "
3115 "delegation in the unified hierarchy");
3119 /* Exclude all controllers that cgroup use does not want. */
3120 if (!cgroup_use_wants_controllers(ops
, controller_list
))
3121 log_trace_errno(goto next
, EINVAL
, "Skipping controller");
3123 new = add_hierarchy(&ops
->hierarchies
, controller_list
, mountpoint
, base_cgroup
, type
);
3124 if (type
== CGROUP2_SUPER_MAGIC
&& !ops
->unified
) {
3126 cg_unified_delegate(&new->cgroup2_chown
);
3133 free_string_list(controller_list
);
3138 free_string_list(klist
);
3139 free_string_list(nlist
);
3141 TRACE("Writable cgroup hierarchies:");
3142 lxc_cgfsng_print_hierarchies(ops
);
3144 /* verify that all controllers in cgroup.use and all crucial
3145 * controllers are accounted for
3147 if (!all_controllers_found(ops
))
3148 return log_error_errno(-1, ENOENT
, "Failed to find all required controllers");
3153 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
3154 static char *cg_unified_get_current_cgroup(bool relative
)
3156 __do_free
char *basecginfo
= NULL
;
3160 if (!relative
&& (geteuid() == 0))
3161 basecginfo
= read_file("/proc/1/cgroup");
3163 basecginfo
= read_file("/proc/self/cgroup");
3167 base_cgroup
= strstr(basecginfo
, "0::/");
3169 goto cleanup_on_err
;
3171 base_cgroup
= base_cgroup
+ 3;
3172 copy
= copy_to_eol(base_cgroup
);
3174 goto cleanup_on_err
;
3183 static int cg_unified_init(struct cgroup_ops
*ops
, bool relative
,
3186 __do_free
char *subtree_path
= NULL
;
3190 struct hierarchy
*new;
3191 char *base_cgroup
= NULL
;
3193 ret
= unified_cgroup_hierarchy();
3194 if (ret
== -ENOMEDIUM
)
3195 return ret_errno(ENOMEDIUM
);
3197 if (ret
!= CGROUP2_SUPER_MAGIC
)
3200 base_cgroup
= cg_unified_get_current_cgroup(relative
);
3202 return ret_errno(EINVAL
);
3204 prune_init_scope(base_cgroup
);
3207 * We assume that the cgroup we're currently in has been delegated to
3208 * us and we are free to further delege all of the controllers listed
3209 * in cgroup.controllers further down the hierarchy.
3211 mountpoint
= must_copy_string(DEFAULT_CGROUP_MOUNTPOINT
);
3212 subtree_path
= must_make_path(mountpoint
, base_cgroup
, "cgroup.controllers", NULL
);
3213 delegatable
= cg_unified_get_controllers(subtree_path
);
3215 delegatable
= cg_unified_make_empty_controller();
3216 if (!delegatable
[0])
3217 TRACE("No controllers are enabled for delegation");
3219 /* TODO: If the user requested specific controllers via lxc.cgroup.use
3220 * we should verify here. The reason I'm not doing it right is that I'm
3221 * not convinced that lxc.cgroup.use will be the future since it is a
3222 * global property. I much rather have an option that lets you request
3223 * controllers per container.
3226 new = add_hierarchy(&ops
->hierarchies
, delegatable
, mountpoint
, base_cgroup
, CGROUP2_SUPER_MAGIC
);
3228 cg_unified_delegate(&new->cgroup2_chown
);
3230 if (bpf_devices_cgroup_supported())
3231 new->bpf_device_controller
= 1;
3233 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
3235 return CGROUP2_SUPER_MAGIC
;
3238 static int cg_init(struct cgroup_ops
*ops
, struct lxc_conf
*conf
)
3242 bool relative
= conf
->cgroup_meta
.relative
;
3244 tmp
= lxc_global_config_value("lxc.cgroup.use");
3246 __do_free
char *pin
= NULL
;
3249 pin
= must_copy_string(tmp
);
3252 lxc_iterate_parts(cur
, chop
, ",")
3253 must_append_string(&ops
->cgroup_use
, cur
);
3256 ret
= cg_unified_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
3260 if (ret
== CGROUP2_SUPER_MAGIC
)
3263 return cg_hybrid_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
3266 __cgfsng_ops
static int cgfsng_data_init(struct cgroup_ops
*ops
)
3268 const char *cgroup_pattern
;
3271 return ret_set_errno(-1, ENOENT
);
3273 /* copy system-wide cgroup information */
3274 cgroup_pattern
= lxc_global_config_value("lxc.cgroup.pattern");
3275 if (!cgroup_pattern
) {
3276 /* lxc.cgroup.pattern is only NULL on error. */
3277 ERROR("Failed to retrieve cgroup pattern");
3278 return ret_set_errno(-1, ENOMEM
);
3280 ops
->cgroup_pattern
= must_copy_string(cgroup_pattern
);
3285 struct cgroup_ops
*cgfsng_ops_init(struct lxc_conf
*conf
)
3287 __do_free
struct cgroup_ops
*cgfsng_ops
= NULL
;
3289 cgfsng_ops
= malloc(sizeof(struct cgroup_ops
));
3291 return ret_set_errno(NULL
, ENOMEM
);
3293 memset(cgfsng_ops
, 0, sizeof(struct cgroup_ops
));
3294 cgfsng_ops
->cgroup_layout
= CGROUP_LAYOUT_UNKNOWN
;
3296 if (cg_init(cgfsng_ops
, conf
))
3299 cgfsng_ops
->unified_fd
= -EBADF
;
3301 cgfsng_ops
->data_init
= cgfsng_data_init
;
3302 cgfsng_ops
->payload_destroy
= cgfsng_payload_destroy
;
3303 cgfsng_ops
->monitor_destroy
= cgfsng_monitor_destroy
;
3304 cgfsng_ops
->monitor_create
= cgfsng_monitor_create
;
3305 cgfsng_ops
->monitor_enter
= cgfsng_monitor_enter
;
3306 cgfsng_ops
->monitor_delegate_controllers
= cgfsng_monitor_delegate_controllers
;
3307 cgfsng_ops
->payload_delegate_controllers
= cgfsng_payload_delegate_controllers
;
3308 cgfsng_ops
->payload_create
= cgfsng_payload_create
;
3309 cgfsng_ops
->payload_enter
= cgfsng_payload_enter
;
3310 cgfsng_ops
->escape
= cgfsng_escape
;
3311 cgfsng_ops
->num_hierarchies
= cgfsng_num_hierarchies
;
3312 cgfsng_ops
->get_hierarchies
= cgfsng_get_hierarchies
;
3313 cgfsng_ops
->get_cgroup
= cgfsng_get_cgroup
;
3314 cgfsng_ops
->get
= cgfsng_get
;
3315 cgfsng_ops
->set
= cgfsng_set
;
3316 cgfsng_ops
->freeze
= cgfsng_freeze
;
3317 cgfsng_ops
->unfreeze
= cgfsng_unfreeze
;
3318 cgfsng_ops
->setup_limits_legacy
= cgfsng_setup_limits_legacy
;
3319 cgfsng_ops
->setup_limits
= cgfsng_setup_limits
;
3320 cgfsng_ops
->driver
= "cgfsng";
3321 cgfsng_ops
->version
= "1.0.0";
3322 cgfsng_ops
->attach
= cgfsng_attach
;
3323 cgfsng_ops
->chown
= cgfsng_chown
;
3324 cgfsng_ops
->mount
= cgfsng_mount
;
3325 cgfsng_ops
->nrtasks
= cgfsng_nrtasks
;
3326 cgfsng_ops
->devices_activate
= cgfsng_devices_activate
;
3328 return move_ptr(cgfsng_ops
);