1 /* SPDX-License-Identifier: LGPL-2.1+ */
4 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
5 * cgroup backend. The original cgfs.c was designed to be as flexible
6 * as possible. It would try to find cgroup filesystems no matter where
7 * or how you had them mounted, and deduce the most usable mount for
10 * This new implementation assumes that cgroup filesystems are mounted
11 * under /sys/fs/cgroup/clist where clist is either the controller, or
12 * a comma-separated list of controllers.
22 #include <linux/kdev_t.h>
23 #include <linux/types.h>
30 #include <sys/types.h>
35 #include "cgroup2_devices.h"
36 #include "cgroup_utils.h"
43 #include "memory_utils.h"
44 #include "storage/storage.h"
48 #include "include/strlcpy.h"
52 #include "include/strlcat.h"
55 lxc_log_define(cgfsng
, cgroup
);
57 /* Given a pointer to a null-terminated array of pointers, realloc to add one
58 * entry, and point the new entry to NULL. Do not fail. Return the index to the
59 * second-to-last entry - that is, the one which is now available for use
60 * (keeping the list null-terminated).
62 static int append_null_to_list(void ***list
)
67 for (; (*list
)[newentry
]; newentry
++)
70 *list
= must_realloc(*list
, (newentry
+ 2) * sizeof(void **));
71 (*list
)[newentry
+ 1] = NULL
;
75 /* Given a null-terminated array of strings, check whether @entry is one of the
/* Check whether @entry occurs in the NULL-terminated string array @list.
 *
 * Returns true on an exact (strcmp) match, false otherwise or when
 * @list itself is NULL.
 */
static bool string_in_list(char **list, const char *entry)
{
	if (!list)
		return false;

	for (char **cur = list; *cur; cur++) {
		if (strcmp(*cur, entry) == 0)
			return true;
	}

	return false;
}
90 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
91 * "name=systemd". Do not fail.
/* Return a newly allocated copy of @entry prefixed with "name=", i.e.
 * turn "systemd" into "name=systemd". Cannot fail: must_realloc()
 * aborts on OOM. The caller owns the returned string.
 */
static char *cg_legacy_must_prefix_named(char *entry)
{
	size_t len = strlen(entry);
	/* "name=" (5 bytes) + entry + terminating NUL */
	char *prefixed = must_realloc(NULL, len + 6);

	memcpy(prefixed, "name=", STRLITERALLEN("name="));
	memcpy(prefixed + STRLITERALLEN("name="), entry, len);
	prefixed[len + 5] = '\0';

	return prefixed;
}
108 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
111 * We also handle named subsystems here. Any controller which is not a kernel
112 * subsystem, we prefix "name=". Any which is both a kernel and named subsystem,
113 * we refuse to use because we're not sure which we have here.
114 * (TODO: We could work around this in some cases by just remounting to be
115 * unambiguous, or by comparing mountpoint contents with current cgroup.)
117 * The last entry will always be NULL.
/* Append controller @entry to the NULL-terminated list *@clist.
 *
 * Kernel controllers (present in @klist) and entries already carrying a
 * "name=" prefix are copied verbatim; anything else is treated as a
 * named subsystem and gets a "name=" prefix added. An entry listed in
 * both @klist and @nlist is ambiguous and refused with an error.
 * NOTE(review): the trailing parameter is not visible in this extract;
 * reconstructed as `char *entry` — confirm against the full file.
 */
static void must_append_controller(char **klist, char **nlist, char ***clist,
				   char *entry)
{
	int newentry;
	char *copy;

	if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
		ERROR("Refusing to use ambiguous controller \"%s\"", entry);
		ERROR("It is both a named and kernel subsystem");
		return;
	}

	newentry = append_null_to_list((void ***)clist);

	if (strncmp(entry, "name=", 5) == 0)
		copy = must_copy_string(entry);
	else if (string_in_list(klist, entry))
		copy = must_copy_string(entry);
	else
		copy = cg_legacy_must_prefix_named(entry);

	(*clist)[newentry] = copy;
}
143 /* Given a handler's cgroup data, return the struct hierarchy for the controller
144 * @c, or NULL if there is none.
146 struct hierarchy
*get_hierarchy(struct cgroup_ops
*ops
, const char *controller
)
148 if (!ops
->hierarchies
)
149 return log_trace_errno(NULL
, errno
, "There are no useable cgroup controllers");
151 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
153 /* This is the empty unified hierarchy. */
154 if (ops
->hierarchies
[i
]->controllers
&&
155 !ops
->hierarchies
[i
]->controllers
[0])
156 return ops
->hierarchies
[i
];
158 } else if (pure_unified_layout(ops
) &&
159 strcmp(controller
, "devices") == 0) {
160 if (ops
->unified
->bpf_device_controller
)
165 if (string_in_list(ops
->hierarchies
[i
]->controllers
, controller
))
166 return ops
->hierarchies
[i
];
170 WARN("There is no useable %s controller", controller
);
172 WARN("There is no empty unified cgroup hierarchy");
174 return ret_set_errno(NULL
, ENOENT
);
#define BATCH_SIZE 50
/* Grow *@mem in BATCH_SIZE-sized chunks so that repeated small appends
 * do not trigger a realloc on every call. Only reallocates when @newlen
 * crosses into a new batch, or when *@mem is still NULL.
 */
static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
{
	int newbatches = (newlen / BATCH_SIZE) + 1;
	int oldbatches = (oldlen / BATCH_SIZE) + 1;

	if (!*mem || newbatches > oldbatches)
		*mem = must_realloc(*mem, newbatches * BATCH_SIZE);
}
/* Append the NUL-terminated buffer @new (content length @newlen) to
 * *@dest, whose current content length is @oldlen. Storage is grown in
 * batches via batch_realloc(); the trailing NUL of @new is copied too.
 */
static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
{
	size_t full = oldlen + newlen;

	/* +1 so the copied NUL terminator always fits. */
	batch_realloc(dest, oldlen, full + 1);

	memcpy(*dest + oldlen, new, newlen + 1);
}
196 /* Slurp in a whole file */
197 static char *read_file(const char *fnam
)
199 __do_free
char *buf
= NULL
, *line
= NULL
;
200 __do_fclose
FILE *f
= NULL
;
201 size_t len
= 0, fulllen
= 0;
204 f
= fopen(fnam
, "re");
208 while ((linelen
= getline(&line
, &len
, f
)) != -1) {
209 append_line(&buf
, fulllen
, line
, linelen
);
213 return move_ptr(buf
);
/* Taken over modified from the kernel sources. */
#define NBITS 32 /* bits in uint32_t */
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)

/* Set @bit in the bit array @bitarr.
 * UINT32_C(1): left-shifting a signed int 1 into the sign bit
 * (bit % NBITS == 31) is undefined behavior; shift an unsigned value.
 */
static void set_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] |= (UINT32_C(1) << (bit % NBITS));
}

/* Clear @bit in the bit array @bitarr. */
static void clear_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] &= ~(UINT32_C(1) << (bit % NBITS));
}

/* Return whether @bit is set in the bit array @bitarr. */
static bool is_set(unsigned bit, uint32_t *bitarr)
{
	return (bitarr[bit / NBITS] & (UINT32_C(1) << (bit % NBITS))) != 0;
}
236 /* Create cpumask from cpulist aka turn:
244 static uint32_t *lxc_cpumask(char *buf
, size_t nbits
)
246 __do_free
uint32_t *bitarr
= NULL
;
250 arrlen
= BITS_TO_LONGS(nbits
);
251 bitarr
= calloc(arrlen
, sizeof(uint32_t));
253 return ret_set_errno(NULL
, ENOMEM
);
255 lxc_iterate_parts(token
, buf
, ",") {
260 start
= strtoul(token
, NULL
, 0);
262 range
= strchr(token
, '-');
264 end
= strtoul(range
+ 1, NULL
, 0);
267 return ret_set_errno(NULL
, EINVAL
);
270 return ret_set_errno(NULL
, EINVAL
);
273 set_bit(start
++, bitarr
);
276 return move_ptr(bitarr
);
279 /* Turn cpumask into simple, comma-separated cpulist. */
280 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr
, size_t nbits
)
282 __do_free_string_list
char **cpulist
= NULL
;
283 char numstr
[INTTYPE_TO_STRLEN(size_t)] = {0};
286 for (size_t i
= 0; i
<= nbits
; i
++) {
287 if (!is_set(i
, bitarr
))
290 ret
= snprintf(numstr
, sizeof(numstr
), "%zu", i
);
291 if (ret
< 0 || (size_t)ret
>= sizeof(numstr
))
294 ret
= lxc_append_string(&cpulist
, numstr
);
296 return ret_set_errno(NULL
, ENOMEM
);
300 return ret_set_errno(NULL
, ENOMEM
);
302 return lxc_string_join(",", (const char **)cpulist
, false);
305 static ssize_t
get_max_cpus(char *cpulist
)
308 char *maxcpus
= cpulist
;
311 c1
= strrchr(maxcpus
, ',');
315 c2
= strrchr(maxcpus
, '-');
329 cpus
= strtoul(c1
, NULL
, 0);
336 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
337 #define __OFFLINE_CPUS "/sys/devices/system/cpu/offline"
338 static bool cg_legacy_filter_and_set_cpus(const char *parent_cgroup
,
339 char *child_cgroup
, bool am_initialized
)
341 __do_free
char *cpulist
= NULL
, *fpath
= NULL
, *isolcpus
= NULL
,
342 *offlinecpus
= NULL
, *posscpus
= NULL
;
343 __do_free
uint32_t *isolmask
= NULL
, *offlinemask
= NULL
,
347 ssize_t maxisol
= 0, maxoffline
= 0, maxposs
= 0;
348 bool flipped_bit
= false;
350 fpath
= must_make_path(parent_cgroup
, "cpuset.cpus", NULL
);
351 posscpus
= read_file(fpath
);
353 return log_error_errno(false, errno
, "Failed to read file \"%s\"", fpath
);
355 /* Get maximum number of cpus found in possible cpuset. */
356 maxposs
= get_max_cpus(posscpus
);
357 if (maxposs
< 0 || maxposs
>= INT_MAX
- 1)
360 if (file_exists(__ISOL_CPUS
)) {
361 isolcpus
= read_file(__ISOL_CPUS
);
363 return log_error_errno(false, errno
, "Failed to read file \"%s\"", __ISOL_CPUS
);
365 if (isdigit(isolcpus
[0])) {
366 /* Get maximum number of cpus found in isolated cpuset. */
367 maxisol
= get_max_cpus(isolcpus
);
368 if (maxisol
< 0 || maxisol
>= INT_MAX
- 1)
372 if (maxposs
< maxisol
)
376 TRACE("The path \""__ISOL_CPUS
"\" to read isolated cpus from does not exist");
379 if (file_exists(__OFFLINE_CPUS
)) {
380 offlinecpus
= read_file(__OFFLINE_CPUS
);
382 return log_error_errno(false, errno
, "Failed to read file \"%s\"", __OFFLINE_CPUS
);
384 if (isdigit(offlinecpus
[0])) {
385 /* Get maximum number of cpus found in offline cpuset. */
386 maxoffline
= get_max_cpus(offlinecpus
);
387 if (maxoffline
< 0 || maxoffline
>= INT_MAX
- 1)
391 if (maxposs
< maxoffline
)
392 maxposs
= maxoffline
;
395 TRACE("The path \""__OFFLINE_CPUS
"\" to read offline cpus from does not exist");
398 if ((maxisol
== 0) && (maxoffline
== 0)) {
399 cpulist
= move_ptr(posscpus
);
403 possmask
= lxc_cpumask(posscpus
, maxposs
);
405 return log_error_errno(false, errno
, "Failed to create cpumask for possible cpus");
408 isolmask
= lxc_cpumask(isolcpus
, maxposs
);
410 return log_error_errno(false, errno
, "Failed to create cpumask for isolated cpus");
413 if (maxoffline
> 0) {
414 offlinemask
= lxc_cpumask(offlinecpus
, maxposs
);
416 return log_error_errno(false, errno
, "Failed to create cpumask for offline cpus");
419 for (i
= 0; i
<= maxposs
; i
++) {
420 if ((isolmask
&& !is_set(i
, isolmask
)) ||
421 (offlinemask
&& !is_set(i
, offlinemask
)) ||
422 !is_set(i
, possmask
))
426 clear_bit(i
, possmask
);
430 cpulist
= lxc_cpumask_to_cpulist(possmask
, maxposs
);
431 TRACE("No isolated or offline cpus present in cpuset");
433 cpulist
= move_ptr(posscpus
);
434 TRACE("Removed isolated or offline cpus from cpuset");
437 return log_error_errno(false, errno
, "Failed to create cpu list");
440 if (!am_initialized
) {
441 ret
= lxc_write_openat(child_cgroup
, "cpuset.cpus", cpulist
, strlen(cpulist
));
443 return log_error_errno(false,
444 errno
, "Failed to write cpu list to \"%s/cpuset.cpus\"",
447 TRACE("Copied cpu settings of parent cgroup");
453 /* Copy contents of parent(@path)/@file to @path/@file */
454 static bool copy_parent_file(const char *parent_cgroup
,
455 const char *child_cgroup
, const char *file
)
457 __do_free
char *parent_file
= NULL
, *value
= NULL
;
461 parent_file
= must_make_path(parent_cgroup
, file
, NULL
);
462 len
= lxc_read_from_file(parent_file
, NULL
, 0);
464 return log_error_errno(false, errno
, "Failed to determine buffer size");
466 value
= must_realloc(NULL
, len
+ 1);
468 ret
= lxc_read_from_file(parent_file
, value
, len
);
470 return log_error_errno(false, errno
, "Failed to read from parent file \"%s\"", parent_file
);
472 ret
= lxc_write_openat(child_cgroup
, file
, value
, len
);
473 if (ret
< 0 && errno
!= EACCES
)
474 return log_error_errno(false, errno
, "Failed to write \"%s\" to file \"%s/%s\"",
475 value
, child_cgroup
, file
);
479 static inline bool is_unified_hierarchy(const struct hierarchy
*h
)
481 return h
->version
== CGROUP2_SUPER_MAGIC
;
485 * Initialize the cpuset hierarchy in first directory of @cgroup_leaf and set
486 * cgroup.clone_children so that children inherit settings. Since the
487 * h->base_path is populated by init or ourselves, we know it is already
490 * returns -1 on error, 0 when we didn't created a cgroup, 1 if we created a
493 static int cg_legacy_handle_cpuset_hierarchy(struct hierarchy
*h
,
494 const char *cgroup_leaf
)
496 __do_free
char *parent_cgroup
= NULL
, *child_cgroup
= NULL
, *dup
= NULL
;
497 __do_close
int cgroup_fd
= -EBADF
;
503 if (is_unified_hierarchy(h
))
506 if (!string_in_list(h
->controllers
, "cpuset"))
510 return ret_set_errno(-1, EINVAL
);
512 dup
= strdup(cgroup_leaf
);
514 return ret_set_errno(-1, ENOMEM
);
516 parent_cgroup
= must_make_path(h
->mountpoint
, h
->container_base_path
, NULL
);
519 leaf
+= strspn(leaf
, "/");
520 slash
= strchr(leaf
, '/');
523 child_cgroup
= must_make_path(parent_cgroup
, leaf
, NULL
);
528 ret
= mkdir(child_cgroup
, 0755);
531 return log_error_errno(-1, errno
, "Failed to create directory \"%s\"", child_cgroup
);
536 cgroup_fd
= lxc_open_dirfd(child_cgroup
);
540 ret
= lxc_readat(cgroup_fd
, "cgroup.clone_children", &v
, 1);
542 return log_error_errno(-1, errno
, "Failed to read file \"%s/cgroup.clone_children\"", child_cgroup
);
544 /* Make sure any isolated cpus are removed from cpuset.cpus. */
545 if (!cg_legacy_filter_and_set_cpus(parent_cgroup
, child_cgroup
, v
== '1'))
546 return log_error_errno(-1, errno
, "Failed to remove isolated cpus");
548 /* Already set for us by someone else. */
550 TRACE("\"cgroup.clone_children\" was already set to \"1\"");
552 /* copy parent's settings */
553 if (!copy_parent_file(parent_cgroup
, child_cgroup
, "cpuset.mems"))
554 return log_error_errno(-1, errno
, "Failed to copy \"cpuset.mems\" settings");
556 /* Set clone_children so children inherit our settings */
557 ret
= lxc_writeat(cgroup_fd
, "cgroup.clone_children", "1", 1);
559 return log_error_errno(-1, errno
, "Failed to write 1 to \"%s/cgroup.clone_children\"", child_cgroup
);
564 /* Given two null-terminated lists of strings, return true if any string is in
/* Given two NULL-terminated lists of strings, return true if any string
 * occurs in both; false otherwise or when either list is NULL.
 */
static bool controller_lists_intersect(char **l1, char **l2)
{
	if (!l1 || !l2)
		return false;

	for (char **it = l1; *it; it++) {
		if (string_in_list(l2, *it))
			return true;
	}

	return false;
}
579 /* For a null-terminated list of controllers @clist, return true if any of those
580 * controllers is already listed the null-terminated list of hierarchies @hlist.
581 * Realistically, if one is present, all must be present.
583 static bool controller_list_is_dup(struct hierarchy
**hlist
, char **clist
)
588 for (int i
= 0; hlist
[i
]; i
++)
589 if (controller_lists_intersect(hlist
[i
]->controllers
, clist
))
595 /* Return true if the controller @entry is found in the null-terminated list of
596 * hierarchies @hlist.
598 static bool controller_found(struct hierarchy
**hlist
, char *entry
)
603 for (int i
= 0; hlist
[i
]; i
++)
604 if (string_in_list(hlist
[i
]->controllers
, entry
))
610 /* Return true if all of the controllers which we require have been found. The
611 * required list is freezer and anything in lxc.cgroup.use.
613 static bool all_controllers_found(struct cgroup_ops
*ops
)
615 struct hierarchy
**hlist
;
617 if (!ops
->cgroup_use
)
620 hlist
= ops
->hierarchies
;
621 for (char **cur
= ops
->cgroup_use
; cur
&& *cur
; cur
++)
622 if (!controller_found(hlist
, *cur
))
623 return log_error(false, "No %s controller mountpoint found", *cur
);
628 /* Get the controllers from a mountinfo line There are other ways we could get
629 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
630 * could parse the mount options. But we simply assume that the mountpoint must
631 * be /sys/fs/cgroup/controller-list
633 static char **cg_hybrid_get_controllers(char **klist
, char **nlist
, char *line
,
636 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
637 * for legacy hierarchies.
639 __do_free_string_list
char **aret
= NULL
;
642 char *p
= line
, *sep
= ",";
644 for (i
= 0; i
< 4; i
++) {
651 /* Note, if we change how mountinfo works, then our caller will need to
652 * verify /sys/fs/cgroup/ in this field.
654 if (strncmp(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15) != 0)
655 return log_error(NULL
, "Found hierarchy not under " DEFAULT_CGROUP_MOUNTPOINT
": \"%s\"", p
);
660 return log_error(NULL
, "Corrupt mountinfo");
663 if (type
== CGROUP_SUPER_MAGIC
) {
664 __do_free
char *dup
= NULL
;
666 /* strdup() here for v1 hierarchies. Otherwise
667 * lxc_iterate_parts() will destroy mountpoints such as
668 * "/sys/fs/cgroup/cpu,cpuacct".
670 dup
= must_copy_string(p
);
674 lxc_iterate_parts (tok
, dup
, sep
)
675 must_append_controller(klist
, nlist
, &aret
, tok
);
679 return move_ptr(aret
);
682 static char **cg_unified_make_empty_controller(void)
684 __do_free_string_list
char **aret
= NULL
;
687 newentry
= append_null_to_list((void ***)&aret
);
688 aret
[newentry
] = NULL
;
689 return move_ptr(aret
);
692 static char **cg_unified_get_controllers(const char *file
)
694 __do_free
char *buf
= NULL
;
695 __do_free_string_list
char **aret
= NULL
;
699 buf
= read_file(file
);
703 lxc_iterate_parts(tok
, buf
, sep
) {
707 newentry
= append_null_to_list((void ***)&aret
);
708 copy
= must_copy_string(tok
);
709 aret
[newentry
] = copy
;
712 return move_ptr(aret
);
715 static struct hierarchy
*add_hierarchy(struct hierarchy
***h
, char **clist
, char *mountpoint
,
716 char *container_base_path
, int type
)
718 struct hierarchy
*new;
721 new = zalloc(sizeof(*new));
722 new->controllers
= clist
;
723 new->mountpoint
= mountpoint
;
724 new->container_base_path
= container_base_path
;
726 new->cgfd_con
= -EBADF
;
727 new->cgfd_mon
= -EBADF
;
729 newentry
= append_null_to_list((void ***)h
);
730 (*h
)[newentry
] = new;
734 /* Get a copy of the mountpoint from @line, which is a line from
735 * /proc/self/mountinfo.
737 static char *cg_hybrid_get_mountpoint(char *line
)
739 char *p
= line
, *sret
= NULL
;
743 for (int i
= 0; i
< 4; i
++) {
750 if (strncmp(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15) != 0)
753 p2
= strchr(p
+ 15, ' ');
759 sret
= must_realloc(NULL
, len
+ 1);
760 memcpy(sret
, p
, len
);
766 /* Given a multi-line string, return a null-terminated copy of the current line. */
/* Given a multi-line string, return a NUL-terminated heap copy of the
 * current line, or NULL when no '\n' terminator is found. The caller
 * owns the result; allocation cannot fail (must_realloc aborts on OOM).
 */
static char *copy_to_eol(char *p)
{
	char *eol, *sret;
	size_t len;

	eol = strchr(p, '\n');
	if (!eol)
		return NULL;

	len = eol - p;
	sret = must_realloc(NULL, len + 1);
	memcpy(sret, p, len);
	sret[len] = '\0';

	return sret;
}
784 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
785 * /proc/self/cgroup file. Check whether controller c is present.
787 static bool controller_in_clist(char *cgline
, char *c
)
789 __do_free
char *tmp
= NULL
;
793 eol
= strchr(cgline
, ':');
798 tmp
= must_realloc(NULL
, len
+ 1);
799 memcpy(tmp
, cgline
, len
);
802 lxc_iterate_parts(tok
, tmp
, ",")
803 if (strcmp(tok
, c
) == 0)
809 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
812 static char *cg_hybrid_get_current_cgroup(char *basecginfo
, char *controller
,
815 char *p
= basecginfo
;
818 bool is_cgv2_base_cgroup
= false;
820 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
821 if ((type
== CGROUP2_SUPER_MAGIC
) && (*p
== '0'))
822 is_cgv2_base_cgroup
= true;
829 if (is_cgv2_base_cgroup
|| (controller
&& controller_in_clist(p
, controller
))) {
834 return copy_to_eol(p
);
/* Append a copy of @entry to the NULL-terminated string list *@list.
 * Cannot fail: the underlying allocators abort on OOM.
 */
static void must_append_string(char ***list, char *entry)
{
	int newentry = append_null_to_list((void ***)list);

	(*list)[newentry] = must_copy_string(entry);
}
854 static int get_existing_subsystems(char ***klist
, char ***nlist
)
856 __do_free
char *line
= NULL
;
857 __do_fclose
FILE *f
= NULL
;
860 f
= fopen("/proc/self/cgroup", "re");
864 while (getline(&line
, &len
, f
) != -1) {
866 p
= strchr(line
, ':');
875 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
876 * contains an entry of the form:
880 * In this case we use "cgroup2" as controller name.
883 must_append_string(klist
, "cgroup2");
887 lxc_iterate_parts(tok
, p
, ",") {
888 if (strncmp(tok
, "name=", 5) == 0)
889 must_append_string(nlist
, tok
);
891 must_append_string(klist
, tok
);
/* Strip trailing newlines from @s in place, always leaving at least one
 * character in the buffer. Returns @s.
 */
static char *trim(char *s)
{
	size_t len = strlen(s);

	while ((len > 1) && (s[len - 1] == '\n'))
		s[--len] = '\0';

	return s;
}
909 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops
*ops
)
912 struct hierarchy
**it
;
914 if (!ops
->hierarchies
) {
915 TRACE(" No hierarchies found");
919 TRACE(" Hierarchies:");
920 for (i
= 0, it
= ops
->hierarchies
; it
&& *it
; it
++, i
++) {
924 TRACE(" %d: base_cgroup: %s", i
, (*it
)->container_base_path
? (*it
)->container_base_path
: "(null)");
925 TRACE(" mountpoint: %s", (*it
)->mountpoint
? (*it
)->mountpoint
: "(null)");
926 TRACE(" controllers:");
927 for (j
= 0, cit
= (*it
)->controllers
; cit
&& *cit
; cit
++, j
++)
928 TRACE(" %d: %s", j
, *cit
);
932 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo
, char **klist
,
938 TRACE("basecginfo is:");
939 TRACE("%s", basecginfo
);
941 for (k
= 0, it
= klist
; it
&& *it
; it
++, k
++)
942 TRACE("kernel subsystem %d: %s", k
, *it
);
944 for (k
= 0, it
= nlist
; it
&& *it
; it
++, k
++)
945 TRACE("named subsystem %d: %s", k
, *it
);
948 static int cgroup_rmdir(struct hierarchy
**hierarchies
,
949 const char *container_cgroup
)
951 if (!container_cgroup
|| !hierarchies
)
954 for (int i
= 0; hierarchies
[i
]; i
++) {
955 struct hierarchy
*h
= hierarchies
[i
];
958 if (!h
->container_full_path
)
961 ret
= recursive_destroy(h
->container_full_path
);
963 WARN("Failed to destroy \"%s\"", h
->container_full_path
);
965 free_disarm(h
->container_full_path
);
971 struct generic_userns_exec_data
{
972 struct hierarchy
**hierarchies
;
973 const char *container_cgroup
;
974 struct lxc_conf
*conf
;
975 uid_t origuid
; /* target uid in parent namespace */
979 static int cgroup_rmdir_wrapper(void *data
)
981 struct generic_userns_exec_data
*arg
= data
;
982 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
983 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
986 if (!lxc_setgroups(0, NULL
) && errno
!= EPERM
)
987 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
989 ret
= setresgid(nsgid
, nsgid
, nsgid
);
991 return log_error_errno(-1, errno
, "Failed to setresgid(%d, %d, %d)",
992 (int)nsgid
, (int)nsgid
, (int)nsgid
);
994 ret
= setresuid(nsuid
, nsuid
, nsuid
);
996 return log_error_errno(-1, errno
, "Failed to setresuid(%d, %d, %d)",
997 (int)nsuid
, (int)nsuid
, (int)nsuid
);
999 return cgroup_rmdir(arg
->hierarchies
, arg
->container_cgroup
);
1002 __cgfsng_ops
static void cgfsng_payload_destroy(struct cgroup_ops
*ops
,
1003 struct lxc_handler
*handler
)
1008 ERROR("Called with uninitialized cgroup operations");
1012 if (!ops
->hierarchies
)
1016 ERROR("Called with uninitialized handler");
1020 if (!handler
->conf
) {
1021 ERROR("Called with uninitialized conf");
1025 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
1026 ret
= bpf_program_cgroup_detach(handler
->conf
->cgroup2_devices
);
1028 WARN("Failed to detach bpf program from cgroup");
1031 if (handler
->conf
&& !lxc_list_empty(&handler
->conf
->id_map
)) {
1032 struct generic_userns_exec_data wrap
= {
1033 .conf
= handler
->conf
,
1034 .container_cgroup
= ops
->container_cgroup
,
1035 .hierarchies
= ops
->hierarchies
,
1038 ret
= userns_exec_1(handler
->conf
, cgroup_rmdir_wrapper
, &wrap
,
1039 "cgroup_rmdir_wrapper");
1041 ret
= cgroup_rmdir(ops
->hierarchies
, ops
->container_cgroup
);
1044 SYSWARN("Failed to destroy cgroups");
1047 __cgfsng_ops
static void cgfsng_monitor_destroy(struct cgroup_ops
*ops
,
1048 struct lxc_handler
*handler
)
1051 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1052 const struct lxc_conf
*conf
;
1055 ERROR("Called with uninitialized cgroup operations");
1059 if (!ops
->hierarchies
)
1063 ERROR("Called with uninitialized handler");
1067 if (!handler
->conf
) {
1068 ERROR("Called with uninitialized conf");
1071 conf
= handler
->conf
;
1073 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", handler
->monitor_pid
);
1074 if (len
< 0 || (size_t)len
>= sizeof(pidstr
))
1077 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1078 __do_free
char *pivot_path
= NULL
;
1079 struct hierarchy
*h
= ops
->hierarchies
[i
];
1082 if (!h
->monitor_full_path
)
1085 if (conf
&& conf
->cgroup_meta
.dir
)
1086 pivot_path
= must_make_path(h
->mountpoint
,
1087 h
->container_base_path
,
1088 conf
->cgroup_meta
.dir
,
1089 CGROUP_PIVOT
, NULL
);
1091 pivot_path
= must_make_path(h
->mountpoint
,
1092 h
->container_base_path
,
1093 CGROUP_PIVOT
, NULL
);
1095 ret
= mkdir_p(pivot_path
, 0755);
1096 if (ret
< 0 && errno
!= EEXIST
) {
1097 ERROR("Failed to create %s", pivot_path
);
1098 goto try_recursive_destroy
;
1101 ret
= lxc_write_openat(pivot_path
, "cgroup.procs", pidstr
, len
);
1103 SYSWARN("Failed to move monitor %s to \"%s\"", pidstr
, pivot_path
);
1107 try_recursive_destroy
:
1108 ret
= recursive_destroy(h
->monitor_full_path
);
1110 WARN("Failed to destroy \"%s\"", h
->monitor_full_path
);
1114 static int mkdir_eexist_on_last(const char *dir
, mode_t mode
)
1116 const char *tmp
= dir
;
1117 const char *orig
= dir
;
1120 orig_len
= strlen(dir
);
1122 __do_free
char *makeme
= NULL
;
1126 dir
= tmp
+ strspn(tmp
, "/");
1127 tmp
= dir
+ strcspn(dir
, "/");
1129 cur_len
= dir
- orig
;
1130 makeme
= strndup(orig
, cur_len
);
1132 return ret_set_errno(-1, ENOMEM
);
1134 ret
= mkdir(makeme
, mode
);
1135 if (ret
< 0 && ((errno
!= EEXIST
) || (orig_len
== cur_len
)))
1136 return log_error_errno(-1, errno
, "Failed to create directory \"%s\"", makeme
);
1137 } while (tmp
!= dir
);
1142 static bool create_cgroup_tree(struct hierarchy
*h
, const char *cgroup_tree
,
1143 const char *cgroup_leaf
, bool payload
)
1145 __do_free
char *path
= NULL
;
1146 int ret
, ret_cpuset
;
1148 path
= must_make_path(h
->mountpoint
, h
->container_base_path
, cgroup_leaf
, NULL
);
1149 if (dir_exists(path
))
1150 return log_warn_errno(false, errno
, "The %s cgroup already existed", path
);
1152 ret_cpuset
= cg_legacy_handle_cpuset_hierarchy(h
, cgroup_leaf
);
1154 return log_error_errno(false, errno
, "Failed to handle legacy cpuset controller");
1156 ret
= mkdir_eexist_on_last(path
, 0755);
1159 * This is the cpuset controller and
1160 * cg_legacy_handle_cpuset_hierarchy() has created our target
1161 * directory for us to ensure correct initialization.
1163 if (ret_cpuset
!= 1 || cgroup_tree
)
1164 return log_error_errno(false, errno
, "Failed to create %s cgroup", path
);
1168 h
->cgfd_con
= lxc_open_dirfd(path
);
1169 if (h
->cgfd_con
< 0)
1170 return log_error_errno(false, errno
, "Failed to open %s", path
);
1171 h
->container_full_path
= move_ptr(path
);
1173 h
->cgfd_mon
= lxc_open_dirfd(path
);
1174 if (h
->cgfd_mon
< 0)
1175 return log_error_errno(false, errno
, "Failed to open %s", path
);
1176 h
->monitor_full_path
= move_ptr(path
);
1182 static void cgroup_remove_leaf(struct hierarchy
*h
, bool payload
)
1184 __do_free
char *full_path
= NULL
;
1187 __lxc_unused __do_close
int fd
= move_fd(h
->cgfd_con
);
1188 full_path
= move_ptr(h
->container_full_path
);
1190 __lxc_unused __do_close
int fd
= move_fd(h
->cgfd_mon
);
1191 full_path
= move_ptr(h
->monitor_full_path
);
1194 if (full_path
&& rmdir(full_path
))
1195 SYSWARN("Failed to rmdir(\"%s\") cgroup", full_path
);
1198 __cgfsng_ops
static inline bool cgfsng_monitor_create(struct cgroup_ops
*ops
,
1199 struct lxc_handler
*handler
)
1201 __do_free
char *monitor_cgroup
= NULL
, *__cgroup_tree
= NULL
;
1202 const char *cgroup_tree
;
1207 struct lxc_conf
*conf
;
1210 return ret_set_errno(false, ENOENT
);
1212 if (!ops
->hierarchies
)
1215 if (ops
->monitor_cgroup
)
1216 return ret_set_errno(false, EEXIST
);
1218 if (!handler
|| !handler
->conf
)
1219 return ret_set_errno(false, EINVAL
);
1221 conf
= handler
->conf
;
1223 if (conf
->cgroup_meta
.dir
) {
1224 cgroup_tree
= conf
->cgroup_meta
.dir
;
1225 monitor_cgroup
= must_concat(&len
, conf
->cgroup_meta
.dir
, "/",
1226 DEFAULT_MONITOR_CGROUP_PREFIX
,
1228 CGROUP_CREATE_RETRY
, NULL
);
1229 } else if (ops
->cgroup_pattern
) {
1230 __cgroup_tree
= lxc_string_replace("%n", handler
->name
, ops
->cgroup_pattern
);
1232 return ret_set_errno(false, ENOMEM
);
1234 cgroup_tree
= __cgroup_tree
;
1235 monitor_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1236 DEFAULT_MONITOR_CGROUP
,
1237 CGROUP_CREATE_RETRY
, NULL
);
1240 monitor_cgroup
= must_concat(&len
, DEFAULT_MONITOR_CGROUP_PREFIX
,
1242 CGROUP_CREATE_RETRY
, NULL
);
1244 if (!monitor_cgroup
)
1245 return ret_set_errno(false, ENOMEM
);
1247 suffix
= monitor_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1251 sprintf(suffix
, "-%d", idx
);
1253 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1254 if (create_cgroup_tree(ops
->hierarchies
[i
], cgroup_tree
, monitor_cgroup
, false))
1257 ERROR("Failed to create cgroup \"%s\"", ops
->hierarchies
[i
]->monitor_full_path
?: "(null)");
1258 for (int j
= 0; j
< i
; j
++)
1259 cgroup_remove_leaf(ops
->hierarchies
[j
], false);
1264 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000);
1267 return ret_set_errno(false, ERANGE
);
1269 ops
->monitor_cgroup
= move_ptr(monitor_cgroup
);
1270 return log_info(true, "The monitor process uses \"%s\" as cgroup", ops
->monitor_cgroup
);
1274 * Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1275 * next cgroup_pattern-1, -2, ..., -999.
1277 __cgfsng_ops
static inline bool cgfsng_payload_create(struct cgroup_ops
*ops
,
1278 struct lxc_handler
*handler
)
1280 __do_free
char *container_cgroup
= NULL
, *__cgroup_tree
= NULL
;
1281 const char *cgroup_tree
;
1286 struct lxc_conf
*conf
;
1289 return ret_set_errno(false, ENOENT
);
1291 if (!ops
->hierarchies
)
1294 if (ops
->container_cgroup
)
1295 return ret_set_errno(false, EEXIST
);
1297 if (!handler
|| !handler
->conf
)
1298 return ret_set_errno(false, EINVAL
);
1300 conf
= handler
->conf
;
1302 if (conf
->cgroup_meta
.dir
) {
1303 cgroup_tree
= conf
->cgroup_meta
.dir
;
1304 container_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1305 DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1307 CGROUP_CREATE_RETRY
, NULL
);
1308 } else if (ops
->cgroup_pattern
) {
1309 __cgroup_tree
= lxc_string_replace("%n", handler
->name
, ops
->cgroup_pattern
);
1311 return ret_set_errno(false, ENOMEM
);
1313 cgroup_tree
= __cgroup_tree
;
1314 container_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1315 DEFAULT_PAYLOAD_CGROUP
,
1316 CGROUP_CREATE_RETRY
, NULL
);
1319 container_cgroup
= must_concat(&len
, DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1321 CGROUP_CREATE_RETRY
, NULL
);
1323 if (!container_cgroup
)
1324 return ret_set_errno(false, ENOMEM
);
1326 suffix
= container_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1330 sprintf(suffix
, "-%d", idx
);
1332 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1333 if (create_cgroup_tree(ops
->hierarchies
[i
], cgroup_tree
, container_cgroup
, true))
1336 ERROR("Failed to create cgroup \"%s\"", ops
->hierarchies
[i
]->container_full_path
?: "(null)");
1337 for (int j
= 0; j
< i
; j
++)
1338 cgroup_remove_leaf(ops
->hierarchies
[j
], true);
1343 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000);
1346 return ret_set_errno(false, ERANGE
);
1348 ops
->container_cgroup
= move_ptr(container_cgroup
);
1349 INFO("The container process uses \"%s\" as cgroup", ops
->container_cgroup
);
1353 __cgfsng_ops
static bool cgfsng_monitor_enter(struct cgroup_ops
*ops
,
1354 struct lxc_handler
*handler
)
1356 int monitor_len
, transient_len
;
1357 char monitor
[INTTYPE_TO_STRLEN(pid_t
)],
1358 transient
[INTTYPE_TO_STRLEN(pid_t
)];
1361 return ret_set_errno(false, ENOENT
);
1363 if (!ops
->hierarchies
)
1366 if (!ops
->monitor_cgroup
)
1367 return ret_set_errno(false, ENOENT
);
1369 if (!handler
|| !handler
->conf
)
1370 return ret_set_errno(false, EINVAL
);
1372 monitor_len
= snprintf(monitor
, sizeof(monitor
), "%d", handler
->monitor_pid
);
1373 if (handler
->transient_pid
> 0)
1374 transient_len
= snprintf(transient
, sizeof(transient
), "%d", handler
->transient_pid
);
1376 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1377 struct hierarchy
*h
= ops
->hierarchies
[i
];
1380 ret
= lxc_writeat(h
->cgfd_mon
, "cgroup.procs", monitor
, monitor_len
);
1382 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", h
->monitor_full_path
);
1384 if (handler
->transient_pid
< 0)
1387 ret
= lxc_writeat(h
->cgfd_mon
, "cgroup.procs", transient
, transient_len
);
1389 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", h
->monitor_full_path
);
1392 * we don't keep the fds for non-unified hierarchies around
1393 * mainly because we don't make use of them anymore after the
1394 * core cgroup setup is done but also because there are quite a
1397 if (!is_unified_hierarchy(h
))
1398 close_prot_errno_disarm(h
->cgfd_mon
);
1400 handler
->transient_pid
= -1;
1405 __cgfsng_ops
static bool cgfsng_payload_enter(struct cgroup_ops
*ops
,
1406 struct lxc_handler
*handler
)
1409 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1412 return ret_set_errno(false, ENOENT
);
1414 if (!ops
->hierarchies
)
1417 if (!ops
->container_cgroup
)
1418 return ret_set_errno(false, ENOENT
);
1420 if (!handler
|| !handler
->conf
)
1421 return ret_set_errno(false, EINVAL
);
1423 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", handler
->pid
);
1425 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1426 struct hierarchy
*h
= ops
->hierarchies
[i
];
1429 ret
= lxc_writeat(h
->cgfd_con
, "cgroup.procs", pidstr
, len
);
1431 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", h
->container_full_path
);
1437 static int fchowmodat(int dirfd
, const char *path
, uid_t chown_uid
,
1438 gid_t chown_gid
, mode_t chmod_mode
)
1442 ret
= fchownat(dirfd
, path
, chown_uid
, chown_gid
,
1443 AT_EMPTY_PATH
| AT_SYMLINK_NOFOLLOW
);
1445 return log_warn_errno(-1,
1446 errno
, "Failed to fchownat(%d, %s, %d, %d, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )",
1447 dirfd
, path
, (int)chown_uid
,
1450 ret
= fchmodat(dirfd
, (*path
!= '\0') ? path
: ".", chmod_mode
, 0);
1452 return log_warn_errno(-1, errno
, "Failed to fchmodat(%d, %s, %d, AT_SYMLINK_NOFOLLOW)",
1453 dirfd
, path
, (int)chmod_mode
);
1458 /* chgrp the container cgroups to container group. We leave
1459 * the container owner as cgroup owner. So we must make the
1460 * directories 775 so that the container can create sub-cgroups.
1462 * Also chown the tasks and cgroup.procs files. Those may not
1463 * exist depending on kernel version.
1465 static int chown_cgroup_wrapper(void *data
)
1469 struct generic_userns_exec_data
*arg
= data
;
1470 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
1471 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
1473 if (!lxc_setgroups(0, NULL
) && errno
!= EPERM
)
1474 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
1476 ret
= setresgid(nsgid
, nsgid
, nsgid
);
1478 return log_error_errno(-1, errno
, "Failed to setresgid(%d, %d, %d)",
1479 (int)nsgid
, (int)nsgid
, (int)nsgid
);
1481 ret
= setresuid(nsuid
, nsuid
, nsuid
);
1483 return log_error_errno(-1, errno
, "Failed to setresuid(%d, %d, %d)",
1484 (int)nsuid
, (int)nsuid
, (int)nsuid
);
1486 destuid
= get_ns_uid(arg
->origuid
);
1487 if (destuid
== LXC_INVALID_UID
)
1490 for (int i
= 0; arg
->hierarchies
[i
]; i
++) {
1491 int dirfd
= arg
->hierarchies
[i
]->cgfd_con
;
1493 (void)fchowmodat(dirfd
, "", destuid
, nsgid
, 0775);
1496 * Failures to chown() these are inconvenient but not
1497 * detrimental We leave these owned by the container launcher,
1498 * so that container root can write to the files to attach. We
1499 * chmod() them 664 so that container systemd can write to the
1500 * files (which systemd in wily insists on doing).
1503 if (arg
->hierarchies
[i
]->version
== CGROUP_SUPER_MAGIC
)
1504 (void)fchowmodat(dirfd
, "tasks", destuid
, nsgid
, 0664);
1506 (void)fchowmodat(dirfd
, "cgroup.procs", destuid
, nsgid
, 0664);
1508 if (arg
->hierarchies
[i
]->version
!= CGROUP2_SUPER_MAGIC
)
1511 for (char **p
= arg
->hierarchies
[i
]->cgroup2_chown
; p
&& *p
; p
++)
1512 (void)fchowmodat(dirfd
, *p
, destuid
, nsgid
, 0664);
1518 __cgfsng_ops
static bool cgfsng_chown(struct cgroup_ops
*ops
,
1519 struct lxc_conf
*conf
)
1521 struct generic_userns_exec_data wrap
;
1524 return ret_set_errno(false, ENOENT
);
1526 if (!ops
->hierarchies
)
1529 if (!ops
->container_cgroup
)
1530 return ret_set_errno(false, ENOENT
);
1533 return ret_set_errno(false, EINVAL
);
1535 if (lxc_list_empty(&conf
->id_map
))
1538 wrap
.origuid
= geteuid();
1540 wrap
.hierarchies
= ops
->hierarchies
;
1543 if (userns_exec_1(conf
, chown_cgroup_wrapper
, &wrap
, "chown_cgroup_wrapper") < 0)
1544 return log_error_errno(false, errno
, "Error requesting cgroup chown in new user namespace");
1549 __cgfsng_ops
void cgfsng_payload_finalize(struct cgroup_ops
*ops
)
1554 if (!ops
->hierarchies
)
1557 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1558 struct hierarchy
*h
= ops
->hierarchies
[i
];
1560 * we don't keep the fds for non-unified hierarchies around
1561 * mainly because we don't make use of them anymore after the
1562 * core cgroup setup is done but also because there are quite a
1565 if (!is_unified_hierarchy(h
))
1566 close_prot_errno_disarm(h
->cgfd_con
);
1570 /* cgroup-full:* is done, no need to create subdirs */
1571 static inline bool cg_mount_needs_subdirs(int type
)
1573 return !(type
>= LXC_AUTO_CGROUP_FULL_RO
);
1576 /* After $rootfs/sys/fs/container/controller/the/cg/path has been created,
1577 * remount controller ro if needed and bindmount the cgroupfs onto
1578 * control/the/cg/path.
1580 static int cg_legacy_mount_controllers(int type
, struct hierarchy
*h
,
1581 char *controllerpath
, char *cgpath
,
1582 const char *container_cgroup
)
1584 __do_free
char *sourcepath
= NULL
;
1585 int ret
, remount_flags
;
1586 int flags
= MS_BIND
;
1588 if (type
== LXC_AUTO_CGROUP_RO
|| type
== LXC_AUTO_CGROUP_MIXED
) {
1589 ret
= mount(controllerpath
, controllerpath
, "cgroup", MS_BIND
, NULL
);
1591 return log_error_errno(-1, errno
, "Failed to bind mount \"%s\" onto \"%s\"",
1592 controllerpath
, controllerpath
);
1594 remount_flags
= add_required_remount_flags(controllerpath
,
1596 flags
| MS_REMOUNT
);
1597 ret
= mount(controllerpath
, controllerpath
, "cgroup",
1598 remount_flags
| MS_REMOUNT
| MS_BIND
| MS_RDONLY
,
1601 return log_error_errno(-1, errno
, "Failed to remount \"%s\" ro", controllerpath
);
1603 INFO("Remounted %s read-only", controllerpath
);
1606 sourcepath
= must_make_path(h
->mountpoint
, h
->container_base_path
,
1607 container_cgroup
, NULL
);
1608 if (type
== LXC_AUTO_CGROUP_RO
)
1611 ret
= mount(sourcepath
, cgpath
, "cgroup", flags
, NULL
);
1613 return log_error_errno(-1, errno
, "Failed to mount \"%s\" onto \"%s\"",
1614 h
->controllers
[0], cgpath
);
1615 INFO("Mounted \"%s\" onto \"%s\"", h
->controllers
[0], cgpath
);
1617 if (flags
& MS_RDONLY
) {
1618 remount_flags
= add_required_remount_flags(sourcepath
, cgpath
,
1619 flags
| MS_REMOUNT
);
1620 ret
= mount(sourcepath
, cgpath
, "cgroup", remount_flags
, NULL
);
1622 return log_error_errno(-1, errno
, "Failed to remount \"%s\" ro", cgpath
);
1623 INFO("Remounted %s read-only", cgpath
);
1626 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath
);
1630 /* __cg_mount_direct
1632 * Mount cgroup hierarchies directly without using bind-mounts. The main
1633 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1634 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1636 static int __cg_mount_direct(int type
, struct hierarchy
*h
,
1637 const char *controllerpath
)
1639 __do_free
char *controllers
= NULL
;
1640 char *fstype
= "cgroup2";
1641 unsigned long flags
= 0;
1647 flags
|= MS_RELATIME
;
1649 if (type
== LXC_AUTO_CGROUP_RO
|| type
== LXC_AUTO_CGROUP_FULL_RO
)
1652 if (h
->version
!= CGROUP2_SUPER_MAGIC
) {
1653 controllers
= lxc_string_join(",", (const char **)h
->controllers
, false);
1659 ret
= mount("cgroup", controllerpath
, fstype
, flags
, controllers
);
1661 return log_error_errno(-1, errno
, "Failed to mount \"%s\" with cgroup filesystem type %s",
1662 controllerpath
, fstype
);
1664 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath
, fstype
);
/* Thin alias: a cgroup-namespace mount is just a direct cgroupfs mount. */
static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
					       const char *controllerpath)
{
	return __cg_mount_direct(type, h, controllerpath);
}
1674 static inline int cg_mount_cgroup_full(int type
, struct hierarchy
*h
,
1675 const char *controllerpath
)
1677 if (type
< LXC_AUTO_CGROUP_FULL_RO
|| type
> LXC_AUTO_CGROUP_FULL_MIXED
)
1680 return __cg_mount_direct(type
, h
, controllerpath
);
1683 __cgfsng_ops
static bool cgfsng_mount(struct cgroup_ops
*ops
,
1684 struct lxc_handler
*handler
,
1685 const char *root
, int type
)
1687 __do_free
char *cgroup_root
= NULL
;
1688 bool has_cgns
= false, wants_force_mount
= false;
1692 return ret_set_errno(false, ENOENT
);
1694 if (!ops
->hierarchies
)
1697 if (!handler
|| !handler
->conf
)
1698 return ret_set_errno(false, EINVAL
);
1700 if ((type
& LXC_AUTO_CGROUP_MASK
) == 0)
1703 if (type
& LXC_AUTO_CGROUP_FORCE
) {
1704 type
&= ~LXC_AUTO_CGROUP_FORCE
;
1705 wants_force_mount
= true;
1708 if (!wants_force_mount
){
1709 if (!lxc_list_empty(&handler
->conf
->keepcaps
))
1710 wants_force_mount
= !in_caplist(CAP_SYS_ADMIN
, &handler
->conf
->keepcaps
);
1712 wants_force_mount
= in_caplist(CAP_SYS_ADMIN
, &handler
->conf
->caps
);
1715 has_cgns
= cgns_supported();
1716 if (has_cgns
&& !wants_force_mount
)
1719 if (type
== LXC_AUTO_CGROUP_NOSPEC
)
1720 type
= LXC_AUTO_CGROUP_MIXED
;
1721 else if (type
== LXC_AUTO_CGROUP_FULL_NOSPEC
)
1722 type
= LXC_AUTO_CGROUP_FULL_MIXED
;
1724 cgroup_root
= must_make_path(root
, DEFAULT_CGROUP_MOUNTPOINT
, NULL
);
1725 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
) {
1726 if (has_cgns
&& wants_force_mount
) {
1728 * If cgroup namespaces are supported but the container
1729 * will not have CAP_SYS_ADMIN after it has started we
1730 * need to mount the cgroups manually.
1732 return cg_mount_in_cgroup_namespace(type
, ops
->unified
, cgroup_root
) == 0;
1735 return cg_mount_cgroup_full(type
, ops
->unified
, cgroup_root
) == 0;
1739 ret
= safe_mount(NULL
, cgroup_root
, "tmpfs",
1740 MS_NOSUID
| MS_NODEV
| MS_NOEXEC
| MS_RELATIME
,
1741 "size=10240k,mode=755", root
);
1745 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1746 __do_free
char *controllerpath
= NULL
, *path2
= NULL
;
1747 struct hierarchy
*h
= ops
->hierarchies
[i
];
1748 char *controller
= strrchr(h
->mountpoint
, '/');
1754 controllerpath
= must_make_path(cgroup_root
, controller
, NULL
);
1755 if (dir_exists(controllerpath
))
1758 ret
= mkdir(controllerpath
, 0755);
1760 return log_error_errno(false, errno
, "Error creating cgroup path: %s", controllerpath
);
1762 if (has_cgns
&& wants_force_mount
) {
1763 /* If cgroup namespaces are supported but the container
1764 * will not have CAP_SYS_ADMIN after it has started we
1765 * need to mount the cgroups manually.
1767 ret
= cg_mount_in_cgroup_namespace(type
, h
, controllerpath
);
1774 ret
= cg_mount_cgroup_full(type
, h
, controllerpath
);
1778 if (!cg_mount_needs_subdirs(type
))
1781 path2
= must_make_path(controllerpath
, h
->container_base_path
,
1782 ops
->container_cgroup
, NULL
);
1783 ret
= mkdir_p(path2
, 0755);
1787 ret
= cg_legacy_mount_controllers(type
, h
, controllerpath
,
1788 path2
, ops
->container_cgroup
);
1796 /* Only root needs to escape to the cgroup of its init. */
1797 __cgfsng_ops
static bool cgfsng_escape(const struct cgroup_ops
*ops
,
1798 struct lxc_conf
*conf
)
1801 return ret_set_errno(false, ENOENT
);
1803 if (!ops
->hierarchies
)
1807 return ret_set_errno(false, EINVAL
);
1809 if (conf
->cgroup_meta
.relative
|| geteuid())
1812 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1813 __do_free
char *fullpath
= NULL
;
1817 must_make_path(ops
->hierarchies
[i
]->mountpoint
,
1818 ops
->hierarchies
[i
]->container_base_path
,
1819 "cgroup.procs", NULL
);
1820 ret
= lxc_write_to_file(fullpath
, "0", 2, false, 0666);
1822 return log_error_errno(false, errno
, "Failed to escape to cgroup \"%s\"", fullpath
);
1828 __cgfsng_ops
static int cgfsng_num_hierarchies(struct cgroup_ops
*ops
)
1833 return ret_set_errno(-1, ENOENT
);
1835 if (!ops
->hierarchies
)
1838 for (; ops
->hierarchies
[i
]; i
++)
1844 __cgfsng_ops
static bool cgfsng_get_hierarchies(struct cgroup_ops
*ops
, int n
,
1850 return ret_set_errno(false, ENOENT
);
1852 if (!ops
->hierarchies
)
1853 return ret_set_errno(false, ENOENT
);
1855 /* sanity check n */
1856 for (i
= 0; i
< n
; i
++)
1857 if (!ops
->hierarchies
[i
])
1858 return ret_set_errno(false, ENOENT
);
1860 *out
= ops
->hierarchies
[i
]->controllers
;
1865 static bool cg_legacy_freeze(struct cgroup_ops
*ops
)
1867 struct hierarchy
*h
;
1869 h
= get_hierarchy(ops
, "freezer");
1871 return ret_set_errno(-1, ENOENT
);
1873 return lxc_write_openat(h
->container_full_path
, "freezer.state",
1874 "FROZEN", STRLITERALLEN("FROZEN"));
1877 static int freezer_cgroup_events_cb(int fd
, uint32_t events
, void *cbdata
,
1878 struct lxc_epoll_descr
*descr
)
1880 __do_close
int duped_fd
= -EBADF
;
1881 __do_free
char *line
= NULL
;
1882 __do_fclose
FILE *f
= NULL
;
1883 int state
= PTR_TO_INT(cbdata
);
1885 const char *state_string
;
1889 return LXC_MAINLOOP_ERROR
;
1891 if (lseek(duped_fd
, 0, SEEK_SET
) < (off_t
)-1)
1892 return LXC_MAINLOOP_ERROR
;
1894 f
= fdopen(duped_fd
, "re");
1896 return LXC_MAINLOOP_ERROR
;
1900 state_string
= "frozen 1";
1902 state_string
= "frozen 0";
1904 while (getline(&line
, &len
, f
) != -1)
1905 if (strncmp(line
, state_string
, STRLITERALLEN("frozen") + 2) == 0)
1906 return LXC_MAINLOOP_CLOSE
;
1908 return LXC_MAINLOOP_CONTINUE
;
1911 static int cg_unified_freeze(struct cgroup_ops
*ops
, int timeout
)
1913 __do_close
int fd
= -EBADF
;
1914 call_cleaner(lxc_mainloop_close
) struct lxc_epoll_descr
*descr_ptr
= NULL
;
1916 struct lxc_epoll_descr descr
;
1917 struct hierarchy
*h
;
1921 return ret_set_errno(-1, ENOENT
);
1923 if (!h
->container_full_path
)
1924 return ret_set_errno(-1, EEXIST
);
1927 __do_free
char *events_file
= NULL
;
1929 events_file
= must_make_path(h
->container_full_path
, "cgroup.events", NULL
);
1930 fd
= open(events_file
, O_RDONLY
| O_CLOEXEC
);
1932 return log_error_errno(-1, errno
, "Failed to open cgroup.events file");
1934 ret
= lxc_mainloop_open(&descr
);
1936 return log_error_errno(-1, errno
, "Failed to create epoll instance to wait for container freeze");
1938 /* automatically cleaned up now */
1941 ret
= lxc_mainloop_add_handler(&descr
, fd
, freezer_cgroup_events_cb
, INT_TO_PTR((int){1}));
1943 return log_error_errno(-1, errno
, "Failed to add cgroup.events fd handler to mainloop");
1946 ret
= lxc_write_openat(h
->container_full_path
, "cgroup.freeze", "1", 1);
1948 return log_error_errno(-1, errno
, "Failed to open cgroup.freeze file");
1950 if (timeout
!= 0 && lxc_mainloop(&descr
, timeout
))
1951 return log_error_errno(-1, errno
, "Failed to wait for container to be frozen");
1956 __cgfsng_ops
static int cgfsng_freeze(struct cgroup_ops
*ops
, int timeout
)
1958 if (!ops
->hierarchies
)
1959 return ret_set_errno(-1, ENOENT
);
1961 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
1962 return cg_legacy_freeze(ops
);
1964 return cg_unified_freeze(ops
, timeout
);
1967 static int cg_legacy_unfreeze(struct cgroup_ops
*ops
)
1969 struct hierarchy
*h
;
1971 h
= get_hierarchy(ops
, "freezer");
1973 return ret_set_errno(-1, ENOENT
);
1975 return lxc_write_openat(h
->container_full_path
, "freezer.state",
1976 "THAWED", STRLITERALLEN("THAWED"));
1979 static int cg_unified_unfreeze(struct cgroup_ops
*ops
, int timeout
)
1981 __do_close
int fd
= -EBADF
;
1982 call_cleaner(lxc_mainloop_close
)struct lxc_epoll_descr
*descr_ptr
= NULL
;
1984 struct lxc_epoll_descr descr
;
1985 struct hierarchy
*h
;
1989 return ret_set_errno(-1, ENOENT
);
1991 if (!h
->container_full_path
)
1992 return ret_set_errno(-1, EEXIST
);
1995 __do_free
char *events_file
= NULL
;
1997 events_file
= must_make_path(h
->container_full_path
, "cgroup.events", NULL
);
1998 fd
= open(events_file
, O_RDONLY
| O_CLOEXEC
);
2000 return log_error_errno(-1, errno
, "Failed to open cgroup.events file");
2002 ret
= lxc_mainloop_open(&descr
);
2004 return log_error_errno(-1, errno
, "Failed to create epoll instance to wait for container unfreeze");
2006 /* automatically cleaned up now */
2009 ret
= lxc_mainloop_add_handler(&descr
, fd
, freezer_cgroup_events_cb
, INT_TO_PTR((int){0}));
2011 return log_error_errno(-1, errno
, "Failed to add cgroup.events fd handler to mainloop");
2014 ret
= lxc_write_openat(h
->container_full_path
, "cgroup.freeze", "0", 1);
2016 return log_error_errno(-1, errno
, "Failed to open cgroup.freeze file");
2018 if (timeout
!= 0 && lxc_mainloop(&descr
, timeout
))
2019 return log_error_errno(-1, errno
, "Failed to wait for container to be unfrozen");
2024 __cgfsng_ops
static int cgfsng_unfreeze(struct cgroup_ops
*ops
, int timeout
)
2026 if (!ops
->hierarchies
)
2027 return ret_set_errno(-1, ENOENT
);
2029 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
2030 return cg_legacy_unfreeze(ops
);
2032 return cg_unified_unfreeze(ops
, timeout
);
2035 __cgfsng_ops
static const char *cgfsng_get_cgroup(struct cgroup_ops
*ops
,
2036 const char *controller
)
2038 struct hierarchy
*h
;
2040 h
= get_hierarchy(ops
, controller
);
2042 return log_warn_errno(NULL
, ENOENT
, "Failed to find hierarchy for controller \"%s\"",
2043 controller
? controller
: "(null)");
2045 return h
->container_full_path
2046 ? h
->container_full_path
+ strlen(h
->mountpoint
)
2050 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2051 * which must be freed by the caller.
2053 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy
*h
,
2055 const char *filename
)
2057 return must_make_path(h
->mountpoint
, inpath
, filename
, NULL
);
2060 static int cgroup_attach_leaf(const struct lxc_conf
*conf
, int unified_fd
, pid_t pid
)
2064 char pidstr
[INTTYPE_TO_STRLEN(int64_t) + 1];
2067 /* Create leaf cgroup. */
2068 ret
= mkdirat(unified_fd
, "lxc", 0755);
2069 if (ret
< 0 && errno
!= EEXIST
)
2070 return log_error_errno(-1, errno
, "Failed to create leaf cgroup \"lxc\"");
2072 pidstr_len
= sprintf(pidstr
, INT64_FMT
, (int64_t)pid
);
2073 ret
= lxc_writeat(unified_fd
, "lxc/cgroup.procs", pidstr
, pidstr_len
);
2075 ret
= lxc_writeat(unified_fd
, "cgroup.procs", pidstr
, pidstr_len
);
2079 /* this is a non-leaf node */
2081 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2085 char attach_cgroup
[STRLITERALLEN("lxc-1000/cgroup.procs") + 1];
2088 sprintf(attach_cgroup
, "lxc-%d/cgroup.procs", idx
);
2089 slash
= &attach_cgroup
[ret
] - STRLITERALLEN("/cgroup.procs");
2092 ret
= mkdirat(unified_fd
, attach_cgroup
, 0755);
2093 if (ret
< 0 && errno
!= EEXIST
)
2094 return log_error_errno(-1, errno
, "Failed to create cgroup %s", attach_cgroup
);
2100 ret
= lxc_writeat(unified_fd
, attach_cgroup
, pidstr
, pidstr_len
);
2104 if (rm
&& unlinkat(unified_fd
, attach_cgroup
, AT_REMOVEDIR
))
2105 SYSERROR("Failed to remove cgroup \"%d(%s)\"", unified_fd
, attach_cgroup
);
2107 /* this is a non-leaf node */
2109 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2112 } while (idx
< 1000);
2114 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
/* Argument bundle for cgroup_unified_attach_wrapper(), run via
 * userns_exec_minimal().
 * NOTE(review): the unified_fd/pid members are not visible in the mangled
 * extraction but are read by the wrapper below — confirm field order against
 * upstream cgfs-ng.c.
 */
struct userns_exec_unified_attach_data {
	const struct lxc_conf *conf;	/* container configuration */
	int unified_fd;			/* O_PATH fd of the unified cgroup dir */
	pid_t pid;			/* pid to attach */
};
2123 static int cgroup_unified_attach_wrapper(void *data
)
2125 struct userns_exec_unified_attach_data
*args
= data
;
2130 if (!args
->conf
|| args
->unified_fd
< 0 || args
->pid
<= 0)
2131 return ret_errno(EINVAL
);
2133 if (!lxc_setgroups(0, NULL
) && errno
!= EPERM
)
2134 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
2136 nsuid
= (args
->conf
->root_nsuid_map
!= NULL
) ? 0 : args
->conf
->init_uid
;
2137 nsgid
= (args
->conf
->root_nsgid_map
!= NULL
) ? 0 : args
->conf
->init_gid
;
2139 ret
= setresgid(nsgid
, nsgid
, nsgid
);
2141 return log_error_errno(-1, errno
, "Failed to setresgid(%d, %d, %d)",
2142 (int)nsgid
, (int)nsgid
, (int)nsgid
);
2144 ret
= setresuid(nsuid
, nsuid
, nsuid
);
2146 return log_error_errno(-1, errno
, "Failed to setresuid(%d, %d, %d)",
2147 (int)nsuid
, (int)nsuid
, (int)nsuid
);
2149 return cgroup_attach_leaf(args
->conf
, args
->unified_fd
, args
->pid
);
2152 int cgroup_attach(const struct lxc_conf
*conf
, const char *name
,
2153 const char *lxcpath
, pid_t pid
)
2155 __do_close
int unified_fd
= -EBADF
;
2158 if (!conf
|| !name
|| !lxcpath
|| pid
<= 0)
2159 return ret_errno(EINVAL
);
2161 unified_fd
= lxc_cmd_get_cgroup2_fd(name
, lxcpath
);
2163 return ret_errno(EBADF
);
2165 if (!lxc_list_empty(&conf
->id_map
)) {
2166 struct userns_exec_unified_attach_data args
= {
2168 .unified_fd
= unified_fd
,
2172 ret
= userns_exec_minimal(conf
, cgroup_unified_attach_wrapper
, &args
);
2174 ret
= cgroup_attach_leaf(conf
, unified_fd
, pid
);
2180 /* Technically, we're always at a delegation boundary here (This is especially
2181 * true when cgroup namespaces are available.). The reasoning is that in order
2182 * for us to have been able to start a container in the first place the root
2183 * cgroup must have been a leaf node. Now, either the container's init system
2184 * has populated the cgroup and kept it as a leaf node or it has created
2185 * subtrees. In the former case we will simply attach to the leaf node we
2186 * created when we started the container in the latter case we create our own
2187 * cgroup for the attaching process.
2189 static int __cg_unified_attach(const struct hierarchy
*h
,
2190 const struct lxc_conf
*conf
, const char *name
,
2191 const char *lxcpath
, pid_t pid
,
2192 const char *controller
)
2194 __do_close
int unified_fd
= -EBADF
;
2195 __do_free
char *path
= NULL
, *cgroup
= NULL
;
2198 if (!conf
|| !name
|| !lxcpath
|| pid
<= 0)
2199 return ret_errno(EINVAL
);
2201 ret
= cgroup_attach(conf
, name
, lxcpath
, pid
);
2203 return log_trace(0, "Attached to unified cgroup via command handler");
2205 return log_error_errno(ret
, errno
, "Failed to attach to unified cgroup");
2207 /* Fall back to retrieving the path for the unified cgroup. */
2208 cgroup
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2213 path
= must_make_path(h
->mountpoint
, cgroup
, NULL
);
2215 unified_fd
= open(path
, O_PATH
| O_DIRECTORY
| O_CLOEXEC
);
2217 return ret_errno(EBADF
);
2219 if (!lxc_list_empty(&conf
->id_map
)) {
2220 struct userns_exec_unified_attach_data args
= {
2222 .unified_fd
= unified_fd
,
2226 ret
= userns_exec_minimal(conf
, cgroup_unified_attach_wrapper
, &args
);
2228 ret
= cgroup_attach_leaf(conf
, unified_fd
, pid
);
2234 __cgfsng_ops
static bool cgfsng_attach(struct cgroup_ops
*ops
,
2235 const struct lxc_conf
*conf
,
2236 const char *name
, const char *lxcpath
,
2240 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
2243 return ret_set_errno(false, ENOENT
);
2245 if (!ops
->hierarchies
)
2248 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", pid
);
2249 if (len
< 0 || (size_t)len
>= sizeof(pidstr
))
2252 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
2253 __do_free
char *fullpath
= NULL
, *path
= NULL
;
2254 struct hierarchy
*h
= ops
->hierarchies
[i
];
2256 if (h
->version
== CGROUP2_SUPER_MAGIC
) {
2257 ret
= __cg_unified_attach(h
, conf
, name
, lxcpath
, pid
,
2265 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, h
->controllers
[0]);
2270 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, "cgroup.procs");
2271 ret
= lxc_write_to_file(fullpath
, pidstr
, len
, false, 0666);
2273 return log_error_errno(false, errno
, "Failed to attach %d to %s",
2274 (int)pid
, fullpath
);
2280 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2281 * don't have a cgroup_data set up, so we ask the running container through the
2282 * commands API for the cgroup path.
2284 __cgfsng_ops
static int cgfsng_get(struct cgroup_ops
*ops
, const char *filename
,
2285 char *value
, size_t len
, const char *name
,
2286 const char *lxcpath
)
2288 __do_free
char *path
= NULL
;
2289 __do_free
char *controller
= NULL
;
2291 struct hierarchy
*h
;
2295 return ret_set_errno(-1, ENOENT
);
2297 controller
= must_copy_string(filename
);
2298 p
= strchr(controller
, '.');
2302 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2307 h
= get_hierarchy(ops
, controller
);
2309 __do_free
char *fullpath
= NULL
;
2311 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, filename
);
2312 ret
= lxc_read_from_file(fullpath
, value
, len
);
2318 static int device_cgroup_parse_access(struct device_item
*device
, const char *val
)
2320 for (int count
= 0; count
< 3; count
++, val
++) {
2323 device
->access
[count
] = *val
;
2326 device
->access
[count
] = *val
;
2329 device
->access
[count
] = *val
;
2336 return ret_errno(EINVAL
);
2343 static int device_cgroup_rule_parse(struct device_item
*device
, const char *key
,
2349 if (strcmp("devices.allow", key
) == 0)
2354 if (strcmp(val
, "a") == 0) {
2359 device
->global_rule
= device
->allow
2360 ? LXC_BPF_DEVICE_CGROUP_BLACKLIST
2361 : LXC_BPF_DEVICE_CGROUP_WHITELIST
;
2367 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
;
2375 device
->type
= *val
;
2388 } else if (isdigit(*val
)) {
2389 memset(temp
, 0, sizeof(temp
));
2390 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2396 ret
= lxc_safe_int(temp
, &device
->major
);
2410 } else if (isdigit(*val
)) {
2411 memset(temp
, 0, sizeof(temp
));
2412 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2418 ret
= lxc_safe_int(temp
, &device
->minor
);
2427 return device_cgroup_parse_access(device
, ++val
);
2430 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2431 * don't have a cgroup_data set up, so we ask the running container through the
2432 * commands API for the cgroup path.
2434 __cgfsng_ops
static int cgfsng_set(struct cgroup_ops
*ops
,
2435 const char *key
, const char *value
,
2436 const char *name
, const char *lxcpath
)
2438 __do_free
char *path
= NULL
;
2439 __do_free
char *controller
= NULL
;
2441 struct hierarchy
*h
;
2445 return ret_set_errno(-1, ENOENT
);
2447 controller
= must_copy_string(key
);
2448 p
= strchr(controller
, '.');
2452 if (pure_unified_layout(ops
) && strcmp(controller
, "devices") == 0) {
2453 struct device_item device
= {0};
2455 ret
= device_cgroup_rule_parse(&device
, key
, value
);
2457 return log_error_errno(-1, EINVAL
, "Failed to parse device string %s=%s",
2460 ret
= lxc_cmd_add_bpf_device_cgroup(name
, lxcpath
, &device
);
2467 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2472 h
= get_hierarchy(ops
, controller
);
2474 __do_free
char *fullpath
= NULL
;
2476 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, key
);
2477 ret
= lxc_write_to_file(fullpath
, value
, strlen(value
), false, 0666);
2483 /* take devices cgroup line
2485 * and convert it to a valid
2486 * type major:minor mode
2487 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2490 static int device_cgroup_rule_parse_devpath(struct device_item
*device
,
2491 const char *devpath
)
2493 __do_free
char *path
= NULL
;
2499 path
= must_copy_string(devpath
);
2502 * Read path followed by mode. Ignore any trailing text.
2503 * A ' # comment' would be legal. Technically other text is not
2504 * legal, we could check for that if we cared to.
2506 for (n_parts
= 1, p
= path
; *p
; p
++) {
2522 return ret_set_errno(-1, EINVAL
);
2525 if (device_cgroup_parse_access(device
, mode
) < 0)
2529 return ret_set_errno(-1, EINVAL
);
2531 ret
= stat(path
, &sb
);
2533 return ret_set_errno(-1, errno
);
2535 mode_t m
= sb
.st_mode
& S_IFMT
;
2544 return log_error_errno(-1, EINVAL
, "Unsupported device type %i for \"%s\"", m
, path
);
2547 device
->major
= MAJOR(sb
.st_rdev
);
2548 device
->minor
= MINOR(sb
.st_rdev
);
2550 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
;
2555 static int convert_devpath(const char *invalue
, char *dest
)
2557 struct device_item device
= {0};
2560 ret
= device_cgroup_rule_parse_devpath(&device
, invalue
);
2564 ret
= snprintf(dest
, 50, "%c %d:%d %s", device
.type
, device
.major
,
2565 device
.minor
, device
.access
);
2566 if (ret
< 0 || ret
>= 50)
2567 return log_error_errno(-1, ENAMETOOLONG
, "Error on configuration value \"%c %d:%d %s\" (max 50 chars)",
2568 device
.type
, device
.major
, device
.minor
, device
.access
);
2573 /* Called from setup_limits - here we have the container's cgroup_data because
2574 * we created the cgroups.
2576 static int cg_legacy_set_data(struct cgroup_ops
*ops
, const char *filename
,
2579 __do_free
char *controller
= NULL
;
2581 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2582 char converted_value
[50];
2583 struct hierarchy
*h
;
2585 controller
= must_copy_string(filename
);
2586 p
= strchr(controller
, '.');
2590 if (strcmp("devices.allow", filename
) == 0 && value
[0] == '/') {
2593 ret
= convert_devpath(value
, converted_value
);
2596 value
= converted_value
;
2599 h
= get_hierarchy(ops
, controller
);
2601 return log_error_errno(-ENOENT
, ENOENT
, "Failed to setup limits for the \"%s\" controller. The controller seems to be unused by \"cgfsng\" cgroup driver or not enabled on the cgroup hierarchy", controller
);
2603 return lxc_write_openat(h
->container_full_path
, filename
, value
, strlen(value
));
2606 __cgfsng_ops
static bool cgfsng_setup_limits_legacy(struct cgroup_ops
*ops
,
2607 struct lxc_conf
*conf
,
2610 __do_free
struct lxc_list
*sorted_cgroup_settings
= NULL
;
2611 struct lxc_list
*cgroup_settings
= &conf
->cgroup
;
2612 struct lxc_list
*iterator
, *next
;
2613 struct lxc_cgroup
*cg
;
2617 return ret_set_errno(false, ENOENT
);
2620 return ret_set_errno(false, EINVAL
);
2622 cgroup_settings
= &conf
->cgroup
;
2623 if (lxc_list_empty(cgroup_settings
))
2626 if (!ops
->hierarchies
)
2627 return ret_set_errno(false, EINVAL
);
2629 sorted_cgroup_settings
= sort_cgroup_settings(cgroup_settings
);
2630 if (!sorted_cgroup_settings
)
2633 lxc_list_for_each(iterator
, sorted_cgroup_settings
) {
2634 cg
= iterator
->elem
;
2636 if (do_devices
== !strncmp("devices", cg
->subsystem
, 7)) {
2637 if (cg_legacy_set_data(ops
, cg
->subsystem
, cg
->value
)) {
2638 if (do_devices
&& (errno
== EACCES
|| errno
== EPERM
)) {
2639 SYSWARN("Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2642 SYSERROR("Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2645 DEBUG("Set controller \"%s\" set to \"%s\"", cg
->subsystem
, cg
->value
);
2650 INFO("Limits for the legacy cgroup hierarchies have been setup");
2652 lxc_list_for_each_safe(iterator
, sorted_cgroup_settings
, next
) {
2653 lxc_list_del(iterator
);
/*
 * Some of the parsing logic comes from the original cgroup device v1
 * implementation in the kernel.
 *
 * Parse a device rule (path form or canonical form) and queue it on the
 * container's bpf device list; compiled out without bpf cgroup-device
 * support.
 */
static int bpf_device_cgroup_prepare(struct cgroup_ops *ops,
				     struct lxc_conf *conf, const char *key,
				     const char *val)
{
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
	struct device_item device_item = {0};
	int ret;

	if (strcmp("devices.allow", key) == 0 && *val == '/')
		ret = device_cgroup_rule_parse_devpath(&device_item, val);
	else
		ret = device_cgroup_rule_parse(&device_item, key, val);
	if (ret < 0)
		return log_error_errno(-1, EINVAL, "Failed to parse device string %s=%s", key, val);

	ret = bpf_list_add_device(conf, &device_item);
	if (ret < 0)
		return -1;
#endif
	return 0;
}
2686 __cgfsng_ops
static bool cgfsng_setup_limits(struct cgroup_ops
*ops
,
2687 struct lxc_handler
*handler
)
2689 struct lxc_list
*cgroup_settings
, *iterator
;
2690 struct hierarchy
*h
;
2691 struct lxc_conf
*conf
;
2694 return ret_set_errno(false, ENOENT
);
2696 if (!ops
->hierarchies
)
2699 if (!ops
->container_cgroup
)
2700 return ret_set_errno(false, EINVAL
);
2702 if (!handler
|| !handler
->conf
)
2703 return ret_set_errno(false, EINVAL
);
2704 conf
= handler
->conf
;
2706 if (lxc_list_empty(&conf
->cgroup2
))
2708 cgroup_settings
= &conf
->cgroup2
;
2714 lxc_list_for_each (iterator
, cgroup_settings
) {
2715 struct lxc_cgroup
*cg
= iterator
->elem
;
2718 if (strncmp("devices", cg
->subsystem
, 7) == 0) {
2719 ret
= bpf_device_cgroup_prepare(ops
, conf
, cg
->subsystem
,
2722 ret
= lxc_write_openat(h
->container_full_path
,
2723 cg
->subsystem
, cg
->value
,
2726 return log_error_errno(false, errno
, "Failed to set \"%s\" to \"%s\"",
2727 cg
->subsystem
, cg
->value
);
2729 TRACE("Set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2732 return log_info(true, "Limits for the unified cgroup hierarchy have been setup");
2735 __cgfsng_ops
bool cgfsng_devices_activate(struct cgroup_ops
*ops
,
2736 struct lxc_handler
*handler
)
2738 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
2739 __do_bpf_program_free
struct bpf_program
*devices
= NULL
;
2741 struct lxc_conf
*conf
;
2742 struct hierarchy
*unified
;
2743 struct lxc_list
*it
;
2744 struct bpf_program
*devices_old
;
2747 return ret_set_errno(false, ENOENT
);
2749 if (!ops
->hierarchies
)
2752 if (!ops
->container_cgroup
)
2753 return ret_set_errno(false, EEXIST
);
2755 if (!handler
|| !handler
->conf
)
2756 return ret_set_errno(false, EINVAL
);
2757 conf
= handler
->conf
;
2759 unified
= ops
->unified
;
2760 if (!unified
|| !unified
->bpf_device_controller
||
2761 !unified
->container_full_path
|| lxc_list_empty(&conf
->devices
))
2764 devices
= bpf_program_new(BPF_PROG_TYPE_CGROUP_DEVICE
);
2766 return log_error_errno(false, ENOMEM
, "Failed to create new bpf program");
2768 ret
= bpf_program_init(devices
);
2770 return log_error_errno(false, ENOMEM
, "Failed to initialize bpf program");
2772 lxc_list_for_each(it
, &conf
->devices
) {
2773 struct device_item
*cur
= it
->elem
;
2775 ret
= bpf_program_append_device(devices
, cur
);
2777 return log_error_errno(false, ENOMEM
, "Failed to add new rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
2784 TRACE("Added rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
2793 ret
= bpf_program_finalize(devices
);
2795 return log_error_errno(false, ENOMEM
, "Failed to finalize bpf program");
2797 ret
= bpf_program_cgroup_attach(devices
, BPF_CGROUP_DEVICE
,
2798 unified
->container_full_path
,
2801 return log_error_errno(false, ENOMEM
, "Failed to attach bpf program");
2803 /* Replace old bpf program. */
2804 devices_old
= move_ptr(conf
->cgroup2_devices
);
2805 conf
->cgroup2_devices
= move_ptr(devices
);
2806 devices
= move_ptr(devices_old
);
2811 bool __cgfsng_delegate_controllers(struct cgroup_ops
*ops
, const char *cgroup
)
2813 __do_free
char *add_controllers
= NULL
, *base_path
= NULL
;
2814 __do_free_string_list
char **parts
= NULL
;
2815 struct hierarchy
*unified
= ops
->unified
;
2818 size_t full_len
= 0;
2820 if (!ops
->hierarchies
|| !pure_unified_layout(ops
) ||
2821 !unified
->controllers
[0])
2824 /* For now we simply enable all controllers that we have detected by
2825 * creating a string like "+memory +pids +cpu +io".
2826 * TODO: In the near future we might want to support "-<controller>"
2827 * etc. but whether supporting semantics like this make sense will need
2830 for (it
= unified
->controllers
; it
&& *it
; it
++) {
2831 full_len
+= strlen(*it
) + 2;
2832 add_controllers
= must_realloc(add_controllers
, full_len
+ 1);
2834 if (unified
->controllers
[0] == *it
)
2835 add_controllers
[0] = '\0';
2837 (void)strlcat(add_controllers
, "+", full_len
+ 1);
2838 (void)strlcat(add_controllers
, *it
, full_len
+ 1);
2840 if ((it
+ 1) && *(it
+ 1))
2841 (void)strlcat(add_controllers
, " ", full_len
+ 1);
2844 parts
= lxc_string_split(cgroup
, '/');
2848 parts_len
= lxc_array_len((void **)parts
);
2852 base_path
= must_make_path(unified
->mountpoint
, unified
->container_base_path
, NULL
);
2853 for (ssize_t i
= -1; i
< parts_len
; i
++) {
2855 __do_free
char *target
= NULL
;
2858 base_path
= must_append_path(base_path
, parts
[i
], NULL
);
2859 target
= must_make_path(base_path
, "cgroup.subtree_control", NULL
);
2860 ret
= lxc_writeat(-1, target
, add_controllers
, full_len
);
2862 return log_error_errno(false, errno
, "Could not enable \"%s\" controllers in the unified cgroup \"%s\"",
2863 add_controllers
, target
);
2864 TRACE("Enable \"%s\" controllers in the unified cgroup \"%s\"", add_controllers
, target
);
2870 __cgfsng_ops
bool cgfsng_monitor_delegate_controllers(struct cgroup_ops
*ops
)
2873 return ret_set_errno(false, ENOENT
);
2875 return __cgfsng_delegate_controllers(ops
, ops
->monitor_cgroup
);
2878 __cgfsng_ops
bool cgfsng_payload_delegate_controllers(struct cgroup_ops
*ops
)
2881 return ret_set_errno(false, ENOENT
);
2883 return __cgfsng_delegate_controllers(ops
, ops
->container_cgroup
);
2886 static bool cgroup_use_wants_controllers(const struct cgroup_ops
*ops
,
2889 if (!ops
->cgroup_use
)
2892 for (char **cur_ctrl
= controllers
; cur_ctrl
&& *cur_ctrl
; cur_ctrl
++) {
2895 for (char **cur_use
= ops
->cgroup_use
; cur_use
&& *cur_use
; cur_use
++) {
2896 if (strcmp(*cur_use
, *cur_ctrl
) != 0)
2912 static void cg_unified_delegate(char ***delegate
)
2914 __do_free
char *buf
= NULL
;
2915 char *standard
[] = {"cgroup.subtree_control", "cgroup.threads", NULL
};
2919 buf
= read_file("/sys/kernel/cgroup/delegate");
2921 for (char **p
= standard
; p
&& *p
; p
++) {
2922 idx
= append_null_to_list((void ***)delegate
);
2923 (*delegate
)[idx
] = must_copy_string(*p
);
2925 SYSWARN("Failed to read /sys/kernel/cgroup/delegate");
2929 lxc_iterate_parts (token
, buf
, " \t\n") {
2931 * We always need to chown this for both cgroup and
2934 if (strcmp(token
, "cgroup.procs") == 0)
2937 idx
= append_null_to_list((void ***)delegate
);
2938 (*delegate
)[idx
] = must_copy_string(token
);
2942 /* At startup, parse_hierarchies finds all the info we need about cgroup
2943 * mountpoints and current cgroups, and stores it in @d.
2945 static int cg_hybrid_init(struct cgroup_ops
*ops
, bool relative
, bool unprivileged
)
2947 __do_free
char *basecginfo
= NULL
, *line
= NULL
;
2948 __do_free_string_list
char **klist
= NULL
, **nlist
= NULL
;
2949 __do_fclose
FILE *f
= NULL
;
2953 /* Root spawned containers escape the current cgroup, so use init's
2954 * cgroups as our base in that case.
2956 if (!relative
&& (geteuid() == 0))
2957 basecginfo
= read_file("/proc/1/cgroup");
2959 basecginfo
= read_file("/proc/self/cgroup");
2961 return ret_set_errno(-1, ENOMEM
);
2963 ret
= get_existing_subsystems(&klist
, &nlist
);
2965 return log_error_errno(-1, errno
, "Failed to retrieve available legacy cgroup controllers");
2967 f
= fopen("/proc/self/mountinfo", "re");
2969 return log_error_errno(-1, errno
, "Failed to open \"/proc/self/mountinfo\"");
2971 lxc_cgfsng_print_basecg_debuginfo(basecginfo
, klist
, nlist
);
2973 while (getline(&line
, &len
, f
) != -1) {
2974 __do_free
char *base_cgroup
= NULL
, *mountpoint
= NULL
;
2975 __do_free_string_list
char **controller_list
= NULL
;
2978 struct hierarchy
*new;
2980 type
= get_cgroup_version(line
);
2984 if (type
== CGROUP2_SUPER_MAGIC
&& ops
->unified
)
2987 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNKNOWN
) {
2988 if (type
== CGROUP2_SUPER_MAGIC
)
2989 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
2990 else if (type
== CGROUP_SUPER_MAGIC
)
2991 ops
->cgroup_layout
= CGROUP_LAYOUT_LEGACY
;
2992 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
) {
2993 if (type
== CGROUP_SUPER_MAGIC
)
2994 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
2995 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_LEGACY
) {
2996 if (type
== CGROUP2_SUPER_MAGIC
)
2997 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
3000 controller_list
= cg_hybrid_get_controllers(klist
, nlist
, line
, type
);
3001 if (!controller_list
&& type
== CGROUP_SUPER_MAGIC
)
3004 if (type
== CGROUP_SUPER_MAGIC
)
3005 if (controller_list_is_dup(ops
->hierarchies
, controller_list
)) {
3006 TRACE("Skipping duplicating controller");
3010 mountpoint
= cg_hybrid_get_mountpoint(line
);
3012 ERROR("Failed parsing mountpoint from \"%s\"", line
);
3016 if (type
== CGROUP_SUPER_MAGIC
)
3017 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, controller_list
[0], CGROUP_SUPER_MAGIC
);
3019 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, NULL
, CGROUP2_SUPER_MAGIC
);
3021 ERROR("Failed to find current cgroup");
3026 prune_init_scope(base_cgroup
);
3027 if (type
== CGROUP2_SUPER_MAGIC
)
3028 writeable
= test_writeable_v2(mountpoint
, base_cgroup
);
3030 writeable
= test_writeable_v1(mountpoint
, base_cgroup
);
3032 TRACE("The %s group is not writeable", base_cgroup
);
3036 if (type
== CGROUP2_SUPER_MAGIC
) {
3037 char *cgv2_ctrl_path
;
3039 cgv2_ctrl_path
= must_make_path(mountpoint
, base_cgroup
,
3040 "cgroup.controllers",
3043 controller_list
= cg_unified_get_controllers(cgv2_ctrl_path
);
3044 free(cgv2_ctrl_path
);
3045 if (!controller_list
) {
3046 controller_list
= cg_unified_make_empty_controller();
3047 TRACE("No controllers are enabled for "
3048 "delegation in the unified hierarchy");
3052 /* Exclude all controllers that cgroup use does not want. */
3053 if (!cgroup_use_wants_controllers(ops
, controller_list
)) {
3054 TRACE("Skipping controller");
3058 new = add_hierarchy(&ops
->hierarchies
, move_ptr(controller_list
), move_ptr(mountpoint
), move_ptr(base_cgroup
), type
);
3059 if (type
== CGROUP2_SUPER_MAGIC
&& !ops
->unified
) {
3061 cg_unified_delegate(&new->cgroup2_chown
);
3066 TRACE("Writable cgroup hierarchies:");
3067 lxc_cgfsng_print_hierarchies(ops
);
3069 /* verify that all controllers in cgroup.use and all crucial
3070 * controllers are accounted for
3072 if (!all_controllers_found(ops
))
3073 return log_error_errno(-1, ENOENT
, "Failed to find all required controllers");
3078 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
3079 static char *cg_unified_get_current_cgroup(bool relative
)
3081 __do_free
char *basecginfo
= NULL
;
3085 if (!relative
&& (geteuid() == 0))
3086 basecginfo
= read_file("/proc/1/cgroup");
3088 basecginfo
= read_file("/proc/self/cgroup");
3092 base_cgroup
= strstr(basecginfo
, "0::/");
3096 base_cgroup
= base_cgroup
+ 3;
3097 copy
= copy_to_eol(base_cgroup
);
3104 static int cg_unified_init(struct cgroup_ops
*ops
, bool relative
,
3107 __do_free
char *subtree_path
= NULL
;
3111 struct hierarchy
*new;
3112 char *base_cgroup
= NULL
;
3114 ret
= unified_cgroup_hierarchy();
3115 if (ret
== -ENOMEDIUM
)
3116 return ret_errno(ENOMEDIUM
);
3118 if (ret
!= CGROUP2_SUPER_MAGIC
)
3121 base_cgroup
= cg_unified_get_current_cgroup(relative
);
3123 return ret_errno(EINVAL
);
3125 prune_init_scope(base_cgroup
);
3128 * We assume that the cgroup we're currently in has been delegated to
3129 * us and we are free to further delege all of the controllers listed
3130 * in cgroup.controllers further down the hierarchy.
3132 mountpoint
= must_copy_string(DEFAULT_CGROUP_MOUNTPOINT
);
3133 subtree_path
= must_make_path(mountpoint
, base_cgroup
, "cgroup.controllers", NULL
);
3134 delegatable
= cg_unified_get_controllers(subtree_path
);
3136 delegatable
= cg_unified_make_empty_controller();
3137 if (!delegatable
[0])
3138 TRACE("No controllers are enabled for delegation");
3140 /* TODO: If the user requested specific controllers via lxc.cgroup.use
3141 * we should verify here. The reason I'm not doing it right is that I'm
3142 * not convinced that lxc.cgroup.use will be the future since it is a
3143 * global property. I much rather have an option that lets you request
3144 * controllers per container.
3147 new = add_hierarchy(&ops
->hierarchies
, delegatable
, mountpoint
, base_cgroup
, CGROUP2_SUPER_MAGIC
);
3149 cg_unified_delegate(&new->cgroup2_chown
);
3151 if (bpf_devices_cgroup_supported())
3152 new->bpf_device_controller
= 1;
3154 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
3157 return CGROUP2_SUPER_MAGIC
;
3160 static int cg_init(struct cgroup_ops
*ops
, struct lxc_conf
*conf
)
3164 bool relative
= conf
->cgroup_meta
.relative
;
3166 tmp
= lxc_global_config_value("lxc.cgroup.use");
3168 __do_free
char *pin
= NULL
;
3171 pin
= must_copy_string(tmp
);
3174 lxc_iterate_parts(cur
, chop
, ",")
3175 must_append_string(&ops
->cgroup_use
, cur
);
3178 ret
= cg_unified_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
3182 if (ret
== CGROUP2_SUPER_MAGIC
)
3185 return cg_hybrid_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
3188 __cgfsng_ops
static int cgfsng_data_init(struct cgroup_ops
*ops
)
3190 const char *cgroup_pattern
;
3193 return ret_set_errno(-1, ENOENT
);
3195 /* copy system-wide cgroup information */
3196 cgroup_pattern
= lxc_global_config_value("lxc.cgroup.pattern");
3197 if (cgroup_pattern
&& strcmp(cgroup_pattern
, "") != 0)
3198 ops
->cgroup_pattern
= must_copy_string(cgroup_pattern
);
3203 struct cgroup_ops
*cgfsng_ops_init(struct lxc_conf
*conf
)
3205 __do_free
struct cgroup_ops
*cgfsng_ops
= NULL
;
3207 cgfsng_ops
= malloc(sizeof(struct cgroup_ops
));
3209 return ret_set_errno(NULL
, ENOMEM
);
3211 memset(cgfsng_ops
, 0, sizeof(struct cgroup_ops
));
3212 cgfsng_ops
->cgroup_layout
= CGROUP_LAYOUT_UNKNOWN
;
3214 if (cg_init(cgfsng_ops
, conf
))
3217 cgfsng_ops
->data_init
= cgfsng_data_init
;
3218 cgfsng_ops
->payload_destroy
= cgfsng_payload_destroy
;
3219 cgfsng_ops
->monitor_destroy
= cgfsng_monitor_destroy
;
3220 cgfsng_ops
->monitor_create
= cgfsng_monitor_create
;
3221 cgfsng_ops
->monitor_enter
= cgfsng_monitor_enter
;
3222 cgfsng_ops
->monitor_delegate_controllers
= cgfsng_monitor_delegate_controllers
;
3223 cgfsng_ops
->payload_delegate_controllers
= cgfsng_payload_delegate_controllers
;
3224 cgfsng_ops
->payload_create
= cgfsng_payload_create
;
3225 cgfsng_ops
->payload_enter
= cgfsng_payload_enter
;
3226 cgfsng_ops
->payload_finalize
= cgfsng_payload_finalize
;
3227 cgfsng_ops
->escape
= cgfsng_escape
;
3228 cgfsng_ops
->num_hierarchies
= cgfsng_num_hierarchies
;
3229 cgfsng_ops
->get_hierarchies
= cgfsng_get_hierarchies
;
3230 cgfsng_ops
->get_cgroup
= cgfsng_get_cgroup
;
3231 cgfsng_ops
->get
= cgfsng_get
;
3232 cgfsng_ops
->set
= cgfsng_set
;
3233 cgfsng_ops
->freeze
= cgfsng_freeze
;
3234 cgfsng_ops
->unfreeze
= cgfsng_unfreeze
;
3235 cgfsng_ops
->setup_limits_legacy
= cgfsng_setup_limits_legacy
;
3236 cgfsng_ops
->setup_limits
= cgfsng_setup_limits
;
3237 cgfsng_ops
->driver
= "cgfsng";
3238 cgfsng_ops
->version
= "1.0.0";
3239 cgfsng_ops
->attach
= cgfsng_attach
;
3240 cgfsng_ops
->chown
= cgfsng_chown
;
3241 cgfsng_ops
->mount
= cgfsng_mount
;
3242 cgfsng_ops
->devices_activate
= cgfsng_devices_activate
;
3244 return move_ptr(cgfsng_ops
);