1 /* SPDX-License-Identifier: LGPL-2.1+ */
4 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
5 * cgroup backend. The original cgfs.c was designed to be as flexible
6 * as possible. It would try to find cgroup filesystems no matter where
7 * or how you had them mounted, and deduce the most usable mount for
10 * This new implementation assumes that cgroup filesystems are mounted
11 * under /sys/fs/cgroup/clist where clist is either the controller, or
12 * a comma-separated list of controllers.
22 #include <linux/kdev_t.h>
23 #include <linux/types.h>
30 #include <sys/epoll.h>
31 #include <sys/types.h>
37 #include "cgroup2_devices.h"
38 #include "cgroup_utils.h"
45 #include "memory_utils.h"
46 #include "storage/storage.h"
50 #include "include/strlcpy.h"
54 #include "include/strlcat.h"
57 lxc_log_define(cgfsng
, cgroup
);
59 /* Given a pointer to a null-terminated array of pointers, realloc to add one
60 * entry, and point the new entry to NULL. Do not fail. Return the index to the
61 * second-to-last entry - that is, the one which is now available for use
62 * (keeping the list null-terminated).
/* Given a pointer to a NULL-terminated array of pointers, realloc to add one
 * entry, keeping the list NULL-terminated. Does not fail (must_realloc aborts
 * on OOM). Returns the index of the slot that is now free for use.
 */
static int append_null_to_list(void ***list)
{
	int slot = 0;

	/* Find the current terminator, i.e. the length of the list. */
	if (*list)
		while ((*list)[slot])
			slot++;

	/* Grow by one usable entry plus the trailing NULL sentinel. */
	*list = must_realloc(*list, (slot + 2) * sizeof(void **));
	(*list)[slot + 1] = NULL;
	return slot;
}
77 /* Given a null-terminated array of strings, check whether @entry is one of the
/* Given a NULL-terminated array of strings, report whether @entry occurs in
 * it. A NULL @list contains nothing.
 */
static bool string_in_list(char **list, const char *entry)
{
	if (!list)
		return false;

	for (char **it = list; *it; it++)
		if (strcmp(*it, entry) == 0)
			return true;

	return false;
}
92 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
93 * "name=systemd". Do not fail.
/* Return a newly allocated copy of @entry with "name=" prepended, i.e. turn
 * "systemd" into "name=systemd". Does not fail (must_realloc aborts on OOM).
 * The caller owns and frees the returned string.
 */
static char *cg_legacy_must_prefix_named(char *entry)
{
	size_t len = strlen(entry);
	char *prefixed;

	/* STRLITERALLEN("name=") == 5, plus one byte for the terminator. */
	prefixed = must_realloc(NULL, len + 6);
	memcpy(prefixed, "name=", STRLITERALLEN("name="));
	memcpy(prefixed + STRLITERALLEN("name="), entry, len);
	prefixed[len + 5] = '\0';

	return prefixed;
}
110 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
113 * We also handle named subsystems here. Any controller which is not a kernel
114 * subsystem, we prefix "name=". Any which is both a kernel and named subsystem,
115 * we refuse to use because we're not sure which we have here.
116 * (TODO: We could work around this in some cases by just remounting to be
117 * unambiguous, or by comparing mountpoint contents with current cgroup.)
119 * The last entry will always be NULL.
/* Append controller @entry to the NULL-terminated list @clist. Does not fail.
 * @clist must be NULL the first time.
 *
 * Named subsystems are handled here as well: any controller that is not a
 * kernel subsystem gets a "name=" prefix. A controller that is both a kernel
 * and a named subsystem is ambiguous and refused.
 * The last entry of @clist is always NULL.
 */
static void must_append_controller(char **klist, char **nlist, char ***clist,
				   char *entry)
{
	int slot;
	char *copy;

	if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
		ERROR("Refusing to use ambiguous controller \"%s\"", entry);
		ERROR("It is both a named and kernel subsystem");
		return;
	}

	slot = append_null_to_list((void ***)clist);

	if (strncmp(entry, "name=", 5) == 0)
		copy = must_copy_string(entry);
	else if (string_in_list(klist, entry))
		copy = must_copy_string(entry);
	else
		copy = cg_legacy_must_prefix_named(entry);

	(*clist)[slot] = copy;
}
145 /* Given a handler's cgroup data, return the struct hierarchy for the controller
146 * @c, or NULL if there is none.
148 static struct hierarchy
*get_hierarchy(struct cgroup_ops
*ops
, const char *controller
)
150 if (!ops
->hierarchies
)
151 return log_trace_errno(NULL
, errno
, "There are no useable cgroup controllers");
153 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
155 /* This is the empty unified hierarchy. */
156 if (ops
->hierarchies
[i
]->controllers
&& !ops
->hierarchies
[i
]->controllers
[0])
157 return ops
->hierarchies
[i
];
163 * Handle controllers with significant implementation changes
164 * from cgroup to cgroup2.
166 if (pure_unified_layout(ops
)) {
167 if (strcmp(controller
, "devices") == 0) {
168 if (ops
->unified
->bpf_device_controller
)
172 } else if (strcmp(controller
, "freezer") == 0) {
173 if (ops
->unified
->freezer_controller
)
180 if (string_in_list(ops
->hierarchies
[i
]->controllers
, controller
))
181 return ops
->hierarchies
[i
];
185 WARN("There is no useable %s controller", controller
);
187 WARN("There is no empty unified cgroup hierarchy");
189 return ret_set_errno(NULL
, ENOENT
);
192 /* Taken over modified from the kernel sources. */
/* Taken over modified from the kernel sources. */
#define NBITS 32 /* bits in uint32_t */
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)

/* Set bit @bit in the bit array @bitarr.
 * The caller must guarantee @bit lies within the allocated array.
 */
static void set_bit(unsigned bit, uint32_t *bitarr)
{
	/*
	 * Shift an unsigned 32-bit constant: "1 << 31" would shift a signed
	 * int into the sign bit, which is undefined behavior in C.
	 */
	bitarr[bit / NBITS] |= (UINT32_C(1) << (bit % NBITS));
}
202 static void clear_bit(unsigned bit
, uint32_t *bitarr
)
204 bitarr
[bit
/ NBITS
] &= ~(1 << (bit
% NBITS
));
207 static bool is_set(unsigned bit
, uint32_t *bitarr
)
209 return (bitarr
[bit
/ NBITS
] & (1 << (bit
% NBITS
))) != 0;
212 /* Create cpumask from cpulist aka turn:
220 static uint32_t *lxc_cpumask(char *buf
, size_t nbits
)
222 __do_free
uint32_t *bitarr
= NULL
;
226 arrlen
= BITS_TO_LONGS(nbits
);
227 bitarr
= calloc(arrlen
, sizeof(uint32_t));
229 return ret_set_errno(NULL
, ENOMEM
);
231 lxc_iterate_parts(token
, buf
, ",") {
236 start
= strtoul(token
, NULL
, 0);
238 range
= strchr(token
, '-');
240 end
= strtoul(range
+ 1, NULL
, 0);
243 return ret_set_errno(NULL
, EINVAL
);
246 return ret_set_errno(NULL
, EINVAL
);
249 set_bit(start
++, bitarr
);
252 return move_ptr(bitarr
);
255 /* Turn cpumask into simple, comma-separated cpulist. */
256 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr
, size_t nbits
)
258 __do_free_string_list
char **cpulist
= NULL
;
259 char numstr
[INTTYPE_TO_STRLEN(size_t)] = {0};
262 for (size_t i
= 0; i
<= nbits
; i
++) {
263 if (!is_set(i
, bitarr
))
266 ret
= snprintf(numstr
, sizeof(numstr
), "%zu", i
);
267 if (ret
< 0 || (size_t)ret
>= sizeof(numstr
))
270 ret
= lxc_append_string(&cpulist
, numstr
);
272 return ret_set_errno(NULL
, ENOMEM
);
276 return ret_set_errno(NULL
, ENOMEM
);
278 return lxc_string_join(",", (const char **)cpulist
, false);
/* Return the highest cpu number mentioned in a kernel-style cpulist such as
 * "0-3", "0,2,7" or "0-3,5-7". Relies on the kernel emitting entries in
 * ascending order, so only the token after the last ',' or '-' matters.
 */
static ssize_t get_max_cpus(char *cpulist)
{
	char *maxcpus = cpulist;
	char *after_comma, *after_dash;
	size_t cpus = 0;

	/* Candidate 1: the text after the last ','. */
	after_comma = strrchr(maxcpus, ',');
	if (after_comma)
		after_comma++;

	/* Candidate 2: the text after the last '-'. */
	after_dash = strrchr(maxcpus, '-');
	if (after_dash)
		after_dash++;

	/* Pick whichever candidate occurs later in the string; with no
	 * separators at all the whole string is the single cpu number. */
	if (!after_comma && !after_dash)
		after_comma = maxcpus;
	else if (after_comma < after_dash)
		after_comma = after_dash;

	if (after_comma)
		cpus = strtoul(after_comma, NULL, 0);

	return cpus;
}
312 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
313 #define __OFFLINE_CPUS "/sys/devices/system/cpu/offline"
314 static bool cg_legacy_filter_and_set_cpus(const char *parent_cgroup
,
315 char *child_cgroup
, bool am_initialized
)
317 __do_free
char *cpulist
= NULL
, *fpath
= NULL
, *isolcpus
= NULL
,
318 *offlinecpus
= NULL
, *posscpus
= NULL
;
319 __do_free
uint32_t *isolmask
= NULL
, *offlinemask
= NULL
,
323 ssize_t maxisol
= 0, maxoffline
= 0, maxposs
= 0;
324 bool flipped_bit
= false;
326 fpath
= must_make_path(parent_cgroup
, "cpuset.cpus", NULL
);
327 posscpus
= read_file_at(-EBADF
, fpath
);
329 return log_error_errno(false, errno
, "Failed to read file \"%s\"", fpath
);
331 /* Get maximum number of cpus found in possible cpuset. */
332 maxposs
= get_max_cpus(posscpus
);
333 if (maxposs
< 0 || maxposs
>= INT_MAX
- 1)
336 if (file_exists(__ISOL_CPUS
)) {
337 isolcpus
= read_file_at(-EBADF
, __ISOL_CPUS
);
339 return log_error_errno(false, errno
, "Failed to read file \"%s\"", __ISOL_CPUS
);
341 if (isdigit(isolcpus
[0])) {
342 /* Get maximum number of cpus found in isolated cpuset. */
343 maxisol
= get_max_cpus(isolcpus
);
344 if (maxisol
< 0 || maxisol
>= INT_MAX
- 1)
348 if (maxposs
< maxisol
)
352 TRACE("The path \""__ISOL_CPUS
"\" to read isolated cpus from does not exist");
355 if (file_exists(__OFFLINE_CPUS
)) {
356 offlinecpus
= read_file_at(-EBADF
, __OFFLINE_CPUS
);
358 return log_error_errno(false, errno
, "Failed to read file \"%s\"", __OFFLINE_CPUS
);
360 if (isdigit(offlinecpus
[0])) {
361 /* Get maximum number of cpus found in offline cpuset. */
362 maxoffline
= get_max_cpus(offlinecpus
);
363 if (maxoffline
< 0 || maxoffline
>= INT_MAX
- 1)
367 if (maxposs
< maxoffline
)
368 maxposs
= maxoffline
;
371 TRACE("The path \""__OFFLINE_CPUS
"\" to read offline cpus from does not exist");
374 if ((maxisol
== 0) && (maxoffline
== 0)) {
375 cpulist
= move_ptr(posscpus
);
379 possmask
= lxc_cpumask(posscpus
, maxposs
);
381 return log_error_errno(false, errno
, "Failed to create cpumask for possible cpus");
384 isolmask
= lxc_cpumask(isolcpus
, maxposs
);
386 return log_error_errno(false, errno
, "Failed to create cpumask for isolated cpus");
389 if (maxoffline
> 0) {
390 offlinemask
= lxc_cpumask(offlinecpus
, maxposs
);
392 return log_error_errno(false, errno
, "Failed to create cpumask for offline cpus");
395 for (i
= 0; i
<= maxposs
; i
++) {
396 if ((isolmask
&& !is_set(i
, isolmask
)) ||
397 (offlinemask
&& !is_set(i
, offlinemask
)) ||
398 !is_set(i
, possmask
))
402 clear_bit(i
, possmask
);
406 cpulist
= lxc_cpumask_to_cpulist(possmask
, maxposs
);
407 TRACE("No isolated or offline cpus present in cpuset");
409 cpulist
= move_ptr(posscpus
);
410 TRACE("Removed isolated or offline cpus from cpuset");
413 return log_error_errno(false, errno
, "Failed to create cpu list");
416 if (!am_initialized
) {
417 ret
= lxc_write_openat(child_cgroup
, "cpuset.cpus", cpulist
, strlen(cpulist
));
419 return log_error_errno(false,
420 errno
, "Failed to write cpu list to \"%s/cpuset.cpus\"",
423 TRACE("Copied cpu settings of parent cgroup");
429 /* Copy contents of parent(@path)/@file to @path/@file */
430 static bool copy_parent_file(const char *parent_cgroup
,
431 const char *child_cgroup
, const char *file
)
433 __do_free
char *parent_file
= NULL
, *value
= NULL
;
437 parent_file
= must_make_path(parent_cgroup
, file
, NULL
);
438 len
= lxc_read_from_file(parent_file
, NULL
, 0);
440 return log_error_errno(false, errno
, "Failed to determine buffer size");
442 value
= must_realloc(NULL
, len
+ 1);
444 ret
= lxc_read_from_file(parent_file
, value
, len
);
446 return log_error_errno(false, errno
, "Failed to read from parent file \"%s\"", parent_file
);
448 ret
= lxc_write_openat(child_cgroup
, file
, value
, len
);
449 if (ret
< 0 && errno
!= EACCES
)
450 return log_error_errno(false, errno
, "Failed to write \"%s\" to file \"%s/%s\"",
451 value
, child_cgroup
, file
);
455 static inline bool is_unified_hierarchy(const struct hierarchy
*h
)
457 return h
->version
== CGROUP2_SUPER_MAGIC
;
461 * Initialize the cpuset hierarchy in first directory of @cgroup_leaf and set
462 * cgroup.clone_children so that children inherit settings. Since the
463 * h->base_path is populated by init or ourselves, we know it is already
466 * returns -1 on error, 0 when we didn't created a cgroup, 1 if we created a
469 static int cg_legacy_handle_cpuset_hierarchy(struct hierarchy
*h
,
470 const char *cgroup_leaf
)
472 __do_free
char *parent_cgroup
= NULL
, *child_cgroup
= NULL
, *dup
= NULL
;
473 __do_close
int cgroup_fd
= -EBADF
;
479 if (is_unified_hierarchy(h
))
482 if (!string_in_list(h
->controllers
, "cpuset"))
486 return ret_set_errno(-1, EINVAL
);
488 dup
= strdup(cgroup_leaf
);
490 return ret_set_errno(-1, ENOMEM
);
492 parent_cgroup
= must_make_path(h
->mountpoint
, h
->container_base_path
, NULL
);
495 leaf
+= strspn(leaf
, "/");
496 slash
= strchr(leaf
, '/');
499 child_cgroup
= must_make_path(parent_cgroup
, leaf
, NULL
);
504 ret
= mkdir(child_cgroup
, 0755);
507 return log_error_errno(-1, errno
, "Failed to create directory \"%s\"", child_cgroup
);
512 cgroup_fd
= lxc_open_dirfd(child_cgroup
);
516 ret
= lxc_readat(cgroup_fd
, "cgroup.clone_children", &v
, 1);
518 return log_error_errno(-1, errno
, "Failed to read file \"%s/cgroup.clone_children\"", child_cgroup
);
520 /* Make sure any isolated cpus are removed from cpuset.cpus. */
521 if (!cg_legacy_filter_and_set_cpus(parent_cgroup
, child_cgroup
, v
== '1'))
522 return log_error_errno(-1, errno
, "Failed to remove isolated cpus");
524 /* Already set for us by someone else. */
526 TRACE("\"cgroup.clone_children\" was already set to \"1\"");
528 /* copy parent's settings */
529 if (!copy_parent_file(parent_cgroup
, child_cgroup
, "cpuset.mems"))
530 return log_error_errno(-1, errno
, "Failed to copy \"cpuset.mems\" settings");
532 /* Set clone_children so children inherit our settings */
533 ret
= lxc_writeat(cgroup_fd
, "cgroup.clone_children", "1", 1);
535 return log_error_errno(-1, errno
, "Failed to write 1 to \"%s/cgroup.clone_children\"", child_cgroup
);
540 /* Given two null-terminated lists of strings, return true if any string is in
/* Given two NULL-terminated lists of strings, report whether any string from
 * @l1 also occurs in @l2. Either list may be NULL.
 */
static bool controller_lists_intersect(char **l1, char **l2)
{
	if (!l1 || !l2)
		return false;

	for (char **it = l1; *it; it++)
		if (string_in_list(l2, *it))
			return true;

	return false;
}
555 /* For a null-terminated list of controllers @clist, return true if any of those
556 * controllers is already listed the null-terminated list of hierarchies @hlist.
557 * Realistically, if one is present, all must be present.
559 static bool controller_list_is_dup(struct hierarchy
**hlist
, char **clist
)
564 for (int i
= 0; hlist
[i
]; i
++)
565 if (controller_lists_intersect(hlist
[i
]->controllers
, clist
))
571 /* Return true if the controller @entry is found in the null-terminated list of
572 * hierarchies @hlist.
574 static bool controller_found(struct hierarchy
**hlist
, char *entry
)
579 for (int i
= 0; hlist
[i
]; i
++)
580 if (string_in_list(hlist
[i
]->controllers
, entry
))
586 /* Return true if all of the controllers which we require have been found. The
587 * required list is freezer and anything in lxc.cgroup.use.
589 static bool all_controllers_found(struct cgroup_ops
*ops
)
591 struct hierarchy
**hlist
;
593 if (!ops
->cgroup_use
)
596 hlist
= ops
->hierarchies
;
597 for (char **cur
= ops
->cgroup_use
; cur
&& *cur
; cur
++)
598 if (!controller_found(hlist
, *cur
))
599 return log_error(false, "No %s controller mountpoint found", *cur
);
604 /* Get the controllers from a mountinfo line There are other ways we could get
605 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
606 * could parse the mount options. But we simply assume that the mountpoint must
607 * be /sys/fs/cgroup/controller-list
609 static char **cg_hybrid_get_controllers(char **klist
, char **nlist
, char *line
,
612 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
613 * for legacy hierarchies.
615 __do_free_string_list
char **aret
= NULL
;
618 char *p
= line
, *sep
= ",";
620 for (i
= 0; i
< 4; i
++) {
627 /* Note, if we change how mountinfo works, then our caller will need to
628 * verify /sys/fs/cgroup/ in this field.
630 if (strncmp(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15) != 0)
631 return log_warn(NULL
, "Found hierarchy not under " DEFAULT_CGROUP_MOUNTPOINT
": \"%s\"", p
);
636 return log_error(NULL
, "Corrupt mountinfo");
639 if (type
== CGROUP_SUPER_MAGIC
) {
640 __do_free
char *dup
= NULL
;
642 /* strdup() here for v1 hierarchies. Otherwise
643 * lxc_iterate_parts() will destroy mountpoints such as
644 * "/sys/fs/cgroup/cpu,cpuacct".
646 dup
= must_copy_string(p
);
650 lxc_iterate_parts(tok
, dup
, sep
)
651 must_append_controller(klist
, nlist
, &aret
, tok
);
655 return move_ptr(aret
);
658 static char **cg_unified_make_empty_controller(void)
660 __do_free_string_list
char **aret
= NULL
;
663 newentry
= append_null_to_list((void ***)&aret
);
664 aret
[newentry
] = NULL
;
665 return move_ptr(aret
);
668 static char **cg_unified_get_controllers(int dfd
, const char *file
)
670 __do_free
char *buf
= NULL
;
671 __do_free_string_list
char **aret
= NULL
;
675 buf
= read_file_at(dfd
, file
);
679 lxc_iterate_parts(tok
, buf
, sep
) {
683 newentry
= append_null_to_list((void ***)&aret
);
684 copy
= must_copy_string(tok
);
685 aret
[newentry
] = copy
;
688 return move_ptr(aret
);
691 static struct hierarchy
*add_hierarchy(struct hierarchy
***h
, char **clist
, char *mountpoint
,
692 char *container_base_path
, int type
)
694 struct hierarchy
*new;
697 new = zalloc(sizeof(*new));
699 return ret_set_errno(NULL
, ENOMEM
);
700 new->controllers
= clist
;
701 new->mountpoint
= mountpoint
;
702 new->container_base_path
= container_base_path
;
704 new->cgfd_con
= -EBADF
;
705 new->cgfd_limit
= -EBADF
;
706 new->cgfd_mon
= -EBADF
;
708 newentry
= append_null_to_list((void ***)h
);
709 (*h
)[newentry
] = new;
713 /* Get a copy of the mountpoint from @line, which is a line from
714 * /proc/self/mountinfo.
716 static char *cg_hybrid_get_mountpoint(char *line
)
718 char *p
= line
, *sret
= NULL
;
722 for (int i
= 0; i
< 4; i
++) {
729 if (strncmp(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15) != 0)
732 p2
= strchr(p
+ 15, ' ');
738 sret
= must_realloc(NULL
, len
+ 1);
739 memcpy(sret
, p
, len
);
745 /* Given a multi-line string, return a null-terminated copy of the current line. */
/* Given a multi-line string, return a NUL-terminated copy of the current line
 * (up to but excluding the '\n'), or NULL if no newline follows. The caller
 * frees the result.
 */
static char *copy_to_eol(char *p)
{
	char *newline, *sret;
	size_t len;

	newline = strchr(p, '\n');
	if (!newline)
		return NULL;

	len = newline - p;
	sret = must_realloc(NULL, len + 1);
	memcpy(sret, p, len);
	sret[len] = '\0';

	return sret;
}
763 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
764 * /proc/self/cgroup file. Check whether controller c is present.
766 static bool controller_in_clist(char *cgline
, char *c
)
768 __do_free
char *tmp
= NULL
;
772 eol
= strchr(cgline
, ':');
777 tmp
= must_realloc(NULL
, len
+ 1);
778 memcpy(tmp
, cgline
, len
);
781 lxc_iterate_parts(tok
, tmp
, ",")
782 if (strcmp(tok
, c
) == 0)
788 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
791 static char *cg_hybrid_get_current_cgroup(char *basecginfo
, char *controller
,
794 char *p
= basecginfo
;
797 bool is_cgv2_base_cgroup
= false;
799 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
800 if ((type
== CGROUP2_SUPER_MAGIC
) && (*p
== '0'))
801 is_cgv2_base_cgroup
= true;
808 if (is_cgv2_base_cgroup
|| (controller
&& controller_in_clist(p
, controller
))) {
813 return copy_to_eol(p
);
/* Append a copy of @entry to the NULL-terminated string list @list.
 * Does not fail (the helpers abort on OOM).
 */
static void must_append_string(char ***list, char *entry)
{
	int slot;
	char *copy;

	slot = append_null_to_list((void ***)list);
	copy = must_copy_string(entry);
	(*list)[slot] = copy;
}
833 static int get_existing_subsystems(char ***klist
, char ***nlist
)
835 __do_free
char *line
= NULL
;
836 __do_fclose
FILE *f
= NULL
;
839 f
= fopen("/proc/self/cgroup", "re");
843 while (getline(&line
, &len
, f
) != -1) {
845 p
= strchr(line
, ':');
854 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
855 * contains an entry of the form:
859 * In this case we use "cgroup2" as controller name.
862 must_append_string(klist
, "cgroup2");
866 lxc_iterate_parts(tok
, p
, ",") {
867 if (strncmp(tok
, "name=", 5) == 0)
868 must_append_string(nlist
, tok
);
870 must_append_string(klist
, tok
);
/* Strip trailing newlines from @s in place and return @s. A string consisting
 * of a single character is never shrunk (the "len > 1" guard).
 */
static char *trim(char *s)
{
	size_t remaining = strlen(s);

	while (remaining > 1 && s[remaining - 1] == '\n') {
		remaining--;
		s[remaining] = '\0';
	}

	return s;
}
888 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops
*ops
)
891 struct hierarchy
**it
;
893 if (!ops
->hierarchies
) {
894 TRACE(" No hierarchies found");
898 TRACE(" Hierarchies:");
899 for (i
= 0, it
= ops
->hierarchies
; it
&& *it
; it
++, i
++) {
903 TRACE(" %d: base_cgroup: %s", i
, (*it
)->container_base_path
? (*it
)->container_base_path
: "(null)");
904 TRACE(" mountpoint: %s", (*it
)->mountpoint
? (*it
)->mountpoint
: "(null)");
905 TRACE(" controllers:");
906 for (j
= 0, cit
= (*it
)->controllers
; cit
&& *cit
; cit
++, j
++)
907 TRACE(" %d: %s", j
, *cit
);
911 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo
, char **klist
,
917 TRACE("basecginfo is:");
918 TRACE("%s", basecginfo
);
920 for (k
= 0, it
= klist
; it
&& *it
; it
++, k
++)
921 TRACE("kernel subsystem %d: %s", k
, *it
);
923 for (k
= 0, it
= nlist
; it
&& *it
; it
++, k
++)
924 TRACE("named subsystem %d: %s", k
, *it
);
927 static int cgroup_tree_remove(struct hierarchy
**hierarchies
, const char *container_cgroup
)
929 if (!container_cgroup
|| !hierarchies
)
932 for (int i
= 0; hierarchies
[i
]; i
++) {
933 struct hierarchy
*h
= hierarchies
[i
];
936 if (!h
->container_limit_path
)
939 ret
= lxc_rm_rf(h
->container_limit_path
);
941 WARN("Failed to destroy \"%s\"", h
->container_limit_path
);
943 if (h
->container_limit_path
!= h
->container_full_path
)
944 free_disarm(h
->container_limit_path
);
945 free_disarm(h
->container_full_path
);
951 struct generic_userns_exec_data
{
952 struct hierarchy
**hierarchies
;
953 const char *container_cgroup
;
954 struct lxc_conf
*conf
;
955 uid_t origuid
; /* target uid in parent namespace */
959 static int cgroup_tree_remove_wrapper(void *data
)
961 struct generic_userns_exec_data
*arg
= data
;
962 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
963 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
966 if (!lxc_setgroups(0, NULL
) && errno
!= EPERM
)
967 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
969 ret
= setresgid(nsgid
, nsgid
, nsgid
);
971 return log_error_errno(-1, errno
, "Failed to setresgid(%d, %d, %d)",
972 (int)nsgid
, (int)nsgid
, (int)nsgid
);
974 ret
= setresuid(nsuid
, nsuid
, nsuid
);
976 return log_error_errno(-1, errno
, "Failed to setresuid(%d, %d, %d)",
977 (int)nsuid
, (int)nsuid
, (int)nsuid
);
979 return cgroup_tree_remove(arg
->hierarchies
, arg
->container_cgroup
);
982 __cgfsng_ops
static void cgfsng_payload_destroy(struct cgroup_ops
*ops
,
983 struct lxc_handler
*handler
)
988 ERROR("Called with uninitialized cgroup operations");
992 if (!ops
->hierarchies
)
996 ERROR("Called with uninitialized handler");
1000 if (!handler
->conf
) {
1001 ERROR("Called with uninitialized conf");
1005 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
1006 ret
= bpf_program_cgroup_detach(handler
->cgroup_ops
->cgroup2_devices
);
1008 WARN("Failed to detach bpf program from cgroup");
1011 if (!lxc_list_empty(&handler
->conf
->id_map
)) {
1012 struct generic_userns_exec_data wrap
= {
1013 .conf
= handler
->conf
,
1014 .container_cgroup
= ops
->container_cgroup
,
1015 .hierarchies
= ops
->hierarchies
,
1018 ret
= userns_exec_1(handler
->conf
, cgroup_tree_remove_wrapper
,
1019 &wrap
, "cgroup_tree_remove_wrapper");
1021 ret
= cgroup_tree_remove(ops
->hierarchies
, ops
->container_cgroup
);
1024 SYSWARN("Failed to destroy cgroups");
1027 __cgfsng_ops
static void cgfsng_monitor_destroy(struct cgroup_ops
*ops
,
1028 struct lxc_handler
*handler
)
1031 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1032 const struct lxc_conf
*conf
;
1035 ERROR("Called with uninitialized cgroup operations");
1039 if (!ops
->hierarchies
)
1043 ERROR("Called with uninitialized handler");
1047 if (!handler
->conf
) {
1048 ERROR("Called with uninitialized conf");
1051 conf
= handler
->conf
;
1053 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", handler
->monitor_pid
);
1054 if (len
< 0 || (size_t)len
>= sizeof(pidstr
))
1057 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1058 __do_free
char *pivot_path
= NULL
;
1059 struct hierarchy
*h
= ops
->hierarchies
[i
];
1063 if (!h
->monitor_full_path
)
1066 /* Monitor might have died before we entered the cgroup. */
1067 if (handler
->monitor_pid
<= 0) {
1068 WARN("No valid monitor process found while destroying cgroups");
1072 if (conf
->cgroup_meta
.monitor_pivot_dir
)
1073 pivot_path
= must_make_path(h
->mountpoint
, h
->container_base_path
,
1074 conf
->cgroup_meta
.monitor_pivot_dir
, CGROUP_PIVOT
, NULL
);
1075 else if (conf
->cgroup_meta
.monitor_dir
)
1076 pivot_path
= must_make_path(h
->mountpoint
, h
->container_base_path
,
1077 conf
->cgroup_meta
.monitor_dir
, CGROUP_PIVOT
, NULL
);
1078 else if (conf
->cgroup_meta
.dir
)
1079 pivot_path
= must_make_path(h
->mountpoint
, h
->container_base_path
,
1080 conf
->cgroup_meta
.dir
, CGROUP_PIVOT
, NULL
);
1082 pivot_path
= must_make_path(h
->mountpoint
, h
->container_base_path
,
1083 CGROUP_PIVOT
, NULL
);
1085 offset
= strlen(h
->mountpoint
) + strlen(h
->container_base_path
);
1087 if (cg_legacy_handle_cpuset_hierarchy(h
, pivot_path
+ offset
))
1088 SYSWARN("Failed to initialize cpuset %s/" CGROUP_PIVOT
, pivot_path
);
1090 ret
= mkdir_p(pivot_path
, 0755);
1091 if (ret
< 0 && errno
!= EEXIST
) {
1092 ERROR("Failed to create %s", pivot_path
);
1096 ret
= lxc_write_openat(pivot_path
, "cgroup.procs", pidstr
, len
);
1098 SYSWARN("Failed to move monitor %s to \"%s\"", pidstr
, pivot_path
);
1103 ret
= lxc_rm_rf(h
->monitor_full_path
);
1105 WARN("Failed to destroy \"%s\"", h
->monitor_full_path
);
1109 static int mkdir_eexist_on_last(const char *dir
, mode_t mode
)
1111 const char *tmp
= dir
;
1112 const char *orig
= dir
;
1115 orig_len
= strlen(dir
);
1117 __do_free
char *makeme
= NULL
;
1121 dir
= tmp
+ strspn(tmp
, "/");
1122 tmp
= dir
+ strcspn(dir
, "/");
1124 cur_len
= dir
- orig
;
1125 makeme
= strndup(orig
, cur_len
);
1127 return ret_set_errno(-1, ENOMEM
);
1129 ret
= mkdir(makeme
, mode
);
1130 if (ret
< 0 && ((errno
!= EEXIST
) || (orig_len
== cur_len
)))
1131 return log_warn_errno(-1, errno
, "Failed to create directory \"%s\"", makeme
);
1132 } while (tmp
!= dir
);
1137 static bool cgroup_tree_create(struct cgroup_ops
*ops
, struct lxc_conf
*conf
,
1138 struct hierarchy
*h
, const char *cgroup_tree
,
1139 const char *cgroup_leaf
, bool payload
,
1140 const char *cgroup_limit_dir
)
1142 __do_free
char *path
= NULL
, *limit_path
= NULL
;
1143 int ret
, ret_cpuset
;
1145 path
= must_make_path(h
->mountpoint
, h
->container_base_path
, cgroup_leaf
, NULL
);
1146 if (dir_exists(path
))
1147 return log_warn_errno(false, errno
, "The %s cgroup already existed", path
);
1149 ret_cpuset
= cg_legacy_handle_cpuset_hierarchy(h
, cgroup_leaf
);
1151 return log_error_errno(false, errno
, "Failed to handle legacy cpuset controller");
1153 if (payload
&& cgroup_limit_dir
) {
1154 /* with isolation both parts need to not already exist */
1155 limit_path
= must_make_path(h
->mountpoint
,
1156 h
->container_base_path
,
1157 cgroup_limit_dir
, NULL
);
1159 ret
= mkdir_eexist_on_last(limit_path
, 0755);
1161 return log_debug_errno(false,
1162 errno
, "Failed to create %s limiting cgroup",
1165 h
->cgfd_limit
= lxc_open_dirfd(limit_path
);
1166 if (h
->cgfd_limit
< 0)
1167 return log_error_errno(false, errno
,
1168 "Failed to open %s", path
);
1169 h
->container_limit_path
= move_ptr(limit_path
);
1172 * With isolation the devices legacy cgroup needs to be
1173 * iinitialized early, as it typically contains an 'a' (all)
1174 * line, which is not possible once a subdirectory has been
1177 if (string_in_list(h
->controllers
, "devices") &&
1178 !ops
->setup_limits_legacy(ops
, conf
, true))
1179 return log_error(false, "Failed to setup legacy device limits");
1182 ret
= mkdir_eexist_on_last(path
, 0755);
1185 * This is the cpuset controller and
1186 * cg_legacy_handle_cpuset_hierarchy() has created our target
1187 * directory for us to ensure correct initialization.
1189 if (ret_cpuset
!= 1 || cgroup_tree
)
1190 return log_debug_errno(false, errno
, "Failed to create %s cgroup", path
);
1194 h
->cgfd_con
= lxc_open_dirfd(path
);
1195 if (h
->cgfd_con
< 0)
1196 return log_error_errno(false, errno
, "Failed to open %s", path
);
1197 h
->container_full_path
= move_ptr(path
);
1198 if (h
->cgfd_limit
< 0)
1199 h
->cgfd_limit
= h
->cgfd_con
;
1200 if (!h
->container_limit_path
)
1201 h
->container_limit_path
= h
->container_full_path
;
1203 h
->cgfd_mon
= lxc_open_dirfd(path
);
1204 if (h
->cgfd_mon
< 0)
1205 return log_error_errno(false, errno
, "Failed to open %s", path
);
1206 h
->monitor_full_path
= move_ptr(path
);
1212 static void cgroup_tree_leaf_remove(struct hierarchy
*h
, bool payload
)
1214 __do_free
char *full_path
= NULL
, *__limit_path
= NULL
;
1215 char *limit_path
= NULL
;
1218 __lxc_unused __do_close
int fd
= move_fd(h
->cgfd_con
);
1219 full_path
= move_ptr(h
->container_full_path
);
1220 limit_path
= move_ptr(h
->container_limit_path
);
1221 if (limit_path
!= full_path
)
1222 __limit_path
= limit_path
;
1224 __lxc_unused __do_close
int fd
= move_fd(h
->cgfd_mon
);
1225 full_path
= move_ptr(h
->monitor_full_path
);
1228 if (full_path
&& rmdir(full_path
))
1229 SYSWARN("Failed to rmdir(\"%s\") cgroup", full_path
);
1230 if (limit_path
&& rmdir(limit_path
))
1231 SYSWARN("Failed to rmdir(\"%s\") cgroup", limit_path
);
1235 * Check we have no lxc.cgroup.dir, and that lxc.cgroup.dir.limit_prefix is a
1236 * proper prefix directory of lxc.cgroup.dir.payload.
1238 * Returns the prefix length if it is set, otherwise zero on success.
1240 static bool check_cgroup_dir_config(struct lxc_conf
*conf
)
1242 const char *monitor_dir
= conf
->cgroup_meta
.monitor_dir
,
1243 *container_dir
= conf
->cgroup_meta
.container_dir
,
1244 *namespace_dir
= conf
->cgroup_meta
.namespace_dir
;
1246 /* none of the new options are set, all is fine */
1247 if (!monitor_dir
&& !container_dir
&& !namespace_dir
)
1250 /* some are set, make sure lxc.cgroup.dir is not also set*/
1251 if (conf
->cgroup_meta
.dir
)
1252 return log_error_errno(false, EINVAL
,
1253 "lxc.cgroup.dir conflicts with lxc.cgroup.dir.payload/monitor");
1255 /* make sure both monitor and payload are set */
1256 if (!monitor_dir
|| !container_dir
)
1257 return log_error_errno(false, EINVAL
,
1258 "lxc.cgroup.dir.payload and lxc.cgroup.dir.monitor must both be set");
1260 /* namespace_dir may be empty */
1264 __cgfsng_ops
static bool cgfsng_monitor_create(struct cgroup_ops
*ops
, struct lxc_handler
*handler
)
1266 __do_free
char *monitor_cgroup
= NULL
, *__cgroup_tree
= NULL
;
1267 const char *cgroup_tree
;
1271 char *suffix
= NULL
;
1272 struct lxc_conf
*conf
;
1275 return ret_set_errno(false, ENOENT
);
1277 if (!ops
->hierarchies
)
1280 if (ops
->monitor_cgroup
)
1281 return ret_set_errno(false, EEXIST
);
1283 if (!handler
|| !handler
->conf
)
1284 return ret_set_errno(false, EINVAL
);
1286 conf
= handler
->conf
;
1288 if (!check_cgroup_dir_config(conf
))
1291 if (conf
->cgroup_meta
.monitor_dir
) {
1293 monitor_cgroup
= strdup(conf
->cgroup_meta
.monitor_dir
);
1294 } else if (conf
->cgroup_meta
.dir
) {
1295 cgroup_tree
= conf
->cgroup_meta
.dir
;
1296 monitor_cgroup
= must_concat(&len
, conf
->cgroup_meta
.dir
, "/",
1297 DEFAULT_MONITOR_CGROUP_PREFIX
,
1299 CGROUP_CREATE_RETRY
, NULL
);
1300 } else if (ops
->cgroup_pattern
) {
1301 __cgroup_tree
= lxc_string_replace("%n", handler
->name
, ops
->cgroup_pattern
);
1303 return ret_set_errno(false, ENOMEM
);
1305 cgroup_tree
= __cgroup_tree
;
1306 monitor_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1307 DEFAULT_MONITOR_CGROUP
,
1308 CGROUP_CREATE_RETRY
, NULL
);
1311 monitor_cgroup
= must_concat(&len
, DEFAULT_MONITOR_CGROUP_PREFIX
,
1313 CGROUP_CREATE_RETRY
, NULL
);
1315 if (!monitor_cgroup
)
1316 return ret_set_errno(false, ENOMEM
);
1318 if (!conf
->cgroup_meta
.monitor_dir
) {
1319 suffix
= monitor_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1324 sprintf(suffix
, "-%d", idx
);
1326 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1327 if (cgroup_tree_create(ops
, handler
->conf
,
1328 ops
->hierarchies
[i
], cgroup_tree
,
1329 monitor_cgroup
, false, NULL
))
1332 DEBUG("Failed to create cgroup \"%s\"", ops
->hierarchies
[i
]->monitor_full_path
?: "(null)");
1333 for (int j
= 0; j
< i
; j
++)
1334 cgroup_tree_leaf_remove(ops
->hierarchies
[j
], false);
1339 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000 && suffix
);
1341 if (idx
== 1000 || (!suffix
&& idx
!= 0))
1342 return log_error_errno(false, ERANGE
, "Failed to create monitor cgroup");
1344 ops
->monitor_cgroup
= move_ptr(monitor_cgroup
);
1345 return log_info(true, "The monitor process uses \"%s\" as cgroup", ops
->monitor_cgroup
);
1349 * Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1350 * next cgroup_pattern-1, -2, ..., -999.
1352 __cgfsng_ops
static bool cgfsng_payload_create(struct cgroup_ops
*ops
, struct lxc_handler
*handler
)
1354 __do_free
char *container_cgroup
= NULL
,
1355 *__cgroup_tree
= NULL
,
1356 *limiting_cgroup
= NULL
;
1357 const char *cgroup_tree
;
1361 char *suffix
= NULL
;
1362 struct lxc_conf
*conf
;
1365 return ret_set_errno(false, ENOENT
);
1367 if (!ops
->hierarchies
)
1370 if (ops
->container_cgroup
)
1371 return ret_set_errno(false, EEXIST
);
1373 if (!handler
|| !handler
->conf
)
1374 return ret_set_errno(false, EINVAL
);
1376 conf
= handler
->conf
;
1378 if (!check_cgroup_dir_config(conf
))
1381 if (conf
->cgroup_meta
.container_dir
) {
1384 limiting_cgroup
= strdup(conf
->cgroup_meta
.container_dir
);
1385 if (!limiting_cgroup
)
1386 return ret_set_errno(false, ENOMEM
);
1388 if (conf
->cgroup_meta
.namespace_dir
) {
1389 container_cgroup
= must_make_path(limiting_cgroup
,
1390 conf
->cgroup_meta
.namespace_dir
,
1393 /* explicit paths but without isolation */
1394 container_cgroup
= move_ptr(limiting_cgroup
);
1396 } else if (conf
->cgroup_meta
.dir
) {
1397 cgroup_tree
= conf
->cgroup_meta
.dir
;
1398 container_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1399 DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1401 CGROUP_CREATE_RETRY
, NULL
);
1402 } else if (ops
->cgroup_pattern
) {
1403 __cgroup_tree
= lxc_string_replace("%n", handler
->name
, ops
->cgroup_pattern
);
1405 return ret_set_errno(false, ENOMEM
);
1407 cgroup_tree
= __cgroup_tree
;
1408 container_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1409 DEFAULT_PAYLOAD_CGROUP
,
1410 CGROUP_CREATE_RETRY
, NULL
);
1413 container_cgroup
= must_concat(&len
, DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1415 CGROUP_CREATE_RETRY
, NULL
);
1417 if (!container_cgroup
)
1418 return ret_set_errno(false, ENOMEM
);
1420 if (!conf
->cgroup_meta
.container_dir
) {
1421 suffix
= container_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1426 sprintf(suffix
, "-%d", idx
);
1428 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1429 if (cgroup_tree_create(ops
, handler
->conf
,
1430 ops
->hierarchies
[i
], cgroup_tree
,
1431 container_cgroup
, true,
1435 DEBUG("Failed to create cgroup \"%s\"", ops
->hierarchies
[i
]->container_full_path
?: "(null)");
1436 for (int j
= 0; j
< i
; j
++)
1437 cgroup_tree_leaf_remove(ops
->hierarchies
[j
], true);
1442 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000 && suffix
);
1444 if (idx
== 1000 || (!suffix
&& idx
!= 0))
1445 return log_error_errno(false, ERANGE
, "Failed to create container cgroup");
1447 ops
->container_cgroup
= move_ptr(container_cgroup
);
1448 INFO("The container process uses \"%s\" as cgroup", ops
->container_cgroup
);
1452 __cgfsng_ops
static bool cgfsng_monitor_enter(struct cgroup_ops
*ops
,
1453 struct lxc_handler
*handler
)
1455 int monitor_len
, transient_len
= 0;
1456 char monitor
[INTTYPE_TO_STRLEN(pid_t
)],
1457 transient
[INTTYPE_TO_STRLEN(pid_t
)];
1460 return ret_set_errno(false, ENOENT
);
1462 if (!ops
->hierarchies
)
1465 if (!ops
->monitor_cgroup
)
1466 return ret_set_errno(false, ENOENT
);
1468 if (!handler
|| !handler
->conf
)
1469 return ret_set_errno(false, EINVAL
);
1471 monitor_len
= snprintf(monitor
, sizeof(monitor
), "%d", handler
->monitor_pid
);
1472 if (handler
->transient_pid
> 0)
1473 transient_len
= snprintf(transient
, sizeof(transient
), "%d", handler
->transient_pid
);
1475 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1476 struct hierarchy
*h
= ops
->hierarchies
[i
];
1479 ret
= lxc_writeat(h
->cgfd_mon
, "cgroup.procs", monitor
, monitor_len
);
1481 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", h
->monitor_full_path
);
1483 if (handler
->transient_pid
<= 0)
1486 ret
= lxc_writeat(h
->cgfd_mon
, "cgroup.procs", transient
, transient_len
);
1488 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", h
->monitor_full_path
);
1491 * we don't keep the fds for non-unified hierarchies around
1492 * mainly because we don't make use of them anymore after the
1493 * core cgroup setup is done but also because there are quite a
1496 if (!is_unified_hierarchy(h
))
1497 close_prot_errno_disarm(h
->cgfd_mon
);
1499 handler
->transient_pid
= -1;
1504 __cgfsng_ops
static bool cgfsng_payload_enter(struct cgroup_ops
*ops
,
1505 struct lxc_handler
*handler
)
1508 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1511 return ret_set_errno(false, ENOENT
);
1513 if (!ops
->hierarchies
)
1516 if (!ops
->container_cgroup
)
1517 return ret_set_errno(false, ENOENT
);
1519 if (!handler
|| !handler
->conf
)
1520 return ret_set_errno(false, EINVAL
);
1522 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", handler
->pid
);
1524 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1525 struct hierarchy
*h
= ops
->hierarchies
[i
];
1528 if (is_unified_hierarchy(h
) && handler
->clone_flags
& CLONE_INTO_CGROUP
)
1531 ret
= lxc_writeat(h
->cgfd_con
, "cgroup.procs", pidstr
, len
);
1533 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", h
->container_full_path
);
1539 static int fchowmodat(int dirfd
, const char *path
, uid_t chown_uid
,
1540 gid_t chown_gid
, mode_t chmod_mode
)
1544 ret
= fchownat(dirfd
, path
, chown_uid
, chown_gid
,
1545 AT_EMPTY_PATH
| AT_SYMLINK_NOFOLLOW
);
1547 return log_warn_errno(-1,
1548 errno
, "Failed to fchownat(%d, %s, %d, %d, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )",
1549 dirfd
, path
, (int)chown_uid
,
1552 ret
= fchmodat(dirfd
, (*path
!= '\0') ? path
: ".", chmod_mode
, 0);
1554 return log_warn_errno(-1, errno
, "Failed to fchmodat(%d, %s, %d, AT_SYMLINK_NOFOLLOW)",
1555 dirfd
, path
, (int)chmod_mode
);
1560 /* chgrp the container cgroups to container group. We leave
1561 * the container owner as cgroup owner. So we must make the
1562 * directories 775 so that the container can create sub-cgroups.
1564 * Also chown the tasks and cgroup.procs files. Those may not
1565 * exist depending on kernel version.
1567 static int chown_cgroup_wrapper(void *data
)
1571 struct generic_userns_exec_data
*arg
= data
;
1572 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
1573 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
1575 if (!lxc_setgroups(0, NULL
) && errno
!= EPERM
)
1576 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
1578 ret
= setresgid(nsgid
, nsgid
, nsgid
);
1580 return log_error_errno(-1, errno
, "Failed to setresgid(%d, %d, %d)",
1581 (int)nsgid
, (int)nsgid
, (int)nsgid
);
1583 ret
= setresuid(nsuid
, nsuid
, nsuid
);
1585 return log_error_errno(-1, errno
, "Failed to setresuid(%d, %d, %d)",
1586 (int)nsuid
, (int)nsuid
, (int)nsuid
);
1588 destuid
= get_ns_uid(arg
->origuid
);
1589 if (destuid
== LXC_INVALID_UID
)
1592 for (int i
= 0; arg
->hierarchies
[i
]; i
++) {
1593 int dirfd
= arg
->hierarchies
[i
]->cgfd_con
;
1595 (void)fchowmodat(dirfd
, "", destuid
, nsgid
, 0775);
1598 * Failures to chown() these are inconvenient but not
1599 * detrimental We leave these owned by the container launcher,
1600 * so that container root can write to the files to attach. We
1601 * chmod() them 664 so that container systemd can write to the
1602 * files (which systemd in wily insists on doing).
1605 if (arg
->hierarchies
[i
]->version
== CGROUP_SUPER_MAGIC
)
1606 (void)fchowmodat(dirfd
, "tasks", destuid
, nsgid
, 0664);
1608 (void)fchowmodat(dirfd
, "cgroup.procs", destuid
, nsgid
, 0664);
1610 if (arg
->hierarchies
[i
]->version
!= CGROUP2_SUPER_MAGIC
)
1613 for (char **p
= arg
->hierarchies
[i
]->cgroup2_chown
; p
&& *p
; p
++)
1614 (void)fchowmodat(dirfd
, *p
, destuid
, nsgid
, 0664);
1620 __cgfsng_ops
static bool cgfsng_chown(struct cgroup_ops
*ops
,
1621 struct lxc_conf
*conf
)
1623 struct generic_userns_exec_data wrap
;
1626 return ret_set_errno(false, ENOENT
);
1628 if (!ops
->hierarchies
)
1631 if (!ops
->container_cgroup
)
1632 return ret_set_errno(false, ENOENT
);
1635 return ret_set_errno(false, EINVAL
);
1637 if (lxc_list_empty(&conf
->id_map
))
1640 wrap
.origuid
= geteuid();
1642 wrap
.hierarchies
= ops
->hierarchies
;
1645 if (userns_exec_1(conf
, chown_cgroup_wrapper
, &wrap
, "chown_cgroup_wrapper") < 0)
1646 return log_error_errno(false, errno
, "Error requesting cgroup chown in new user namespace");
1651 __cgfsng_ops
static void cgfsng_payload_finalize(struct cgroup_ops
*ops
)
1656 if (!ops
->hierarchies
)
1659 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1660 struct hierarchy
*h
= ops
->hierarchies
[i
];
1662 * we don't keep the fds for non-unified hierarchies around
1663 * mainly because we don't make use of them anymore after the
1664 * core cgroup setup is done but also because there are quite a
1667 if (!is_unified_hierarchy(h
))
1668 close_prot_errno_disarm(h
->cgfd_con
);
1672 * The checking for freezer support should obviously be done at cgroup
1673 * initialization time but that doesn't work reliable. The freezer
1674 * controller has been demoted (rightly so) to a simple file located in
1675 * each non-root cgroup. At the time when the container is created we
1676 * might still be located in /sys/fs/cgroup and so checking for
1677 * cgroup.freeze won't tell us anything because this file doesn't exist
1678 * in the root cgroup. We could then iterate through /sys/fs/cgroup and
1679 * find an already existing cgroup and then check within that cgroup
1680 * for the existence of cgroup.freeze but that will only work on
1681 * systemd based hosts. Other init systems might not manage cgroups and
1682 * so no cgroup will exist. So we defer until we have created cgroups
1683 * for our container which means we check here.
1685 if (pure_unified_layout(ops
) &&
1686 !faccessat(ops
->unified
->cgfd_con
, "cgroup.freeze", F_OK
,
1687 AT_SYMLINK_NOFOLLOW
)) {
1688 TRACE("Unified hierarchy supports freezer");
1689 ops
->unified
->freezer_controller
= 1;
1693 /* cgroup-full:* is done, no need to create subdirs */
1694 static inline bool cg_mount_needs_subdirs(int type
)
1696 return !(type
>= LXC_AUTO_CGROUP_FULL_RO
);
1699 /* After $rootfs/sys/fs/container/controller/the/cg/path has been created,
1700 * remount controller ro if needed and bindmount the cgroupfs onto
1701 * control/the/cg/path.
1703 static int cg_legacy_mount_controllers(int type
, struct hierarchy
*h
,
1704 char *controllerpath
, char *cgpath
,
1705 const char *container_cgroup
)
1707 __do_free
char *sourcepath
= NULL
;
1708 int ret
, remount_flags
;
1709 int flags
= MS_BIND
;
1711 if (type
== LXC_AUTO_CGROUP_RO
|| type
== LXC_AUTO_CGROUP_MIXED
) {
1712 ret
= mount(controllerpath
, controllerpath
, "cgroup", MS_BIND
, NULL
);
1714 return log_error_errno(-1, errno
, "Failed to bind mount \"%s\" onto \"%s\"",
1715 controllerpath
, controllerpath
);
1717 remount_flags
= add_required_remount_flags(controllerpath
,
1719 flags
| MS_REMOUNT
);
1720 ret
= mount(controllerpath
, controllerpath
, "cgroup",
1721 remount_flags
| MS_REMOUNT
| MS_BIND
| MS_RDONLY
,
1724 return log_error_errno(-1, errno
, "Failed to remount \"%s\" ro", controllerpath
);
1726 INFO("Remounted %s read-only", controllerpath
);
1729 sourcepath
= must_make_path(h
->mountpoint
, h
->container_base_path
,
1730 container_cgroup
, NULL
);
1731 if (type
== LXC_AUTO_CGROUP_RO
)
1734 ret
= mount(sourcepath
, cgpath
, "cgroup", flags
, NULL
);
1736 return log_error_errno(-1, errno
, "Failed to mount \"%s\" onto \"%s\"",
1737 h
->controllers
[0], cgpath
);
1738 INFO("Mounted \"%s\" onto \"%s\"", h
->controllers
[0], cgpath
);
1740 if (flags
& MS_RDONLY
) {
1741 remount_flags
= add_required_remount_flags(sourcepath
, cgpath
,
1742 flags
| MS_REMOUNT
);
1743 ret
= mount(sourcepath
, cgpath
, "cgroup", remount_flags
, NULL
);
1745 return log_error_errno(-1, errno
, "Failed to remount \"%s\" ro", cgpath
);
1746 INFO("Remounted %s read-only", cgpath
);
1749 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath
);
1753 /* __cg_mount_direct
1755 * Mount cgroup hierarchies directly without using bind-mounts. The main
1756 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1757 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1759 static int __cg_mount_direct(int type
, struct hierarchy
*h
,
1760 const char *controllerpath
)
1762 __do_free
char *controllers
= NULL
;
1763 char *fstype
= "cgroup2";
1764 unsigned long flags
= 0;
1770 flags
|= MS_RELATIME
;
1772 if (type
== LXC_AUTO_CGROUP_RO
|| type
== LXC_AUTO_CGROUP_FULL_RO
)
1775 if (h
->version
!= CGROUP2_SUPER_MAGIC
) {
1776 controllers
= lxc_string_join(",", (const char **)h
->controllers
, false);
1782 ret
= mount("cgroup", controllerpath
, fstype
, flags
, controllers
);
1784 return log_error_errno(-1, errno
, "Failed to mount \"%s\" with cgroup filesystem type %s",
1785 controllerpath
, fstype
);
1787 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath
, fstype
);
/* Mount a cgroup hierarchy for a container that runs in its own cgroup
 * namespace: a plain direct mount, no bind-mount games needed.
 */
static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
					       const char *controllerpath)
{
	return __cg_mount_direct(type, h, controllerpath);
}
1797 static inline int cg_mount_cgroup_full(int type
, struct hierarchy
*h
,
1798 const char *controllerpath
)
1800 if (type
< LXC_AUTO_CGROUP_FULL_RO
|| type
> LXC_AUTO_CGROUP_FULL_MIXED
)
1803 return __cg_mount_direct(type
, h
, controllerpath
);
1806 __cgfsng_ops
static bool cgfsng_mount(struct cgroup_ops
*ops
,
1807 struct lxc_handler
*handler
,
1808 const char *root
, int type
)
1810 __do_free
char *cgroup_root
= NULL
;
1811 bool has_cgns
= false, wants_force_mount
= false;
1815 return ret_set_errno(false, ENOENT
);
1817 if (!ops
->hierarchies
)
1820 if (!handler
|| !handler
->conf
)
1821 return ret_set_errno(false, EINVAL
);
1823 if ((type
& LXC_AUTO_CGROUP_MASK
) == 0)
1826 if (type
& LXC_AUTO_CGROUP_FORCE
) {
1827 type
&= ~LXC_AUTO_CGROUP_FORCE
;
1828 wants_force_mount
= true;
1831 if (!wants_force_mount
) {
1832 wants_force_mount
= !lxc_wants_cap(CAP_SYS_ADMIN
, handler
->conf
);
1835 * Most recent distro versions currently have init system that
1836 * do support cgroup2 but do not mount it by default unless
1837 * explicitly told so even if the host is cgroup2 only. That
1838 * means they often will fail to boot. Fix this by pre-mounting
1839 * cgroup2 by default. We will likely need to be doing this a
1840 * few years until all distros have switched over to cgroup2 at
1841 * which point we can safely assume that their init systems
1842 * will mount it themselves.
1844 if (pure_unified_layout(ops
))
1845 wants_force_mount
= true;
1848 has_cgns
= cgns_supported();
1849 if (has_cgns
&& !wants_force_mount
)
1852 if (type
== LXC_AUTO_CGROUP_NOSPEC
)
1853 type
= LXC_AUTO_CGROUP_MIXED
;
1854 else if (type
== LXC_AUTO_CGROUP_FULL_NOSPEC
)
1855 type
= LXC_AUTO_CGROUP_FULL_MIXED
;
1857 cgroup_root
= must_make_path(root
, DEFAULT_CGROUP_MOUNTPOINT
, NULL
);
1858 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
) {
1859 if (has_cgns
&& wants_force_mount
) {
1861 * If cgroup namespaces are supported but the container
1862 * will not have CAP_SYS_ADMIN after it has started we
1863 * need to mount the cgroups manually.
1865 return cg_mount_in_cgroup_namespace(type
, ops
->unified
, cgroup_root
) == 0;
1868 return cg_mount_cgroup_full(type
, ops
->unified
, cgroup_root
) == 0;
1872 * Mount a tmpfs over DEFAULT_CGROUP_MOUNTPOINT. Note that we're
1873 * relying on RESOLVE_BENEATH so we need to skip the leading "/" in the
1874 * DEFAULT_CGROUP_MOUNTPOINT define.
1876 ret
= safe_mount_beneath(root
, NULL
,
1877 DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
,
1879 MS_NOSUID
| MS_NODEV
| MS_NOEXEC
| MS_RELATIME
,
1880 "size=10240k,mode=755");
1882 if (errno
!= ENOSYS
)
1883 return log_error_errno(false, errno
,
1884 "Failed to mount tmpfs on %s",
1885 DEFAULT_CGROUP_MOUNTPOINT
);
1887 ret
= safe_mount(NULL
, cgroup_root
, "tmpfs",
1888 MS_NOSUID
| MS_NODEV
| MS_NOEXEC
| MS_RELATIME
,
1889 "size=10240k,mode=755", root
);
1894 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1895 __do_free
char *controllerpath
= NULL
, *path2
= NULL
;
1896 struct hierarchy
*h
= ops
->hierarchies
[i
];
1897 char *controller
= strrchr(h
->mountpoint
, '/');
1903 controllerpath
= must_make_path(cgroup_root
, controller
, NULL
);
1904 if (dir_exists(controllerpath
))
1907 ret
= mkdir(controllerpath
, 0755);
1909 return log_error_errno(false, errno
, "Error creating cgroup path: %s", controllerpath
);
1911 if (has_cgns
&& wants_force_mount
) {
1912 /* If cgroup namespaces are supported but the container
1913 * will not have CAP_SYS_ADMIN after it has started we
1914 * need to mount the cgroups manually.
1916 ret
= cg_mount_in_cgroup_namespace(type
, h
, controllerpath
);
1923 ret
= cg_mount_cgroup_full(type
, h
, controllerpath
);
1927 if (!cg_mount_needs_subdirs(type
))
1930 path2
= must_make_path(controllerpath
, h
->container_base_path
,
1931 ops
->container_cgroup
, NULL
);
1932 ret
= mkdir_p(path2
, 0755);
1936 ret
= cg_legacy_mount_controllers(type
, h
, controllerpath
,
1937 path2
, ops
->container_cgroup
);
1945 /* Only root needs to escape to the cgroup of its init. */
1946 __cgfsng_ops
static bool cgfsng_escape(const struct cgroup_ops
*ops
,
1947 struct lxc_conf
*conf
)
1950 return ret_set_errno(false, ENOENT
);
1952 if (!ops
->hierarchies
)
1956 return ret_set_errno(false, EINVAL
);
1958 if (conf
->cgroup_meta
.relative
|| geteuid())
1961 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1962 __do_free
char *fullpath
= NULL
;
1966 must_make_path(ops
->hierarchies
[i
]->mountpoint
,
1967 ops
->hierarchies
[i
]->container_base_path
,
1968 "cgroup.procs", NULL
);
1969 ret
= lxc_write_to_file(fullpath
, "0", 2, false, 0666);
1971 return log_error_errno(false, errno
, "Failed to escape to cgroup \"%s\"", fullpath
);
1977 __cgfsng_ops
static int cgfsng_num_hierarchies(struct cgroup_ops
*ops
)
1982 return ret_set_errno(-1, ENOENT
);
1984 if (!ops
->hierarchies
)
1987 for (; ops
->hierarchies
[i
]; i
++)
1993 __cgfsng_ops
static bool cgfsng_get_hierarchies(struct cgroup_ops
*ops
, int n
,
1999 return ret_set_errno(false, ENOENT
);
2001 if (!ops
->hierarchies
)
2002 return ret_set_errno(false, ENOENT
);
2004 /* sanity check n */
2005 for (i
= 0; i
< n
; i
++)
2006 if (!ops
->hierarchies
[i
])
2007 return ret_set_errno(false, ENOENT
);
2009 *out
= ops
->hierarchies
[i
]->controllers
;
2014 static bool cg_legacy_freeze(struct cgroup_ops
*ops
)
2016 struct hierarchy
*h
;
2018 h
= get_hierarchy(ops
, "freezer");
2020 return ret_set_errno(-1, ENOENT
);
2022 return lxc_write_openat(h
->container_full_path
, "freezer.state",
2023 "FROZEN", STRLITERALLEN("FROZEN"));
2026 static int freezer_cgroup_events_cb(int fd
, uint32_t events
, void *cbdata
,
2027 struct lxc_epoll_descr
*descr
)
2029 __do_close
int duped_fd
= -EBADF
;
2030 __do_free
char *line
= NULL
;
2031 __do_fclose
FILE *f
= NULL
;
2032 int state
= PTR_TO_INT(cbdata
);
2034 const char *state_string
;
2038 return LXC_MAINLOOP_ERROR
;
2040 if (lseek(duped_fd
, 0, SEEK_SET
) < (off_t
)-1)
2041 return LXC_MAINLOOP_ERROR
;
2043 f
= fdopen(duped_fd
, "re");
2045 return LXC_MAINLOOP_ERROR
;
2049 state_string
= "frozen 1";
2051 state_string
= "frozen 0";
2053 while (getline(&line
, &len
, f
) != -1)
2054 if (strncmp(line
, state_string
, STRLITERALLEN("frozen") + 2) == 0)
2055 return LXC_MAINLOOP_CLOSE
;
2057 return LXC_MAINLOOP_CONTINUE
;
2060 static int cg_unified_freeze_do(struct cgroup_ops
*ops
, int timeout
,
2061 const char *state_string
,
2063 const char *epoll_error
,
2064 const char *wait_error
)
2066 __do_close
int fd
= -EBADF
;
2067 call_cleaner(lxc_mainloop_close
) struct lxc_epoll_descr
*descr_ptr
= NULL
;
2069 struct lxc_epoll_descr descr
;
2070 struct hierarchy
*h
;
2074 return ret_set_errno(-1, ENOENT
);
2076 if (!h
->container_full_path
)
2077 return ret_set_errno(-1, EEXIST
);
2080 __do_free
char *events_file
= NULL
;
2082 events_file
= must_make_path(h
->container_full_path
, "cgroup.events", NULL
);
2083 fd
= open(events_file
, O_RDONLY
| O_CLOEXEC
);
2085 return log_error_errno(-1, errno
, "Failed to open cgroup.events file");
2087 ret
= lxc_mainloop_open(&descr
);
2089 return log_error_errno(-1, errno
, "%s", epoll_error
);
2091 /* automatically cleaned up now */
2094 ret
= lxc_mainloop_add_handler_events(&descr
, fd
, EPOLLPRI
, freezer_cgroup_events_cb
, INT_TO_PTR(state_num
));
2096 return log_error_errno(-1, errno
, "Failed to add cgroup.events fd handler to mainloop");
2099 ret
= lxc_write_openat(h
->container_full_path
, "cgroup.freeze", state_string
, 1);
2101 return log_error_errno(-1, errno
, "Failed to open cgroup.freeze file");
2103 if (timeout
!= 0 && lxc_mainloop(&descr
, timeout
))
2104 return log_error_errno(-1, errno
, "%s", wait_error
);
/* Freeze via the unified (cgroup v2) cgroup.freeze interface, waiting up
 * to @timeout for cgroup.events to report the frozen state.
 */
static int cg_unified_freeze(struct cgroup_ops *ops, int timeout)
{
	return cg_unified_freeze_do(ops, timeout, "1", 1,
				    "Failed to create epoll instance to wait for container freeze",
				    "Failed to wait for container to be frozen");
}
2116 __cgfsng_ops
static int cgfsng_freeze(struct cgroup_ops
*ops
, int timeout
)
2118 if (!ops
->hierarchies
)
2119 return ret_set_errno(-1, ENOENT
);
2121 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
2122 return cg_legacy_freeze(ops
);
2124 return cg_unified_freeze(ops
, timeout
);
2127 static int cg_legacy_unfreeze(struct cgroup_ops
*ops
)
2129 struct hierarchy
*h
;
2131 h
= get_hierarchy(ops
, "freezer");
2133 return ret_set_errno(-1, ENOENT
);
2135 return lxc_write_openat(h
->container_full_path
, "freezer.state",
2136 "THAWED", STRLITERALLEN("THAWED"));
/* Thaw via the unified (cgroup v2) cgroup.freeze interface, waiting up to
 * @timeout for cgroup.events to report the unfrozen state.
 */
static int cg_unified_unfreeze(struct cgroup_ops *ops, int timeout)
{
	return cg_unified_freeze_do(ops, timeout, "0", 0,
				    "Failed to create epoll instance to wait for container unfreeze",
				    "Failed to wait for container to be unfrozen");
}
2146 __cgfsng_ops
static int cgfsng_unfreeze(struct cgroup_ops
*ops
, int timeout
)
2148 if (!ops
->hierarchies
)
2149 return ret_set_errno(-1, ENOENT
);
2151 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
2152 return cg_legacy_unfreeze(ops
);
2154 return cg_unified_unfreeze(ops
, timeout
);
2157 static const char *cgfsng_get_cgroup_do(struct cgroup_ops
*ops
,
2158 const char *controller
, bool limiting
)
2160 struct hierarchy
*h
;
2162 h
= get_hierarchy(ops
, controller
);
2164 return log_warn_errno(NULL
, ENOENT
, "Failed to find hierarchy for controller \"%s\"",
2165 controller
? controller
: "(null)");
2168 return h
->container_limit_path
2169 ? h
->container_limit_path
+ strlen(h
->mountpoint
)
2172 return h
->container_full_path
2173 ? h
->container_full_path
+ strlen(h
->mountpoint
)
2177 __cgfsng_ops
static const char *cgfsng_get_cgroup(struct cgroup_ops
*ops
,
2178 const char *controller
)
2180 return cgfsng_get_cgroup_do(ops
, controller
, false);
2183 __cgfsng_ops
static const char *cgfsng_get_limiting_cgroup(struct cgroup_ops
*ops
,
2184 const char *controller
)
2186 return cgfsng_get_cgroup_do(ops
, controller
, true);
2189 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2190 * which must be freed by the caller.
2192 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy
*h
,
2194 const char *filename
)
2196 return must_make_path(h
->mountpoint
, inpath
, filename
, NULL
);
2199 static int cgroup_attach_leaf(const struct lxc_conf
*conf
, int unified_fd
, pid_t pid
)
2203 char pidstr
[INTTYPE_TO_STRLEN(int64_t) + 1];
2206 /* Create leaf cgroup. */
2207 ret
= mkdirat(unified_fd
, ".lxc", 0755);
2208 if (ret
< 0 && errno
!= EEXIST
)
2209 return log_error_errno(-1, errno
, "Failed to create leaf cgroup \".lxc\"");
2211 pidstr_len
= sprintf(pidstr
, INT64_FMT
, (int64_t)pid
);
2212 ret
= lxc_writeat(unified_fd
, ".lxc/cgroup.procs", pidstr
, pidstr_len
);
2214 ret
= lxc_writeat(unified_fd
, "cgroup.procs", pidstr
, pidstr_len
);
2218 /* this is a non-leaf node */
2220 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2224 char attach_cgroup
[STRLITERALLEN(".lxc-/cgroup.procs") + INTTYPE_TO_STRLEN(int) + 1];
2225 char *slash
= attach_cgroup
;
2227 ret
= snprintf(attach_cgroup
, sizeof(attach_cgroup
), ".lxc-%d/cgroup.procs", idx
);
2228 if (ret
< 0 || (size_t)ret
>= sizeof(attach_cgroup
))
2229 return ret_errno(EIO
);
2232 * This shouldn't really happen but the compiler might complain
2233 * that a short write would cause a buffer overrun. So be on
2236 if (ret
< STRLITERALLEN(".lxc-/cgroup.procs"))
2237 return log_error_errno(-EINVAL
, EINVAL
, "Unexpected short write would cause buffer-overrun");
2239 slash
+= (ret
- STRLITERALLEN("/cgroup.procs"));
2242 ret
= mkdirat(unified_fd
, attach_cgroup
, 0755);
2243 if (ret
< 0 && errno
!= EEXIST
)
2244 return log_error_errno(-1, errno
, "Failed to create cgroup %s", attach_cgroup
);
2250 ret
= lxc_writeat(unified_fd
, attach_cgroup
, pidstr
, pidstr_len
);
2254 if (rm
&& unlinkat(unified_fd
, attach_cgroup
, AT_REMOVEDIR
))
2255 SYSERROR("Failed to remove cgroup \"%d(%s)\"", unified_fd
, attach_cgroup
);
2257 /* this is a non-leaf node */
2259 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2262 } while (idx
< 1000);
2264 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2267 static int cgroup_attach_create_leaf(const struct lxc_conf
*conf
,
2268 int unified_fd
, int *sk_fd
)
2270 __do_close
int sk
= *sk_fd
, target_fd0
= -EBADF
, target_fd1
= -EBADF
;
2274 /* Create leaf cgroup. */
2275 ret
= mkdirat(unified_fd
, ".lxc", 0755);
2276 if (ret
< 0 && errno
!= EEXIST
)
2277 return log_error_errno(-1, errno
, "Failed to create leaf cgroup \".lxc\"");
2279 target_fd0
= openat(unified_fd
, ".lxc/cgroup.procs", O_WRONLY
| O_CLOEXEC
| O_NOFOLLOW
);
2281 return log_error_errno(-errno
, errno
, "Failed to open \".lxc/cgroup.procs\"");
2282 target_fds
[0] = target_fd0
;
2284 target_fd1
= openat(unified_fd
, "cgroup.procs", O_WRONLY
| O_CLOEXEC
| O_NOFOLLOW
);
2286 return log_error_errno(-errno
, errno
, "Failed to open \".lxc/cgroup.procs\"");
2287 target_fds
[1] = target_fd1
;
2289 ret
= lxc_abstract_unix_send_fds(sk
, target_fds
, 2, NULL
, 0);
2291 return log_error_errno(-errno
, errno
, "Failed to send \".lxc/cgroup.procs\" fds %d and %d",
2292 target_fd0
, target_fd1
);
2294 return log_debug(0, "Sent target cgroup fds %d and %d", target_fd0
, target_fd1
);
2297 static int cgroup_attach_move_into_leaf(const struct lxc_conf
*conf
,
2298 int *sk_fd
, pid_t pid
)
2300 __do_close
int sk
= *sk_fd
, target_fd0
= -EBADF
, target_fd1
= -EBADF
;
2302 char pidstr
[INTTYPE_TO_STRLEN(int64_t) + 1];
2306 ret
= lxc_abstract_unix_recv_fds(sk
, target_fds
, 2, NULL
, 0);
2308 return log_error_errno(-1, errno
, "Failed to receive target cgroup fd");
2309 target_fd0
= target_fds
[0];
2310 target_fd1
= target_fds
[1];
2312 pidstr_len
= sprintf(pidstr
, INT64_FMT
, (int64_t)pid
);
2314 ret
= lxc_write_nointr(target_fd0
, pidstr
, pidstr_len
);
2315 if (ret
> 0 && ret
== pidstr_len
)
2316 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd0
);
2318 ret
= lxc_write_nointr(target_fd1
, pidstr
, pidstr_len
);
2319 if (ret
> 0 && ret
== pidstr_len
)
2320 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd1
);
2322 return log_debug_errno(-1, errno
, "Failed to move process into target cgroup via fd %d and %d",
2323 target_fd0
, target_fd1
);
2326 struct userns_exec_unified_attach_data
{
2327 const struct lxc_conf
*conf
;
2333 static int cgroup_unified_attach_child_wrapper(void *data
)
2335 struct userns_exec_unified_attach_data
*args
= data
;
2337 if (!args
->conf
|| args
->unified_fd
< 0 || args
->pid
<= 0 ||
2338 args
->sk_pair
[0] < 0 || args
->sk_pair
[1] < 0)
2339 return ret_errno(EINVAL
);
2341 close_prot_errno_disarm(args
->sk_pair
[0]);
2342 return cgroup_attach_create_leaf(args
->conf
, args
->unified_fd
,
2346 static int cgroup_unified_attach_parent_wrapper(void *data
)
2348 struct userns_exec_unified_attach_data
*args
= data
;
2350 if (!args
->conf
|| args
->unified_fd
< 0 || args
->pid
<= 0 ||
2351 args
->sk_pair
[0] < 0 || args
->sk_pair
[1] < 0)
2352 return ret_errno(EINVAL
);
2354 close_prot_errno_disarm(args
->sk_pair
[1]);
2355 return cgroup_attach_move_into_leaf(args
->conf
, &args
->sk_pair
[0],
2359 int cgroup_attach(const struct lxc_conf
*conf
, const char *name
,
2360 const char *lxcpath
, pid_t pid
)
2362 __do_close
int unified_fd
= -EBADF
;
2365 if (!conf
|| !name
|| !lxcpath
|| pid
<= 0)
2366 return ret_errno(EINVAL
);
2368 unified_fd
= lxc_cmd_get_cgroup2_fd(name
, lxcpath
);
2370 return ret_errno(EBADF
);
2372 if (!lxc_list_empty(&conf
->id_map
)) {
2373 struct userns_exec_unified_attach_data args
= {
2375 .unified_fd
= unified_fd
,
2379 ret
= socketpair(PF_LOCAL
, SOCK_STREAM
| SOCK_CLOEXEC
, 0, args
.sk_pair
);
2383 ret
= userns_exec_minimal(conf
,
2384 cgroup_unified_attach_parent_wrapper
,
2386 cgroup_unified_attach_child_wrapper
,
2389 ret
= cgroup_attach_leaf(conf
, unified_fd
, pid
);
2395 /* Technically, we're always at a delegation boundary here (This is especially
2396 * true when cgroup namespaces are available.). The reasoning is that in order
2397 * for us to have been able to start a container in the first place the root
2398 * cgroup must have been a leaf node. Now, either the container's init system
2399 * has populated the cgroup and kept it as a leaf node or it has created
2400 * subtrees. In the former case we will simply attach to the leaf node we
2401 * created when we started the container in the latter case we create our own
2402 * cgroup for the attaching process.
2404 static int __cg_unified_attach(const struct hierarchy
*h
,
2405 const struct lxc_conf
*conf
, const char *name
,
2406 const char *lxcpath
, pid_t pid
,
2407 const char *controller
)
2409 __do_close
int unified_fd
= -EBADF
;
2410 __do_free
char *path
= NULL
, *cgroup
= NULL
;
2413 if (!conf
|| !name
|| !lxcpath
|| pid
<= 0)
2414 return ret_errno(EINVAL
);
2416 ret
= cgroup_attach(conf
, name
, lxcpath
, pid
);
2418 return log_trace(0, "Attached to unified cgroup via command handler");
2420 return log_error_errno(ret
, errno
, "Failed to attach to unified cgroup");
2422 /* Fall back to retrieving the path for the unified cgroup. */
2423 cgroup
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2428 path
= must_make_path(h
->mountpoint
, cgroup
, NULL
);
2430 unified_fd
= open(path
, O_PATH
| O_DIRECTORY
| O_CLOEXEC
);
2432 return ret_errno(EBADF
);
2434 if (!lxc_list_empty(&conf
->id_map
)) {
2435 struct userns_exec_unified_attach_data args
= {
2437 .unified_fd
= unified_fd
,
2441 ret
= socketpair(PF_LOCAL
, SOCK_STREAM
| SOCK_CLOEXEC
, 0, args
.sk_pair
);
2445 ret
= userns_exec_minimal(conf
,
2446 cgroup_unified_attach_parent_wrapper
,
2448 cgroup_unified_attach_child_wrapper
,
2451 ret
= cgroup_attach_leaf(conf
, unified_fd
, pid
);
2457 __cgfsng_ops
static bool cgfsng_attach(struct cgroup_ops
*ops
,
2458 const struct lxc_conf
*conf
,
2459 const char *name
, const char *lxcpath
,
2463 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
2466 return ret_set_errno(false, ENOENT
);
2468 if (!ops
->hierarchies
)
2471 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", pid
);
2472 if (len
< 0 || (size_t)len
>= sizeof(pidstr
))
2475 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
2476 __do_free
char *fullpath
= NULL
, *path
= NULL
;
2477 struct hierarchy
*h
= ops
->hierarchies
[i
];
2479 if (h
->version
== CGROUP2_SUPER_MAGIC
) {
2480 ret
= __cg_unified_attach(h
, conf
, name
, lxcpath
, pid
,
2488 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, h
->controllers
[0]);
2493 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, "cgroup.procs");
2494 ret
= lxc_write_to_file(fullpath
, pidstr
, len
, false, 0666);
2496 return log_error_errno(false, errno
, "Failed to attach %d to %s",
2497 (int)pid
, fullpath
);
2503 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2504 * don't have a cgroup_data set up, so we ask the running container through the
2505 * commands API for the cgroup path.
2507 __cgfsng_ops
static int cgfsng_get(struct cgroup_ops
*ops
, const char *filename
,
2508 char *value
, size_t len
, const char *name
,
2509 const char *lxcpath
)
2511 __do_free
char *path
= NULL
;
2512 __do_free
char *controller
= NULL
;
2514 struct hierarchy
*h
;
2518 return ret_set_errno(-1, ENOENT
);
2520 controller
= must_copy_string(filename
);
2521 p
= strchr(controller
, '.');
2525 path
= lxc_cmd_get_limiting_cgroup_path(name
, lxcpath
, controller
);
2530 h
= get_hierarchy(ops
, controller
);
2532 __do_free
char *fullpath
= NULL
;
2534 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, filename
);
2535 ret
= lxc_read_from_file(fullpath
, value
, len
);
2541 static int device_cgroup_parse_access(struct device_item
*device
, const char *val
)
2543 for (int count
= 0; count
< 3; count
++, val
++) {
2546 device
->access
[count
] = *val
;
2549 device
->access
[count
] = *val
;
2552 device
->access
[count
] = *val
;
2559 return ret_errno(EINVAL
);
2566 static int device_cgroup_rule_parse(struct device_item
*device
, const char *key
,
2572 if (strcmp("devices.allow", key
) == 0)
2577 if (strcmp(val
, "a") == 0) {
2582 device
->global_rule
= device
->allow
2583 ? LXC_BPF_DEVICE_CGROUP_DENYLIST
2584 : LXC_BPF_DEVICE_CGROUP_ALLOWLIST
;
2590 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
;
2598 device
->type
= *val
;
2611 } else if (isdigit(*val
)) {
2612 memset(temp
, 0, sizeof(temp
));
2613 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2619 ret
= lxc_safe_int(temp
, &device
->major
);
2633 } else if (isdigit(*val
)) {
2634 memset(temp
, 0, sizeof(temp
));
2635 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2641 ret
= lxc_safe_int(temp
, &device
->minor
);
2650 return device_cgroup_parse_access(device
, ++val
);
2653 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2654 * don't have a cgroup_data set up, so we ask the running container through the
2655 * commands API for the cgroup path.
2657 __cgfsng_ops
static int cgfsng_set(struct cgroup_ops
*ops
,
2658 const char *key
, const char *value
,
2659 const char *name
, const char *lxcpath
)
2661 __do_free
char *path
= NULL
;
2662 __do_free
char *controller
= NULL
;
2664 struct hierarchy
*h
;
2667 if (!ops
|| !key
|| !value
|| !name
|| !lxcpath
)
2668 return ret_errno(ENOENT
);
2670 controller
= must_copy_string(key
);
2671 p
= strchr(controller
, '.');
2675 if (pure_unified_layout(ops
) && strcmp(controller
, "devices") == 0) {
2676 struct device_item device
= {};
2678 ret
= device_cgroup_rule_parse(&device
, key
, value
);
2680 return log_error_errno(-1, EINVAL
, "Failed to parse device string %s=%s",
2683 ret
= lxc_cmd_add_bpf_device_cgroup(name
, lxcpath
, &device
);
2690 path
= lxc_cmd_get_limiting_cgroup_path(name
, lxcpath
, controller
);
2695 h
= get_hierarchy(ops
, controller
);
2697 __do_free
char *fullpath
= NULL
;
2699 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, key
);
2700 ret
= lxc_write_to_file(fullpath
, value
, strlen(value
), false, 0666);
2706 /* take devices cgroup line
2708 * and convert it to a valid
2709 * type major:minor mode
2710 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2713 static int device_cgroup_rule_parse_devpath(struct device_item
*device
,
2714 const char *devpath
)
2716 __do_free
char *path
= NULL
;
2722 path
= must_copy_string(devpath
);
2725 * Read path followed by mode. Ignore any trailing text.
2726 * A ' # comment' would be legal. Technically other text is not
2727 * legal, we could check for that if we cared to.
2729 for (n_parts
= 1, p
= path
; *p
; p
++) {
2745 return ret_set_errno(-1, EINVAL
);
2749 return ret_errno(EINVAL
);
2751 if (device_cgroup_parse_access(device
, mode
) < 0)
2754 ret
= stat(path
, &sb
);
2756 return ret_set_errno(-1, errno
);
2758 mode_t m
= sb
.st_mode
& S_IFMT
;
2767 return log_error_errno(-1, EINVAL
, "Unsupported device type %i for \"%s\"", m
, path
);
2770 device
->major
= MAJOR(sb
.st_rdev
);
2771 device
->minor
= MINOR(sb
.st_rdev
);
2773 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
;
2778 static int convert_devpath(const char *invalue
, char *dest
)
2780 struct device_item device
= {};
2783 ret
= device_cgroup_rule_parse_devpath(&device
, invalue
);
2787 ret
= snprintf(dest
, 50, "%c %d:%d %s", device
.type
, device
.major
,
2788 device
.minor
, device
.access
);
2789 if (ret
< 0 || ret
>= 50)
2790 return log_error_errno(-1, ENAMETOOLONG
, "Error on configuration value \"%c %d:%d %s\" (max 50 chars)",
2791 device
.type
, device
.major
, device
.minor
, device
.access
);
2796 /* Called from setup_limits - here we have the container's cgroup_data because
2797 * we created the cgroups.
2799 static int cg_legacy_set_data(struct cgroup_ops
*ops
, const char *filename
,
2800 const char *value
, bool is_cpuset
)
2802 __do_free
char *controller
= NULL
;
2804 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2805 char converted_value
[50];
2806 struct hierarchy
*h
;
2808 controller
= must_copy_string(filename
);
2809 p
= strchr(controller
, '.');
2813 if (strcmp("devices.allow", filename
) == 0 && value
[0] == '/') {
2816 ret
= convert_devpath(value
, converted_value
);
2819 value
= converted_value
;
2822 h
= get_hierarchy(ops
, controller
);
2824 return log_error_errno(-ENOENT
, ENOENT
, "Failed to setup limits for the \"%s\" controller. The controller seems to be unused by \"cgfsng\" cgroup driver or not enabled on the cgroup hierarchy", controller
);
2827 int ret
= lxc_write_openat(h
->container_full_path
, filename
, value
, strlen(value
));
2831 return lxc_write_openat(h
->container_limit_path
, filename
, value
, strlen(value
));
2834 __cgfsng_ops
static bool cgfsng_setup_limits_legacy(struct cgroup_ops
*ops
,
2835 struct lxc_conf
*conf
,
2838 __do_free
struct lxc_list
*sorted_cgroup_settings
= NULL
;
2839 struct lxc_list
*cgroup_settings
= &conf
->cgroup
;
2840 struct lxc_list
*iterator
, *next
;
2841 struct lxc_cgroup
*cg
;
2845 return ret_set_errno(false, ENOENT
);
2848 return ret_set_errno(false, EINVAL
);
2850 cgroup_settings
= &conf
->cgroup
;
2851 if (lxc_list_empty(cgroup_settings
))
2854 if (!ops
->hierarchies
)
2855 return ret_set_errno(false, EINVAL
);
2857 if (pure_unified_layout(ops
))
2858 return log_warn_errno(true, EINVAL
, "Ignoring legacy cgroup limits on pure cgroup2 system");
2860 sorted_cgroup_settings
= sort_cgroup_settings(cgroup_settings
);
2861 if (!sorted_cgroup_settings
)
2864 lxc_list_for_each(iterator
, sorted_cgroup_settings
) {
2865 cg
= iterator
->elem
;
2867 if (do_devices
== !strncmp("devices", cg
->subsystem
, 7)) {
2868 if (cg_legacy_set_data(ops
, cg
->subsystem
, cg
->value
, strncmp("cpuset", cg
->subsystem
, 6) == 0)) {
2869 if (do_devices
&& (errno
== EACCES
|| errno
== EPERM
)) {
2870 SYSWARN("Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2873 SYSERROR("Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2876 DEBUG("Set controller \"%s\" set to \"%s\"", cg
->subsystem
, cg
->value
);
2881 INFO("Limits for the legacy cgroup hierarchies have been setup");
2883 lxc_list_for_each_safe(iterator
, sorted_cgroup_settings
, next
) {
2884 lxc_list_del(iterator
);
2892 * Some of the parsing logic comes from the original cgroup device v1
2893 * implementation in the kernel.
2895 static int bpf_device_cgroup_prepare(struct cgroup_ops
*ops
,
2896 struct lxc_conf
*conf
, const char *key
,
2899 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
2900 struct device_item device_item
= {};
2903 if (strcmp("devices.allow", key
) == 0 && *val
== '/')
2904 ret
= device_cgroup_rule_parse_devpath(&device_item
, val
);
2906 ret
= device_cgroup_rule_parse(&device_item
, key
, val
);
2908 return log_error_errno(-1, EINVAL
, "Failed to parse device string %s=%s", key
, val
);
2910 ret
= bpf_list_add_device(conf
, &device_item
);
2917 __cgfsng_ops
static bool cgfsng_setup_limits(struct cgroup_ops
*ops
,
2918 struct lxc_handler
*handler
)
2920 struct lxc_list
*cgroup_settings
, *iterator
;
2921 struct hierarchy
*h
;
2922 struct lxc_conf
*conf
;
2925 return ret_set_errno(false, ENOENT
);
2927 if (!ops
->hierarchies
)
2930 if (!ops
->container_cgroup
)
2931 return ret_set_errno(false, EINVAL
);
2933 if (!handler
|| !handler
->conf
)
2934 return ret_set_errno(false, EINVAL
);
2935 conf
= handler
->conf
;
2937 cgroup_settings
= &conf
->cgroup2
;
2938 if (lxc_list_empty(cgroup_settings
))
2941 if (!pure_unified_layout(ops
))
2942 return log_warn_errno(true, EINVAL
, "Ignoring cgroup2 limits on legacy cgroup system");
2948 lxc_list_for_each (iterator
, cgroup_settings
) {
2949 struct lxc_cgroup
*cg
= iterator
->elem
;
2952 if (strncmp("devices", cg
->subsystem
, 7) == 0)
2953 ret
= bpf_device_cgroup_prepare(ops
, conf
, cg
->subsystem
, cg
->value
);
2955 ret
= lxc_write_openat(h
->container_limit_path
, cg
->subsystem
, cg
->value
, strlen(cg
->value
));
2957 return log_error_errno(false, errno
, "Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2959 TRACE("Set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2962 return log_info(true, "Limits for the unified cgroup hierarchy have been setup");
2965 __cgfsng_ops
static bool cgfsng_devices_activate(struct cgroup_ops
*ops
, struct lxc_handler
*handler
)
2967 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
2968 __do_bpf_program_free
struct bpf_program
*prog
= NULL
;
2970 struct lxc_conf
*conf
;
2971 struct hierarchy
*unified
;
2972 struct lxc_list
*it
;
2973 struct bpf_program
*prog_old
;
2976 return ret_set_errno(false, ENOENT
);
2978 if (!ops
->hierarchies
)
2981 if (!ops
->container_cgroup
)
2982 return ret_set_errno(false, EEXIST
);
2984 if (!handler
|| !handler
->conf
)
2985 return ret_set_errno(false, EINVAL
);
2986 conf
= handler
->conf
;
2988 unified
= ops
->unified
;
2989 if (!unified
|| !unified
->bpf_device_controller
||
2990 !unified
->container_full_path
|| lxc_list_empty(&conf
->devices
))
2993 prog
= bpf_program_new(BPF_PROG_TYPE_CGROUP_DEVICE
);
2995 return log_error_errno(false, ENOMEM
, "Failed to create new bpf program");
2997 ret
= bpf_program_init(prog
);
2999 return log_error_errno(false, ENOMEM
, "Failed to initialize bpf program");
3001 lxc_list_for_each(it
, &conf
->devices
) {
3002 struct device_item
*cur
= it
->elem
;
3004 ret
= bpf_program_append_device(prog
, cur
);
3006 return log_error_errno(false, ENOMEM
, "Failed to add new rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
3013 TRACE("Added rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
3022 ret
= bpf_program_finalize(prog
);
3024 return log_error_errno(false, ENOMEM
, "Failed to finalize bpf program");
3026 ret
= bpf_program_cgroup_attach(prog
, BPF_CGROUP_DEVICE
,
3027 unified
->container_limit_path
,
3030 return log_error_errno(false, ENOMEM
, "Failed to attach bpf program");
3032 /* Replace old bpf program. */
3033 prog_old
= move_ptr(ops
->cgroup2_devices
);
3034 ops
->cgroup2_devices
= move_ptr(prog
);
3035 prog
= move_ptr(prog_old
);
3040 static bool __cgfsng_delegate_controllers(struct cgroup_ops
*ops
, const char *cgroup
)
3042 __do_free
char *add_controllers
= NULL
, *base_path
= NULL
;
3043 __do_free_string_list
char **parts
= NULL
;
3044 struct hierarchy
*unified
= ops
->unified
;
3047 size_t full_len
= 0;
3049 if (!ops
->hierarchies
|| !pure_unified_layout(ops
) ||
3050 !unified
->controllers
[0])
3053 /* For now we simply enable all controllers that we have detected by
3054 * creating a string like "+memory +pids +cpu +io".
3055 * TODO: In the near future we might want to support "-<controller>"
3056 * etc. but whether supporting semantics like this make sense will need
3059 for (it
= unified
->controllers
; it
&& *it
; it
++) {
3060 full_len
+= strlen(*it
) + 2;
3061 add_controllers
= must_realloc(add_controllers
, full_len
+ 1);
3063 if (unified
->controllers
[0] == *it
)
3064 add_controllers
[0] = '\0';
3066 (void)strlcat(add_controllers
, "+", full_len
+ 1);
3067 (void)strlcat(add_controllers
, *it
, full_len
+ 1);
3069 if ((it
+ 1) && *(it
+ 1))
3070 (void)strlcat(add_controllers
, " ", full_len
+ 1);
3073 parts
= lxc_string_split(cgroup
, '/');
3077 parts_len
= lxc_array_len((void **)parts
);
3081 base_path
= must_make_path(unified
->mountpoint
, unified
->container_base_path
, NULL
);
3082 for (ssize_t i
= -1; i
< parts_len
; i
++) {
3084 __do_free
char *target
= NULL
;
3087 base_path
= must_append_path(base_path
, parts
[i
], NULL
);
3088 target
= must_make_path(base_path
, "cgroup.subtree_control", NULL
);
3089 ret
= lxc_writeat(-1, target
, add_controllers
, full_len
);
3091 return log_error_errno(false, errno
, "Could not enable \"%s\" controllers in the unified cgroup \"%s\"",
3092 add_controllers
, target
);
3093 TRACE("Enable \"%s\" controllers in the unified cgroup \"%s\"", add_controllers
, target
);
3099 __cgfsng_ops
static bool cgfsng_monitor_delegate_controllers(struct cgroup_ops
*ops
)
3102 return ret_set_errno(false, ENOENT
);
3104 return __cgfsng_delegate_controllers(ops
, ops
->monitor_cgroup
);
3107 __cgfsng_ops
static bool cgfsng_payload_delegate_controllers(struct cgroup_ops
*ops
)
3110 return ret_set_errno(false, ENOENT
);
3112 return __cgfsng_delegate_controllers(ops
, ops
->container_cgroup
);
3115 static bool cgroup_use_wants_controllers(const struct cgroup_ops
*ops
,
3118 if (!ops
->cgroup_use
)
3121 for (char **cur_ctrl
= controllers
; cur_ctrl
&& *cur_ctrl
; cur_ctrl
++) {
3124 for (char **cur_use
= ops
->cgroup_use
; cur_use
&& *cur_use
; cur_use
++) {
3125 if (strcmp(*cur_use
, *cur_ctrl
) != 0)
3141 static void cg_unified_delegate(char ***delegate
)
3143 __do_free
char *buf
= NULL
;
3144 char *standard
[] = {"cgroup.subtree_control", "cgroup.threads", NULL
};
3148 buf
= read_file_at(-EBADF
, "/sys/kernel/cgroup/delegate");
3150 for (char **p
= standard
; p
&& *p
; p
++) {
3151 idx
= append_null_to_list((void ***)delegate
);
3152 (*delegate
)[idx
] = must_copy_string(*p
);
3154 SYSWARN("Failed to read /sys/kernel/cgroup/delegate");
3158 lxc_iterate_parts(token
, buf
, " \t\n") {
3160 * We always need to chown this for both cgroup and
3163 if (strcmp(token
, "cgroup.procs") == 0)
3166 idx
= append_null_to_list((void ***)delegate
);
3167 (*delegate
)[idx
] = must_copy_string(token
);
3171 /* At startup, parse_hierarchies finds all the info we need about cgroup
3172 * mountpoints and current cgroups, and stores it in @d.
3174 static int cg_hybrid_init(struct cgroup_ops
*ops
, bool relative
, bool unprivileged
)
3176 __do_free
char *basecginfo
= NULL
, *line
= NULL
;
3177 __do_free_string_list
char **klist
= NULL
, **nlist
= NULL
;
3178 __do_fclose
FILE *f
= NULL
;
3182 /* Root spawned containers escape the current cgroup, so use init's
3183 * cgroups as our base in that case.
3185 if (!relative
&& (geteuid() == 0))
3186 basecginfo
= read_file_at(-EBADF
, "/proc/1/cgroup");
3188 basecginfo
= read_file_at(-EBADF
, "/proc/self/cgroup");
3190 return ret_set_errno(-1, ENOMEM
);
3192 ret
= get_existing_subsystems(&klist
, &nlist
);
3194 return log_error_errno(-1, errno
, "Failed to retrieve available legacy cgroup controllers");
3196 f
= fopen("/proc/self/mountinfo", "re");
3198 return log_error_errno(-1, errno
, "Failed to open \"/proc/self/mountinfo\"");
3200 lxc_cgfsng_print_basecg_debuginfo(basecginfo
, klist
, nlist
);
3202 while (getline(&line
, &len
, f
) != -1) {
3203 __do_free
char *base_cgroup
= NULL
, *mountpoint
= NULL
;
3204 __do_free_string_list
char **controller_list
= NULL
;
3207 struct hierarchy
*new;
3209 type
= get_cgroup_version(line
);
3213 if (type
== CGROUP2_SUPER_MAGIC
&& ops
->unified
)
3216 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNKNOWN
) {
3217 if (type
== CGROUP2_SUPER_MAGIC
)
3218 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
3219 else if (type
== CGROUP_SUPER_MAGIC
)
3220 ops
->cgroup_layout
= CGROUP_LAYOUT_LEGACY
;
3221 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
) {
3222 if (type
== CGROUP_SUPER_MAGIC
)
3223 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
3224 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_LEGACY
) {
3225 if (type
== CGROUP2_SUPER_MAGIC
)
3226 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
3229 controller_list
= cg_hybrid_get_controllers(klist
, nlist
, line
, type
);
3230 if (!controller_list
&& type
== CGROUP_SUPER_MAGIC
)
3233 if (type
== CGROUP_SUPER_MAGIC
)
3234 if (controller_list_is_dup(ops
->hierarchies
, controller_list
)) {
3235 TRACE("Skipping duplicating controller");
3239 mountpoint
= cg_hybrid_get_mountpoint(line
);
3241 WARN("Failed parsing mountpoint from \"%s\"", line
);
3245 if (type
== CGROUP_SUPER_MAGIC
)
3246 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, controller_list
[0], CGROUP_SUPER_MAGIC
);
3248 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, NULL
, CGROUP2_SUPER_MAGIC
);
3250 WARN("Failed to find current cgroup");
3255 prune_init_scope(base_cgroup
);
3256 if (type
== CGROUP2_SUPER_MAGIC
)
3257 writeable
= test_writeable_v2(mountpoint
, base_cgroup
);
3259 writeable
= test_writeable_v1(mountpoint
, base_cgroup
);
3261 TRACE("The %s group is not writeable", base_cgroup
);
3265 if (type
== CGROUP2_SUPER_MAGIC
) {
3266 char *cgv2_ctrl_path
;
3268 cgv2_ctrl_path
= must_make_path(mountpoint
, base_cgroup
,
3269 "cgroup.controllers",
3272 controller_list
= cg_unified_get_controllers(-EBADF
, cgv2_ctrl_path
);
3273 free(cgv2_ctrl_path
);
3274 if (!controller_list
) {
3275 controller_list
= cg_unified_make_empty_controller();
3276 TRACE("No controllers are enabled for "
3277 "delegation in the unified hierarchy");
3281 /* Exclude all controllers that cgroup use does not want. */
3282 if (!cgroup_use_wants_controllers(ops
, controller_list
)) {
3283 TRACE("Skipping controller");
3287 new = add_hierarchy(&ops
->hierarchies
, move_ptr(controller_list
), move_ptr(mountpoint
), move_ptr(base_cgroup
), type
);
3289 return log_error_errno(-1, errno
, "Failed to add cgroup hierarchy");
3290 if (type
== CGROUP2_SUPER_MAGIC
&& !ops
->unified
) {
3292 cg_unified_delegate(&new->cgroup2_chown
);
3297 TRACE("Writable cgroup hierarchies:");
3298 lxc_cgfsng_print_hierarchies(ops
);
3300 /* verify that all controllers in cgroup.use and all crucial
3301 * controllers are accounted for
3303 if (!all_controllers_found(ops
))
3304 return log_error_errno(-1, ENOENT
, "Failed to find all required controllers");
3309 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
3310 static char *cg_unified_get_current_cgroup(bool relative
)
3312 __do_free
char *basecginfo
= NULL
;
3316 if (!relative
&& (geteuid() == 0))
3317 basecginfo
= read_file_at(-EBADF
, "/proc/1/cgroup");
3319 basecginfo
= read_file_at(-EBADF
, "/proc/self/cgroup");
3323 base_cgroup
= strstr(basecginfo
, "0::/");
3327 base_cgroup
= base_cgroup
+ 3;
3328 copy
= copy_to_eol(base_cgroup
);
3335 static int cg_unified_init(struct cgroup_ops
*ops
, bool relative
,
3338 __do_close
int cgroup_root_fd
= -EBADF
;
3339 __do_free
char *base_cgroup
= NULL
, *controllers_path
= NULL
;
3340 __do_free_string_list
char **delegatable
;
3341 __do_free
struct hierarchy
*new = NULL
;
3344 ret
= unified_cgroup_hierarchy();
3345 if (ret
== -ENOMEDIUM
)
3346 return ret_errno(ENOMEDIUM
);
3348 if (ret
!= CGROUP2_SUPER_MAGIC
)
3351 base_cgroup
= cg_unified_get_current_cgroup(relative
);
3353 return ret_errno(EINVAL
);
3355 prune_init_scope(base_cgroup
);
3357 cgroup_root_fd
= openat(-EBADF
, DEFAULT_CGROUP_MOUNTPOINT
,
3358 O_NOCTTY
| O_CLOEXEC
| O_NOFOLLOW
| O_DIRECTORY
);
3359 if (cgroup_root_fd
< 0)
3363 * We assume that the cgroup we're currently in has been delegated to
3364 * us and we are free to further delege all of the controllers listed
3365 * in cgroup.controllers further down the hierarchy.
3367 controllers_path
= must_make_path_relative(base_cgroup
, "cgroup.controllers", NULL
);
3368 delegatable
= cg_unified_get_controllers(cgroup_root_fd
, controllers_path
);
3370 delegatable
= cg_unified_make_empty_controller();
3371 if (!delegatable
[0])
3372 TRACE("No controllers are enabled for delegation");
3374 /* TODO: If the user requested specific controllers via lxc.cgroup.use
3375 * we should verify here. The reason I'm not doing it right is that I'm
3376 * not convinced that lxc.cgroup.use will be the future since it is a
3377 * global property. I much rather have an option that lets you request
3378 * controllers per container.
3381 new = add_hierarchy(&ops
->hierarchies
,
3382 move_ptr(delegatable
),
3383 must_copy_string(DEFAULT_CGROUP_MOUNTPOINT
),
3384 move_ptr(base_cgroup
),
3385 CGROUP2_SUPER_MAGIC
);
3387 return log_error_errno(-1, errno
, "Failed to add unified cgroup hierarchy");
3390 cg_unified_delegate(&new->cgroup2_chown
);
3392 if (bpf_devices_cgroup_supported())
3393 new->bpf_device_controller
= 1;
3395 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
3396 ops
->unified
= move_ptr(new);
3398 return CGROUP2_SUPER_MAGIC
;
3401 static int cg_init(struct cgroup_ops
*ops
, struct lxc_conf
*conf
)
3405 bool relative
= conf
->cgroup_meta
.relative
;
3407 tmp
= lxc_global_config_value("lxc.cgroup.use");
3409 __do_free
char *pin
= NULL
;
3412 pin
= must_copy_string(tmp
);
3415 lxc_iterate_parts(cur
, chop
, ",")
3416 must_append_string(&ops
->cgroup_use
, cur
);
3419 ret
= cg_unified_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
3423 if (ret
== CGROUP2_SUPER_MAGIC
)
3426 return cg_hybrid_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
3429 __cgfsng_ops
static int cgfsng_data_init(struct cgroup_ops
*ops
)
3431 const char *cgroup_pattern
;
3434 return ret_set_errno(-1, ENOENT
);
3436 /* copy system-wide cgroup information */
3437 cgroup_pattern
= lxc_global_config_value("lxc.cgroup.pattern");
3438 if (cgroup_pattern
&& strcmp(cgroup_pattern
, "") != 0)
3439 ops
->cgroup_pattern
= must_copy_string(cgroup_pattern
);
3444 struct cgroup_ops
*cgfsng_ops_init(struct lxc_conf
*conf
)
3446 __do_free
struct cgroup_ops
*cgfsng_ops
= NULL
;
3448 cgfsng_ops
= zalloc(sizeof(struct cgroup_ops
));
3450 return ret_set_errno(NULL
, ENOMEM
);
3452 cgfsng_ops
->cgroup_layout
= CGROUP_LAYOUT_UNKNOWN
;
3454 if (cg_init(cgfsng_ops
, conf
))
3457 cgfsng_ops
->data_init
= cgfsng_data_init
;
3458 cgfsng_ops
->payload_destroy
= cgfsng_payload_destroy
;
3459 cgfsng_ops
->monitor_destroy
= cgfsng_monitor_destroy
;
3460 cgfsng_ops
->monitor_create
= cgfsng_monitor_create
;
3461 cgfsng_ops
->monitor_enter
= cgfsng_monitor_enter
;
3462 cgfsng_ops
->monitor_delegate_controllers
= cgfsng_monitor_delegate_controllers
;
3463 cgfsng_ops
->payload_delegate_controllers
= cgfsng_payload_delegate_controllers
;
3464 cgfsng_ops
->payload_create
= cgfsng_payload_create
;
3465 cgfsng_ops
->payload_enter
= cgfsng_payload_enter
;
3466 cgfsng_ops
->payload_finalize
= cgfsng_payload_finalize
;
3467 cgfsng_ops
->escape
= cgfsng_escape
;
3468 cgfsng_ops
->num_hierarchies
= cgfsng_num_hierarchies
;
3469 cgfsng_ops
->get_hierarchies
= cgfsng_get_hierarchies
;
3470 cgfsng_ops
->get_cgroup
= cgfsng_get_cgroup
;
3471 cgfsng_ops
->get
= cgfsng_get
;
3472 cgfsng_ops
->set
= cgfsng_set
;
3473 cgfsng_ops
->freeze
= cgfsng_freeze
;
3474 cgfsng_ops
->unfreeze
= cgfsng_unfreeze
;
3475 cgfsng_ops
->setup_limits_legacy
= cgfsng_setup_limits_legacy
;
3476 cgfsng_ops
->setup_limits
= cgfsng_setup_limits
;
3477 cgfsng_ops
->driver
= "cgfsng";
3478 cgfsng_ops
->version
= "1.0.0";
3479 cgfsng_ops
->attach
= cgfsng_attach
;
3480 cgfsng_ops
->chown
= cgfsng_chown
;
3481 cgfsng_ops
->mount
= cgfsng_mount
;
3482 cgfsng_ops
->devices_activate
= cgfsng_devices_activate
;
3483 cgfsng_ops
->get_limiting_cgroup
= cgfsng_get_limiting_cgroup
;
3485 return move_ptr(cgfsng_ops
);