1 /* SPDX-License-Identifier: LGPL-2.1+ */
4 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
5 * cgroup backend. The original cgfs.c was designed to be as flexible
6 * as possible. It would try to find cgroup filesystems no matter where
7 * or how you had them mounted, and deduce the most usable mount for
10 * This new implementation assumes that cgroup filesystems are mounted
11 * under /sys/fs/cgroup/clist where clist is either the controller, or
12 * a comma-separated list of controllers.
22 #include <linux/kdev_t.h>
23 #include <linux/types.h>
30 #include <sys/epoll.h>
31 #include <sys/types.h>
37 #include "cgroup2_devices.h"
38 #include "cgroup_utils.h"
40 #include "commands_utils.h"
46 #include "memory_utils.h"
47 #include "mount_utils.h"
48 #include "storage/storage.h"
49 #include "string_utils.h"
50 #include "syscall_wrappers.h"
54 #include "include/strlcpy.h"
58 #include "include/strlcat.h"
61 lxc_log_define(cgfsng
, cgroup
);
63 /* Given a pointer to a null-terminated array of pointers, realloc to add one
64 * entry, and point the new entry to NULL. Do not fail. Return the index to the
65 * second-to-last entry - that is, the one which is now available for use
66 * (keeping the list null-terminated).
/* NOTE(review): chunked extract — the original lines declaring newentry,
 * the braces and the return statement are missing from this view. */
68 static int append_null_to_list(void ***list
)
/* Walk to the current NULL terminator to find the next free slot. */
73 for (; (*list
)[newentry
]; newentry
++)
/* Grow by one usable slot (+2: new entry plus NULL terminator); must_realloc
 * aborts on OOM, hence "do not fail". */
76 *list
= must_realloc(*list
, (newentry
+ 2) * sizeof(void **));
77 (*list
)[newentry
+ 1] = NULL
;
81 /* Given a null-terminated array of strings, check whether @entry is one of the
/* NOTE(review): presumably returns true on a strequal() match and false
 * otherwise — the return lines are missing from this chunked extract. */
84 static bool string_in_list(char **list
, const char *entry
)
89 for (int i
= 0; list
[i
]; i
++)
90 if (strequal(list
[i
], entry
))
96 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
97 * "name=systemd". Do not fail.
/* NOTE(review): len is presumably strlen(entry); its declaration and the
 * return of prefixed are missing from this chunked extract.
 * len + 6 = len + STRLITERALLEN("name=") + 1 for the terminating NUL. */
99 static char *cg_legacy_must_prefix_named(char *entry
)
105 prefixed
= must_realloc(NULL
, len
+ 6);
107 memcpy(prefixed
, "name=", STRLITERALLEN("name="));
108 memcpy(prefixed
+ STRLITERALLEN("name="), entry
, len
);
/* Index len + 5 is one past "name=" + entry: NUL-terminate the copy. */
109 prefixed
[len
+ 5] = '\0';
114 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
117 * We also handle named subsystems here. Any controller which is not a kernel
118 * subsystem, we prefix "name=". Any which is both a kernel and named subsystem,
119 * we refuse to use because we're not sure which we have here.
120 * (TODO: We could work around this in some cases by just remounting to be
121 * unambiguous, or by comparing mountpoint contents with current cgroup.)
123 * The last entry will always be NULL.
/* NOTE(review): chunked extract — braces and the early return after the
 * ambiguity errors are missing from this view. */
125 static void must_append_controller(char **klist
, char **nlist
, char ***clist
,
/* Refuse entries that appear in both the kernel and named subsystem lists. */
131 if (string_in_list(klist
, entry
) && string_in_list(nlist
, entry
)) {
132 ERROR("Refusing to use ambiguous controller \"%s\"", entry
);
133 ERROR("It is both a named and kernel subsystem");
137 newentry
= append_null_to_list((void ***)clist
);
/* Already "name="-prefixed or a kernel subsystem: copy verbatim;
 * otherwise prefix "name=" for the named subsystem. */
139 if (strnequal(entry
, "name=", 5))
140 copy
= must_copy_string(entry
);
141 else if (string_in_list(klist
, entry
))
142 copy
= must_copy_string(entry
);
144 copy
= cg_legacy_must_prefix_named(entry
);
146 (*clist
)[newentry
] = copy
;
149 /* Given a handler's cgroup data, return the struct hierarchy for the controller
150 * @c, or NULL if there is none.
/* NOTE(review): chunked extract — braces, several return statements and the
 * branch structure around the pure-unified special cases are missing here. */
152 static struct hierarchy
*get_hierarchy(struct cgroup_ops
*ops
, const char *controller
)
154 if (!ops
->hierarchies
)
155 return log_trace_errno(NULL
, errno
, "There are no useable cgroup controllers");
157 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
159 /* This is the empty unified hierarchy. */
160 if (ops
->hierarchies
[i
]->controllers
&& !ops
->hierarchies
[i
]->controllers
[0])
161 return ops
->hierarchies
[i
];
167 * Handle controllers with significant implementation changes
168 * from cgroup to cgroup2.
/* On a pure cgroup2 layout "devices" and "freezer" have no dedicated
 * hierarchy; they are modelled via bpf_device_controller and
 * freezer_controller flags on the unified hierarchy. */
170 if (pure_unified_layout(ops
)) {
171 if (strequal(controller
, "devices")) {
172 if (ops
->unified
->bpf_device_controller
)
176 } else if (strequal(controller
, "freezer")) {
177 if (ops
->unified
->freezer_controller
)
184 if (string_in_list(ops
->hierarchies
[i
]->controllers
, controller
))
185 return ops
->hierarchies
[i
];
189 WARN("There is no useable %s controller", controller
);
191 WARN("There is no empty unified cgroup hierarchy");
193 return ret_set_errno(NULL
, ENOENT
);
/* Taken over modified from the kernel sources. */
#define NBITS 32 /* bits in uint32_t */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)

/* Set bit @bit in the bit array @bitarr.
 * Use an unsigned 32-bit constant for the shift: "1 << 31" shifts a signed
 * int into the sign bit, which is undefined behavior in C. */
static void set_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] |= (UINT32_C(1) << (bit % NBITS));
}

/* Clear bit @bit in the bit array @bitarr. */
static void clear_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] &= ~(UINT32_C(1) << (bit % NBITS));
}

/* Return whether bit @bit is set in the bit array @bitarr. */
static bool is_set(unsigned bit, uint32_t *bitarr)
{
	return (bitarr[bit / NBITS] & (UINT32_C(1) << (bit % NBITS))) != 0;
}
216 /* Create cpumask from cpulist aka turn:
/* NOTE(review): chunked extract — the range-validation branches and parts of
 * the token loop ("0-3" style ranges) are missing from this view. Parses a
 * comma-separated cpulist (single cpus and "a-b" ranges) into a bit array of
 * @nbits bits; returns NULL with errno set on allocation or parse failure. */
224 static uint32_t *lxc_cpumask(char *buf
, size_t nbits
)
226 __do_free
uint32_t *bitarr
= NULL
;
230 arrlen
= BITS_TO_LONGS(nbits
);
231 bitarr
= calloc(arrlen
, sizeof(uint32_t));
233 return ret_set_errno(NULL
, ENOMEM
);
235 lxc_iterate_parts(token
, buf
, ",") {
240 start
= strtoul(token
, NULL
, 0);
242 range
= strchr(token
, '-');
244 end
= strtoul(range
+ 1, NULL
, 0);
247 return ret_set_errno(NULL
, EINVAL
);
250 return ret_set_errno(NULL
, EINVAL
);
253 set_bit(start
++, bitarr
);
256 return move_ptr(bitarr
);
259 /* Turn cpumask into simple, comma-separated cpulist. */
/* NOTE(review): chunked extract — error-check lines between the strnprintf
 * and lxc_append_string calls are missing from this view. */
260 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr
, size_t nbits
)
262 __do_free_string_list
char **cpulist
= NULL
;
263 char numstr
[INTTYPE_TO_STRLEN(size_t)] = {0};
/* Collect every set bit index as a decimal string, then join with ','. */
266 for (size_t i
= 0; i
<= nbits
; i
++) {
267 if (!is_set(i
, bitarr
))
270 ret
= strnprintf(numstr
, sizeof(numstr
), "%zu", i
);
274 ret
= lxc_append_string(&cpulist
, numstr
);
276 return ret_set_errno(NULL
, ENOMEM
);
280 return ret_set_errno(NULL
, ENOMEM
);
282 return lxc_string_join(",", (const char **)cpulist
, false);
/* NOTE(review): presumably returns the highest cpu number mentioned in
 * @cpulist by inspecting the last ','- and '-'-delimited token; most of the
 * c1/c2 comparison logic is missing from this chunked extract — confirm
 * against the full source. */
285 static ssize_t
get_max_cpus(char *cpulist
)
288 char *maxcpus
= cpulist
;
/* Last comma-separated entry... */
291 c1
= strrchr(maxcpus
, ',');
/* ...and last range separator within it. */
295 c2
= strrchr(maxcpus
, '-');
309 cpus
= strtoul(c1
, NULL
, 0);
/* Return whether hierarchy @h is a cgroup2 (unified) hierarchy. */
316 static inline bool is_unified_hierarchy(const struct hierarchy
*h
)
318 return h
->version
== CGROUP2_SUPER_MAGIC
;
321 /* Given two null-terminated lists of strings, return true if any string is in
/* NOTE(review): chunked extract — the return true/false lines of these three
 * predicates are missing from this view. */
324 static bool controller_lists_intersect(char **l1
, char **l2
)
329 for (int i
= 0; l1
[i
]; i
++)
330 if (string_in_list(l2
, l1
[i
]))
336 /* For a null-terminated list of controllers @clist, return true if any of those
337 * controllers is already listed the null-terminated list of hierarchies @hlist.
338 * Realistically, if one is present, all must be present.
340 static bool controller_list_is_dup(struct hierarchy
**hlist
, char **clist
)
345 for (int i
= 0; hlist
[i
]; i
++)
346 if (controller_lists_intersect(hlist
[i
]->controllers
, clist
))
352 /* Return true if the controller @entry is found in the null-terminated list of
353 * hierarchies @hlist.
355 static bool controller_found(struct hierarchy
**hlist
, char *entry
)
360 for (int i
= 0; hlist
[i
]; i
++)
361 if (string_in_list(hlist
[i
]->controllers
, entry
))
367 /* Return true if all of the controllers which we require have been found. The
368 * required list is freezer and anything in lxc.cgroup.use.
/* NOTE(review): chunked extract — braces and the final "return true" are
 * missing. Trivially true when lxc.cgroup.use is unset. */
370 static bool all_controllers_found(struct cgroup_ops
*ops
)
372 struct hierarchy
**hlist
;
374 if (!ops
->cgroup_use
)
377 hlist
= ops
->hierarchies
;
378 for (char **cur
= ops
->cgroup_use
; cur
&& *cur
; cur
++)
379 if (!controller_found(hlist
, *cur
))
380 return log_error(false, "No %s controller mountpoint found", *cur
);
385 /* Get the controllers from a mountinfo line There are other ways we could get
386 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
387 * could parse the mount options. But we simply assume that the mountpoint must
388 * be /sys/fs/cgroup/controller-list
/* NOTE(review): chunked extract — the field-skipping loop body and the
 * cgroup2 branch are partially missing from this view. The "15" below is
 * presumably STRLITERALLEN(DEFAULT_CGROUP_MOUNTPOINT "/") — confirm. */
390 static char **cg_hybrid_get_controllers(char **klist
, char **nlist
, char *line
,
393 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
394 * for legacy hierarchies.
396 __do_free_string_list
char **aret
= NULL
;
399 char *p
= line
, *sep
= ",";
/* Skip the first four space-separated mountinfo fields. */
401 for (i
= 0; i
< 4; i
++) {
408 /* Note, if we change how mountinfo works, then our caller will need to
409 * verify /sys/fs/cgroup/ in this field.
411 if (!strnequal(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15))
412 return log_warn(NULL
, "Found hierarchy not under " DEFAULT_CGROUP_MOUNTPOINT
": \"%s\"", p
);
417 return log_error(NULL
, "Corrupt mountinfo");
420 if (type
== CGROUP_SUPER_MAGIC
) {
421 __do_free
char *dup
= NULL
;
423 /* strdup() here for v1 hierarchies. Otherwise
424 * lxc_iterate_parts() will destroy mountpoints such as
425 * "/sys/fs/cgroup/cpu,cpuacct".
427 dup
= must_copy_string(p
);
431 lxc_iterate_parts(tok
, dup
, sep
)
432 must_append_controller(klist
, nlist
, &aret
, tok
);
436 return move_ptr(aret
);
/* Return a freshly allocated, empty (NULL-terminated) controller list,
 * used for the unified hierarchy when no controllers are delegated. */
439 static char **cg_unified_make_empty_controller(void)
441 __do_free_string_list
char **aret
= NULL
;
444 newentry
= append_null_to_list((void ***)&aret
);
445 aret
[newentry
] = NULL
;
446 return move_ptr(aret
);
/* Read the space-separated controller list from @file (relative to @dfd,
 * e.g. "cgroup.controllers") and return it as a NULL-terminated string list.
 * NOTE(review): chunked extract — the sep declaration and the error path
 * after read_file_at are missing from this view. */
449 static char **cg_unified_get_controllers(int dfd
, const char *file
)
451 __do_free
char *buf
= NULL
;
452 __do_free_string_list
char **aret
= NULL
;
456 buf
= read_file_at(dfd
, file
, PROTECT_OPEN
, 0);
460 lxc_iterate_parts(tok
, buf
, sep
) {
464 newentry
= append_null_to_list((void ***)&aret
);
465 copy
= must_copy_string(tok
);
466 aret
[newentry
] = copy
;
469 return move_ptr(aret
);
/* Return whether the lxc.cgroup.use configuration wants (at least one of?)
 * the controllers in @controllers. NOTE(review): the loop exits and return
 * values are missing from this chunked extract — confirm exact semantics
 * against the full source. Trivially true when lxc.cgroup.use is unset. */
472 static bool cgroup_use_wants_controllers(const struct cgroup_ops
*ops
,
475 if (!ops
->cgroup_use
)
478 for (char **cur_ctrl
= controllers
; cur_ctrl
&& *cur_ctrl
; cur_ctrl
++) {
481 for (char **cur_use
= ops
->cgroup_use
; cur_use
&& *cur_use
; cur_use
++) {
482 if (!strequal(*cur_use
, *cur_ctrl
))
/* Build a struct hierarchy for the cgroup mounted at @mountpoint (with the
 * container's base cgroup at @container_base_path relative to it) and append
 * it to ops->hierarchies. For cgroup2 the controller list is read from
 * cgroup.controllers; @clist ownership transfers to the new hierarchy.
 * NOTE(review): chunked extract — error checks, branch conditions and the
 * final return are missing from this view. */
498 static int add_hierarchy(struct cgroup_ops
*ops
, char **clist
, char *mountpoint
,
499 char *container_base_path
, int type
)
501 __do_close
int dfd_base
= -EBADF
, dfd_mnt
= -EBADF
;
502 __do_free
struct hierarchy
*new = NULL
;
503 __do_free_string_list
char **controllers
= clist
;
506 if (abspath(container_base_path
))
507 return syserrno(-errno
, "Container base path must be relative to controller mount");
509 if (!controllers
&& type
!= CGROUP2_SUPER_MAGIC
)
510 return syserrno_set(-EINVAL
, "Empty controller list for non-unified cgroup hierarchy passed");
512 dfd_mnt
= open_at(-EBADF
, mountpoint
, PROTECT_OPATH_DIRECTORY
,
513 PROTECT_LOOKUP_ABSOLUTE_XDEV
, 0);
515 return syserrno(-errno
, "Failed to open %s", mountpoint
);
517 if (!is_empty_string(container_base_path
)) {
518 dfd_base
= open_at(dfd_mnt
, container_base_path
,
519 PROTECT_OPATH_DIRECTORY
,
520 PROTECT_LOOKUP_BENEATH_XDEV
, 0);
522 return syserrno(-errno
, "Failed to open %d(%s)", dfd_base
, container_base_path
);
527 * We assume that the cgroup we're currently in has been delegated to
528 * us and we are free to further delege all of the controllers listed
529 * in cgroup.controllers further down the hierarchy.
532 controllers
= cg_unified_get_controllers(dfd_mnt
, "cgroup.controllers");
534 controllers
= cg_unified_get_controllers(dfd_base
, "cgroup.controllers");
536 controllers
= cg_unified_make_empty_controller();
538 TRACE("No controllers are enabled for delegation");
541 /* Exclude all controllers that cgroup use does not want. */
542 if (!cgroup_use_wants_controllers(ops
, controllers
))
543 return log_trace(0, "Skipping cgroup hiearchy with non-requested controllers");
545 new = zalloc(sizeof(*new));
547 return ret_errno(ENOMEM
);
550 new->controllers
= move_ptr(controllers
);
551 new->mountpoint
= mountpoint
;
552 new->container_base_path
= container_base_path
;
553 new->cgfd_con
= -EBADF
;
554 new->cgfd_limit
= -EBADF
;
555 new->cgfd_mon
= -EBADF
;
557 TRACE("Adding cgroup hierarchy with mountpoint %s and base cgroup %s",
558 mountpoint
, container_base_path
);
559 for (char *const *it
= new->controllers
; it
&& *it
; it
++)
560 TRACE("The detected hierarchy contains the %s controller", *it
);
562 idx
= append_null_to_list((void ***)&ops
->hierarchies
);
564 new->dfd_base
= dfd_mnt
;
566 new->dfd_base
= move_fd(dfd_base
);
567 new->dfd_mnt
= move_fd(dfd_mnt
);
568 if (type
== CGROUP2_SUPER_MAGIC
)
570 (ops
->hierarchies
)[idx
] = move_ptr(new);
574 /* Get a copy of the mountpoint from @line, which is a line from
575 * /proc/self/mountinfo.
/* NOTE(review): chunked extract — the field-skipping loop body, len
 * computation and the NUL-termination/return of sret are missing here.
 * "+ 15" presumably skips "/sys/fs/cgroup/" — confirm. */
577 static char *cg_hybrid_get_mountpoint(char *line
)
579 char *p
= line
, *sret
= NULL
;
/* Skip the first four space-separated mountinfo fields. */
583 for (int i
= 0; i
< 4; i
++) {
590 if (!strnequal(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15))
593 p2
= strchr(p
+ 15, ' ');
599 sret
= must_realloc(NULL
, len
+ 1);
600 memcpy(sret
, p
, len
);
606 /* Given a multi-line string, return a null-terminated copy of the current line. */
/* NOTE(review): chunked extract — the NULL check for p2, len computation and
 * the NUL-termination/return of sret are missing from this view. */
607 static char *copy_to_eol(char *p
)
612 p2
= strchr(p
, '\n');
617 sret
= must_realloc(NULL
, len
+ 1);
618 memcpy(sret
, p
, len
);
624 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
625 * /proc/self/cgroup file. Check whether controller c is present.
/* NOTE(review): chunked extract — len computation, NUL-termination of tmp and
 * the return true/false lines are missing from this view. */
627 static bool controller_in_clist(char *cgline
, char *c
)
629 __do_free
char *tmp
= NULL
;
/* The comma-separated controller list ends at the next ':'. */
633 eol
= strchr(cgline
, ':');
638 tmp
= must_realloc(NULL
, len
+ 1);
639 memcpy(tmp
, cgline
, len
);
642 lxc_iterate_parts(tok
, tmp
, ",")
643 if (strequal(tok
, c
))
/* Strip trailing newlines from @s in place and return it.
 * NOTE(review): the len declaration and the truncation statement inside the
 * loop are missing from this chunked extract. */
649 static inline char *trim(char *s
)
654 while ((len
> 1) && (s
[len
- 1] == '\n'))
660 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
/* NOTE(review): chunked extract — the surrounding loop construct, NULL
 * checks after strchr() and several returns are missing from this view. */
663 static char *cg_hybrid_get_current_cgroup(bool relative
, char *basecginfo
,
664 char *controller
, int type
)
666 char *base_cgroup
= basecginfo
;
669 bool is_cgv2_base_cgroup
= false;
671 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
672 if ((type
== CGROUP2_SUPER_MAGIC
) && (*base_cgroup
== '0'))
673 is_cgv2_base_cgroup
= true;
675 base_cgroup
= strchr(base_cgroup
, ':');
680 if (is_cgv2_base_cgroup
|| (controller
&& controller_in_clist(base_cgroup
, controller
))) {
681 __do_free
char *copy
= NULL
;
/* Skip past the second ':' to reach the cgroup path field. */
683 base_cgroup
= strchr(base_cgroup
, ':');
688 copy
= copy_to_eol(base_cgroup
);
694 base_cgroup
= prune_init_scope(copy
);
/* Non-relative lookups hand back a path without a leading '/'. */
701 if (abspath(base_cgroup
))
702 base_cgroup
= deabs(base_cgroup
);
704 /* We're allowing base_cgroup to be "". */
705 return strdup(base_cgroup
);
/* No match on this line: advance to the next one. */
708 base_cgroup
= strchr(base_cgroup
, '\n');
/* Append a copy of @entry to the NULL-terminated string list @list.
 * Does not fail (allocation helpers abort on OOM). */
715 static void must_append_string(char ***list
, char *entry
)
720 newentry
= append_null_to_list((void ***)list
);
721 copy
= must_copy_string(entry
);
722 (*list
)[newentry
] = copy
;
/* Parse /proc/self/cgroup and fill @klist with kernel subsystems and @nlist
 * with named ("name=") subsystems. NOTE(review): chunked extract — error
 * paths, the cgroup2-detection condition and the return value are missing
 * from this view. */
725 static int get_existing_subsystems(char ***klist
, char ***nlist
)
727 __do_free
char *line
= NULL
;
728 __do_fclose
FILE *f
= NULL
;
731 f
= fopen("/proc/self/cgroup", "re");
735 while (getline(&line
, &len
, f
) != -1) {
737 p
= strchr(line
, ':');
746 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
747 * contains an entry of the form:
751 * In this case we use "cgroup2" as controller name.
754 must_append_string(klist
, "cgroup2");
758 lxc_iterate_parts(tok
, p
, ",") {
759 if (strnequal(tok
, "name=", 5))
760 must_append_string(nlist
, tok
);
762 must_append_string(klist
, tok
);
/* Trace-log the raw /proc/self/cgroup contents plus the detected kernel and
 * named subsystem lists (debug aid only; no side effects beyond logging). */
769 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo
, char **klist
,
775 TRACE("basecginfo is:");
776 TRACE("%s", basecginfo
);
778 for (k
= 0, it
= klist
; it
&& *it
; it
++, k
++)
779 TRACE("kernel subsystem %d: %s", k
, *it
);
781 for (k
= 0, it
= nlist
; it
&& *it
; it
++, k
++)
782 TRACE("named subsystem %d: %s", k
, *it
);
/* Prune the cgroup @path_prune in every hierarchy and release the cached
 * container paths. Failures are warned about, not fatal.
 * NOTE(review): chunked extract — early return, success/failure branch
 * structure and the final return are missing from this view. */
785 static int cgroup_tree_remove(struct hierarchy
**hierarchies
, const char *path_prune
)
787 if (!path_prune
|| !hierarchies
)
790 for (int i
= 0; hierarchies
[i
]; i
++) {
791 struct hierarchy
*h
= hierarchies
[i
];
794 ret
= cgroup_tree_prune(h
->dfd_base
, path_prune
);
796 SYSWARN("Failed to destroy %d(%s)", h
->dfd_base
, path_prune
);
798 TRACE("Removed cgroup tree %d(%s)", h
->dfd_base
, path_prune
);
/* limit path may alias full path; only free it separately when distinct. */
800 if (h
->container_limit_path
!= h
->container_full_path
)
801 free_disarm(h
->container_limit_path
);
802 free_disarm(h
->container_full_path
);
/* Argument bundle passed to callbacks run inside the container's user
 * namespace (see userns_exec_1 usage below). */
808 struct generic_userns_exec_data
{
809 struct hierarchy
**hierarchies
;
810 const char *path_prune
;
811 struct lxc_conf
*conf
;
812 uid_t origuid
; /* target uid in parent namespace */
/* userns_exec_1 callback: drop supplementary groups, switch to the
 * container's root (or init_{uid,gid} when unmapped), then remove the cgroup
 * tree. NOTE(review): chunked extract — the ret declaration and the ret < 0
 * checks before the error returns are missing from this view. */
816 static int cgroup_tree_remove_wrapper(void *data
)
818 struct generic_userns_exec_data
*arg
= data
;
819 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
820 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
823 if (!lxc_drop_groups() && errno
!= EPERM
)
824 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
826 ret
= setresgid(nsgid
, nsgid
, nsgid
);
828 return log_error_errno(-1, errno
, "Failed to setresgid(%d, %d, %d)",
829 (int)nsgid
, (int)nsgid
, (int)nsgid
);
831 ret
= setresuid(nsuid
, nsuid
, nsuid
);
833 return log_error_errno(-1, errno
, "Failed to setresuid(%d, %d, %d)",
834 (int)nsuid
, (int)nsuid
, (int)nsuid
);
836 return cgroup_tree_remove(arg
->hierarchies
, arg
->path_prune
);
/* Tear down the container (payload) cgroups: detach the cgroup2 bpf device
 * program, then remove the limit cgroup tree — via userns_exec_1 when id
 * mappings are in use, directly otherwise. NOTE(review): chunked extract —
 * guard returns, ret declarations and several condition lines are missing
 * from this view. */
839 __cgfsng_ops
static void cgfsng_payload_destroy(struct cgroup_ops
*ops
,
840 struct lxc_handler
*handler
)
845 ERROR("Called with uninitialized cgroup operations");
849 if (!ops
->hierarchies
)
853 ERROR("Called with uninitialized handler");
857 if (!handler
->conf
) {
858 ERROR("Called with uninitialized conf");
862 if (!ops
->container_limit_cgroup
) {
863 WARN("Uninitialized limit cgroup");
867 ret
= bpf_program_cgroup_detach(handler
->cgroup_ops
->cgroup2_devices
);
869 WARN("Failed to detach bpf program from cgroup");
871 if (!lxc_list_empty(&handler
->conf
->id_map
)) {
872 struct generic_userns_exec_data wrap
= {
873 .conf
= handler
->conf
,
874 .path_prune
= ops
->container_limit_cgroup
,
875 .hierarchies
= ops
->hierarchies
,
878 ret
= userns_exec_1(handler
->conf
, cgroup_tree_remove_wrapper
,
879 &wrap
, "cgroup_tree_remove_wrapper");
881 ret
= cgroup_tree_remove(ops
->hierarchies
, ops
->container_limit_cgroup
);
884 SYSWARN("Failed to destroy cgroups");
887 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
888 #define __OFFLINE_CPUS "/sys/devices/system/cpu/offline"
/* Seed the child cgroup's legacy cpuset.cpus from the parent's, with any
 * isolated and offline cpus removed (the legacy cpuset controller rejects
 * them). NOTE(review): chunked extract — many error-check and branch lines
 * are missing; note the fpath in the first error message appears to be used
 * before assignment in this view — confirm against the full source. */
889 static bool cpuset1_cpus_initialize(int dfd_parent
, int dfd_child
,
892 __do_free
char *cpulist
= NULL
, *fpath
= NULL
, *isolcpus
= NULL
,
893 *offlinecpus
= NULL
, *posscpus
= NULL
;
894 __do_free
uint32_t *isolmask
= NULL
, *offlinemask
= NULL
,
898 ssize_t maxisol
= 0, maxoffline
= 0, maxposs
= 0;
899 bool flipped_bit
= false;
901 posscpus
= read_file_at(dfd_parent
, "cpuset.cpus", PROTECT_OPEN
, 0);
903 return log_error_errno(false, errno
, "Failed to read file \"%s\"", fpath
);
905 /* Get maximum number of cpus found in possible cpuset. */
906 maxposs
= get_max_cpus(posscpus
);
907 if (maxposs
< 0 || maxposs
>= INT_MAX
- 1)
910 if (file_exists(__ISOL_CPUS
)) {
911 isolcpus
= read_file_at(-EBADF
, __ISOL_CPUS
, PROTECT_OPEN
, 0);
913 return log_error_errno(false, errno
, "Failed to read file \"%s\"", __ISOL_CPUS
);
915 if (isdigit(isolcpus
[0])) {
916 /* Get maximum number of cpus found in isolated cpuset. */
917 maxisol
= get_max_cpus(isolcpus
);
918 if (maxisol
< 0 || maxisol
>= INT_MAX
- 1)
922 if (maxposs
< maxisol
)
926 TRACE("The path \""__ISOL_CPUS
"\" to read isolated cpus from does not exist");
929 if (file_exists(__OFFLINE_CPUS
)) {
930 offlinecpus
= read_file_at(-EBADF
, __OFFLINE_CPUS
, PROTECT_OPEN
, 0);
932 return log_error_errno(false, errno
, "Failed to read file \"%s\"", __OFFLINE_CPUS
);
934 if (isdigit(offlinecpus
[0])) {
935 /* Get maximum number of cpus found in offline cpuset. */
936 maxoffline
= get_max_cpus(offlinecpus
);
937 if (maxoffline
< 0 || maxoffline
>= INT_MAX
- 1)
941 if (maxposs
< maxoffline
)
942 maxposs
= maxoffline
;
945 TRACE("The path \""__OFFLINE_CPUS
"\" to read offline cpus from does not exist");
/* Nothing to mask out: use the parent's list verbatim. */
948 if ((maxisol
== 0) && (maxoffline
== 0)) {
949 cpulist
= move_ptr(posscpus
);
953 possmask
= lxc_cpumask(posscpus
, maxposs
);
955 return log_error_errno(false, errno
, "Failed to create cpumask for possible cpus");
958 isolmask
= lxc_cpumask(isolcpus
, maxposs
);
960 return log_error_errno(false, errno
, "Failed to create cpumask for isolated cpus");
963 if (maxoffline
> 0) {
964 offlinemask
= lxc_cpumask(offlinecpus
, maxposs
);
966 return log_error_errno(false, errno
, "Failed to create cpumask for offline cpus");
/* Clear every possible cpu that is isolated or offline. */
969 for (i
= 0; i
<= maxposs
; i
++) {
970 if ((isolmask
&& !is_set(i
, isolmask
)) ||
971 (offlinemask
&& !is_set(i
, offlinemask
)) ||
972 !is_set(i
, possmask
))
976 clear_bit(i
, possmask
);
980 cpulist
= lxc_cpumask_to_cpulist(possmask
, maxposs
);
981 TRACE("No isolated or offline cpus present in cpuset");
983 cpulist
= move_ptr(posscpus
);
984 TRACE("Removed isolated or offline cpus from cpuset");
987 return log_error_errno(false, errno
, "Failed to create cpu list");
990 if (!am_initialized
) {
991 ret
= lxc_writeat(dfd_child
, "cpuset.cpus", cpulist
, strlen(cpulist
));
993 return log_error_errno(false, errno
, "Failed to write cpu list to \"%d/cpuset.cpus\"", dfd_child
);
995 TRACE("Copied cpu settings of parent cgroup");
/* Prepare the first legacy-cpuset child cgroup: fix up cpuset.cpus, copy
 * cpuset.mems from the parent, and enable cgroup.clone_children so further
 * descendants inherit the settings. NOTE(review): chunked extract — the v
 * declaration and the bytes < 0 checks before the error returns are missing
 * from this view. */
1001 static bool cpuset1_initialize(int dfd_base
, int dfd_next
)
1003 char mems
[PATH_MAX
];
1008 * Determine whether the base cgroup has cpuset
1009 * inheritance turned on.
1011 bytes
= lxc_readat(dfd_base
, "cgroup.clone_children", &v
, 1);
1013 return syserrno(false, "Failed to read file %d(cgroup.clone_children)", dfd_base
);
1016 * Initialize cpuset.cpus and make remove any isolated
1019 if (!cpuset1_cpus_initialize(dfd_base
, dfd_next
, v
== '1'))
1020 return syserrno(false, "Failed to initialize cpuset.cpus");
1022 /* Read cpuset.mems from parent... */
1023 bytes
= lxc_readat(dfd_base
, "cpuset.mems", mems
, sizeof(mems
));
1025 return syserrno(false, "Failed to read file %d(cpuset.mems)", dfd_base
);
1027 /* ... and copy to first cgroup in the tree... */
1028 bytes
= lxc_writeat(dfd_next
, "cpuset.mems", mems
, bytes
);
1030 return syserrno(false, "Failed to write %d(cpuset.mems)", dfd_next
);
1032 /* ... and finally turn on cpuset inheritance. */
1033 bytes
= lxc_writeat(dfd_next
, "cgroup.clone_children", "1", 1);
1035 return syserrno(false, "Failed to write %d(cgroup.clone_children)", dfd_next
);
1037 return log_trace(true, "Initialized cpuset in the legacy hierarchy");
/* Create the cgroup directory chain @path (relative to @dfd_base) one
 * component at a time and return an fd to the final directory. @cpuset_v1
 * triggers legacy-cpuset initialization on the first level; @eexist_ignore
 * tolerates a pre-existing final component. NOTE(review): chunked extract —
 * the buf/len/ret declarations, several checks and the close of intermediate
 * fds are missing from this view. */
1040 static int __cgroup_tree_create(int dfd_base
, const char *path
, mode_t mode
,
1041 bool cpuset_v1
, bool eexist_ignore
)
1043 __do_close
int dfd_final
= -EBADF
;
1044 int dfd_cur
= dfd_base
;
1050 if (is_empty_string(path
))
1051 return ret_errno(EINVAL
);
1053 len
= strlcpy(buf
, path
, sizeof(buf
));
1054 if (len
>= sizeof(buf
))
1055 return ret_errno(E2BIG
);
1057 lxc_iterate_parts(cur
, buf
, "/") {
1059 * Even though we vetted the paths when we parsed the config
1060 * we're paranoid here and check that the path is neither
1061 * absolute nor walks upwards.
1064 return syserrno_set(-EINVAL
, "No absolute paths allowed");
1066 if (strnequal(cur
, "..", STRLITERALLEN("..")))
1067 return syserrno_set(-EINVAL
, "No upward walking paths allowed");
1069 ret
= mkdirat(dfd_cur
, cur
, mode
);
1071 if (errno
!= EEXIST
)
1072 return syserrno(-errno
, "Failed to create %d(%s)", dfd_cur
, cur
);
1076 TRACE("%s %d(%s) cgroup", !ret
? "Created" : "Reusing", dfd_cur
, cur
);
1078 dfd_final
= open_at(dfd_cur
, cur
, PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH
, 0);
1080 return syserrno(-errno
, "Fail to open%s directory %d(%s)",
1081 !ret
? " newly created" : "", dfd_base
, cur
);
1082 if (dfd_cur
!= dfd_base
)
1084 else if (cpuset_v1
&& !cpuset1_initialize(dfd_base
, dfd_final
))
1085 return syserrno(-EINVAL
, "Failed to initialize cpuset controller in the legacy hierarchy");
1087 * Leave dfd_final pointing to the last fd we opened so
1088 * it will be automatically zapped if we return early.
1090 dfd_cur
= dfd_final
;
1093 /* The final cgroup must be succesfully creatd by us. */
1095 if (ret
!= -EEXIST
|| !eexist_ignore
)
1096 return syserrno_set(ret
, "Creating the final cgroup %d(%s) failed", dfd_base
, path
);
1099 return move_fd(dfd_final
);
/* Create the payload or monitor cgroup for hierarchy @h. With isolation
 * (@payload && @cgroup_leaf) a separate limit cgroup is created above the
 * leaf the container lives in; otherwise limit and leaf are the same cgroup.
 * Stores the resulting fds and paths in @h. NOTE(review): chunked extract —
 * error checks after the __cgroup_tree_create calls and the if/else framing
 * of the fd/path assignments are missing from this view. */
1102 static bool cgroup_tree_create(struct cgroup_ops
*ops
, struct lxc_conf
*conf
,
1103 struct hierarchy
*h
, const char *cgroup_limit_dir
,
1104 const char *cgroup_leaf
, bool payload
)
1106 __do_close
int fd_limit
= -EBADF
, fd_final
= -EBADF
;
1107 __do_free
char *path
= NULL
, *limit_path
= NULL
;
1108 bool cpuset_v1
= false;
1111 * The legacy cpuset controller needs massaging in case inheriting
1112 * settings from its immediate ancestor cgroup hasn't been turned on.
1114 cpuset_v1
= !is_unified_hierarchy(h
) && string_in_list(h
->controllers
, "cpuset");
1116 if (payload
&& cgroup_leaf
) {
1117 /* With isolation both parts need to not already exist. */
1118 fd_limit
= __cgroup_tree_create(h
->dfd_base
, cgroup_limit_dir
, 0755, cpuset_v1
, false);
1120 return syserrno(false, "Failed to create limiting cgroup %d(%s)", h
->dfd_base
, cgroup_limit_dir
);
1122 TRACE("Created limit cgroup %d->%d(%s)",
1123 fd_limit
, h
->dfd_base
, cgroup_limit_dir
);
1126 * With isolation the devices legacy cgroup needs to be
1127 * iinitialized early, as it typically contains an 'a' (all)
1128 * line, which is not possible once a subdirectory has been
1131 if (string_in_list(h
->controllers
, "devices") &&
1132 !ops
->setup_limits_legacy(ops
, conf
, true))
1133 return log_error(false, "Failed to setup legacy device limits");
1135 limit_path
= must_make_path(h
->mountpoint
, h
->container_base_path
, cgroup_limit_dir
, NULL
);
1136 path
= must_make_path(limit_path
, cgroup_leaf
, NULL
);
1139 * If we use a separate limit cgroup, the leaf cgroup, i.e. the
1140 * cgroup the container actually resides in, is below fd_limit.
1142 fd_final
= __cgroup_tree_create(fd_limit
, cgroup_leaf
, 0755, cpuset_v1
, false);
1144 /* Ensure we don't leave any garbage behind. */
1145 if (cgroup_tree_prune(h
->dfd_base
, cgroup_limit_dir
))
1146 SYSWARN("Failed to destroy %d(%s)", h
->dfd_base
, cgroup_limit_dir
);
1148 TRACE("Removed cgroup tree %d(%s)", h
->dfd_base
, cgroup_limit_dir
);
1151 path
= must_make_path(h
->mountpoint
, h
->container_base_path
, cgroup_limit_dir
, NULL
);
1153 fd_final
= __cgroup_tree_create(h
->dfd_base
, cgroup_limit_dir
, 0755, cpuset_v1
, false);
1156 return syserrno(false, "Failed to create %s cgroup %d(%s)", payload
? "payload" : "monitor", h
->dfd_base
, cgroup_limit_dir
);
1159 h
->cgfd_con
= move_fd(fd_final
);
1160 h
->container_full_path
= move_ptr(path
);
/* Without isolation the limit cgroup aliases the container cgroup. */
1163 h
->cgfd_limit
= h
->cgfd_con
;
1165 h
->cgfd_limit
= move_fd(fd_limit
);
1168 h
->container_limit_path
= move_ptr(limit_path
);
1170 h
->container_limit_path
= h
->container_full_path
;
1172 h
->cgfd_mon
= move_fd(fd_final
);
/* Close the cached cgroup fds for hierarchy @h (payload or monitor variant,
 * presumably selected by a parameter elided from this extract), free the
 * cached paths, and prune @path_prune from the hierarchy. NOTE(review):
 * chunked extract — the payload/monitor branch framing and early returns are
 * missing from this view. */
1178 static void cgroup_tree_prune_leaf(struct hierarchy
*h
, const char *path_prune
,
1184 /* Check whether we actually created the cgroup to prune. */
1185 if (h
->cgfd_limit
< 0)
1188 if (h
->container_full_path
!= h
->container_limit_path
)
1189 free_disarm(h
->container_limit_path
);
1190 free_disarm(h
->container_full_path
);
1192 close_prot_errno_disarm(h
->cgfd_con
);
1193 close_prot_errno_disarm(h
->cgfd_limit
);
1195 /* Check whether we actually created the cgroup to prune. */
1196 if (h
->cgfd_mon
< 0)
1199 close_prot_errno_disarm(h
->cgfd_mon
);
1202 /* We didn't create this cgroup. */
1206 if (cgroup_tree_prune(h
->dfd_base
, path_prune
))
1207 SYSWARN("Failed to destroy %d(%s)", h
->dfd_base
, path_prune
);
1209 TRACE("Removed cgroup tree %d(%s)", h
->dfd_base
, path_prune
);
/* Tear down the monitor cgroups: move the monitor process into a pivot
 * cgroup in every hierarchy (so its own cgroup becomes removable), then
 * prune the monitor cgroup tree. NOTE(review): chunked extract — guard
 * returns, ret/len declarations and several condition lines are missing
 * from this view. */
1212 __cgfsng_ops
static void cgfsng_monitor_destroy(struct cgroup_ops
*ops
,
1213 struct lxc_handler
*handler
)
1216 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1217 const struct lxc_conf
*conf
;
1220 ERROR("Called with uninitialized cgroup operations");
1224 if (!ops
->hierarchies
)
1228 ERROR("Called with uninitialized handler");
1232 if (!handler
->conf
) {
1233 ERROR("Called with uninitialized conf");
1236 conf
= handler
->conf
;
1238 if (!ops
->monitor_cgroup
) {
1239 WARN("Uninitialized monitor cgroup");
1243 len
= strnprintf(pidstr
, sizeof(pidstr
), "%d", handler
->monitor_pid
);
1247 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1248 __do_close
int fd_pivot
= -EBADF
;
1249 __do_free
char *pivot_path
= NULL
;
1250 struct hierarchy
*h
= ops
->hierarchies
[i
];
1251 bool cpuset_v1
= false;
1254 /* Monitor might have died before we entered the cgroup. */
1255 if (handler
->monitor_pid
<= 0) {
1256 WARN("No valid monitor process found while destroying cgroups");
1257 goto cgroup_prune_tree
;
/* Pivot location: explicit monitor_pivot_dir > lxc.cgroup.dir > default. */
1260 if (conf
->cgroup_meta
.monitor_pivot_dir
)
1261 pivot_path
= must_make_path(conf
->cgroup_meta
.monitor_pivot_dir
, CGROUP_PIVOT
, NULL
);
1262 else if (conf
->cgroup_meta
.dir
)
1263 pivot_path
= must_make_path(conf
->cgroup_meta
.dir
, CGROUP_PIVOT
, NULL
);
1265 pivot_path
= must_make_path(CGROUP_PIVOT
, NULL
);
1267 cpuset_v1
= !is_unified_hierarchy(h
) && string_in_list(h
->controllers
, "cpuset");
1269 fd_pivot
= __cgroup_tree_create(h
->dfd_base
, pivot_path
, 0755, cpuset_v1
, true);
1271 SYSWARN("Failed to create pivot cgroup %d(%s)", h
->dfd_base
, pivot_path
);
/* Move the monitor out of the cgroup about to be removed. */
1275 ret
= lxc_writeat(fd_pivot
, "cgroup.procs", pidstr
, len
);
1277 SYSWARN("Failed to move monitor %s to \"%s\"", pidstr
, pivot_path
);
1282 ret
= cgroup_tree_prune(h
->dfd_base
, ops
->monitor_cgroup
);
1284 SYSWARN("Failed to destroy %d(%s)", h
->dfd_base
, ops
->monitor_cgroup
);
1286 TRACE("Removed cgroup tree %d(%s)", h
->dfd_base
, ops
->monitor_cgroup
);
1291 * Check we have no lxc.cgroup.dir, and that lxc.cgroup.dir.limit_prefix is a
1292 * proper prefix directory of lxc.cgroup.dir.payload.
1294 * Returns the prefix length if it is set, otherwise zero on success.
/* NOTE(review): despite the comment above, the visible signature returns
 * bool (true when the configuration is consistent); the comment may be
 * stale — confirm against the full source. The "return true" lines are
 * missing from this chunked extract. */
1296 static bool check_cgroup_dir_config(struct lxc_conf
*conf
)
1298 const char *monitor_dir
= conf
->cgroup_meta
.monitor_dir
,
1299 *container_dir
= conf
->cgroup_meta
.container_dir
,
1300 *namespace_dir
= conf
->cgroup_meta
.namespace_dir
;
1302 /* none of the new options are set, all is fine */
1303 if (!monitor_dir
&& !container_dir
&& !namespace_dir
)
1306 /* some are set, make sure lxc.cgroup.dir is not also set*/
1307 if (conf
->cgroup_meta
.dir
)
1308 return log_error_errno(false, EINVAL
,
1309 "lxc.cgroup.dir conflicts with lxc.cgroup.dir.payload/monitor");
1311 /* make sure both monitor and payload are set */
1312 if (!monitor_dir
|| !container_dir
)
1313 return log_error_errno(false, EINVAL
,
1314 "lxc.cgroup.dir.payload and lxc.cgroup.dir.monitor must both be set");
1316 /* namespace_dir may be empty */
/* Create the monitor-process cgroup in every hierarchy. The name comes from
 * lxc.cgroup.dir.monitor, lxc.cgroup.dir, or the cgroup pattern; unless an
 * explicit monitor_dir is set, a "-%d" retry suffix is appended and up to
 * 1000 candidate names are tried. NOTE(review): chunked extract — the
 * do-loop framing, idx/len/i declarations and several branch lines are
 * missing from this view. */
1320 __cgfsng_ops
static bool cgfsng_monitor_create(struct cgroup_ops
*ops
, struct lxc_handler
*handler
)
1322 __do_free
char *monitor_cgroup
= NULL
;
1326 char *suffix
= NULL
;
1327 struct lxc_conf
*conf
;
1330 return ret_set_errno(false, ENOENT
);
1332 if (!ops
->hierarchies
)
1335 if (ops
->monitor_cgroup
)
1336 return ret_set_errno(false, EEXIST
);
1338 if (!handler
|| !handler
->conf
)
1339 return ret_set_errno(false, EINVAL
);
1341 conf
= handler
->conf
;
1343 if (!check_cgroup_dir_config(conf
))
1346 if (conf
->cgroup_meta
.monitor_dir
) {
1347 monitor_cgroup
= strdup(conf
->cgroup_meta
.monitor_dir
);
1348 } else if (conf
->cgroup_meta
.dir
) {
1349 monitor_cgroup
= must_concat(&len
, conf
->cgroup_meta
.dir
, "/",
1350 DEFAULT_MONITOR_CGROUP_PREFIX
,
1352 CGROUP_CREATE_RETRY
, NULL
);
1353 } else if (ops
->cgroup_pattern
) {
1354 __do_free
char *cgroup_tree
= NULL
;
1356 cgroup_tree
= lxc_string_replace("%n", handler
->name
, ops
->cgroup_pattern
);
1358 return ret_set_errno(false, ENOMEM
);
1360 monitor_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1361 DEFAULT_MONITOR_CGROUP
,
1362 CGROUP_CREATE_RETRY
, NULL
);
1364 monitor_cgroup
= must_concat(&len
, DEFAULT_MONITOR_CGROUP_PREFIX
,
1366 CGROUP_CREATE_RETRY
, NULL
);
1368 if (!monitor_cgroup
)
1369 return ret_set_errno(false, ENOMEM
);
/* suffix points at the retry placeholder that gets rewritten per attempt. */
1371 if (!conf
->cgroup_meta
.monitor_dir
) {
1372 suffix
= monitor_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1377 sprintf(suffix
, "-%d", idx
);
1379 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1380 if (cgroup_tree_create(ops
, handler
->conf
,
1381 ops
->hierarchies
[i
],
1382 monitor_cgroup
, NULL
, false))
/* Partial failure: undo what was created in earlier hierarchies. */
1385 DEBUG("Failed to create cgroup %s)", monitor_cgroup
);
1386 for (int j
= 0; j
<= i
; j
++)
1387 cgroup_tree_prune_leaf(ops
->hierarchies
[j
],
1388 monitor_cgroup
, false);
1393 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000 && suffix
);
1395 if (idx
== 1000 || (!suffix
&& idx
!= 0))
1396 return log_error_errno(false, ERANGE
, "Failed to create monitor cgroup");
1398 ops
->monitor_cgroup
= move_ptr(monitor_cgroup
);
1399 return log_info(true, "The monitor process uses \"%s\" as cgroup", ops
->monitor_cgroup
);
1403 * Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1404 * next cgroup_pattern-1, -2, ..., -999.
1406 __cgfsng_ops
static bool cgfsng_payload_create(struct cgroup_ops
*ops
, struct lxc_handler
*handler
)
1408 __do_free
char *container_cgroup
= NULL
, *__limit_cgroup
= NULL
;
1413 char *suffix
= NULL
;
1414 struct lxc_conf
*conf
;
1417 return ret_set_errno(false, ENOENT
);
1419 if (!ops
->hierarchies
)
1422 if (ops
->container_cgroup
|| ops
->container_limit_cgroup
)
1423 return ret_set_errno(false, EEXIST
);
1425 if (!handler
|| !handler
->conf
)
1426 return ret_set_errno(false, EINVAL
);
1428 conf
= handler
->conf
;
1430 if (!check_cgroup_dir_config(conf
))
1433 if (conf
->cgroup_meta
.container_dir
) {
1434 __limit_cgroup
= strdup(conf
->cgroup_meta
.container_dir
);
1435 if (!__limit_cgroup
)
1436 return ret_set_errno(false, ENOMEM
);
1438 if (conf
->cgroup_meta
.namespace_dir
) {
1439 container_cgroup
= must_make_path(__limit_cgroup
,
1440 conf
->cgroup_meta
.namespace_dir
,
1442 limit_cgroup
= __limit_cgroup
;
1444 /* explicit paths but without isolation */
1445 limit_cgroup
= move_ptr(__limit_cgroup
);
1446 container_cgroup
= limit_cgroup
;
1448 } else if (conf
->cgroup_meta
.dir
) {
1449 limit_cgroup
= must_concat(&len
, conf
->cgroup_meta
.dir
, "/",
1450 DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1452 CGROUP_CREATE_RETRY
, NULL
);
1453 container_cgroup
= limit_cgroup
;
1454 } else if (ops
->cgroup_pattern
) {
1455 __do_free
char *cgroup_tree
= NULL
;
1457 cgroup_tree
= lxc_string_replace("%n", handler
->name
, ops
->cgroup_pattern
);
1459 return ret_set_errno(false, ENOMEM
);
1461 limit_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1462 DEFAULT_PAYLOAD_CGROUP
,
1463 CGROUP_CREATE_RETRY
, NULL
);
1464 container_cgroup
= limit_cgroup
;
1466 limit_cgroup
= must_concat(&len
, DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1468 CGROUP_CREATE_RETRY
, NULL
);
1469 container_cgroup
= limit_cgroup
;
1472 return ret_set_errno(false, ENOMEM
);
1474 if (!conf
->cgroup_meta
.container_dir
) {
1475 suffix
= container_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1480 sprintf(suffix
, "-%d", idx
);
1482 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1483 if (cgroup_tree_create(ops
, handler
->conf
,
1484 ops
->hierarchies
[i
], limit_cgroup
,
1485 conf
->cgroup_meta
.namespace_dir
,
1489 DEBUG("Failed to create cgroup \"%s\"", ops
->hierarchies
[i
]->container_full_path
?: "(null)");
1490 for (int j
= 0; j
<= i
; j
++)
1491 cgroup_tree_prune_leaf(ops
->hierarchies
[j
],
1492 limit_cgroup
, true);
1497 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000 && suffix
);
1499 if (idx
== 1000 || (!suffix
&& idx
!= 0))
1500 return log_error_errno(false, ERANGE
, "Failed to create container cgroup");
1502 ops
->container_cgroup
= move_ptr(container_cgroup
);
1504 ops
->container_limit_cgroup
= move_ptr(__limit_cgroup
);
1506 ops
->container_limit_cgroup
= ops
->container_cgroup
;
1507 INFO("The container process uses \"%s\" as inner and \"%s\" as limit cgroup",
1508 ops
->container_cgroup
, ops
->container_limit_cgroup
);
1512 __cgfsng_ops
static bool cgfsng_monitor_enter(struct cgroup_ops
*ops
,
1513 struct lxc_handler
*handler
)
1515 int monitor_len
, transient_len
= 0;
1516 char monitor
[INTTYPE_TO_STRLEN(pid_t
)],
1517 transient
[INTTYPE_TO_STRLEN(pid_t
)];
1520 return ret_set_errno(false, ENOENT
);
1522 if (!ops
->hierarchies
)
1525 if (!ops
->monitor_cgroup
)
1526 return ret_set_errno(false, ENOENT
);
1528 if (!handler
|| !handler
->conf
)
1529 return ret_set_errno(false, EINVAL
);
1531 monitor_len
= strnprintf(monitor
, sizeof(monitor
), "%d", handler
->monitor_pid
);
1532 if (monitor_len
< 0)
1535 if (handler
->transient_pid
> 0) {
1536 transient_len
= strnprintf(transient
, sizeof(transient
), "%d", handler
->transient_pid
);
1537 if (transient_len
< 0)
1541 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1542 struct hierarchy
*h
= ops
->hierarchies
[i
];
1545 ret
= lxc_writeat(h
->cgfd_mon
, "cgroup.procs", monitor
, monitor_len
);
1547 return log_error_errno(false, errno
, "Failed to enter cgroup %d", h
->cgfd_mon
);
1549 TRACE("Moved monitor into cgroup %d", h
->cgfd_mon
);
1551 if (handler
->transient_pid
<= 0)
1554 ret
= lxc_writeat(h
->cgfd_mon
, "cgroup.procs", transient
, transient_len
);
1556 return log_error_errno(false, errno
, "Failed to enter cgroup %d", h
->cgfd_mon
);
1558 TRACE("Moved transient process into cgroup %d", h
->cgfd_mon
);
1561 * we don't keep the fds for non-unified hierarchies around
1562 * mainly because we don't make use of them anymore after the
1563 * core cgroup setup is done but also because there are quite a
1566 if (!is_unified_hierarchy(h
))
1567 close_prot_errno_disarm(h
->cgfd_mon
);
1569 handler
->transient_pid
= -1;
1574 __cgfsng_ops
static bool cgfsng_payload_enter(struct cgroup_ops
*ops
,
1575 struct lxc_handler
*handler
)
1578 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1581 return ret_set_errno(false, ENOENT
);
1583 if (!ops
->hierarchies
)
1586 if (!ops
->container_cgroup
)
1587 return ret_set_errno(false, ENOENT
);
1589 if (!handler
|| !handler
->conf
)
1590 return ret_set_errno(false, EINVAL
);
1592 len
= strnprintf(pidstr
, sizeof(pidstr
), "%d", handler
->pid
);
1596 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1597 struct hierarchy
*h
= ops
->hierarchies
[i
];
1600 if (is_unified_hierarchy(h
) &&
1601 (handler
->clone_flags
& CLONE_INTO_CGROUP
))
1604 ret
= lxc_writeat(h
->cgfd_con
, "cgroup.procs", pidstr
, len
);
1606 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", h
->container_full_path
);
1608 TRACE("Moved container into %s cgroup via %d", h
->container_full_path
, h
->cgfd_con
);
1614 static int fchowmodat(int dirfd
, const char *path
, uid_t chown_uid
,
1615 gid_t chown_gid
, mode_t chmod_mode
)
1619 ret
= fchownat(dirfd
, path
, chown_uid
, chown_gid
,
1620 AT_EMPTY_PATH
| AT_SYMLINK_NOFOLLOW
);
1622 return log_warn_errno(-1,
1623 errno
, "Failed to fchownat(%d, %s, %d, %d, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )",
1624 dirfd
, path
, (int)chown_uid
,
1627 ret
= fchmodat(dirfd
, (*path
!= '\0') ? path
: ".", chmod_mode
, 0);
1629 return log_warn_errno(-1, errno
, "Failed to fchmodat(%d, %s, %d, AT_SYMLINK_NOFOLLOW)",
1630 dirfd
, path
, (int)chmod_mode
);
1635 /* chgrp the container cgroups to container group. We leave
1636 * the container owner as cgroup owner. So we must make the
1637 * directories 775 so that the container can create sub-cgroups.
1639 * Also chown the tasks and cgroup.procs files. Those may not
1640 * exist depending on kernel version.
1642 static int chown_cgroup_wrapper(void *data
)
1646 struct generic_userns_exec_data
*arg
= data
;
1647 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
1648 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
1650 if (!lxc_drop_groups() && errno
!= EPERM
)
1651 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
1653 ret
= setresgid(nsgid
, nsgid
, nsgid
);
1655 return log_error_errno(-1, errno
, "Failed to setresgid(%d, %d, %d)",
1656 (int)nsgid
, (int)nsgid
, (int)nsgid
);
1658 ret
= setresuid(nsuid
, nsuid
, nsuid
);
1660 return log_error_errno(-1, errno
, "Failed to setresuid(%d, %d, %d)",
1661 (int)nsuid
, (int)nsuid
, (int)nsuid
);
1663 destuid
= get_ns_uid(arg
->origuid
);
1664 if (destuid
== LXC_INVALID_UID
)
1667 for (int i
= 0; arg
->hierarchies
[i
]; i
++) {
1668 int dirfd
= arg
->hierarchies
[i
]->cgfd_con
;
1670 (void)fchowmodat(dirfd
, "", destuid
, nsgid
, 0775);
1673 * Failures to chown() these are inconvenient but not
1674 * detrimental We leave these owned by the container launcher,
1675 * so that container root can write to the files to attach. We
1676 * chmod() them 664 so that container systemd can write to the
1677 * files (which systemd in wily insists on doing).
1680 if (arg
->hierarchies
[i
]->version
== CGROUP_SUPER_MAGIC
)
1681 (void)fchowmodat(dirfd
, "tasks", destuid
, nsgid
, 0664);
1683 (void)fchowmodat(dirfd
, "cgroup.procs", destuid
, nsgid
, 0664);
1685 if (arg
->hierarchies
[i
]->version
!= CGROUP2_SUPER_MAGIC
)
1688 for (char **p
= arg
->hierarchies
[i
]->cgroup2_chown
; p
&& *p
; p
++)
1689 (void)fchowmodat(dirfd
, *p
, destuid
, nsgid
, 0664);
1695 __cgfsng_ops
static bool cgfsng_chown(struct cgroup_ops
*ops
,
1696 struct lxc_conf
*conf
)
1698 struct generic_userns_exec_data wrap
;
1701 return ret_set_errno(false, ENOENT
);
1703 if (!ops
->hierarchies
)
1706 if (!ops
->container_cgroup
)
1707 return ret_set_errno(false, ENOENT
);
1710 return ret_set_errno(false, EINVAL
);
1712 if (lxc_list_empty(&conf
->id_map
))
1715 wrap
.origuid
= geteuid();
1717 wrap
.hierarchies
= ops
->hierarchies
;
1720 if (userns_exec_1(conf
, chown_cgroup_wrapper
, &wrap
, "chown_cgroup_wrapper") < 0)
1721 return log_error_errno(false, errno
, "Error requesting cgroup chown in new user namespace");
1726 __cgfsng_ops
static void cgfsng_payload_finalize(struct cgroup_ops
*ops
)
1731 if (!ops
->hierarchies
)
1734 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1735 struct hierarchy
*h
= ops
->hierarchies
[i
];
1737 * we don't keep the fds for non-unified hierarchies around
1738 * mainly because we don't make use of them anymore after the
1739 * core cgroup setup is done but also because there are quite a
1742 if (!is_unified_hierarchy(h
))
1743 close_prot_errno_disarm(h
->cgfd_con
);
1747 * The checking for freezer support should obviously be done at cgroup
1748 * initialization time but that doesn't work reliable. The freezer
1749 * controller has been demoted (rightly so) to a simple file located in
1750 * each non-root cgroup. At the time when the container is created we
1751 * might still be located in /sys/fs/cgroup and so checking for
1752 * cgroup.freeze won't tell us anything because this file doesn't exist
1753 * in the root cgroup. We could then iterate through /sys/fs/cgroup and
1754 * find an already existing cgroup and then check within that cgroup
1755 * for the existence of cgroup.freeze but that will only work on
1756 * systemd based hosts. Other init systems might not manage cgroups and
1757 * so no cgroup will exist. So we defer until we have created cgroups
1758 * for our container which means we check here.
1760 if (pure_unified_layout(ops
) &&
1761 !faccessat(ops
->unified
->cgfd_con
, "cgroup.freeze", F_OK
,
1762 AT_SYMLINK_NOFOLLOW
)) {
1763 TRACE("Unified hierarchy supports freezer");
1764 ops
->unified
->freezer_controller
= 1;
1768 /* cgroup-full:* is done, no need to create subdirs */
1769 static inline bool cg_mount_needs_subdirs(int cgroup_automount_type
)
1771 switch (cgroup_automount_type
) {
1772 case LXC_AUTO_CGROUP_RO
:
1774 case LXC_AUTO_CGROUP_RW
:
1776 case LXC_AUTO_CGROUP_MIXED
:
1783 /* After $rootfs/sys/fs/container/controller/the/cg/path has been created,
1784 * remount controller ro if needed and bindmount the cgroupfs onto
1785 * control/the/cg/path.
1787 static int cg_legacy_mount_controllers(int cgroup_automount_type
, struct hierarchy
*h
,
1788 char *controllerpath
, char *cgpath
,
1789 const char *container_cgroup
)
1791 __do_free
char *sourcepath
= NULL
;
1792 int ret
, remount_flags
;
1793 int flags
= MS_BIND
;
1795 if ((cgroup_automount_type
== LXC_AUTO_CGROUP_RO
) ||
1796 (cgroup_automount_type
== LXC_AUTO_CGROUP_MIXED
)) {
1797 ret
= mount(controllerpath
, controllerpath
, "cgroup", MS_BIND
, NULL
);
1799 return log_error_errno(-1, errno
, "Failed to bind mount \"%s\" onto \"%s\"",
1800 controllerpath
, controllerpath
);
1802 remount_flags
= add_required_remount_flags(controllerpath
,
1804 flags
| MS_REMOUNT
);
1805 ret
= mount(controllerpath
, controllerpath
, "cgroup",
1806 remount_flags
| MS_REMOUNT
| MS_BIND
| MS_RDONLY
,
1809 return log_error_errno(-1, errno
, "Failed to remount \"%s\" ro", controllerpath
);
1811 INFO("Remounted %s read-only", controllerpath
);
1814 sourcepath
= must_make_path(h
->mountpoint
, h
->container_base_path
,
1815 container_cgroup
, NULL
);
1816 if (cgroup_automount_type
== LXC_AUTO_CGROUP_RO
)
1819 ret
= mount(sourcepath
, cgpath
, "cgroup", flags
, NULL
);
1821 return log_error_errno(-1, errno
, "Failed to mount \"%s\" onto \"%s\"",
1822 h
->controllers
[0], cgpath
);
1823 INFO("Mounted \"%s\" onto \"%s\"", h
->controllers
[0], cgpath
);
1825 if (flags
& MS_RDONLY
) {
1826 remount_flags
= add_required_remount_flags(sourcepath
, cgpath
,
1827 flags
| MS_REMOUNT
);
1828 ret
= mount(sourcepath
, cgpath
, "cgroup", remount_flags
, NULL
);
1830 return log_error_errno(-1, errno
, "Failed to remount \"%s\" ro", cgpath
);
1831 INFO("Remounted %s read-only", cgpath
);
1834 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath
);
1840 * Mount cgroup hierarchies directly without using bind-mounts. The main
1841 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1842 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1844 static int __cgroupfs_mount(int cgroup_automount_type
, struct hierarchy
*h
,
1845 struct lxc_rootfs
*rootfs
, int dfd_mnt_cgroupfs
,
1846 const char *hierarchy_mnt
)
1848 __do_close
int fd_fs
= -EBADF
;
1849 unsigned int flags
= 0;
1853 if (dfd_mnt_cgroupfs
< 0)
1854 return ret_errno(EINVAL
);
1856 flags
|= MOUNT_ATTR_NOSUID
;
1857 flags
|= MOUNT_ATTR_NOEXEC
;
1858 flags
|= MOUNT_ATTR_NODEV
;
1859 flags
|= MOUNT_ATTR_RELATIME
;
1861 if ((cgroup_automount_type
== LXC_AUTO_CGROUP_RO
) ||
1862 (cgroup_automount_type
== LXC_AUTO_CGROUP_FULL_RO
))
1863 flags
|= MOUNT_ATTR_RDONLY
;
1865 if (is_unified_hierarchy(h
))
1870 if (can_use_mount_api()) {
1871 fd_fs
= fs_prepare(fstype
, -EBADF
, "", 0, 0);
1873 return log_error_errno(-errno
, errno
, "Failed to prepare filesystem context for %s", fstype
);
1875 if (!is_unified_hierarchy(h
)) {
1876 for (const char **it
= (const char **)h
->controllers
; it
&& *it
; it
++) {
1877 if (strnequal(*it
, "name=", STRLITERALLEN("name=")))
1878 ret
= fs_set_property(fd_fs
, "name", *it
+ STRLITERALLEN("name="));
1880 ret
= fs_set_property(fd_fs
, *it
, "");
1882 return log_error_errno(-errno
, errno
, "Failed to add %s controller to cgroup filesystem context %d(dev)", *it
, fd_fs
);
1886 ret
= fs_attach(fd_fs
, dfd_mnt_cgroupfs
, hierarchy_mnt
,
1887 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH
,
1890 __do_free
char *controllers
= NULL
, *target
= NULL
;
1891 unsigned int old_flags
= 0;
1892 const char *rootfs_mnt
;
1894 if (!is_unified_hierarchy(h
)) {
1895 controllers
= lxc_string_join(",", (const char **)h
->controllers
, false);
1897 return ret_errno(ENOMEM
);
1900 rootfs_mnt
= get_rootfs_mnt(rootfs
);
1901 ret
= mnt_attributes_old(flags
, &old_flags
);
1903 return log_error_errno(-EINVAL
, EINVAL
, "Unsupported mount properties specified");
1905 target
= must_make_path(rootfs_mnt
, DEFAULT_CGROUP_MOUNTPOINT
, hierarchy_mnt
, NULL
);
1906 ret
= safe_mount(NULL
, target
, fstype
, old_flags
, controllers
, rootfs_mnt
);
1909 return log_error_errno(ret
, errno
, "Failed to mount %s filesystem onto %d(%s)",
1910 fstype
, dfd_mnt_cgroupfs
, maybe_empty(hierarchy_mnt
));
1912 DEBUG("Mounted cgroup filesystem %s onto %d(%s)",
1913 fstype
, dfd_mnt_cgroupfs
, maybe_empty(hierarchy_mnt
));
1917 static inline int cgroupfs_mount(int cgroup_automount_type
, struct hierarchy
*h
,
1918 struct lxc_rootfs
*rootfs
,
1919 int dfd_mnt_cgroupfs
, const char *hierarchy_mnt
)
1921 return __cgroupfs_mount(cgroup_automount_type
, h
, rootfs
,
1922 dfd_mnt_cgroupfs
, hierarchy_mnt
);
1925 static inline int cgroupfs_bind_mount(int cgroup_automount_type
, struct hierarchy
*h
,
1926 struct lxc_rootfs
*rootfs
,
1927 int dfd_mnt_cgroupfs
,
1928 const char *hierarchy_mnt
)
1930 switch (cgroup_automount_type
) {
1931 case LXC_AUTO_CGROUP_FULL_RO
:
1933 case LXC_AUTO_CGROUP_FULL_RW
:
1935 case LXC_AUTO_CGROUP_FULL_MIXED
:
1941 return __cgroupfs_mount(cgroup_automount_type
, h
, rootfs
,
1942 dfd_mnt_cgroupfs
, hierarchy_mnt
);
1945 __cgfsng_ops
static bool cgfsng_mount(struct cgroup_ops
*ops
,
1946 struct lxc_handler
*handler
, int cg_flags
)
1948 __do_close
int dfd_mnt_tmpfs
= -EBADF
, fd_fs
= -EBADF
;
1949 __do_free
char *cgroup_root
= NULL
;
1950 int cgroup_automount_type
;
1951 bool in_cgroup_ns
= false, wants_force_mount
= false;
1952 struct lxc_conf
*conf
= handler
->conf
;
1953 struct lxc_rootfs
*rootfs
= &conf
->rootfs
;
1954 const char *rootfs_mnt
= get_rootfs_mnt(rootfs
);
1958 return ret_set_errno(false, ENOENT
);
1960 if (!ops
->hierarchies
)
1964 return ret_set_errno(false, EINVAL
);
1966 if ((cg_flags
& LXC_AUTO_CGROUP_MASK
) == 0)
1967 return log_trace(true, "No cgroup mounts requested");
1969 if (cg_flags
& LXC_AUTO_CGROUP_FORCE
) {
1970 cg_flags
&= ~LXC_AUTO_CGROUP_FORCE
;
1971 wants_force_mount
= true;
1975 case LXC_AUTO_CGROUP_RO
:
1976 TRACE("Read-only cgroup mounts requested");
1978 case LXC_AUTO_CGROUP_RW
:
1979 TRACE("Read-write cgroup mounts requested");
1981 case LXC_AUTO_CGROUP_MIXED
:
1982 TRACE("Mixed cgroup mounts requested");
1984 case LXC_AUTO_CGROUP_FULL_RO
:
1985 TRACE("Full read-only cgroup mounts requested");
1987 case LXC_AUTO_CGROUP_FULL_RW
:
1988 TRACE("Full read-write cgroup mounts requested");
1990 case LXC_AUTO_CGROUP_FULL_MIXED
:
1991 TRACE("Full mixed cgroup mounts requested");
1994 return log_error_errno(false, EINVAL
, "Invalid cgroup mount options specified");
1996 cgroup_automount_type
= cg_flags
;
1998 if (!wants_force_mount
) {
1999 wants_force_mount
= !lxc_wants_cap(CAP_SYS_ADMIN
, conf
);
2002 * Most recent distro versions currently have init system that
2003 * do support cgroup2 but do not mount it by default unless
2004 * explicitly told so even if the host is cgroup2 only. That
2005 * means they often will fail to boot. Fix this by pre-mounting
2006 * cgroup2 by default. We will likely need to be doing this a
2007 * few years until all distros have switched over to cgroup2 at
2008 * which point we can safely assume that their init systems
2009 * will mount it themselves.
2011 if (pure_unified_layout(ops
))
2012 wants_force_mount
= true;
2015 if (cgns_supported() && container_uses_namespace(handler
, CLONE_NEWCGROUP
))
2016 in_cgroup_ns
= true;
2018 if (in_cgroup_ns
&& !wants_force_mount
)
2019 return log_trace(true, "Mounting cgroups not requested or needed");
2021 /* This is really the codepath that we want. */
2022 if (pure_unified_layout(ops
)) {
2023 __do_close
int dfd_mnt_unified
= -EBADF
;
2025 dfd_mnt_unified
= open_at(rootfs
->dfd_mnt
, DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
,
2026 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH_XDEV
, 0);
2027 if (dfd_mnt_unified
< 0)
2028 return syserrno(-errno
, "Failed to open %d(%s)", rootfs
->dfd_mnt
,
2029 DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
);
2031 * If cgroup namespaces are supported but the container will
2032 * not have CAP_SYS_ADMIN after it has started we need to mount
2033 * the cgroups manually.
2035 * Note that here we know that wants_force_mount is true.
2036 * Otherwise we would've returned early above.
2040 * 1. cgroup:rw:force -> Mount the cgroup2 filesystem.
2041 * 2. cgroup:ro:force -> Mount the cgroup2 filesystem read-only.
2042 * 3. cgroup:mixed:force -> See comment above how this
2044 * cgroup:mixed is equal to
2045 * cgroup:rw when cgroup
2046 * namespaces are supported.
2048 * 4. cgroup:rw -> No-op; init system responsible for mounting.
2049 * 5. cgroup:ro -> No-op; init system responsible for mounting.
2050 * 6. cgroup:mixed -> No-op; init system responsible for mounting.
2052 * 7. cgroup-full:rw -> Not supported.
2053 * 8. cgroup-full:ro -> Not supported.
2054 * 9. cgroup-full:mixed -> Not supported.
2056 * 10. cgroup-full:rw:force -> Not supported.
2057 * 11. cgroup-full:ro:force -> Not supported.
2058 * 12. cgroup-full:mixed:force -> Not supported.
2060 ret
= cgroupfs_mount(cgroup_automount_type
, ops
->unified
, rootfs
, dfd_mnt_unified
, "");
2062 return syserrno(false, "Failed to force mount cgroup filesystem in cgroup namespace");
2064 return log_trace(true, "Force mounted cgroup filesystem in new cgroup namespace");
2067 * Either no cgroup namespace supported (highly
2068 * unlikely unless we're dealing with a Frankenkernel.
2069 * Or the user requested to keep the cgroup namespace
2070 * of the host or another container.
2072 if (wants_force_mount
) {
2074 * 1. cgroup:rw:force -> Bind-mount the cgroup2 filesystem writable.
2075 * 2. cgroup:ro:force -> Bind-mount the cgroup2 filesystem read-only.
2076 * 3. cgroup:mixed:force -> bind-mount the cgroup2 filesystem and
2077 * and make the parent directory of the
2078 * container's cgroup read-only but the
2079 * container's cgroup writable.
2081 * 10. cgroup-full:rw:force ->
2082 * 11. cgroup-full:ro:force ->
2083 * 12. cgroup-full:mixed:force ->
2086 SYSWARN("Force-mounting the unified cgroup hierarchy without cgroup namespace support is currently not supported");
2089 SYSWARN("Mounting the unified cgroup hierarchy without cgroup namespace support is currently not supported");
2093 return syserrno(false, "Failed to mount cgroups");
2097 * Mount a tmpfs over DEFAULT_CGROUP_MOUNTPOINT. Note that we're
2098 * relying on RESOLVE_BENEATH so we need to skip the leading "/" in the
2099 * DEFAULT_CGROUP_MOUNTPOINT define.
2101 if (can_use_mount_api()) {
2102 fd_fs
= fs_prepare("tmpfs", -EBADF
, "", 0, 0);
2104 return log_error_errno(-errno
, errno
, "Failed to create new filesystem context for tmpfs");
2106 ret
= fs_set_property(fd_fs
, "mode", "0755");
2108 return log_error_errno(-errno
, errno
, "Failed to mount tmpfs onto %d(dev)", fd_fs
);
2110 ret
= fs_set_property(fd_fs
, "size", "10240k");
2112 return log_error_errno(-errno
, errno
, "Failed to mount tmpfs onto %d(dev)", fd_fs
);
2114 ret
= fs_attach(fd_fs
, rootfs
->dfd_mnt
, DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
,
2115 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH_XDEV
,
2116 MOUNT_ATTR_NOSUID
| MOUNT_ATTR_NODEV
|
2117 MOUNT_ATTR_NOEXEC
| MOUNT_ATTR_RELATIME
);
2119 cgroup_root
= must_make_path(rootfs_mnt
, DEFAULT_CGROUP_MOUNTPOINT
, NULL
);
2120 ret
= safe_mount(NULL
, cgroup_root
, "tmpfs",
2121 MS_NOSUID
| MS_NODEV
| MS_NOEXEC
| MS_RELATIME
,
2122 "size=10240k,mode=755", rootfs_mnt
);
2125 return log_error_errno(false, errno
, "Failed to mount tmpfs on %s",
2126 DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
);
2128 dfd_mnt_tmpfs
= open_at(rootfs
->dfd_mnt
, DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
,
2129 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH_XDEV
, 0);
2130 if (dfd_mnt_tmpfs
< 0)
2131 return syserrno(-errno
, "Failed to open %d(%s)", rootfs
->dfd_mnt
,
2132 DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
);
2134 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
2135 __do_free
char *controllerpath
= NULL
, *path2
= NULL
;
2136 struct hierarchy
*h
= ops
->hierarchies
[i
];
2137 char *controller
= strrchr(h
->mountpoint
, '/');
2143 ret
= mkdirat(dfd_mnt_tmpfs
, controller
, 0000);
2145 return log_error_errno(false, errno
, "Failed to create cgroup mountpoint %d(%s)", dfd_mnt_tmpfs
, controller
);
2147 if (in_cgroup_ns
&& wants_force_mount
) {
2149 * If cgroup namespaces are supported but the container
2150 * will not have CAP_SYS_ADMIN after it has started we
2151 * need to mount the cgroups manually.
2153 ret
= cgroupfs_mount(cgroup_automount_type
, h
, rootfs
, dfd_mnt_tmpfs
, controller
);
2160 /* Here is where the ancient kernel section begins. */
2161 ret
= cgroupfs_bind_mount(cgroup_automount_type
, h
, rootfs
, dfd_mnt_tmpfs
, controller
);
2165 if (!cg_mount_needs_subdirs(cgroup_automount_type
))
2169 cgroup_root
= must_make_path(rootfs_mnt
, DEFAULT_CGROUP_MOUNTPOINT
, NULL
);
2171 controllerpath
= must_make_path(cgroup_root
, controller
, NULL
);
2172 path2
= must_make_path(controllerpath
, h
->container_base_path
, ops
->container_cgroup
, NULL
);
2173 ret
= mkdir_p(path2
, 0755);
2174 if (ret
< 0 && (errno
!= EEXIST
))
2177 ret
= cg_legacy_mount_controllers(cgroup_automount_type
, h
, controllerpath
, path2
, ops
->container_cgroup
);
2185 /* Only root needs to escape to the cgroup of its init. */
2186 __cgfsng_ops
static bool cgfsng_criu_escape(const struct cgroup_ops
*ops
,
2187 struct lxc_conf
*conf
)
2190 return ret_set_errno(false, ENOENT
);
2192 if (!ops
->hierarchies
)
2196 return ret_set_errno(false, EINVAL
);
2198 if (conf
->cgroup_meta
.relative
|| geteuid())
2201 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
2202 __do_free
char *fullpath
= NULL
;
2206 must_make_path(ops
->hierarchies
[i
]->mountpoint
,
2207 ops
->hierarchies
[i
]->container_base_path
,
2208 "cgroup.procs", NULL
);
2209 ret
= lxc_write_to_file(fullpath
, "0", 2, false, 0666);
2211 return log_error_errno(false, errno
, "Failed to escape to cgroup \"%s\"", fullpath
);
2217 __cgfsng_ops
static int cgfsng_criu_num_hierarchies(struct cgroup_ops
*ops
)
2222 return ret_set_errno(-1, ENOENT
);
2224 if (!ops
->hierarchies
)
2227 for (; ops
->hierarchies
[i
]; i
++)
2233 __cgfsng_ops
static bool cgfsng_criu_get_hierarchies(struct cgroup_ops
*ops
,
2239 return ret_set_errno(false, ENOENT
);
2241 if (!ops
->hierarchies
)
2242 return ret_set_errno(false, ENOENT
);
2244 /* sanity check n */
2245 for (i
= 0; i
< n
; i
++)
2246 if (!ops
->hierarchies
[i
])
2247 return ret_set_errno(false, ENOENT
);
2249 *out
= ops
->hierarchies
[i
]->controllers
;
2254 static bool cg_legacy_freeze(struct cgroup_ops
*ops
)
2256 struct hierarchy
*h
;
2258 h
= get_hierarchy(ops
, "freezer");
2260 return ret_set_errno(-1, ENOENT
);
2262 return lxc_write_openat(h
->container_full_path
, "freezer.state",
2263 "FROZEN", STRLITERALLEN("FROZEN"));
2266 static int freezer_cgroup_events_cb(int fd
, uint32_t events
, void *cbdata
,
2267 struct lxc_epoll_descr
*descr
)
2269 __do_free
char *line
= NULL
;
2270 __do_fclose
FILE *f
= NULL
;
2271 int state
= PTR_TO_INT(cbdata
);
2273 const char *state_string
;
2275 f
= fdopen_at(fd
, "", "re", PROTECT_OPEN
, PROTECT_LOOKUP_BENEATH
);
2277 return LXC_MAINLOOP_ERROR
;
2280 state_string
= "frozen 1";
2282 state_string
= "frozen 0";
2284 while (getline(&line
, &len
, f
) != -1)
2285 if (strnequal(line
, state_string
, STRLITERALLEN("frozen") + 2))
2286 return LXC_MAINLOOP_CLOSE
;
2290 return LXC_MAINLOOP_CONTINUE
;
2293 static int cg_unified_freeze_do(struct cgroup_ops
*ops
, int timeout
,
2294 const char *state_string
,
2296 const char *epoll_error
,
2297 const char *wait_error
)
2299 __do_close
int fd
= -EBADF
;
2300 call_cleaner(lxc_mainloop_close
) struct lxc_epoll_descr
*descr_ptr
= NULL
;
2302 struct lxc_epoll_descr descr
;
2303 struct hierarchy
*h
;
2307 return ret_set_errno(-1, ENOENT
);
2309 if (!h
->container_full_path
)
2310 return ret_set_errno(-1, EEXIST
);
2313 __do_free
char *events_file
= NULL
;
2315 events_file
= must_make_path(h
->container_full_path
, "cgroup.events", NULL
);
2316 fd
= open(events_file
, O_RDONLY
| O_CLOEXEC
);
2318 return log_error_errno(-1, errno
, "Failed to open cgroup.events file");
2320 ret
= lxc_mainloop_open(&descr
);
2322 return log_error_errno(-1, errno
, "%s", epoll_error
);
2324 /* automatically cleaned up now */
2327 ret
= lxc_mainloop_add_handler_events(&descr
, fd
, EPOLLPRI
, freezer_cgroup_events_cb
, INT_TO_PTR(state_num
));
2329 return log_error_errno(-1, errno
, "Failed to add cgroup.events fd handler to mainloop");
2332 ret
= lxc_write_openat(h
->container_full_path
, "cgroup.freeze", state_string
, 1);
2334 return log_error_errno(-1, errno
, "Failed to open cgroup.freeze file");
2336 if (timeout
!= 0 && lxc_mainloop(&descr
, timeout
))
2337 return log_error_errno(-1, errno
, "%s", wait_error
);
2342 static int cg_unified_freeze(struct cgroup_ops
*ops
, int timeout
)
2344 return cg_unified_freeze_do(ops
, timeout
, "1", 1,
2345 "Failed to create epoll instance to wait for container freeze",
2346 "Failed to wait for container to be frozen");
2349 __cgfsng_ops
static int cgfsng_freeze(struct cgroup_ops
*ops
, int timeout
)
2351 if (!ops
->hierarchies
)
2352 return ret_set_errno(-1, ENOENT
);
2354 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
2355 return cg_legacy_freeze(ops
);
2357 return cg_unified_freeze(ops
, timeout
);
2360 static int cg_legacy_unfreeze(struct cgroup_ops
*ops
)
2362 struct hierarchy
*h
;
2364 h
= get_hierarchy(ops
, "freezer");
2366 return ret_set_errno(-1, ENOENT
);
2368 return lxc_write_openat(h
->container_full_path
, "freezer.state",
2369 "THAWED", STRLITERALLEN("THAWED"));
2372 static int cg_unified_unfreeze(struct cgroup_ops
*ops
, int timeout
)
2374 return cg_unified_freeze_do(ops
, timeout
, "0", 0,
2375 "Failed to create epoll instance to wait for container unfreeze",
2376 "Failed to wait for container to be unfrozen");
2379 __cgfsng_ops
static int cgfsng_unfreeze(struct cgroup_ops
*ops
, int timeout
)
2381 if (!ops
->hierarchies
)
2382 return ret_set_errno(-1, ENOENT
);
2384 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
2385 return cg_legacy_unfreeze(ops
);
2387 return cg_unified_unfreeze(ops
, timeout
);
2390 static const char *cgfsng_get_cgroup_do(struct cgroup_ops
*ops
,
2391 const char *controller
, bool limiting
)
2393 struct hierarchy
*h
;
2395 h
= get_hierarchy(ops
, controller
);
2397 return log_warn_errno(NULL
, ENOENT
, "Failed to find hierarchy for controller \"%s\"",
2398 controller
? controller
: "(null)");
2401 return h
->container_limit_path
2402 ? h
->container_limit_path
+ strlen(h
->mountpoint
)
2405 return h
->container_full_path
2406 ? h
->container_full_path
+ strlen(h
->mountpoint
)
2410 __cgfsng_ops
static const char *cgfsng_get_cgroup(struct cgroup_ops
*ops
,
2411 const char *controller
)
2413 return cgfsng_get_cgroup_do(ops
, controller
, false);
2416 __cgfsng_ops
static const char *cgfsng_get_limiting_cgroup(struct cgroup_ops
*ops
,
2417 const char *controller
)
2419 return cgfsng_get_cgroup_do(ops
, controller
, true);
2422 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2423 * which must be freed by the caller.
2425 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy
*h
,
2427 const char *filename
)
2429 return must_make_path(h
->mountpoint
, inpath
, filename
, NULL
);
2432 static int cgroup_attach_leaf(const struct lxc_conf
*conf
, int unified_fd
, pid_t pid
)
2436 char pidstr
[INTTYPE_TO_STRLEN(int64_t) + 1];
2439 /* Create leaf cgroup. */
2440 ret
= mkdirat(unified_fd
, ".lxc", 0755);
2441 if (ret
< 0 && errno
!= EEXIST
)
2442 return log_error_errno(-errno
, errno
, "Failed to create leaf cgroup \".lxc\"");
2444 pidstr_len
= strnprintf(pidstr
, sizeof(pidstr
), INT64_FMT
, (int64_t)pid
);
2448 ret
= lxc_writeat(unified_fd
, ".lxc/cgroup.procs", pidstr
, pidstr_len
);
2450 ret
= lxc_writeat(unified_fd
, "cgroup.procs", pidstr
, pidstr_len
);
2452 return log_trace(0, "Moved process %s into cgroup %d(.lxc)", pidstr
, unified_fd
);
2454 /* this is a non-leaf node */
2456 return log_error_errno(-errno
, errno
, "Failed to attach to unified cgroup");
2460 char attach_cgroup
[STRLITERALLEN(".lxc-/cgroup.procs") + INTTYPE_TO_STRLEN(int) + 1];
2461 char *slash
= attach_cgroup
;
2463 ret
= strnprintf(attach_cgroup
, sizeof(attach_cgroup
), ".lxc-%d/cgroup.procs", idx
);
2468 * This shouldn't really happen but the compiler might complain
2469 * that a short write would cause a buffer overrun. So be on
2472 if (ret
< STRLITERALLEN(".lxc-/cgroup.procs"))
2473 return log_error_errno(-EINVAL
, EINVAL
, "Unexpected short write would cause buffer-overrun");
2475 slash
+= (ret
- STRLITERALLEN("/cgroup.procs"));
2478 ret
= mkdirat(unified_fd
, attach_cgroup
, 0755);
2479 if (ret
< 0 && errno
!= EEXIST
)
2480 return log_error_errno(-1, errno
, "Failed to create cgroup %s", attach_cgroup
);
2486 ret
= lxc_writeat(unified_fd
, attach_cgroup
, pidstr
, pidstr_len
);
2488 return log_trace(0, "Moved process %s into cgroup %d(%s)", pidstr
, unified_fd
, attach_cgroup
);
2490 if (rm
&& unlinkat(unified_fd
, attach_cgroup
, AT_REMOVEDIR
))
2491 SYSERROR("Failed to remove cgroup \"%d(%s)\"", unified_fd
, attach_cgroup
);
2493 /* this is a non-leaf node */
2495 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2498 } while (idx
< 1000);
2500 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2503 static int cgroup_attach_create_leaf(const struct lxc_conf
*conf
,
2504 int unified_fd
, int *sk_fd
)
2506 __do_close
int sk
= *sk_fd
, target_fd0
= -EBADF
, target_fd1
= -EBADF
;
2510 /* Create leaf cgroup. */
2511 ret
= mkdirat(unified_fd
, ".lxc", 0755);
2512 if (ret
< 0 && errno
!= EEXIST
)
2513 return log_error_errno(-1, errno
, "Failed to create leaf cgroup \".lxc\"");
2515 target_fd0
= open_at(unified_fd
, ".lxc/cgroup.procs", PROTECT_OPEN_W
, PROTECT_LOOKUP_BENEATH
, 0);
2517 return log_error_errno(-errno
, errno
, "Failed to open \".lxc/cgroup.procs\"");
2518 target_fds
[0] = target_fd0
;
2520 target_fd1
= open_at(unified_fd
, "cgroup.procs", PROTECT_OPEN_W
, PROTECT_LOOKUP_BENEATH
, 0);
2522 return log_error_errno(-errno
, errno
, "Failed to open \".lxc/cgroup.procs\"");
2523 target_fds
[1] = target_fd1
;
2525 ret
= lxc_abstract_unix_send_fds(sk
, target_fds
, 2, NULL
, 0);
2527 return log_error_errno(-errno
, errno
, "Failed to send \".lxc/cgroup.procs\" fds %d and %d",
2528 target_fd0
, target_fd1
);
2530 return log_debug(0, "Sent target cgroup fds %d and %d", target_fd0
, target_fd1
);
2533 static int cgroup_attach_move_into_leaf(const struct lxc_conf
*conf
,
2534 int *sk_fd
, pid_t pid
)
2536 __do_close
int sk
= *sk_fd
, target_fd0
= -EBADF
, target_fd1
= -EBADF
;
2538 char pidstr
[INTTYPE_TO_STRLEN(int64_t) + 1];
2542 ret
= lxc_abstract_unix_recv_fds(sk
, target_fds
, 2, NULL
, 0);
2544 return log_error_errno(-1, errno
, "Failed to receive target cgroup fd");
2545 target_fd0
= target_fds
[0];
2546 target_fd1
= target_fds
[1];
2548 pidstr_len
= sprintf(pidstr
, INT64_FMT
, (int64_t)pid
);
2550 ret
= lxc_write_nointr(target_fd0
, pidstr
, pidstr_len
);
2551 if (ret
> 0 && ret
== pidstr_len
)
2552 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd0
);
2554 ret
= lxc_write_nointr(target_fd1
, pidstr
, pidstr_len
);
2555 if (ret
> 0 && ret
== pidstr_len
)
2556 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd1
);
2558 return log_debug_errno(-1, errno
, "Failed to move process into target cgroup via fd %d and %d",
2559 target_fd0
, target_fd1
);
/* State shared between the parent and child wrappers run through
 * userns_exec_minimal() when attaching a pid to the unified cgroup.
 * NOTE(review): the field list after @conf was lost in extraction and is
 * reconstructed from the visible designated initializers and accesses
 * (.unified_fd, .sk_pair[0/1], .pid) — confirm against upstream.
 */
struct userns_exec_unified_attach_data {
	const struct lxc_conf *conf;	/* container configuration */
	int unified_fd;			/* fd of the unified cgroup directory */
	int sk_pair[2];			/* socketpair used to pass target fds */
	pid_t pid;			/* process to attach */
};
2569 static int cgroup_unified_attach_child_wrapper(void *data
)
2571 struct userns_exec_unified_attach_data
*args
= data
;
2573 if (!args
->conf
|| args
->unified_fd
< 0 || args
->pid
<= 0 ||
2574 args
->sk_pair
[0] < 0 || args
->sk_pair
[1] < 0)
2575 return ret_errno(EINVAL
);
2577 close_prot_errno_disarm(args
->sk_pair
[0]);
2578 return cgroup_attach_create_leaf(args
->conf
, args
->unified_fd
,
2582 static int cgroup_unified_attach_parent_wrapper(void *data
)
2584 struct userns_exec_unified_attach_data
*args
= data
;
2586 if (!args
->conf
|| args
->unified_fd
< 0 || args
->pid
<= 0 ||
2587 args
->sk_pair
[0] < 0 || args
->sk_pair
[1] < 0)
2588 return ret_errno(EINVAL
);
2590 close_prot_errno_disarm(args
->sk_pair
[1]);
2591 return cgroup_attach_move_into_leaf(args
->conf
, &args
->sk_pair
[0],
2595 /* Technically, we're always at a delegation boundary here (This is especially
2596 * true when cgroup namespaces are available.). The reasoning is that in order
2597 * for us to have been able to start a container in the first place the root
2598 * cgroup must have been a leaf node. Now, either the container's init system
2599 * has populated the cgroup and kept it as a leaf node or it has created
2600 * subtrees. In the former case we will simply attach to the leaf node we
2601 * created when we started the container in the latter case we create our own
2602 * cgroup for the attaching process.
2604 static int __cg_unified_attach(const struct hierarchy
*h
,
2605 const struct lxc_conf
*conf
, const char *name
,
2606 const char *lxcpath
, pid_t pid
,
2607 const char *controller
)
2609 __do_close
int unified_fd
= -EBADF
;
2610 __do_free
char *path
= NULL
, *cgroup
= NULL
;
2613 if (!conf
|| !name
|| !lxcpath
|| pid
<= 0)
2614 return ret_errno(EINVAL
);
2616 ret
= cgroup_attach(conf
, name
, lxcpath
, pid
);
2618 return log_trace(0, "Attached to unified cgroup via command handler");
2619 if (ret
!= -ENOCGROUP2
)
2620 return log_error_errno(ret
, errno
, "Failed to attach to unified cgroup");
2622 /* Fall back to retrieving the path for the unified cgroup. */
2623 cgroup
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2628 path
= must_make_path(h
->mountpoint
, cgroup
, NULL
);
2630 unified_fd
= open(path
, O_PATH
| O_DIRECTORY
| O_CLOEXEC
);
2632 return ret_errno(EBADF
);
2634 if (!lxc_list_empty(&conf
->id_map
)) {
2635 struct userns_exec_unified_attach_data args
= {
2637 .unified_fd
= unified_fd
,
2641 ret
= socketpair(PF_LOCAL
, SOCK_STREAM
| SOCK_CLOEXEC
, 0, args
.sk_pair
);
2645 ret
= userns_exec_minimal(conf
,
2646 cgroup_unified_attach_parent_wrapper
,
2648 cgroup_unified_attach_child_wrapper
,
2651 ret
= cgroup_attach_leaf(conf
, unified_fd
, pid
);
2657 __cgfsng_ops
static bool cgfsng_attach(struct cgroup_ops
*ops
,
2658 const struct lxc_conf
*conf
,
2659 const char *name
, const char *lxcpath
,
2663 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
2666 return ret_set_errno(false, ENOENT
);
2668 if (!ops
->hierarchies
)
2671 len
= strnprintf(pidstr
, sizeof(pidstr
), "%d", pid
);
2675 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
2676 __do_free
char *fullpath
= NULL
, *path
= NULL
;
2677 struct hierarchy
*h
= ops
->hierarchies
[i
];
2679 if (h
->version
== CGROUP2_SUPER_MAGIC
) {
2680 ret
= __cg_unified_attach(h
, conf
, name
, lxcpath
, pid
,
2688 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, h
->controllers
[0]);
2693 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, "cgroup.procs");
2694 ret
= lxc_write_to_file(fullpath
, pidstr
, len
, false, 0666);
2696 return log_error_errno(false, errno
, "Failed to attach %d to %s",
2697 (int)pid
, fullpath
);
2703 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2704 * don't have a cgroup_data set up, so we ask the running container through the
2705 * commands API for the cgroup path.
2707 __cgfsng_ops
static int cgfsng_get(struct cgroup_ops
*ops
, const char *filename
,
2708 char *value
, size_t len
, const char *name
,
2709 const char *lxcpath
)
2711 __do_free
char *path
= NULL
;
2712 __do_free
char *controller
= NULL
;
2714 struct hierarchy
*h
;
2718 return ret_set_errno(-1, ENOENT
);
2720 controller
= must_copy_string(filename
);
2721 p
= strchr(controller
, '.');
2725 path
= lxc_cmd_get_limiting_cgroup_path(name
, lxcpath
, controller
);
2730 h
= get_hierarchy(ops
, controller
);
2732 __do_free
char *fullpath
= NULL
;
2734 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, filename
);
2735 ret
= lxc_read_from_file(fullpath
, value
, len
);
2741 static int device_cgroup_parse_access(struct device_item
*device
, const char *val
)
2743 for (int count
= 0; count
< 3; count
++, val
++) {
2746 device
->access
[count
] = *val
;
2749 device
->access
[count
] = *val
;
2752 device
->access
[count
] = *val
;
2759 return ret_errno(EINVAL
);
2766 static int device_cgroup_rule_parse(struct device_item
*device
, const char *key
,
2772 if (strequal("devices.allow", key
))
2773 device
->allow
= 1; /* allow the device */
2775 device
->allow
= 0; /* deny the device */
2777 if (strequal(val
, "a")) {
2783 if (device
->allow
) /* allow all devices */
2784 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_DENYLIST
;
2785 else /* deny all devices */
2786 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_ALLOWLIST
;
2793 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
;
2801 device
->type
= *val
;
2814 } else if (isdigit(*val
)) {
2815 memset(temp
, 0, sizeof(temp
));
2816 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2822 ret
= lxc_safe_int(temp
, &device
->major
);
2836 } else if (isdigit(*val
)) {
2837 memset(temp
, 0, sizeof(temp
));
2838 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2844 ret
= lxc_safe_int(temp
, &device
->minor
);
2853 return device_cgroup_parse_access(device
, ++val
);
2856 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2857 * don't have a cgroup_data set up, so we ask the running container through the
2858 * commands API for the cgroup path.
2860 __cgfsng_ops
static int cgfsng_set(struct cgroup_ops
*ops
,
2861 const char *key
, const char *value
,
2862 const char *name
, const char *lxcpath
)
2864 __do_free
char *path
= NULL
;
2865 __do_free
char *controller
= NULL
;
2867 struct hierarchy
*h
;
2870 if (!ops
|| is_empty_string(key
) || is_empty_string(value
) ||
2871 is_empty_string(name
) || is_empty_string(lxcpath
))
2872 return ret_errno(EINVAL
);
2874 controller
= must_copy_string(key
);
2875 p
= strchr(controller
, '.');
2879 if (pure_unified_layout(ops
) && strequal(controller
, "devices")) {
2880 struct device_item device
= {};
2882 ret
= device_cgroup_rule_parse(&device
, key
, value
);
2884 return log_error_errno(-1, EINVAL
, "Failed to parse device string %s=%s",
2887 ret
= lxc_cmd_add_bpf_device_cgroup(name
, lxcpath
, &device
);
2894 path
= lxc_cmd_get_limiting_cgroup_path(name
, lxcpath
, controller
);
2899 h
= get_hierarchy(ops
, controller
);
2901 __do_free
char *fullpath
= NULL
;
2903 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, key
);
2904 ret
= lxc_write_to_file(fullpath
, value
, strlen(value
), false, 0666);
2910 /* take devices cgroup line
2912 * and convert it to a valid
2913 * type major:minor mode
2914 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2917 static int device_cgroup_rule_parse_devpath(struct device_item
*device
,
2918 const char *devpath
)
2920 __do_free
char *path
= NULL
;
2926 path
= must_copy_string(devpath
);
2929 * Read path followed by mode. Ignore any trailing text.
2930 * A ' # comment' would be legal. Technically other text is not
2931 * legal, we could check for that if we cared to.
2933 for (n_parts
= 1, p
= path
; *p
; p
++) {
2949 return ret_set_errno(-1, EINVAL
);
2953 return ret_errno(EINVAL
);
2955 if (device_cgroup_parse_access(device
, mode
) < 0)
2958 ret
= stat(path
, &sb
);
2960 return ret_set_errno(-1, errno
);
2962 mode_t m
= sb
.st_mode
& S_IFMT
;
2971 return log_error_errno(-1, EINVAL
, "Unsupported device type %i for \"%s\"", m
, path
);
2974 device
->major
= MAJOR(sb
.st_rdev
);
2975 device
->minor
= MINOR(sb
.st_rdev
);
2977 device
->global_rule
= LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
;
2982 static int convert_devpath(const char *invalue
, char *dest
)
2984 struct device_item device
= {};
2987 ret
= device_cgroup_rule_parse_devpath(&device
, invalue
);
2991 ret
= strnprintf(dest
, 50, "%c %d:%d %s", device
.type
, device
.major
,
2992 device
.minor
, device
.access
);
2994 return log_error_errno(ret
, -ret
,
2995 "Error on configuration value \"%c %d:%d %s\" (max 50 chars)",
2996 device
.type
, device
.major
, device
.minor
,
3002 /* Called from setup_limits - here we have the container's cgroup_data because
3003 * we created the cgroups.
3005 static int cg_legacy_set_data(struct cgroup_ops
*ops
, const char *filename
,
3006 const char *value
, bool is_cpuset
)
3008 __do_free
char *controller
= NULL
;
3010 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
3011 char converted_value
[50];
3012 struct hierarchy
*h
;
3014 controller
= must_copy_string(filename
);
3015 p
= strchr(controller
, '.');
3019 if (strequal("devices.allow", filename
) && value
[0] == '/') {
3022 ret
= convert_devpath(value
, converted_value
);
3025 value
= converted_value
;
3028 h
= get_hierarchy(ops
, controller
);
3030 return log_error_errno(-ENOENT
, ENOENT
, "Failed to setup limits for the \"%s\" controller. The controller seems to be unused by \"cgfsng\" cgroup driver or not enabled on the cgroup hierarchy", controller
);
3033 int ret
= lxc_write_openat(h
->container_full_path
, filename
, value
, strlen(value
));
3037 return lxc_write_openat(h
->container_limit_path
, filename
, value
, strlen(value
));
3040 __cgfsng_ops
static bool cgfsng_setup_limits_legacy(struct cgroup_ops
*ops
,
3041 struct lxc_conf
*conf
,
3044 __do_free
struct lxc_list
*sorted_cgroup_settings
= NULL
;
3045 struct lxc_list
*cgroup_settings
= &conf
->cgroup
;
3046 struct lxc_list
*iterator
, *next
;
3047 struct lxc_cgroup
*cg
;
3051 return ret_set_errno(false, ENOENT
);
3054 return ret_set_errno(false, EINVAL
);
3056 cgroup_settings
= &conf
->cgroup
;
3057 if (lxc_list_empty(cgroup_settings
))
3060 if (!ops
->hierarchies
)
3061 return ret_set_errno(false, EINVAL
);
3063 if (pure_unified_layout(ops
))
3064 return log_warn_errno(true, EINVAL
, "Ignoring legacy cgroup limits on pure cgroup2 system");
3066 sorted_cgroup_settings
= sort_cgroup_settings(cgroup_settings
);
3067 if (!sorted_cgroup_settings
)
3070 lxc_list_for_each(iterator
, sorted_cgroup_settings
) {
3071 cg
= iterator
->elem
;
3073 if (do_devices
== strnequal("devices", cg
->subsystem
, 7)) {
3074 if (cg_legacy_set_data(ops
, cg
->subsystem
, cg
->value
, strnequal("cpuset", cg
->subsystem
, 6))) {
3075 if (do_devices
&& (errno
== EACCES
|| errno
== EPERM
)) {
3076 SYSWARN("Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
3079 SYSERROR("Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
3082 DEBUG("Set controller \"%s\" set to \"%s\"", cg
->subsystem
, cg
->value
);
3087 INFO("Limits for the legacy cgroup hierarchies have been setup");
3089 lxc_list_for_each_safe(iterator
, sorted_cgroup_settings
, next
) {
3090 lxc_list_del(iterator
);
3098 * Some of the parsing logic comes from the original cgroup device v1
3099 * implementation in the kernel.
3101 static int bpf_device_cgroup_prepare(struct cgroup_ops
*ops
,
3102 struct lxc_conf
*conf
, const char *key
,
3105 struct device_item device_item
= {};
3108 if (strequal("devices.allow", key
) && *val
== '/')
3109 ret
= device_cgroup_rule_parse_devpath(&device_item
, val
);
3111 ret
= device_cgroup_rule_parse(&device_item
, key
, val
);
3113 return log_error_errno(-1, EINVAL
, "Failed to parse device string %s=%s", key
, val
);
3115 ret
= bpf_list_add_device(&conf
->devices
, &device_item
);
3121 __cgfsng_ops
static bool cgfsng_setup_limits(struct cgroup_ops
*ops
,
3122 struct lxc_handler
*handler
)
3124 struct lxc_list
*cgroup_settings
, *iterator
;
3125 struct hierarchy
*h
;
3126 struct lxc_conf
*conf
;
3129 return ret_set_errno(false, ENOENT
);
3131 if (!ops
->hierarchies
)
3134 if (!ops
->container_cgroup
)
3135 return ret_set_errno(false, EINVAL
);
3137 if (!handler
|| !handler
->conf
)
3138 return ret_set_errno(false, EINVAL
);
3139 conf
= handler
->conf
;
3141 cgroup_settings
= &conf
->cgroup2
;
3142 if (lxc_list_empty(cgroup_settings
))
3145 if (!pure_unified_layout(ops
))
3146 return log_warn_errno(true, EINVAL
, "Ignoring cgroup2 limits on legacy cgroup system");
3152 lxc_list_for_each (iterator
, cgroup_settings
) {
3153 struct lxc_cgroup
*cg
= iterator
->elem
;
3156 if (strnequal("devices", cg
->subsystem
, 7))
3157 ret
= bpf_device_cgroup_prepare(ops
, conf
, cg
->subsystem
, cg
->value
);
3159 ret
= lxc_write_openat(h
->container_limit_path
, cg
->subsystem
, cg
->value
, strlen(cg
->value
));
3161 return log_error_errno(false, errno
, "Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
3163 TRACE("Set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
3166 return log_info(true, "Limits for the unified cgroup hierarchy have been setup");
3169 __cgfsng_ops
static bool cgfsng_devices_activate(struct cgroup_ops
*ops
, struct lxc_handler
*handler
)
3171 struct lxc_conf
*conf
;
3172 struct hierarchy
*unified
;
3175 return ret_set_errno(false, ENOENT
);
3177 if (!ops
->hierarchies
)
3180 if (!ops
->container_cgroup
)
3181 return ret_set_errno(false, EEXIST
);
3183 if (!handler
|| !handler
->conf
)
3184 return ret_set_errno(false, EINVAL
);
3185 conf
= handler
->conf
;
3187 unified
= ops
->unified
;
3188 if (!unified
|| !unified
->bpf_device_controller
||
3189 !unified
->container_full_path
|| lxc_list_empty(&conf
->devices
))
3192 return bpf_cgroup_devices_attach(ops
, &conf
->devices
);
3195 static bool __cgfsng_delegate_controllers(struct cgroup_ops
*ops
, const char *cgroup
)
3197 __do_close
int dfd_final
= -EBADF
;
3198 __do_free
char *add_controllers
= NULL
, *copy
= NULL
;
3199 size_t full_len
= 0;
3200 struct hierarchy
*unified
;
3205 if (!ops
->hierarchies
|| !pure_unified_layout(ops
))
3208 unified
= ops
->unified
;
3209 if (!unified
->controllers
[0])
3212 /* For now we simply enable all controllers that we have detected by
3213 * creating a string like "+memory +pids +cpu +io".
3214 * TODO: In the near future we might want to support "-<controller>"
3215 * etc. but whether supporting semantics like this make sense will need
3218 for (it
= unified
->controllers
; it
&& *it
; it
++) {
3219 full_len
+= strlen(*it
) + 2;
3220 add_controllers
= must_realloc(add_controllers
, full_len
+ 1);
3222 if (unified
->controllers
[0] == *it
)
3223 add_controllers
[0] = '\0';
3225 (void)strlcat(add_controllers
, "+", full_len
+ 1);
3226 (void)strlcat(add_controllers
, *it
, full_len
+ 1);
3228 if ((it
+ 1) && *(it
+ 1))
3229 (void)strlcat(add_controllers
, " ", full_len
+ 1);
3232 copy
= strdup(cgroup
);
3237 * Placing the write to cgroup.subtree_control before the open() is
3238 * intentional because of the cgroup2 delegation model. It enforces
3239 * that leaf cgroups don't have any controllers enabled for delegation.
3241 dfd_cur
= unified
->dfd_base
;
3242 lxc_iterate_parts(cur
, copy
, "/") {
3244 * Even though we vetted the paths when we parsed the config
3245 * we're paranoid here and check that the path is neither
3246 * absolute nor walks upwards.
3249 return syserrno_set(-EINVAL
, "No absolute paths allowed");
3251 if (strnequal(cur
, "..", STRLITERALLEN("..")))
3252 return syserrno_set(-EINVAL
, "No upward walking paths allowed");
3254 ret
= lxc_writeat(dfd_cur
, "cgroup.subtree_control", add_controllers
, full_len
);
3256 return syserrno(-errno
, "Could not enable \"%s\" controllers in the unified cgroup %d", add_controllers
, dfd_cur
);
3258 TRACE("Enabled \"%s\" controllers in the unified cgroup %d", add_controllers
, dfd_cur
);
3260 dfd_final
= open_at(dfd_cur
, cur
, PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH
, 0);
3262 return syserrno(-errno
, "Fail to open directory %d(%s)", dfd_cur
, cur
);
3263 if (dfd_cur
!= unified
->dfd_base
)
3266 * Leave dfd_final pointing to the last fd we opened so
3267 * it will be automatically zapped if we return early.
3269 dfd_cur
= dfd_final
;
3275 __cgfsng_ops
static bool cgfsng_monitor_delegate_controllers(struct cgroup_ops
*ops
)
3278 return ret_set_errno(false, ENOENT
);
3280 return __cgfsng_delegate_controllers(ops
, ops
->monitor_cgroup
);
3283 __cgfsng_ops
static bool cgfsng_payload_delegate_controllers(struct cgroup_ops
*ops
)
3286 return ret_set_errno(false, ENOENT
);
3288 return __cgfsng_delegate_controllers(ops
, ops
->container_cgroup
);
3291 static void cg_unified_delegate(char ***delegate
)
3293 __do_free
char *buf
= NULL
;
3294 char *standard
[] = {"cgroup.subtree_control", "cgroup.threads", NULL
};
3298 buf
= read_file_at(-EBADF
, "/sys/kernel/cgroup/delegate", PROTECT_OPEN
, 0);
3300 for (char **p
= standard
; p
&& *p
; p
++) {
3301 idx
= append_null_to_list((void ***)delegate
);
3302 (*delegate
)[idx
] = must_copy_string(*p
);
3304 SYSWARN("Failed to read /sys/kernel/cgroup/delegate");
3308 lxc_iterate_parts(token
, buf
, " \t\n") {
3310 * We always need to chown this for both cgroup and
3313 if (strequal(token
, "cgroup.procs"))
3316 idx
= append_null_to_list((void ***)delegate
);
3317 (*delegate
)[idx
] = must_copy_string(token
);
3321 /* At startup, parse_hierarchies finds all the info we need about cgroup
3322 * mountpoints and current cgroups, and stores it in @d.
3324 static int cg_hybrid_init(struct cgroup_ops
*ops
, bool relative
, bool unprivileged
)
3326 __do_free
char *basecginfo
= NULL
, *line
= NULL
;
3327 __do_free_string_list
char **klist
= NULL
, **nlist
= NULL
;
3328 __do_fclose
FILE *f
= NULL
;
3332 /* Root spawned containers escape the current cgroup, so use init's
3333 * cgroups as our base in that case.
3335 if (!relative
&& (geteuid() == 0))
3336 basecginfo
= read_file_at(-EBADF
, "/proc/1/cgroup", PROTECT_OPEN
, 0);
3338 basecginfo
= read_file_at(-EBADF
, "/proc/self/cgroup", PROTECT_OPEN
, 0);
3340 return ret_set_errno(-1, ENOMEM
);
3342 ret
= get_existing_subsystems(&klist
, &nlist
);
3344 return log_error_errno(-1, errno
, "Failed to retrieve available legacy cgroup controllers");
3346 f
= fopen("/proc/self/mountinfo", "re");
3348 return log_error_errno(-1, errno
, "Failed to open \"/proc/self/mountinfo\"");
3350 lxc_cgfsng_print_basecg_debuginfo(basecginfo
, klist
, nlist
);
3352 while (getline(&line
, &len
, f
) != -1) {
3353 __do_free
char *base_cgroup
= NULL
, *mountpoint
= NULL
;
3354 __do_free_string_list
char **controller_list
= NULL
;
3358 type
= get_cgroup_version(line
);
3362 if (type
== CGROUP2_SUPER_MAGIC
&& ops
->unified
)
3365 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNKNOWN
) {
3366 if (type
== CGROUP2_SUPER_MAGIC
)
3367 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
3368 else if (type
== CGROUP_SUPER_MAGIC
)
3369 ops
->cgroup_layout
= CGROUP_LAYOUT_LEGACY
;
3370 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
) {
3371 if (type
== CGROUP_SUPER_MAGIC
)
3372 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
3373 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_LEGACY
) {
3374 if (type
== CGROUP2_SUPER_MAGIC
)
3375 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
3378 controller_list
= cg_hybrid_get_controllers(klist
, nlist
, line
, type
);
3379 if (!controller_list
&& type
== CGROUP_SUPER_MAGIC
)
3382 if (type
== CGROUP_SUPER_MAGIC
)
3383 if (controller_list_is_dup(ops
->hierarchies
, controller_list
)) {
3384 TRACE("Skipping duplicating controller");
3388 mountpoint
= cg_hybrid_get_mountpoint(line
);
3390 WARN("Failed parsing mountpoint from \"%s\"", line
);
3394 if (type
== CGROUP_SUPER_MAGIC
)
3395 base_cgroup
= cg_hybrid_get_current_cgroup(relative
, basecginfo
, controller_list
[0], CGROUP_SUPER_MAGIC
);
3397 base_cgroup
= cg_hybrid_get_current_cgroup(relative
, basecginfo
, NULL
, CGROUP2_SUPER_MAGIC
);
3399 WARN("Failed to find current cgroup");
3403 if (type
== CGROUP2_SUPER_MAGIC
)
3404 writeable
= test_writeable_v2(mountpoint
, base_cgroup
);
3406 writeable
= test_writeable_v1(mountpoint
, base_cgroup
);
3408 TRACE("The %s group is not writeable", base_cgroup
);
3412 if (type
== CGROUP2_SUPER_MAGIC
)
3413 ret
= add_hierarchy(ops
, NULL
, move_ptr(mountpoint
), move_ptr(base_cgroup
), type
);
3415 ret
= add_hierarchy(ops
, move_ptr(controller_list
), move_ptr(mountpoint
), move_ptr(base_cgroup
), type
);
3417 return syserrno(ret
, "Failed to add cgroup hierarchy");
3418 if (ops
->unified
&& unprivileged
)
3419 cg_unified_delegate(&(ops
->unified
)->cgroup2_chown
);
3422 /* verify that all controllers in cgroup.use and all crucial
3423 * controllers are accounted for
3425 if (!all_controllers_found(ops
))
3426 return log_error_errno(-1, ENOENT
, "Failed to find all required controllers");
3431 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
3432 static char *cg_unified_get_current_cgroup(bool relative
)
3434 __do_free
char *basecginfo
= NULL
, *copy
= NULL
;
3437 if (!relative
&& (geteuid() == 0))
3438 basecginfo
= read_file_at(-EBADF
, "/proc/1/cgroup", PROTECT_OPEN
, 0);
3440 basecginfo
= read_file_at(-EBADF
, "/proc/self/cgroup", PROTECT_OPEN
, 0);
3444 base_cgroup
= strstr(basecginfo
, "0::/");
3448 base_cgroup
= base_cgroup
+ 3;
3449 copy
= copy_to_eol(base_cgroup
);
3455 base_cgroup
= prune_init_scope(copy
);
3462 if (abspath(base_cgroup
))
3463 base_cgroup
= deabs(base_cgroup
);
3465 /* We're allowing base_cgroup to be "". */
3466 return strdup(base_cgroup
);
3469 static int cg_unified_init(struct cgroup_ops
*ops
, bool relative
,
3472 __do_free
char *base_cgroup
= NULL
;
3475 base_cgroup
= cg_unified_get_current_cgroup(relative
);
3477 return ret_errno(EINVAL
);
3479 /* TODO: If the user requested specific controllers via lxc.cgroup.use
3480 * we should verify here. The reason I'm not doing it right is that I'm
3481 * not convinced that lxc.cgroup.use will be the future since it is a
3482 * global property. I much rather have an option that lets you request
3483 * controllers per container.
3486 ret
= add_hierarchy(ops
, NULL
,
3487 must_copy_string(DEFAULT_CGROUP_MOUNTPOINT
),
3488 move_ptr(base_cgroup
), CGROUP2_SUPER_MAGIC
);
3490 return syserrno(ret
, "Failed to add unified cgroup hierarchy");
3493 cg_unified_delegate(&(ops
->unified
)->cgroup2_chown
);
3495 if (bpf_devices_cgroup_supported())
3496 ops
->unified
->bpf_device_controller
= 1;
3498 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
3499 return CGROUP2_SUPER_MAGIC
;
3502 static int __cgroup_init(struct cgroup_ops
*ops
, struct lxc_conf
*conf
)
3504 __do_close
int dfd
= -EBADF
;
3505 bool relative
= conf
->cgroup_meta
.relative
;
3509 if (ops
->dfd_mnt_cgroupfs_host
>= 0)
3510 return ret_errno(EINVAL
);
3513 * I don't see the need for allowing symlinks here. If users want to
3514 * have their hierarchy available in different locations I strongly
3515 * suggest bind-mounts.
3517 dfd
= open_at(-EBADF
, DEFAULT_CGROUP_MOUNTPOINT
,
3518 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_ABSOLUTE_XDEV
, 0);
3520 return syserrno(-errno
, "Failed to open " DEFAULT_CGROUP_MOUNTPOINT
);
3522 tmp
= lxc_global_config_value("lxc.cgroup.use");
3524 __do_free
char *pin
= NULL
;
3527 pin
= must_copy_string(tmp
);
3530 lxc_iterate_parts(cur
, chop
, ",")
3531 must_append_string(&ops
->cgroup_use
, cur
);
3535 * Keep dfd referenced by the cleanup function and actually move the fd
3536 * once we know the initialization succeeded. So if we fail we clean up
3539 ops
->dfd_mnt_cgroupfs_host
= dfd
;
3541 if (unified_cgroup_fd(dfd
))
3542 ret
= cg_unified_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
3544 ret
= cg_hybrid_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
3546 return syserrno(ret
, "Failed to initialize cgroups");
3548 /* Transfer ownership to cgroup_ops. */
3553 __cgfsng_ops
static int cgfsng_data_init(struct cgroup_ops
*ops
)
3555 const char *cgroup_pattern
;
3558 return ret_set_errno(-1, ENOENT
);
3560 /* copy system-wide cgroup information */
3561 cgroup_pattern
= lxc_global_config_value("lxc.cgroup.pattern");
3562 if (cgroup_pattern
&& !strequal(cgroup_pattern
, ""))
3563 ops
->cgroup_pattern
= must_copy_string(cgroup_pattern
);
3568 struct cgroup_ops
*cgfsng_ops_init(struct lxc_conf
*conf
)
3570 __do_free
struct cgroup_ops
*cgfsng_ops
= NULL
;
3572 cgfsng_ops
= zalloc(sizeof(struct cgroup_ops
));
3574 return ret_set_errno(NULL
, ENOMEM
);
3576 cgfsng_ops
->cgroup_layout
= CGROUP_LAYOUT_UNKNOWN
;
3577 cgfsng_ops
->dfd_mnt_cgroupfs_host
= -EBADF
;
3579 if (__cgroup_init(cgfsng_ops
, conf
))
3582 cgfsng_ops
->data_init
= cgfsng_data_init
;
3583 cgfsng_ops
->payload_destroy
= cgfsng_payload_destroy
;
3584 cgfsng_ops
->monitor_destroy
= cgfsng_monitor_destroy
;
3585 cgfsng_ops
->monitor_create
= cgfsng_monitor_create
;
3586 cgfsng_ops
->monitor_enter
= cgfsng_monitor_enter
;
3587 cgfsng_ops
->monitor_delegate_controllers
= cgfsng_monitor_delegate_controllers
;
3588 cgfsng_ops
->payload_delegate_controllers
= cgfsng_payload_delegate_controllers
;
3589 cgfsng_ops
->payload_create
= cgfsng_payload_create
;
3590 cgfsng_ops
->payload_enter
= cgfsng_payload_enter
;
3591 cgfsng_ops
->payload_finalize
= cgfsng_payload_finalize
;
3592 cgfsng_ops
->get_cgroup
= cgfsng_get_cgroup
;
3593 cgfsng_ops
->get
= cgfsng_get
;
3594 cgfsng_ops
->set
= cgfsng_set
;
3595 cgfsng_ops
->freeze
= cgfsng_freeze
;
3596 cgfsng_ops
->unfreeze
= cgfsng_unfreeze
;
3597 cgfsng_ops
->setup_limits_legacy
= cgfsng_setup_limits_legacy
;
3598 cgfsng_ops
->setup_limits
= cgfsng_setup_limits
;
3599 cgfsng_ops
->driver
= "cgfsng";
3600 cgfsng_ops
->version
= "1.0.0";
3601 cgfsng_ops
->attach
= cgfsng_attach
;
3602 cgfsng_ops
->chown
= cgfsng_chown
;
3603 cgfsng_ops
->mount
= cgfsng_mount
;
3604 cgfsng_ops
->devices_activate
= cgfsng_devices_activate
;
3605 cgfsng_ops
->get_limiting_cgroup
= cgfsng_get_limiting_cgroup
;
3607 cgfsng_ops
->criu_escape
= cgfsng_criu_escape
;
3608 cgfsng_ops
->criu_num_hierarchies
= cgfsng_criu_num_hierarchies
;
3609 cgfsng_ops
->criu_get_hierarchies
= cgfsng_criu_get_hierarchies
;
3611 return move_ptr(cgfsng_ops
);
3614 int cgroup_attach(const struct lxc_conf
*conf
, const char *name
,
3615 const char *lxcpath
, pid_t pid
)
3617 __do_close
int unified_fd
= -EBADF
;
3620 if (!conf
|| is_empty_string(name
) || is_empty_string(lxcpath
) || pid
<= 0)
3621 return ret_errno(EINVAL
);
3623 unified_fd
= lxc_cmd_get_cgroup2_fd(name
, lxcpath
);
3625 return ret_errno(ENOCGROUP2
);
3627 if (!lxc_list_empty(&conf
->id_map
)) {
3628 struct userns_exec_unified_attach_data args
= {
3630 .unified_fd
= unified_fd
,
3634 ret
= socketpair(PF_LOCAL
, SOCK_STREAM
| SOCK_CLOEXEC
, 0, args
.sk_pair
);
3638 ret
= userns_exec_minimal(conf
,
3639 cgroup_unified_attach_parent_wrapper
,
3641 cgroup_unified_attach_child_wrapper
,
3644 ret
= cgroup_attach_leaf(conf
, unified_fd
, pid
);
3650 /* Connects to command socket therefore isn't callable from command handler. */
3651 int cgroup_get(const char *name
, const char *lxcpath
,
3652 const char *filename
, char *buf
, size_t len
)
3654 __do_close
int unified_fd
= -EBADF
;
3657 if (is_empty_string(filename
) || is_empty_string(name
) ||
3658 is_empty_string(lxcpath
))
3659 return ret_errno(EINVAL
);
3661 if ((buf
&& !len
) || (len
&& !buf
))
3662 return ret_errno(EINVAL
);
3664 unified_fd
= lxc_cmd_get_limiting_cgroup2_fd(name
, lxcpath
);
3666 return ret_errno(ENOCGROUP2
);
3668 ret
= lxc_read_try_buf_at(unified_fd
, filename
, buf
, len
);
3670 SYSERROR("Failed to read cgroup value");
3675 /* Connects to command socket therefore isn't callable from command handler. */
3676 int cgroup_set(const char *name
, const char *lxcpath
,
3677 const char *filename
, const char *value
)
3679 __do_close
int unified_fd
= -EBADF
;
3682 if (is_empty_string(filename
) || is_empty_string(value
) ||
3683 is_empty_string(name
) || is_empty_string(lxcpath
))
3684 return ret_errno(EINVAL
);
3686 unified_fd
= lxc_cmd_get_limiting_cgroup2_fd(name
, lxcpath
);
3688 return ret_errno(ENOCGROUP2
);
3690 if (strnequal(filename
, "devices.", STRLITERALLEN("devices."))) {
3691 struct device_item device
= {};
3693 ret
= device_cgroup_rule_parse(&device
, filename
, value
);
3695 return log_error_errno(-1, EINVAL
, "Failed to parse device string %s=%s", filename
, value
);
3697 ret
= lxc_cmd_add_bpf_device_cgroup(name
, lxcpath
, &device
);
3699 ret
= lxc_writeat(unified_fd
, filename
, value
, strlen(value
));
3705 static int do_cgroup_freeze(int unified_fd
,
3706 const char *state_string
,
3709 const char *epoll_error
,
3710 const char *wait_error
)
3712 __do_close
int events_fd
= -EBADF
;
3713 call_cleaner(lxc_mainloop_close
) struct lxc_epoll_descr
*descr_ptr
= NULL
;
3715 struct lxc_epoll_descr descr
= {};
3718 ret
= lxc_mainloop_open(&descr
);
3720 return log_error_errno(-1, errno
, "%s", epoll_error
);
3722 /* automatically cleaned up now */
3725 events_fd
= open_at(unified_fd
, "cgroup.events", PROTECT_OPEN
, PROTECT_LOOKUP_BENEATH
, 0);
3727 return log_error_errno(-errno
, errno
, "Failed to open cgroup.events file");
3729 ret
= lxc_mainloop_add_handler_events(&descr
, events_fd
, EPOLLPRI
, freezer_cgroup_events_cb
, INT_TO_PTR(state_num
));
3731 return log_error_errno(-1, errno
, "Failed to add cgroup.events fd handler to mainloop");
3734 ret
= lxc_writeat(unified_fd
, "cgroup.freeze", state_string
, 1);
3736 return log_error_errno(-1, errno
, "Failed to open cgroup.freeze file");
3739 ret
= lxc_mainloop(&descr
, timeout
);
3741 return log_error_errno(-1, errno
, "%s", wait_error
);
3744 return log_trace(0, "Container now %s", (state_num
== 1) ? "frozen" : "unfrozen");
/* Freeze the cgroup referred to by @unified_fd, waiting up to @timeout. */
static inline int __cgroup_freeze(int unified_fd, int timeout)
{
	return do_cgroup_freeze(unified_fd, "1", 1, timeout,
				"Failed to create epoll instance to wait for container freeze",
				"Failed to wait for container to be frozen");
}
3754 int cgroup_freeze(const char *name
, const char *lxcpath
, int timeout
)
3756 __do_close
int unified_fd
= -EBADF
;
3759 if (is_empty_string(name
) || is_empty_string(lxcpath
))
3760 return ret_errno(EINVAL
);
3762 unified_fd
= lxc_cmd_get_limiting_cgroup2_fd(name
, lxcpath
);
3764 return ret_errno(ENOCGROUP2
);
3766 lxc_cmd_notify_state_listeners(name
, lxcpath
, FREEZING
);
3767 ret
= __cgroup_freeze(unified_fd
, timeout
);
3768 lxc_cmd_notify_state_listeners(name
, lxcpath
, !ret
? FROZEN
: RUNNING
);
/*
 * Unfreeze the cgroup referred to by @unified_fd, waiting up to @timeout.
 *
 * Fix: the error messages previously said "freeze"/"frozen" (copy-pasted
 * from __cgroup_freeze); they now describe the unfreeze path.
 */
int __cgroup_unfreeze(int unified_fd, int timeout)
{
	return do_cgroup_freeze(unified_fd, "0", 0, timeout,
				"Failed to create epoll instance to wait for container unfreeze",
				"Failed to wait for container to be unfrozen");
}
3779 int cgroup_unfreeze(const char *name
, const char *lxcpath
, int timeout
)
3781 __do_close
int unified_fd
= -EBADF
;
3784 if (is_empty_string(name
) || is_empty_string(lxcpath
))
3785 return ret_errno(EINVAL
);
3787 unified_fd
= lxc_cmd_get_limiting_cgroup2_fd(name
, lxcpath
);
3789 return ret_errno(ENOCGROUP2
);
3791 lxc_cmd_notify_state_listeners(name
, lxcpath
, THAWED
);
3792 ret
= __cgroup_unfreeze(unified_fd
, timeout
);
3793 lxc_cmd_notify_state_listeners(name
, lxcpath
, !ret
? RUNNING
: FROZEN
);