1 /* SPDX-License-Identifier: LGPL-2.1+ */
4 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
5 * cgroup backend. The original cgfs.c was designed to be as flexible
6 * as possible. It would try to find cgroup filesystems no matter where
7 * or how you had them mounted, and deduce the most usable mount for
10 * This new implementation assumes that cgroup filesystems are mounted
11 * under /sys/fs/cgroup/clist where clist is either the controller, or
12 * a comma-separated list of controllers.
22 #include <linux/kdev_t.h>
23 #include <linux/types.h>
30 #include <sys/epoll.h>
31 #include <sys/types.h>
37 #include "cgroup2_devices.h"
38 #include "cgroup_utils.h"
40 #include "commands_utils.h"
46 #include "memory_utils.h"
47 #include "mount_utils.h"
48 #include "storage/storage.h"
49 #include "string_utils.h"
50 #include "syscall_wrappers.h"
54 #include "include/strlcpy.h"
58 #include "include/strlcat.h"
/* Register the "cgfsng" log category under the "cgroup" parent category. */
61 lxc_log_define(cgfsng
, cgroup
);
64 * Given a pointer to a null-terminated array of pointers, realloc to add one
65 * entry, and point the new entry to NULL. Do not fail. Return the index to the
66 * second-to-last entry - that is, the one which is now available for use
67 * (keeping the list null-terminated).
/* NOTE(review): returns negative errno (-ENOMEM) when realloc() fails. */
69 static int list_add(void ***list
)
75 for (; (*list
)[idx
]; idx
++)
78 p
= realloc(*list
, (idx
+ 2) * sizeof(void **));
80 return ret_errno(ENOMEM
);
88 /* Given a null-terminated array of strings, check whether @entry is one of the
/* Returns true iff @entry exactly matches (strequal) one element of @list. */
91 static bool string_in_list(char **list
, const char *entry
)
96 for (int i
= 0; list
[i
]; i
++)
97 if (strequal(list
[i
], entry
))
103 /* Given a handler's cgroup data, return the struct hierarchy for the controller
104 * @c, or NULL if there is none.
/*
 * Walks ops->hierarchies; on cgroup2 the "devices" and "freezer" controllers
 * are special-cased via their utility-controller helpers. Returns NULL with
 * errno set to ENOENT when no matching hierarchy is found.
 */
106 static struct hierarchy
*get_hierarchy(struct cgroup_ops
*ops
, const char *controller
)
108 if (!ops
->hierarchies
)
109 return log_trace_errno(NULL
, errno
, "There are no useable cgroup controllers");
111 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
113 /* This is the empty unified hierarchy. */
114 if (ops
->hierarchies
[i
]->controllers
&& !ops
->hierarchies
[i
]->controllers
[0])
115 return ops
->hierarchies
[i
];
121 * Handle controllers with significant implementation changes
122 * from cgroup to cgroup2.
124 if (pure_unified_layout(ops
)) {
125 if (strequal(controller
, "devices")) {
126 if (device_utility_controller(ops
->unified
))
130 } else if (strequal(controller
, "freezer")) {
131 if (freezer_utility_controller(ops
->unified
))
138 if (string_in_list(ops
->hierarchies
[i
]->controllers
, controller
))
139 return ops
->hierarchies
[i
];
143 WARN("There is no useable %s controller", controller
);
145 WARN("There is no empty unified cgroup hierarchy");
147 return ret_set_errno(NULL
, ENOENT
);
150 /* Taken over, modified, from the kernel sources. */
151 #define NBITS 32 /* bits in uint32_t */
152 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
153 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
/* Set bit @bit in the uint32_t-array bitmap @bitarr. */
155 static void set_bit(unsigned bit
, uint32_t *bitarr
)
157 bitarr
[bit
/ NBITS
] |= (1 << (bit
% NBITS
));
/* Clear bit @bit in the uint32_t-array bitmap @bitarr. */
160 static void clear_bit(unsigned bit
, uint32_t *bitarr
)
162 bitarr
[bit
/ NBITS
] &= ~(1 << (bit
% NBITS
));
/* Return whether bit @bit is set in the uint32_t-array bitmap @bitarr. */
165 static bool is_set(unsigned bit
, uint32_t *bitarr
)
167 return (bitarr
[bit
/ NBITS
] & (1 << (bit
% NBITS
))) != 0;
170 /* Create cpumask from cpulist aka turn:
/*
 * Parses a comma-separated cpulist in @buf (ranges like "2-4" handled via
 * the '-' separator) into a freshly calloc()ed bitmask sized for @nbits.
 * Returns NULL with errno set to ENOMEM or EINVAL on failure; ownership of
 * the bitmask transfers to the caller (move_ptr).
 */
178 static uint32_t *lxc_cpumask(char *buf
, size_t nbits
)
180 __do_free
uint32_t *bitarr
= NULL
;
184 arrlen
= BITS_TO_LONGS(nbits
);
185 bitarr
= calloc(arrlen
, sizeof(uint32_t));
187 return ret_set_errno(NULL
, ENOMEM
);
189 lxc_iterate_parts(token
, buf
, ",") {
194 start
= strtoul(token
, NULL
, 0);
196 range
= strchr(token
, '-');
198 end
= strtoul(range
+ 1, NULL
, 0);
201 return ret_set_errno(NULL
, EINVAL
);
204 return ret_set_errno(NULL
, EINVAL
);
207 set_bit(start
++, bitarr
);
210 return move_ptr(bitarr
);
213 /* Turn cpumask into simple, comma-separated cpulist. */
/*
 * Appends each set bit index (as a decimal string) to a string list, then
 * joins with ",". Returns a heap-allocated string or NULL with errno set to
 * ENOMEM. Caller owns the returned string.
 */
214 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr
, size_t nbits
)
216 __do_free_string_list
char **cpulist
= NULL
;
217 char numstr
[INTTYPE_TO_STRLEN(size_t)] = {0};
220 for (size_t i
= 0; i
<= nbits
; i
++) {
221 if (!is_set(i
, bitarr
))
224 ret
= strnprintf(numstr
, sizeof(numstr
), "%zu", i
);
228 ret
= lxc_append_string(&cpulist
, numstr
);
230 return ret_set_errno(NULL
, ENOMEM
);
234 return ret_set_errno(NULL
, ENOMEM
);
236 return lxc_string_join(",", (const char **)cpulist
, false);
/*
 * Determine the highest cpu number referenced by a cpulist string, by
 * inspecting the last ","-separated and "-"-delimited component.
 */
239 static ssize_t
get_max_cpus(char *cpulist
)
242 char *maxcpus
= cpulist
;
245 c1
= strrchr(maxcpus
, ',');
249 c2
= strrchr(maxcpus
, '-');
263 cpus
= strtoul(c1
, NULL
, 0);
/* Return true when @h is the cgroup2 (unified) hierarchy. */
270 static inline bool is_unified_hierarchy(const struct hierarchy
*h
)
272 return h
->fs_type
== UNIFIED_HIERARCHY
;
275 /* Return true if the controller @entry is found in the null-terminated list of
276 * hierarchies @hlist.
278 static bool controller_available(struct hierarchy
**hlist
, char *entry
)
283 for (int i
= 0; hlist
[i
]; i
++)
284 if (string_in_list(hlist
[i
]->controllers
, entry
))
/*
 * Verify that every controller listed in ops->cgroup_use is provided by one
 * of the discovered hierarchies; trivially succeeds when cgroup_use is unset.
 */
290 static bool controllers_available(struct cgroup_ops
*ops
)
292 struct hierarchy
**hlist
;
294 if (!ops
->cgroup_use
)
297 hlist
= ops
->hierarchies
;
298 for (char **cur
= ops
->cgroup_use
; cur
&& *cur
; cur
++)
299 if (!controller_available(hlist
, *cur
))
/* NOTE(review): this error message reads as truncated — it fires when the
 * controller is NOT available; confirm against upstream wording. */
300 return log_error(false, "The %s controller found", *cur
);
/* Allocate a fresh, empty null-terminated string list (caller owns it). */
305 static char **list_new(void)
307 __do_free_string_list
char **list
= NULL
;
310 idx
= list_add((void ***)&list
);
315 return move_ptr(list
);
/*
 * Duplicate @entry and append the copy to the null-terminated string list
 * @list; returns negative errno (-ENOMEM) on allocation failure.
 */
318 static int list_add_string(char ***list
, char *entry
)
320 __do_free
char *dup
= NULL
;
325 return ret_errno(ENOMEM
);
327 idx
= list_add((void ***)list
);
331 (*list
)[idx
] = move_ptr(dup
);
/*
 * Split the whitespace-separated controller string @controllers into a
 * newly allocated null-terminated string list (caller owns it).
 */
335 static char **list_add_controllers(char *controllers
)
337 __do_free_string_list
char **list
= NULL
;
340 lxc_iterate_parts(it
, controllers
, " \t\n") {
343 ret
= list_add_string(&list
, it
);
348 return move_ptr(list
);
/*
 * Read the cgroup2 controllers file @file relative to @dfd and return its
 * contents as a string list of controller names.
 */
351 static char **unified_controllers(int dfd
, const char *file
)
353 __do_free
char *buf
= NULL
;
355 buf
= read_file_at(dfd
, file
, PROTECT_OPEN
, 0);
359 return list_add_controllers(buf
);
/*
 * Decide whether a hierarchy providing @controllers should be ignored
 * because none of its controllers appear in ops->cgroup_use; when
 * cgroup_use is unset nothing is skipped.
 */
362 static bool skip_hierarchy(const struct cgroup_ops
*ops
, char **controllers
)
364 if (!ops
->cgroup_use
)
367 for (char **cur_ctrl
= controllers
; cur_ctrl
&& *cur_ctrl
; cur_ctrl
++) {
370 for (char **cur_use
= ops
->cgroup_use
; cur_use
&& *cur_use
; cur_use
++) {
371 if (!strequal(*cur_use
, *cur_ctrl
))
/*
 * Allocate and register a new struct hierarchy on ops->hierarchies.
 * Takes ownership of @controllers and @base_cgroup (stored, not copied);
 * @base_cgroup must be relative to the controller mount. The dfd_* fds are
 * stored as-is; per-container fds start out as -EBADF. Returns negative
 * errno on failure.
 */
387 static int cgroup_hierarchy_add(struct cgroup_ops
*ops
, int dfd_mnt
, char *mnt
,
388 int dfd_base
, char *base_cgroup
,
389 char **controllers
, cgroupfs_type_magic_t fs_type
)
391 __do_free
struct hierarchy
*new = NULL
;
394 if (abspath(base_cgroup
))
395 return syserrno_set(-EINVAL
, "Container base path must be relative to controller mount");
397 new = zalloc(sizeof(*new));
399 return ret_errno(ENOMEM
);
401 new->dfd_con
= -EBADF
;
402 new->dfd_lim
= -EBADF
;
403 new->dfd_mon
= -EBADF
;
405 new->fs_type
= fs_type
;
406 new->controllers
= controllers
;
408 new->at_base
= base_cgroup
;
410 new->dfd_mnt
= dfd_mnt
;
411 new->dfd_base
= dfd_base
;
413 TRACE("Adding cgroup hierarchy mounted at %s and base cgroup %s",
414 mnt
, maybe_empty(base_cgroup
));
415 for (char *const *it
= new->controllers
; it
&& *it
; it
++)
416 TRACE("The hierarchy contains the %s controller", *it
);
418 idx
= list_add((void ***)&ops
->hierarchies
);
420 return ret_errno(idx
);
422 if (fs_type
== UNIFIED_HIERARCHY
)
424 (ops
->hierarchies
)[idx
] = move_ptr(new);
/*
 * Prune the cgroup subtree @path_prune below each hierarchy's base dfd.
 * Failures are only warned about (best-effort cleanup); the cached
 * path_lim/path_con strings are released afterwards.
 */
429 static int cgroup_tree_remove(struct hierarchy
**hierarchies
, const char *path_prune
)
431 if (!path_prune
|| !hierarchies
)
434 for (int i
= 0; hierarchies
[i
]; i
++) {
435 struct hierarchy
*h
= hierarchies
[i
];
438 ret
= cgroup_tree_prune(h
->dfd_base
, path_prune
);
440 SYSWARN("Failed to destroy %d(%s)", h
->dfd_base
, path_prune
);
442 TRACE("Removed cgroup tree %d(%s)", h
->dfd_base
, path_prune
);
444 free_equal(h
->path_lim
, h
->path_con
);
/* Argument bundle passed to userns_exec_1() wrapper callbacks. */
450 struct generic_userns_exec_data
{
451 struct hierarchy
**hierarchies
;
452 const char *path_prune
;
453 struct lxc_conf
*conf
;
454 uid_t origuid
; /* target uid in parent namespace */
/*
 * Runs inside the container's user namespace (via userns_exec_1): drops
 * supplementary groups, switches to the mapped root (or init_{uid,gid} when
 * no root mapping exists), then prunes the cgroup tree. Returns -1 on error.
 */
458 static int cgroup_tree_remove_wrapper(void *data
)
460 struct generic_userns_exec_data
*arg
= data
;
461 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
462 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
465 if (!lxc_drop_groups() && errno
!= EPERM
)
466 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
468 ret
= setresgid(nsgid
, nsgid
, nsgid
);
470 return log_error_errno(-1, errno
, "Failed to setresgid(%d, %d, %d)",
471 (int)nsgid
, (int)nsgid
, (int)nsgid
);
473 ret
= setresuid(nsuid
, nsuid
, nsuid
);
475 return log_error_errno(-1, errno
, "Failed to setresuid(%d, %d, %d)",
476 (int)nsuid
, (int)nsuid
, (int)nsuid
);
478 return cgroup_tree_remove(arg
->hierarchies
, arg
->path_prune
);
/*
 * Destroy the container's (payload) cgroups: detach the cgroup2 devices bpf
 * program, then prune ops->container_limit_cgroup — inside the container's
 * user namespace when an id map is configured, directly otherwise.
 * All failures are logged and tolerated (best-effort teardown).
 */
481 __cgfsng_ops
static void cgfsng_payload_destroy(struct cgroup_ops
*ops
,
482 struct lxc_handler
*handler
)
487 ERROR("Called with uninitialized cgroup operations");
491 if (!ops
->hierarchies
)
495 ERROR("Called with uninitialized handler");
499 if (!handler
->conf
) {
500 ERROR("Called with uninitialized conf");
504 if (!ops
->container_limit_cgroup
) {
505 WARN("Uninitialized limit cgroup");
509 ret
= bpf_program_cgroup_detach(handler
->cgroup_ops
->cgroup2_devices
);
511 WARN("Failed to detach bpf program from cgroup");
513 if (!lxc_list_empty(&handler
->conf
->id_map
)) {
514 struct generic_userns_exec_data wrap
= {
515 .conf
= handler
->conf
,
516 .path_prune
= ops
->container_limit_cgroup
,
517 .hierarchies
= ops
->hierarchies
,
520 ret
= userns_exec_1(handler
->conf
, cgroup_tree_remove_wrapper
,
521 &wrap
, "cgroup_tree_remove_wrapper");
523 ret
= cgroup_tree_remove(ops
->hierarchies
, ops
->container_limit_cgroup
);
526 SYSWARN("Failed to destroy cgroups");
529 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
530 #define __OFFLINE_CPUS "/sys/devices/system/cpu/offline"
/*
 * Initialize the legacy (v1) cpuset controller's cpuset.cpus for a child
 * cgroup: read the parent's possible cpus, remove any isolated and offline
 * cpus (read from the sysfs paths above), and — unless cpuset inheritance
 * is already active (am_initialized) — write the resulting cpulist into the
 * child's cpuset.cpus. Returns false with an error logged on failure.
 */
531 static bool cpuset1_cpus_initialize(int dfd_parent
, int dfd_child
,
534 __do_free
char *cpulist
= NULL
, *fpath
= NULL
, *isolcpus
= NULL
,
535 *offlinecpus
= NULL
, *posscpus
= NULL
;
536 __do_free
uint32_t *isolmask
= NULL
, *offlinemask
= NULL
,
540 ssize_t maxisol
= 0, maxoffline
= 0, maxposs
= 0;
541 bool flipped_bit
= false;
543 posscpus
= read_file_at(dfd_parent
, "cpuset.cpus", PROTECT_OPEN
, 0);
545 return log_error_errno(false, errno
, "Failed to read file \"%s\"", fpath
);
547 /* Get maximum number of cpus found in possible cpuset. */
548 maxposs
= get_max_cpus(posscpus
);
549 if (maxposs
< 0 || maxposs
>= INT_MAX
- 1)
552 if (file_exists(__ISOL_CPUS
)) {
553 isolcpus
= read_file_at(-EBADF
, __ISOL_CPUS
, PROTECT_OPEN
, 0);
555 return log_error_errno(false, errno
, "Failed to read file \"%s\"", __ISOL_CPUS
);
557 if (isdigit(isolcpus
[0])) {
558 /* Get maximum number of cpus found in isolated cpuset. */
559 maxisol
= get_max_cpus(isolcpus
);
560 if (maxisol
< 0 || maxisol
>= INT_MAX
- 1)
564 if (maxposs
< maxisol
)
568 TRACE("The path \""__ISOL_CPUS
"\" to read isolated cpus from does not exist");
571 if (file_exists(__OFFLINE_CPUS
)) {
572 offlinecpus
= read_file_at(-EBADF
, __OFFLINE_CPUS
, PROTECT_OPEN
, 0);
574 return log_error_errno(false, errno
, "Failed to read file \"%s\"", __OFFLINE_CPUS
);
576 if (isdigit(offlinecpus
[0])) {
577 /* Get maximum number of cpus found in offline cpuset. */
578 maxoffline
= get_max_cpus(offlinecpus
);
579 if (maxoffline
< 0 || maxoffline
>= INT_MAX
- 1)
583 if (maxposs
< maxoffline
)
584 maxposs
= maxoffline
;
587 TRACE("The path \""__OFFLINE_CPUS
"\" to read offline cpus from does not exist");
590 if ((maxisol
== 0) && (maxoffline
== 0)) {
591 cpulist
= move_ptr(posscpus
);
595 possmask
= lxc_cpumask(posscpus
, maxposs
);
597 return log_error_errno(false, errno
, "Failed to create cpumask for possible cpus");
600 isolmask
= lxc_cpumask(isolcpus
, maxposs
);
602 return log_error_errno(false, errno
, "Failed to create cpumask for isolated cpus");
605 if (maxoffline
> 0) {
606 offlinemask
= lxc_cpumask(offlinecpus
, maxposs
);
608 return log_error_errno(false, errno
, "Failed to create cpumask for offline cpus");
611 for (i
= 0; i
<= maxposs
; i
++) {
612 if ((isolmask
&& !is_set(i
, isolmask
)) ||
613 (offlinemask
&& !is_set(i
, offlinemask
)) ||
614 !is_set(i
, possmask
))
618 clear_bit(i
, possmask
);
622 cpulist
= lxc_cpumask_to_cpulist(possmask
, maxposs
);
623 TRACE("No isolated or offline cpus present in cpuset");
625 cpulist
= move_ptr(posscpus
);
626 TRACE("Removed isolated or offline cpus from cpuset");
629 return log_error_errno(false, errno
, "Failed to create cpu list");
632 if (!am_initialized
) {
633 ret
= lxc_writeat(dfd_child
, "cpuset.cpus", cpulist
, strlen(cpulist
));
635 return log_error_errno(false, errno
, "Failed to write cpu list to \"%d/cpuset.cpus\"", dfd_child
);
637 TRACE("Copied cpu settings of parent cgroup");
/*
 * Prepare a legacy cpuset cgroup @dfd_next below @dfd_base: propagate
 * cpuset.cpus (minus isolated/offline cpus) and cpuset.mems from the base
 * cgroup, then enable cgroup.clone_children so descendants inherit them.
 */
643 static bool cpuset1_initialize(int dfd_base
, int dfd_next
)
650 * Determine whether the base cgroup has cpuset
651 * inheritance turned on.
653 bytes
= lxc_readat(dfd_base
, "cgroup.clone_children", &v
, 1);
655 return syserrno(false, "Failed to read file %d(cgroup.clone_children)", dfd_base
);
658 * Initialize cpuset.cpus and remove any isolated
661 if (!cpuset1_cpus_initialize(dfd_base
, dfd_next
, v
== '1'))
662 return syserrno(false, "Failed to initialize cpuset.cpus");
664 /* Read cpuset.mems from parent... */
665 bytes
= lxc_readat(dfd_base
, "cpuset.mems", mems
, sizeof(mems
));
667 return syserrno(false, "Failed to read file %d(cpuset.mems)", dfd_base
);
669 /* ... and copy to first cgroup in the tree... */
670 bytes
= lxc_writeat(dfd_next
, "cpuset.mems", mems
, bytes
);
672 return syserrno(false, "Failed to write %d(cpuset.mems)", dfd_next
);
674 /* ... and finally turn on cpuset inheritance. */
675 bytes
= lxc_writeat(dfd_next
, "cgroup.clone_children", "1", 1);
677 return syserrno(false, "Failed to write %d(cgroup.clone_children)", dfd_next
);
679 return log_trace(true, "Initialized cpuset in the legacy hierarchy");
/*
 * Create the (relative) cgroup path @path beneath @dfd_base component by
 * component with mkdirat(), rejecting absolute or upward-walking paths.
 * When @cpuset_v1 is set, each newly created level gets legacy cpuset
 * initialization. Returns an O_PATH fd for the final directory; -EEXIST on
 * the last component is an error unless @eexist_ignore is set.
 */
682 static int __cgroup_tree_create(int dfd_base
, const char *path
, mode_t mode
,
683 bool cpuset_v1
, bool eexist_ignore
)
685 __do_close
int dfd_final
= -EBADF
;
686 int dfd_cur
= dfd_base
;
692 if (is_empty_string(path
))
693 return ret_errno(EINVAL
);
695 len
= strlcpy(buf
, path
, sizeof(buf
));
696 if (len
>= sizeof(buf
))
697 return ret_errno(E2BIG
);
699 lxc_iterate_parts(cur
, buf
, "/") {
701 * Even though we vetted the paths when we parsed the config
702 * we're paranoid here and check that the path is neither
703 * absolute nor walks upwards.
706 return syserrno_set(-EINVAL
, "No absolute paths allowed");
708 if (strnequal(cur
, "..", STRLITERALLEN("..")))
709 return syserrno_set(-EINVAL
, "No upward walking paths allowed");
711 ret
= mkdirat(dfd_cur
, cur
, mode
);
714 return syserrno(-errno
, "Failed to create %d(%s)", dfd_cur
, cur
);
718 TRACE("%s %d(%s) cgroup", !ret
? "Created" : "Reusing", dfd_cur
, cur
);
720 dfd_final
= open_at(dfd_cur
, cur
, PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH
, 0);
722 return syserrno(-errno
, "Fail to open%s directory %d(%s)",
723 !ret
? " newly created" : "", dfd_base
, cur
);
724 if (dfd_cur
!= dfd_base
)
726 else if (cpuset_v1
&& !cpuset1_initialize(dfd_base
, dfd_final
))
727 return syserrno(-EINVAL
, "Failed to initialize cpuset controller in the legacy hierarchy");
729 * Leave dfd_final pointing to the last fd we opened so
730 * it will be automatically zapped if we return early.
735 /* The final cgroup must be successfully created by us. */
737 if (ret
!= -EEXIST
|| !eexist_ignore
)
738 return syserrno_set(ret
, "Creating the final cgroup %d(%s) failed", dfd_base
, path
);
741 return move_fd(dfd_final
);
/*
 * Create the cgroup tree for one hierarchy @h. Without isolation a single
 * cgroup @cgroup_limit_dir is created; with isolation (payload plus a
 * @cgroup_leaf) a separate limit cgroup is created first and the leaf the
 * container lives in is created beneath it. On success the relevant
 * dfd_con/dfd_lim/dfd_mon fds and path_con/path_lim strings on @h are set.
 * On leaf-creation failure the limit cgroup is pruned again so nothing is
 * left behind. Legacy cpuset hierarchies get extra initialization.
 */
744 static bool cgroup_tree_create(struct cgroup_ops
*ops
, struct lxc_conf
*conf
,
745 struct hierarchy
*h
, const char *cgroup_limit_dir
,
746 const char *cgroup_leaf
, bool payload
)
748 __do_close
int fd_limit
= -EBADF
, fd_final
= -EBADF
;
749 __do_free
char *path
= NULL
, *limit_path
= NULL
;
750 bool cpuset_v1
= false;
753 * The legacy cpuset controller needs massaging in case inheriting
754 * settings from its immediate ancestor cgroup hasn't been turned on.
756 cpuset_v1
= !is_unified_hierarchy(h
) && string_in_list(h
->controllers
, "cpuset");
758 if (payload
&& cgroup_leaf
) {
759 /* With isolation both parts need to not already exist. */
760 fd_limit
= __cgroup_tree_create(h
->dfd_base
, cgroup_limit_dir
, 0755, cpuset_v1
, false);
762 return syserrno(false, "Failed to create limiting cgroup %d(%s)", h
->dfd_base
, cgroup_limit_dir
);
764 TRACE("Created limit cgroup %d->%d(%s)",
765 fd_limit
, h
->dfd_base
, cgroup_limit_dir
);
768 * With isolation the devices legacy cgroup needs to be
769 * initialized early, as it typically contains an 'a' (all)
770 * line, which is not possible once a subdirectory has been
773 if (string_in_list(h
->controllers
, "devices") &&
774 !ops
->setup_limits_legacy(ops
, conf
, true))
775 return log_error(false, "Failed to setup legacy device limits");
777 limit_path
= make_cgroup_path(h
, h
->at_base
, cgroup_limit_dir
, NULL
);
778 path
= must_make_path(limit_path
, cgroup_leaf
, NULL
);
781 * If we use a separate limit cgroup, the leaf cgroup, i.e. the
782 * cgroup the container actually resides in, is below fd_limit.
784 fd_final
= __cgroup_tree_create(fd_limit
, cgroup_leaf
, 0755, cpuset_v1
, false);
786 /* Ensure we don't leave any garbage behind. */
787 if (cgroup_tree_prune(h
->dfd_base
, cgroup_limit_dir
))
788 SYSWARN("Failed to destroy %d(%s)", h
->dfd_base
, cgroup_limit_dir
);
790 TRACE("Removed cgroup tree %d(%s)", h
->dfd_base
, cgroup_limit_dir
);
793 path
= make_cgroup_path(h
, h
->at_base
, cgroup_limit_dir
, NULL
);
795 fd_final
= __cgroup_tree_create(h
->dfd_base
, cgroup_limit_dir
, 0755, cpuset_v1
, false);
798 return syserrno(false, "Failed to create %s cgroup %d(%s)", payload
? "payload" : "monitor", h
->dfd_base
, cgroup_limit_dir
);
801 h
->dfd_con
= move_fd(fd_final
);
802 h
->path_con
= move_ptr(path
);
805 h
->dfd_lim
= h
->dfd_con
;
807 h
->dfd_lim
= move_fd(fd_limit
);
810 h
->path_lim
= move_ptr(limit_path
);
812 h
->path_lim
= h
->path_con
;
814 h
->dfd_mon
= move_fd(fd_final
);
/*
 * Best-effort rollback of a partially created cgroup tree: close and free
 * the per-hierarchy fds/paths and prune @path_prune below h->dfd_base,
 * skipping cgroups this process did not create. Failures only warn.
 */
820 static void cgroup_tree_prune_leaf(struct hierarchy
*h
, const char *path_prune
,
826 /* Check whether we actually created the cgroup to prune. */
830 free_equal(h
->path_con
, h
->path_lim
);
831 close_equal(h
->dfd_con
, h
->dfd_lim
);
833 /* Check whether we actually created the cgroup to prune. */
837 close_prot_errno_disarm(h
->dfd_mon
);
840 /* We didn't create this cgroup. */
844 if (cgroup_tree_prune(h
->dfd_base
, path_prune
))
845 SYSWARN("Failed to destroy %d(%s)", h
->dfd_base
, path_prune
);
847 TRACE("Removed cgroup tree %d(%s)", h
->dfd_base
, path_prune
);
/*
 * Destroy the monitor's cgroups: for each hierarchy, move the (still
 * running) monitor process into a pivot cgroup (location taken from
 * lxc.cgroup.meta config, falling back to CGROUP_PIVOT), then prune
 * ops->monitor_cgroup. All failures are warned about and tolerated.
 */
850 __cgfsng_ops
static void cgfsng_monitor_destroy(struct cgroup_ops
*ops
,
851 struct lxc_handler
*handler
)
854 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
855 const struct lxc_conf
*conf
;
858 ERROR("Called with uninitialized cgroup operations");
862 if (!ops
->hierarchies
)
866 ERROR("Called with uninitialized handler");
870 if (!handler
->conf
) {
871 ERROR("Called with uninitialized conf");
874 conf
= handler
->conf
;
876 if (!ops
->monitor_cgroup
) {
877 WARN("Uninitialized monitor cgroup");
881 len
= strnprintf(pidstr
, sizeof(pidstr
), "%d", handler
->monitor_pid
);
885 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
886 __do_close
int fd_pivot
= -EBADF
;
887 __do_free
char *pivot_path
= NULL
;
888 struct hierarchy
*h
= ops
->hierarchies
[i
];
889 bool cpuset_v1
= false;
892 /* Monitor might have died before we entered the cgroup. */
893 if (handler
->monitor_pid
<= 0) {
894 WARN("No valid monitor process found while destroying cgroups");
895 goto cgroup_prune_tree
;
898 if (conf
->cgroup_meta
.monitor_pivot_dir
)
899 pivot_path
= must_make_path(conf
->cgroup_meta
.monitor_pivot_dir
, CGROUP_PIVOT
, NULL
);
900 else if (conf
->cgroup_meta
.dir
)
901 pivot_path
= must_make_path(conf
->cgroup_meta
.dir
, CGROUP_PIVOT
, NULL
);
903 pivot_path
= must_make_path(CGROUP_PIVOT
, NULL
);
905 cpuset_v1
= !is_unified_hierarchy(h
) && string_in_list(h
->controllers
, "cpuset");
907 fd_pivot
= __cgroup_tree_create(h
->dfd_base
, pivot_path
, 0755, cpuset_v1
, true);
909 SYSWARN("Failed to create pivot cgroup %d(%s)", h
->dfd_base
, pivot_path
);
913 ret
= lxc_writeat(fd_pivot
, "cgroup.procs", pidstr
, len
);
915 SYSWARN("Failed to move monitor %s to \"%s\"", pidstr
, pivot_path
);
920 ret
= cgroup_tree_prune(h
->dfd_base
, ops
->monitor_cgroup
);
922 SYSWARN("Failed to destroy %d(%s)", h
->dfd_base
, ops
->monitor_cgroup
);
924 TRACE("Removed cgroup tree %d(%s)", h
->dfd_base
, ops
->monitor_cgroup
);
929 * Check we have no lxc.cgroup.dir, and that lxc.cgroup.dir.limit_prefix is a
930 * proper prefix directory of lxc.cgroup.dir.payload.
932 * Returns the prefix length if it is set, otherwise zero on success.
/* NOTE(review): despite the sentence above, the visible signature returns
 * bool — true when the lxc.cgroup.dir.* configuration is consistent. */
934 static bool check_cgroup_dir_config(struct lxc_conf
*conf
)
936 const char *monitor_dir
= conf
->cgroup_meta
.monitor_dir
,
937 *container_dir
= conf
->cgroup_meta
.container_dir
,
938 *namespace_dir
= conf
->cgroup_meta
.namespace_dir
;
940 /* none of the new options are set, all is fine */
941 if (!monitor_dir
&& !container_dir
&& !namespace_dir
)
944 /* some are set, make sure lxc.cgroup.dir is not also set */
945 if (conf
->cgroup_meta
.dir
)
946 return log_error_errno(false, EINVAL
,
947 "lxc.cgroup.dir conflicts with lxc.cgroup.dir.payload/monitor");
949 /* make sure both monitor and payload are set */
950 if (!monitor_dir
|| !container_dir
)
951 return log_error_errno(false, EINVAL
,
952 "lxc.cgroup.dir.payload and lxc.cgroup.dir.monitor must both be set");
954 /* namespace_dir may be empty */
/*
 * Create the monitor's cgroup in every hierarchy. The cgroup name comes
 * from (in priority order) lxc.cgroup.dir.monitor, lxc.cgroup.dir, or the
 * cgroup pattern with "%n" replaced by the container name; unless an
 * explicit monitor dir was configured, name collisions are retried with
 * "-<idx>" suffixes up to 1000 times. Partially created trees are pruned
 * on failure. On success ops->monitor_cgroup is set.
 */
958 __cgfsng_ops
static bool cgfsng_monitor_create(struct cgroup_ops
*ops
, struct lxc_handler
*handler
)
960 __do_free
char *monitor_cgroup
= NULL
;
965 struct lxc_conf
*conf
;
968 return ret_set_errno(false, ENOENT
);
970 if (!ops
->hierarchies
)
973 if (ops
->monitor_cgroup
)
974 return ret_set_errno(false, EEXIST
);
976 if (!handler
|| !handler
->conf
)
977 return ret_set_errno(false, EINVAL
);
979 conf
= handler
->conf
;
981 if (!check_cgroup_dir_config(conf
))
984 if (conf
->cgroup_meta
.monitor_dir
) {
985 monitor_cgroup
= strdup(conf
->cgroup_meta
.monitor_dir
);
986 } else if (conf
->cgroup_meta
.dir
) {
987 monitor_cgroup
= must_concat(&len
, conf
->cgroup_meta
.dir
, "/",
988 DEFAULT_MONITOR_CGROUP_PREFIX
,
990 CGROUP_CREATE_RETRY
, NULL
);
991 } else if (ops
->cgroup_pattern
) {
992 __do_free
char *cgroup_tree
= NULL
;
994 cgroup_tree
= lxc_string_replace("%n", handler
->name
, ops
->cgroup_pattern
);
996 return ret_set_errno(false, ENOMEM
);
998 monitor_cgroup
= must_concat(&len
, cgroup_tree
, "/",
999 DEFAULT_MONITOR_CGROUP
,
1000 CGROUP_CREATE_RETRY
, NULL
);
1002 monitor_cgroup
= must_concat(&len
, DEFAULT_MONITOR_CGROUP_PREFIX
,
1004 CGROUP_CREATE_RETRY
, NULL
);
1006 if (!monitor_cgroup
)
1007 return ret_set_errno(false, ENOMEM
);
1009 if (!conf
->cgroup_meta
.monitor_dir
) {
1010 suffix
= monitor_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1015 sprintf(suffix
, "-%d", idx
);
1017 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1018 if (cgroup_tree_create(ops
, handler
->conf
,
1019 ops
->hierarchies
[i
],
1020 monitor_cgroup
, NULL
, false))
1023 DEBUG("Failed to create cgroup %s)", monitor_cgroup
);
1024 for (int j
= 0; j
<= i
; j
++)
1025 cgroup_tree_prune_leaf(ops
->hierarchies
[j
],
1026 monitor_cgroup
, false);
1031 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000 && suffix
);
1033 if (idx
== 1000 || (!suffix
&& idx
!= 0))
1034 return log_error_errno(false, ERANGE
, "Failed to create monitor cgroup");
1036 ops
->monitor_cgroup
= move_ptr(monitor_cgroup
);
1037 return log_info(true, "The monitor process uses \"%s\" as cgroup", ops
->monitor_cgroup
);
1041 * Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1042 * next cgroup_pattern-1, -2, ..., -999.
/*
 * Counterpart of cgfsng_monitor_create() for the container (payload)
 * cgroup. Supports a separate limit cgroup (lxc.cgroup.dir.container plus
 * lxc.cgroup.dir.namespace) in which case container_cgroup lives below the
 * limit cgroup; otherwise both point at the same cgroup. On success sets
 * ops->container_cgroup and ops->container_limit_cgroup.
 */
1044 __cgfsng_ops
static bool cgfsng_payload_create(struct cgroup_ops
*ops
, struct lxc_handler
*handler
)
1046 __do_free
char *container_cgroup
= NULL
, *__limit_cgroup
= NULL
;
1051 char *suffix
= NULL
;
1052 struct lxc_conf
*conf
;
1055 return ret_set_errno(false, ENOENT
);
1057 if (!ops
->hierarchies
)
1060 if (ops
->container_cgroup
|| ops
->container_limit_cgroup
)
1061 return ret_set_errno(false, EEXIST
);
1063 if (!handler
|| !handler
->conf
)
1064 return ret_set_errno(false, EINVAL
);
1066 conf
= handler
->conf
;
1068 if (!check_cgroup_dir_config(conf
))
1071 if (conf
->cgroup_meta
.container_dir
) {
1072 __limit_cgroup
= strdup(conf
->cgroup_meta
.container_dir
);
1073 if (!__limit_cgroup
)
1074 return ret_set_errno(false, ENOMEM
);
1076 if (conf
->cgroup_meta
.namespace_dir
) {
1077 container_cgroup
= must_make_path(__limit_cgroup
,
1078 conf
->cgroup_meta
.namespace_dir
,
1080 limit_cgroup
= __limit_cgroup
;
1082 /* explicit paths but without isolation */
1083 limit_cgroup
= move_ptr(__limit_cgroup
);
1084 container_cgroup
= limit_cgroup
;
1086 } else if (conf
->cgroup_meta
.dir
) {
1087 limit_cgroup
= must_concat(&len
, conf
->cgroup_meta
.dir
, "/",
1088 DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1090 CGROUP_CREATE_RETRY
, NULL
);
1091 container_cgroup
= limit_cgroup
;
1092 } else if (ops
->cgroup_pattern
) {
1093 __do_free
char *cgroup_tree
= NULL
;
1095 cgroup_tree
= lxc_string_replace("%n", handler
->name
, ops
->cgroup_pattern
);
1097 return ret_set_errno(false, ENOMEM
);
1099 limit_cgroup
= must_concat(&len
, cgroup_tree
, "/",
1100 DEFAULT_PAYLOAD_CGROUP
,
1101 CGROUP_CREATE_RETRY
, NULL
);
1102 container_cgroup
= limit_cgroup
;
1104 limit_cgroup
= must_concat(&len
, DEFAULT_PAYLOAD_CGROUP_PREFIX
,
1106 CGROUP_CREATE_RETRY
, NULL
);
1107 container_cgroup
= limit_cgroup
;
1110 return ret_set_errno(false, ENOMEM
);
1112 if (!conf
->cgroup_meta
.container_dir
) {
1113 suffix
= container_cgroup
+ len
- CGROUP_CREATE_RETRY_LEN
;
1118 sprintf(suffix
, "-%d", idx
);
1120 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1121 if (cgroup_tree_create(ops
, handler
->conf
,
1122 ops
->hierarchies
[i
], limit_cgroup
,
1123 conf
->cgroup_meta
.namespace_dir
,
1127 DEBUG("Failed to create cgroup \"%s\"", ops
->hierarchies
[i
]->path_con
?: "(null)");
1128 for (int j
= 0; j
<= i
; j
++)
1129 cgroup_tree_prune_leaf(ops
->hierarchies
[j
],
1130 limit_cgroup
, true);
1135 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000 && suffix
);
1137 if (idx
== 1000 || (!suffix
&& idx
!= 0))
1138 return log_error_errno(false, ERANGE
, "Failed to create container cgroup");
1140 ops
->container_cgroup
= move_ptr(container_cgroup
);
1142 ops
->container_limit_cgroup
= move_ptr(__limit_cgroup
);
1144 ops
->container_limit_cgroup
= ops
->container_cgroup
;
1145 INFO("The container process uses \"%s\" as inner and \"%s\" as limit cgroup",
1146 ops
->container_cgroup
, ops
->container_limit_cgroup
);
/*
 * Move the monitor process (and, when present, the transient setup
 * process) into the previously created monitor cgroup of every hierarchy
 * by writing their pids into cgroup.procs. Non-unified dfd_mon fds are
 * closed afterwards since they are no longer needed.
 */
1150 __cgfsng_ops
static bool cgfsng_monitor_enter(struct cgroup_ops
*ops
,
1151 struct lxc_handler
*handler
)
1153 int monitor_len
, transient_len
= 0;
1154 char monitor
[INTTYPE_TO_STRLEN(pid_t
)],
1155 transient
[INTTYPE_TO_STRLEN(pid_t
)];
1158 return ret_set_errno(false, ENOENT
);
1160 if (!ops
->hierarchies
)
1163 if (!ops
->monitor_cgroup
)
1164 return ret_set_errno(false, ENOENT
);
1166 if (!handler
|| !handler
->conf
)
1167 return ret_set_errno(false, EINVAL
);
1169 monitor_len
= strnprintf(monitor
, sizeof(monitor
), "%d", handler
->monitor_pid
);
1170 if (monitor_len
< 0)
1173 if (handler
->transient_pid
> 0) {
1174 transient_len
= strnprintf(transient
, sizeof(transient
), "%d", handler
->transient_pid
);
1175 if (transient_len
< 0)
1179 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1180 struct hierarchy
*h
= ops
->hierarchies
[i
];
1183 ret
= lxc_writeat(h
->dfd_mon
, "cgroup.procs", monitor
, monitor_len
);
1185 return log_error_errno(false, errno
, "Failed to enter cgroup %d", h
->dfd_mon
);
1187 TRACE("Moved monitor into cgroup %d", h
->dfd_mon
);
1189 if (handler
->transient_pid
<= 0)
1192 ret
= lxc_writeat(h
->dfd_mon
, "cgroup.procs", transient
, transient_len
);
1194 return log_error_errno(false, errno
, "Failed to enter cgroup %d", h
->dfd_mon
);
1196 TRACE("Moved transient process into cgroup %d", h
->dfd_mon
);
1199 * we don't keep the fds for non-unified hierarchies around
1200 * mainly because we don't make use of them anymore after the
1201 * core cgroup setup is done but also because there are quite a
1204 if (!is_unified_hierarchy(h
))
1205 close_prot_errno_disarm(h
->dfd_mon
);
1207 handler
->transient_pid
= -1;
/*
 * Move the container's init process into its payload cgroup in every
 * hierarchy via cgroup.procs. Skipped for the unified hierarchy when the
 * process was already placed there at clone time (CLONE_INTO_CGROUP).
 */
1212 __cgfsng_ops
static bool cgfsng_payload_enter(struct cgroup_ops
*ops
,
1213 struct lxc_handler
*handler
)
1216 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1219 return ret_set_errno(false, ENOENT
);
1221 if (!ops
->hierarchies
)
1224 if (!ops
->container_cgroup
)
1225 return ret_set_errno(false, ENOENT
);
1227 if (!handler
|| !handler
->conf
)
1228 return ret_set_errno(false, EINVAL
);
1230 len
= strnprintf(pidstr
, sizeof(pidstr
), "%d", handler
->pid
);
1234 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1235 struct hierarchy
*h
= ops
->hierarchies
[i
];
1238 if (is_unified_hierarchy(h
) &&
1239 (handler
->clone_flags
& CLONE_INTO_CGROUP
))
1242 ret
= lxc_writeat(h
->dfd_con
, "cgroup.procs", pidstr
, len
);
1244 return log_error_errno(false, errno
, "Failed to enter cgroup \"%s\"", h
->path_con
);
1246 TRACE("Moved container into %s cgroup via %d", h
->path_con
, h
->dfd_con
);
/*
 * Combined fchownat() + fchmodat() relative to @dirfd; an empty @path
 * operates on @dirfd itself (AT_EMPTY_PATH for chown, "." for chmod).
 * Failures are logged as warnings and return -1.
 */
1252 static int fchowmodat(int dirfd
, const char *path
, uid_t chown_uid
,
1253 gid_t chown_gid
, mode_t chmod_mode
)
1257 ret
= fchownat(dirfd
, path
, chown_uid
, chown_gid
,
1258 AT_EMPTY_PATH
| AT_SYMLINK_NOFOLLOW
);
1260 return log_warn_errno(-1,
1261 errno
, "Failed to fchownat(%d, %s, %d, %d, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )",
1262 dirfd
, path
, (int)chown_uid
,
1265 ret
= fchmodat(dirfd
, (*path
!= '\0') ? path
: ".", chmod_mode
, 0);
1267 return log_warn_errno(-1, errno
, "Failed to fchmodat(%d, %s, %d, AT_SYMLINK_NOFOLLOW)",
1268 dirfd
, path
, (int)chmod_mode
);
1273 /* chgrp the container cgroups to container group. We leave
1274 * the container owner as cgroup owner. So we must make the
1275 * directories 775 so that the container can create sub-cgroups.
1277 * Also chown the tasks and cgroup.procs files. Those may not
1278 * exist depending on kernel version.
/*
 * Runs inside the container's user namespace (via userns_exec_1): after
 * dropping groups and switching to the mapped root, chown/chmod each
 * hierarchy's container cgroup directory (775) plus its process-attach
 * files ("tasks" on legacy only, cgroup.procs, and the unified delegate
 * files) to 664 for the in-namespace uid derived from origuid.
 */
1280 static int chown_cgroup_wrapper(void *data
)
1284 struct generic_userns_exec_data
*arg
= data
;
1285 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
1286 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
1288 if (!lxc_drop_groups() && errno
!= EPERM
)
1289 return log_error_errno(-1, errno
, "Failed to setgroups(0, NULL)");
1291 ret
= setresgid(nsgid
, nsgid
, nsgid
);
1293 return log_error_errno(-1, errno
, "Failed to setresgid(%d, %d, %d)",
1294 (int)nsgid
, (int)nsgid
, (int)nsgid
);
1296 ret
= setresuid(nsuid
, nsuid
, nsuid
);
1298 return log_error_errno(-1, errno
, "Failed to setresuid(%d, %d, %d)",
1299 (int)nsuid
, (int)nsuid
, (int)nsuid
);
1301 destuid
= get_ns_uid(arg
->origuid
);
1302 if (destuid
== LXC_INVALID_UID
)
1305 for (int i
= 0; arg
->hierarchies
[i
]; i
++) {
1306 int dirfd
= arg
->hierarchies
[i
]->dfd_con
;
1309 return syserrno_set(-EBADF
, "Invalid cgroup file descriptor");
1311 (void)fchowmodat(dirfd
, "", destuid
, nsgid
, 0775);
1314 * Failures to chown() these are inconvenient but not
1315 * detrimental We leave these owned by the container launcher,
1316 * so that container root can write to the files to attach. We
1317 * chmod() them 664 so that container systemd can write to the
1318 * files (which systemd in wily insists on doing).
1321 if (arg
->hierarchies
[i
]->fs_type
== LEGACY_HIERARCHY
)
1322 (void)fchowmodat(dirfd
, "tasks", destuid
, nsgid
, 0664);
1324 (void)fchowmodat(dirfd
, "cgroup.procs", destuid
, nsgid
, 0664);
1326 if (arg
->hierarchies
[i
]->fs_type
!= UNIFIED_HIERARCHY
)
1329 for (char **p
= arg
->hierarchies
[i
]->delegate
; p
&& *p
; p
++)
1330 (void)fchowmodat(dirfd
, *p
, destuid
, nsgid
, 0664);
/*
 * Chown the container's cgroups to the container's root via
 * chown_cgroup_wrapper() executed in the container's user namespace.
 * A no-op when no id map is configured.
 */
1336 __cgfsng_ops
static bool cgfsng_chown(struct cgroup_ops
*ops
,
1337 struct lxc_conf
*conf
)
1339 struct generic_userns_exec_data wrap
;
1342 return ret_set_errno(false, ENOENT
);
1344 if (!ops
->hierarchies
)
1347 if (!ops
->container_cgroup
)
1348 return ret_set_errno(false, ENOENT
);
1351 return ret_set_errno(false, EINVAL
);
1353 if (lxc_list_empty(&conf
->id_map
))
1356 wrap
.origuid
= geteuid();
1358 wrap
.hierarchies
= ops
->hierarchies
;
1361 if (userns_exec_1(conf
, chown_cgroup_wrapper
, &wrap
, "chown_cgroup_wrapper") < 0)
1362 return log_error_errno(false, errno
, "Error requesting cgroup chown in new user namespace");
1367 __cgfsng_ops
static void cgfsng_finalize(struct cgroup_ops
*ops
)
1372 if (!ops
->hierarchies
)
1375 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1376 struct hierarchy
*h
= ops
->hierarchies
[i
];
1378 /* Close all monitor cgroup file descriptors. */
1379 close_prot_errno_disarm(h
->dfd_mon
);
1381 /* Close the cgroup root file descriptor. */
1382 close_prot_errno_disarm(ops
->dfd_mnt
);
1385 * The checking for freezer support should obviously be done at cgroup
1386 * initialization time but that doesn't work reliable. The freezer
1387 * controller has been demoted (rightly so) to a simple file located in
1388 * each non-root cgroup. At the time when the container is created we
1389 * might still be located in /sys/fs/cgroup and so checking for
1390 * cgroup.freeze won't tell us anything because this file doesn't exist
1391 * in the root cgroup. We could then iterate through /sys/fs/cgroup and
1392 * find an already existing cgroup and then check within that cgroup
1393 * for the existence of cgroup.freeze but that will only work on
1394 * systemd based hosts. Other init systems might not manage cgroups and
1395 * so no cgroup will exist. So we defer until we have created cgroups
1396 * for our container which means we check here.
1398 if (pure_unified_layout(ops
) &&
1399 !faccessat(ops
->unified
->dfd_con
, "cgroup.freeze", F_OK
,
1400 AT_SYMLINK_NOFOLLOW
)) {
1401 TRACE("Unified hierarchy supports freezer");
1402 ops
->unified
->utilities
|= FREEZER_CONTROLLER
;
1406 /* cgroup-full:* is done, no need to create subdirs */
1407 static inline bool cg_mount_needs_subdirs(int cgroup_automount_type
)
1409 switch (cgroup_automount_type
) {
1410 case LXC_AUTO_CGROUP_RO
:
1412 case LXC_AUTO_CGROUP_RW
:
1414 case LXC_AUTO_CGROUP_MIXED
:
1421 /* After $rootfs/sys/fs/container/controller/the/cg/path has been created,
1422 * remount controller ro if needed and bindmount the cgroupfs onto
1423 * control/the/cg/path.
1425 static int cg_legacy_mount_controllers(int cgroup_automount_type
, struct hierarchy
*h
,
1426 char *hierarchy_mnt
, char *cgpath
,
1427 const char *container_cgroup
)
1429 __do_free
char *sourcepath
= NULL
;
1430 int ret
, remount_flags
;
1431 int flags
= MS_BIND
;
1433 if ((cgroup_automount_type
== LXC_AUTO_CGROUP_RO
) ||
1434 (cgroup_automount_type
== LXC_AUTO_CGROUP_MIXED
)) {
1435 ret
= mount(hierarchy_mnt
, hierarchy_mnt
, "cgroup", MS_BIND
, NULL
);
1437 return log_error_errno(-1, errno
, "Failed to bind mount \"%s\" onto \"%s\"",
1438 hierarchy_mnt
, hierarchy_mnt
);
1440 remount_flags
= add_required_remount_flags(hierarchy_mnt
,
1442 flags
| MS_REMOUNT
);
1443 ret
= mount(hierarchy_mnt
, hierarchy_mnt
, "cgroup",
1444 remount_flags
| MS_REMOUNT
| MS_BIND
| MS_RDONLY
,
1447 return log_error_errno(-1, errno
, "Failed to remount \"%s\" ro", hierarchy_mnt
);
1449 INFO("Remounted %s read-only", hierarchy_mnt
);
1452 sourcepath
= make_cgroup_path(h
, h
->at_base
, container_cgroup
, NULL
);
1453 if (cgroup_automount_type
== LXC_AUTO_CGROUP_RO
)
1456 ret
= mount(sourcepath
, cgpath
, "cgroup", flags
, NULL
);
1458 return log_error_errno(-1, errno
, "Failed to mount \"%s\" onto \"%s\"",
1459 h
->controllers
[0], cgpath
);
1460 INFO("Mounted \"%s\" onto \"%s\"", h
->controllers
[0], cgpath
);
1462 if (flags
& MS_RDONLY
) {
1463 remount_flags
= add_required_remount_flags(sourcepath
, cgpath
,
1464 flags
| MS_REMOUNT
);
1465 ret
= mount(sourcepath
, cgpath
, "cgroup", remount_flags
, NULL
);
1467 return log_error_errno(-1, errno
, "Failed to remount \"%s\" ro", cgpath
);
1468 INFO("Remounted %s read-only", cgpath
);
1471 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath
);
1477 * Mount cgroup hierarchies directly without using bind-mounts. The main
1478 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1479 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1481 static int __cgroupfs_mount(int cgroup_automount_type
, struct hierarchy
*h
,
1482 struct lxc_rootfs
*rootfs
, int dfd_mnt_cgroupfs
,
1483 const char *hierarchy_mnt
)
1485 __do_close
int fd_fs
= -EBADF
;
1486 unsigned int flags
= 0;
1490 if (dfd_mnt_cgroupfs
< 0)
1491 return ret_errno(EINVAL
);
1493 flags
|= MOUNT_ATTR_NOSUID
;
1494 flags
|= MOUNT_ATTR_NOEXEC
;
1495 flags
|= MOUNT_ATTR_NODEV
;
1496 flags
|= MOUNT_ATTR_RELATIME
;
1498 if ((cgroup_automount_type
== LXC_AUTO_CGROUP_RO
) ||
1499 (cgroup_automount_type
== LXC_AUTO_CGROUP_FULL_RO
))
1500 flags
|= MOUNT_ATTR_RDONLY
;
1502 if (is_unified_hierarchy(h
))
1507 if (can_use_mount_api()) {
1508 fd_fs
= fs_prepare(fstype
, -EBADF
, "", 0, 0);
1510 return log_error_errno(-errno
, errno
, "Failed to prepare filesystem context for %s", fstype
);
1512 if (!is_unified_hierarchy(h
)) {
1513 for (const char **it
= (const char **)h
->controllers
; it
&& *it
; it
++) {
1514 if (strnequal(*it
, "name=", STRLITERALLEN("name=")))
1515 ret
= fs_set_property(fd_fs
, "name", *it
+ STRLITERALLEN("name="));
1517 ret
= fs_set_property(fd_fs
, *it
, "");
1519 return log_error_errno(-errno
, errno
, "Failed to add %s controller to cgroup filesystem context %d(dev)", *it
, fd_fs
);
1523 ret
= fs_attach(fd_fs
, dfd_mnt_cgroupfs
, hierarchy_mnt
,
1524 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH
,
1527 __do_free
char *controllers
= NULL
, *target
= NULL
;
1528 unsigned int old_flags
= 0;
1529 const char *rootfs_mnt
;
1531 if (!is_unified_hierarchy(h
)) {
1532 controllers
= lxc_string_join(",", (const char **)h
->controllers
, false);
1534 return ret_errno(ENOMEM
);
1537 rootfs_mnt
= get_rootfs_mnt(rootfs
);
1538 ret
= mnt_attributes_old(flags
, &old_flags
);
1540 return log_error_errno(-EINVAL
, EINVAL
, "Unsupported mount properties specified");
1542 target
= must_make_path(rootfs_mnt
, DEFAULT_CGROUP_MOUNTPOINT
, hierarchy_mnt
, NULL
);
1543 ret
= safe_mount(NULL
, target
, fstype
, old_flags
, controllers
, rootfs_mnt
);
1546 return log_error_errno(ret
, errno
, "Failed to mount %s filesystem onto %d(%s)",
1547 fstype
, dfd_mnt_cgroupfs
, maybe_empty(hierarchy_mnt
));
1549 DEBUG("Mounted cgroup filesystem %s onto %d(%s)",
1550 fstype
, dfd_mnt_cgroupfs
, maybe_empty(hierarchy_mnt
));
/* Thin wrapper around __cgroupfs_mount() for the non-bind-mount case. */
static inline int cgroupfs_mount(int cgroup_automount_type, struct hierarchy *h,
				 struct lxc_rootfs *rootfs,
				 int dfd_mnt_cgroupfs, const char *hierarchy_mnt)
{
	return __cgroupfs_mount(cgroup_automount_type, h, rootfs,
				dfd_mnt_cgroupfs, hierarchy_mnt);
}
1562 static inline int cgroupfs_bind_mount(int cgroup_automount_type
, struct hierarchy
*h
,
1563 struct lxc_rootfs
*rootfs
,
1564 int dfd_mnt_cgroupfs
,
1565 const char *hierarchy_mnt
)
1567 switch (cgroup_automount_type
) {
1568 case LXC_AUTO_CGROUP_FULL_RO
:
1570 case LXC_AUTO_CGROUP_FULL_RW
:
1572 case LXC_AUTO_CGROUP_FULL_MIXED
:
1578 return __cgroupfs_mount(cgroup_automount_type
, h
, rootfs
,
1579 dfd_mnt_cgroupfs
, hierarchy_mnt
);
1582 __cgfsng_ops
static bool cgfsng_mount(struct cgroup_ops
*ops
,
1583 struct lxc_handler
*handler
, int cg_flags
)
1585 __do_close
int dfd_mnt_tmpfs
= -EBADF
, fd_fs
= -EBADF
;
1586 __do_free
char *cgroup_root
= NULL
;
1587 int cgroup_automount_type
;
1588 bool in_cgroup_ns
= false, wants_force_mount
= false;
1589 struct lxc_conf
*conf
= handler
->conf
;
1590 struct lxc_rootfs
*rootfs
= &conf
->rootfs
;
1591 const char *rootfs_mnt
= get_rootfs_mnt(rootfs
);
1595 return ret_set_errno(false, ENOENT
);
1597 if (!ops
->hierarchies
)
1601 return ret_set_errno(false, EINVAL
);
1603 if ((cg_flags
& LXC_AUTO_CGROUP_MASK
) == 0)
1604 return log_trace(true, "No cgroup mounts requested");
1606 if (cg_flags
& LXC_AUTO_CGROUP_FORCE
) {
1607 cg_flags
&= ~LXC_AUTO_CGROUP_FORCE
;
1608 wants_force_mount
= true;
1612 case LXC_AUTO_CGROUP_RO
:
1613 TRACE("Read-only cgroup mounts requested");
1615 case LXC_AUTO_CGROUP_RW
:
1616 TRACE("Read-write cgroup mounts requested");
1618 case LXC_AUTO_CGROUP_MIXED
:
1619 TRACE("Mixed cgroup mounts requested");
1621 case LXC_AUTO_CGROUP_FULL_RO
:
1622 TRACE("Full read-only cgroup mounts requested");
1624 case LXC_AUTO_CGROUP_FULL_RW
:
1625 TRACE("Full read-write cgroup mounts requested");
1627 case LXC_AUTO_CGROUP_FULL_MIXED
:
1628 TRACE("Full mixed cgroup mounts requested");
1631 return log_error_errno(false, EINVAL
, "Invalid cgroup mount options specified");
1633 cgroup_automount_type
= cg_flags
;
1635 if (!wants_force_mount
) {
1636 wants_force_mount
= !lxc_wants_cap(CAP_SYS_ADMIN
, conf
);
1639 * Most recent distro versions currently have init system that
1640 * do support cgroup2 but do not mount it by default unless
1641 * explicitly told so even if the host is cgroup2 only. That
1642 * means they often will fail to boot. Fix this by pre-mounting
1643 * cgroup2 by default. We will likely need to be doing this a
1644 * few years until all distros have switched over to cgroup2 at
1645 * which point we can safely assume that their init systems
1646 * will mount it themselves.
1648 if (pure_unified_layout(ops
))
1649 wants_force_mount
= true;
1652 if (cgns_supported() && container_uses_namespace(handler
, CLONE_NEWCGROUP
))
1653 in_cgroup_ns
= true;
1655 if (in_cgroup_ns
&& !wants_force_mount
)
1656 return log_trace(true, "Mounting cgroups not requested or needed");
1658 /* This is really the codepath that we want. */
1659 if (pure_unified_layout(ops
)) {
1660 __do_close
int dfd_mnt_unified
= -EBADF
;
1662 dfd_mnt_unified
= open_at(rootfs
->dfd_mnt
, DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
,
1663 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH_XDEV
, 0);
1664 if (dfd_mnt_unified
< 0)
1665 return syserrno(-errno
, "Failed to open %d(%s)", rootfs
->dfd_mnt
,
1666 DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
);
1668 * If cgroup namespaces are supported but the container will
1669 * not have CAP_SYS_ADMIN after it has started we need to mount
1670 * the cgroups manually.
1672 * Note that here we know that wants_force_mount is true.
1673 * Otherwise we would've returned early above.
1677 * 1. cgroup:rw:force -> Mount the cgroup2 filesystem.
1678 * 2. cgroup:ro:force -> Mount the cgroup2 filesystem read-only.
1679 * 3. cgroup:mixed:force -> See comment above how this
1681 * cgroup:mixed is equal to
1682 * cgroup:rw when cgroup
1683 * namespaces are supported.
1685 * 4. cgroup:rw -> No-op; init system responsible for mounting.
1686 * 5. cgroup:ro -> No-op; init system responsible for mounting.
1687 * 6. cgroup:mixed -> No-op; init system responsible for mounting.
1689 * 7. cgroup-full:rw -> Not supported.
1690 * 8. cgroup-full:ro -> Not supported.
1691 * 9. cgroup-full:mixed -> Not supported.
1693 * 10. cgroup-full:rw:force -> Not supported.
1694 * 11. cgroup-full:ro:force -> Not supported.
1695 * 12. cgroup-full:mixed:force -> Not supported.
1697 ret
= cgroupfs_mount(cgroup_automount_type
, ops
->unified
, rootfs
, dfd_mnt_unified
, "");
1699 return syserrno(false, "Failed to force mount cgroup filesystem in cgroup namespace");
1701 return log_trace(true, "Force mounted cgroup filesystem in new cgroup namespace");
1704 * Either no cgroup namespace supported (highly
1705 * unlikely unless we're dealing with a Frankenkernel.
1706 * Or the user requested to keep the cgroup namespace
1707 * of the host or another container.
1709 if (wants_force_mount
) {
1711 * 1. cgroup:rw:force -> Bind-mount the cgroup2 filesystem writable.
1712 * 2. cgroup:ro:force -> Bind-mount the cgroup2 filesystem read-only.
1713 * 3. cgroup:mixed:force -> bind-mount the cgroup2 filesystem and
1714 * and make the parent directory of the
1715 * container's cgroup read-only but the
1716 * container's cgroup writable.
1718 * 10. cgroup-full:rw:force ->
1719 * 11. cgroup-full:ro:force ->
1720 * 12. cgroup-full:mixed:force ->
1723 SYSWARN("Force-mounting the unified cgroup hierarchy without cgroup namespace support is currently not supported");
1726 SYSWARN("Mounting the unified cgroup hierarchy without cgroup namespace support is currently not supported");
1730 return syserrno(false, "Failed to mount cgroups");
1734 * Mount a tmpfs over DEFAULT_CGROUP_MOUNTPOINT. Note that we're
1735 * relying on RESOLVE_BENEATH so we need to skip the leading "/" in the
1736 * DEFAULT_CGROUP_MOUNTPOINT define.
1738 if (can_use_mount_api()) {
1739 fd_fs
= fs_prepare("tmpfs", -EBADF
, "", 0, 0);
1741 return log_error_errno(-errno
, errno
, "Failed to create new filesystem context for tmpfs");
1743 ret
= fs_set_property(fd_fs
, "mode", "0755");
1745 return log_error_errno(-errno
, errno
, "Failed to mount tmpfs onto %d(dev)", fd_fs
);
1747 ret
= fs_set_property(fd_fs
, "size", "10240k");
1749 return log_error_errno(-errno
, errno
, "Failed to mount tmpfs onto %d(dev)", fd_fs
);
1751 ret
= fs_attach(fd_fs
, rootfs
->dfd_mnt
, DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
,
1752 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH_XDEV
,
1753 MOUNT_ATTR_NOSUID
| MOUNT_ATTR_NODEV
|
1754 MOUNT_ATTR_NOEXEC
| MOUNT_ATTR_RELATIME
);
1756 cgroup_root
= must_make_path(rootfs_mnt
, DEFAULT_CGROUP_MOUNTPOINT
, NULL
);
1757 ret
= safe_mount(NULL
, cgroup_root
, "tmpfs",
1758 MS_NOSUID
| MS_NODEV
| MS_NOEXEC
| MS_RELATIME
,
1759 "size=10240k,mode=755", rootfs_mnt
);
1762 return log_error_errno(false, errno
, "Failed to mount tmpfs on %s",
1763 DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
);
1765 dfd_mnt_tmpfs
= open_at(rootfs
->dfd_mnt
, DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
,
1766 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH_XDEV
, 0);
1767 if (dfd_mnt_tmpfs
< 0)
1768 return syserrno(-errno
, "Failed to open %d(%s)", rootfs
->dfd_mnt
,
1769 DEFAULT_CGROUP_MOUNTPOINT_RELATIVE
);
1771 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1772 __do_free
char *hierarchy_mnt
= NULL
, *path2
= NULL
;
1773 struct hierarchy
*h
= ops
->hierarchies
[i
];
1775 ret
= mkdirat(dfd_mnt_tmpfs
, h
->at_mnt
, 0000);
1777 return syserrno(false, "Failed to create cgroup at_mnt %d(%s)", dfd_mnt_tmpfs
, h
->at_mnt
);
1779 if (in_cgroup_ns
&& wants_force_mount
) {
1781 * If cgroup namespaces are supported but the container
1782 * will not have CAP_SYS_ADMIN after it has started we
1783 * need to mount the cgroups manually.
1785 ret
= cgroupfs_mount(cgroup_automount_type
, h
, rootfs
,
1786 dfd_mnt_tmpfs
, h
->at_mnt
);
1793 /* Here is where the ancient kernel section begins. */
1794 ret
= cgroupfs_bind_mount(cgroup_automount_type
, h
, rootfs
,
1795 dfd_mnt_tmpfs
, h
->at_mnt
);
1799 if (!cg_mount_needs_subdirs(cgroup_automount_type
))
1803 cgroup_root
= must_make_path(rootfs_mnt
, DEFAULT_CGROUP_MOUNTPOINT
, NULL
);
1805 hierarchy_mnt
= must_make_path(cgroup_root
, h
->at_mnt
, NULL
);
1806 path2
= must_make_path(hierarchy_mnt
, h
->at_base
,
1807 ops
->container_cgroup
, NULL
);
1808 ret
= mkdir_p(path2
, 0755);
1809 if (ret
< 0 && (errno
!= EEXIST
))
1812 ret
= cg_legacy_mount_controllers(cgroup_automount_type
, h
,
1813 hierarchy_mnt
, path2
,
1814 ops
->container_cgroup
);
1822 /* Only root needs to escape to the cgroup of its init. */
1823 __cgfsng_ops
static bool cgfsng_criu_escape(const struct cgroup_ops
*ops
,
1824 struct lxc_conf
*conf
)
1827 return ret_set_errno(false, ENOENT
);
1829 if (!ops
->hierarchies
)
1833 return ret_set_errno(false, EINVAL
);
1835 if (conf
->cgroup_meta
.relative
|| geteuid())
1838 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1839 __do_free
char *fullpath
= NULL
;
1842 fullpath
= make_cgroup_path(ops
->hierarchies
[i
],
1843 ops
->hierarchies
[i
]->at_base
,
1844 "cgroup.procs", NULL
);
1845 ret
= lxc_write_to_file(fullpath
, "0", 2, false, 0666);
1847 return log_error_errno(false, errno
, "Failed to escape to cgroup \"%s\"", fullpath
);
1853 __cgfsng_ops
static int cgfsng_criu_num_hierarchies(struct cgroup_ops
*ops
)
1858 return ret_set_errno(-1, ENOENT
);
1860 if (!ops
->hierarchies
)
1863 for (; ops
->hierarchies
[i
]; i
++)
1869 __cgfsng_ops
static bool cgfsng_criu_get_hierarchies(struct cgroup_ops
*ops
,
1875 return ret_set_errno(false, ENOENT
);
1877 if (!ops
->hierarchies
)
1878 return ret_set_errno(false, ENOENT
);
1880 /* sanity check n */
1881 for (i
= 0; i
< n
; i
++)
1882 if (!ops
->hierarchies
[i
])
1883 return ret_set_errno(false, ENOENT
);
1885 *out
= ops
->hierarchies
[i
]->controllers
;
1890 static int cg_legacy_freeze(struct cgroup_ops
*ops
)
1892 struct hierarchy
*h
;
1894 h
= get_hierarchy(ops
, "freezer");
1896 return ret_set_errno(-1, ENOENT
);
1898 return lxc_write_openat(h
->path_con
, "freezer.state",
1899 "FROZEN", STRLITERALLEN("FROZEN"));
1902 static int freezer_cgroup_events_cb(int fd
, uint32_t events
, void *cbdata
,
1903 struct lxc_epoll_descr
*descr
)
1905 __do_free
char *line
= NULL
;
1906 __do_fclose
FILE *f
= NULL
;
1907 int state
= PTR_TO_INT(cbdata
);
1909 const char *state_string
;
1911 f
= fdopen_at(fd
, "", "re", PROTECT_OPEN
, PROTECT_LOOKUP_BENEATH
);
1913 return LXC_MAINLOOP_ERROR
;
1916 state_string
= "frozen 1";
1918 state_string
= "frozen 0";
1920 while (getline(&line
, &len
, f
) != -1)
1921 if (strnequal(line
, state_string
, STRLITERALLEN("frozen") + 2))
1922 return LXC_MAINLOOP_CLOSE
;
1926 return LXC_MAINLOOP_CONTINUE
;
1929 static int cg_unified_freeze_do(struct cgroup_ops
*ops
, int timeout
,
1930 const char *state_string
,
1932 const char *epoll_error
,
1933 const char *wait_error
)
1935 __do_close
int fd
= -EBADF
;
1936 call_cleaner(lxc_mainloop_close
) struct lxc_epoll_descr
*descr_ptr
= NULL
;
1938 struct lxc_epoll_descr descr
;
1939 struct hierarchy
*h
;
1943 return ret_set_errno(-1, ENOENT
);
1946 return ret_set_errno(-1, EEXIST
);
1949 __do_free
char *events_file
= NULL
;
1951 events_file
= must_make_path(h
->path_con
, "cgroup.events", NULL
);
1952 fd
= open(events_file
, O_RDONLY
| O_CLOEXEC
);
1954 return log_error_errno(-1, errno
, "Failed to open cgroup.events file");
1956 ret
= lxc_mainloop_open(&descr
);
1958 return log_error_errno(-1, errno
, "%s", epoll_error
);
1960 /* automatically cleaned up now */
1963 ret
= lxc_mainloop_add_handler_events(&descr
, fd
, EPOLLPRI
, freezer_cgroup_events_cb
, INT_TO_PTR(state_num
));
1965 return log_error_errno(-1, errno
, "Failed to add cgroup.events fd handler to mainloop");
1968 ret
= lxc_write_openat(h
->path_con
, "cgroup.freeze", state_string
, 1);
1970 return log_error_errno(-1, errno
, "Failed to open cgroup.freeze file");
1972 if (timeout
!= 0 && lxc_mainloop(&descr
, timeout
))
1973 return log_error_errno(-1, errno
, "%s", wait_error
);
/* Freeze a cgroup2 container, waiting up to @timeout milliseconds. */
static int cg_unified_freeze(struct cgroup_ops *ops, int timeout)
{
	return cg_unified_freeze_do(ops, timeout, "1", 1,
		"Failed to create epoll instance to wait for container freeze",
		"Failed to wait for container to be frozen");
}
1985 __cgfsng_ops
static int cgfsng_freeze(struct cgroup_ops
*ops
, int timeout
)
1987 if (!ops
->hierarchies
)
1988 return ret_set_errno(-1, ENOENT
);
1990 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
1991 return cg_legacy_freeze(ops
);
1993 return cg_unified_freeze(ops
, timeout
);
1996 static int cg_legacy_unfreeze(struct cgroup_ops
*ops
)
1998 struct hierarchy
*h
;
2000 h
= get_hierarchy(ops
, "freezer");
2002 return ret_set_errno(-1, ENOENT
);
2004 return lxc_write_openat(h
->path_con
, "freezer.state",
2005 "THAWED", STRLITERALLEN("THAWED"));
/* Thaw a cgroup2 container, waiting up to @timeout milliseconds. */
static int cg_unified_unfreeze(struct cgroup_ops *ops, int timeout)
{
	return cg_unified_freeze_do(ops, timeout, "0", 0,
		"Failed to create epoll instance to wait for container unfreeze",
		"Failed to wait for container to be unfrozen");
}
2015 __cgfsng_ops
static int cgfsng_unfreeze(struct cgroup_ops
*ops
, int timeout
)
2017 if (!ops
->hierarchies
)
2018 return ret_set_errno(-1, ENOENT
);
2020 if (ops
->cgroup_layout
!= CGROUP_LAYOUT_UNIFIED
)
2021 return cg_legacy_unfreeze(ops
);
2023 return cg_unified_unfreeze(ops
, timeout
);
2026 static const char *cgfsng_get_cgroup_do(struct cgroup_ops
*ops
,
2027 const char *controller
, bool limiting
)
2029 struct hierarchy
*h
;
2033 h
= get_hierarchy(ops
, controller
);
2035 return log_warn_errno(NULL
, ENOENT
,
2036 "Failed to find hierarchy for controller \"%s\"", maybe_empty(controller
));
2045 len
= strlen(h
->at_mnt
);
2046 if (!strnequal(h
->at_mnt
, DEFAULT_CGROUP_MOUNTPOINT
,
2047 STRLITERALLEN(DEFAULT_CGROUP_MOUNTPOINT
))) {
2048 path
+= STRLITERALLEN(DEFAULT_CGROUP_MOUNTPOINT
);
2049 path
+= strspn(path
, "/");
2054 __cgfsng_ops
static const char *cgfsng_get_cgroup(struct cgroup_ops
*ops
,
2055 const char *controller
)
2057 return cgfsng_get_cgroup_do(ops
, controller
, false);
2060 __cgfsng_ops
static const char *cgfsng_get_limiting_cgroup(struct cgroup_ops
*ops
,
2061 const char *controller
)
2063 return cgfsng_get_cgroup_do(ops
, controller
, true);
2066 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2067 * which must be freed by the caller.
2069 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy
*h
,
2071 const char *filename
)
2073 return make_cgroup_path(h
, inpath
, filename
, NULL
);
2076 static int cgroup_attach_leaf(const struct lxc_conf
*conf
, int unified_fd
, pid_t pid
)
2080 char pidstr
[INTTYPE_TO_STRLEN(int64_t) + 1];
2083 /* Create leaf cgroup. */
2084 ret
= mkdirat(unified_fd
, ".lxc", 0755);
2085 if (ret
< 0 && errno
!= EEXIST
)
2086 return log_error_errno(-errno
, errno
, "Failed to create leaf cgroup \".lxc\"");
2088 pidstr_len
= strnprintf(pidstr
, sizeof(pidstr
), INT64_FMT
, (int64_t)pid
);
2092 ret
= lxc_writeat(unified_fd
, ".lxc/cgroup.procs", pidstr
, pidstr_len
);
2094 ret
= lxc_writeat(unified_fd
, "cgroup.procs", pidstr
, pidstr_len
);
2096 return log_trace(0, "Moved process %s into cgroup %d(.lxc)", pidstr
, unified_fd
);
2098 /* this is a non-leaf node */
2100 return log_error_errno(-errno
, errno
, "Failed to attach to unified cgroup");
2104 char attach_cgroup
[STRLITERALLEN(".lxc-/cgroup.procs") + INTTYPE_TO_STRLEN(int) + 1];
2105 char *slash
= attach_cgroup
;
2107 ret
= strnprintf(attach_cgroup
, sizeof(attach_cgroup
), ".lxc-%d/cgroup.procs", idx
);
2112 * This shouldn't really happen but the compiler might complain
2113 * that a short write would cause a buffer overrun. So be on
2116 if (ret
< STRLITERALLEN(".lxc-/cgroup.procs"))
2117 return log_error_errno(-EINVAL
, EINVAL
, "Unexpected short write would cause buffer-overrun");
2119 slash
+= (ret
- STRLITERALLEN("/cgroup.procs"));
2122 ret
= mkdirat(unified_fd
, attach_cgroup
, 0755);
2123 if (ret
< 0 && errno
!= EEXIST
)
2124 return log_error_errno(-1, errno
, "Failed to create cgroup %s", attach_cgroup
);
2130 ret
= lxc_writeat(unified_fd
, attach_cgroup
, pidstr
, pidstr_len
);
2132 return log_trace(0, "Moved process %s into cgroup %d(%s)", pidstr
, unified_fd
, attach_cgroup
);
2134 if (rm
&& unlinkat(unified_fd
, attach_cgroup
, AT_REMOVEDIR
))
2135 SYSERROR("Failed to remove cgroup \"%d(%s)\"", unified_fd
, attach_cgroup
);
2137 /* this is a non-leaf node */
2139 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2142 } while (idx
< 1000);
2144 return log_error_errno(-1, errno
, "Failed to attach to unified cgroup");
2147 static int cgroup_attach_create_leaf(const struct lxc_conf
*conf
,
2148 int unified_fd
, int *sk_fd
)
2150 __do_close
int sk
= *sk_fd
, target_fd0
= -EBADF
, target_fd1
= -EBADF
;
2154 /* Create leaf cgroup. */
2155 ret
= mkdirat(unified_fd
, ".lxc", 0755);
2156 if (ret
< 0 && errno
!= EEXIST
)
2157 return log_error_errno(-1, errno
, "Failed to create leaf cgroup \".lxc\"");
2159 target_fd0
= open_at(unified_fd
, ".lxc/cgroup.procs", PROTECT_OPEN_W
, PROTECT_LOOKUP_BENEATH
, 0);
2161 return log_error_errno(-errno
, errno
, "Failed to open \".lxc/cgroup.procs\"");
2162 target_fds
[0] = target_fd0
;
2164 target_fd1
= open_at(unified_fd
, "cgroup.procs", PROTECT_OPEN_W
, PROTECT_LOOKUP_BENEATH
, 0);
2166 return log_error_errno(-errno
, errno
, "Failed to open \".lxc/cgroup.procs\"");
2167 target_fds
[1] = target_fd1
;
2169 ret
= lxc_abstract_unix_send_fds(sk
, target_fds
, 2, NULL
, 0);
2171 return log_error_errno(-errno
, errno
, "Failed to send \".lxc/cgroup.procs\" fds %d and %d",
2172 target_fd0
, target_fd1
);
2174 return log_debug(0, "Sent target cgroup fds %d and %d", target_fd0
, target_fd1
);
2177 static int cgroup_attach_move_into_leaf(const struct lxc_conf
*conf
,
2178 int *sk_fd
, pid_t pid
)
2180 __do_close
int sk
= *sk_fd
, target_fd0
= -EBADF
, target_fd1
= -EBADF
;
2182 char pidstr
[INTTYPE_TO_STRLEN(int64_t) + 1];
2186 ret
= lxc_abstract_unix_recv_two_fds(sk
, target_fds
);
2188 return log_error_errno(-1, errno
, "Failed to receive target cgroup fd");
2189 target_fd0
= target_fds
[0];
2190 target_fd1
= target_fds
[1];
2192 pidstr_len
= sprintf(pidstr
, INT64_FMT
, (int64_t)pid
);
2194 ret
= lxc_write_nointr(target_fd0
, pidstr
, pidstr_len
);
2195 if (ret
> 0 && ret
== pidstr_len
)
2196 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd0
);
2198 ret
= lxc_write_nointr(target_fd1
, pidstr
, pidstr_len
);
2199 if (ret
> 0 && ret
== pidstr_len
)
2200 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd1
);
2202 return log_debug_errno(-1, errno
, "Failed to move process into target cgroup via fd %d and %d",
2203 target_fd0
, target_fd1
);
2206 struct userns_exec_unified_attach_data
{
2207 const struct lxc_conf
*conf
;
2213 static int cgroup_unified_attach_child_wrapper(void *data
)
2215 struct userns_exec_unified_attach_data
*args
= data
;
2217 if (!args
->conf
|| args
->unified_fd
< 0 || args
->pid
<= 0 ||
2218 args
->sk_pair
[0] < 0 || args
->sk_pair
[1] < 0)
2219 return ret_errno(EINVAL
);
2221 close_prot_errno_disarm(args
->sk_pair
[0]);
2222 return cgroup_attach_create_leaf(args
->conf
, args
->unified_fd
,
2226 static int cgroup_unified_attach_parent_wrapper(void *data
)
2228 struct userns_exec_unified_attach_data
*args
= data
;
2230 if (!args
->conf
|| args
->unified_fd
< 0 || args
->pid
<= 0 ||
2231 args
->sk_pair
[0] < 0 || args
->sk_pair
[1] < 0)
2232 return ret_errno(EINVAL
);
2234 close_prot_errno_disarm(args
->sk_pair
[1]);
2235 return cgroup_attach_move_into_leaf(args
->conf
, &args
->sk_pair
[0],
2239 /* Technically, we're always at a delegation boundary here (This is especially
2240 * true when cgroup namespaces are available.). The reasoning is that in order
2241 * for us to have been able to start a container in the first place the root
2242 * cgroup must have been a leaf node. Now, either the container's init system
2243 * has populated the cgroup and kept it as a leaf node or it has created
2244 * subtrees. In the former case we will simply attach to the leaf node we
2245 * created when we started the container in the latter case we create our own
2246 * cgroup for the attaching process.
2248 static int __cg_unified_attach(const struct hierarchy
*h
,
2249 const struct lxc_conf
*conf
, const char *name
,
2250 const char *lxcpath
, pid_t pid
,
2251 const char *controller
)
2253 __do_close
int unified_fd
= -EBADF
;
2254 __do_free
char *path
= NULL
, *cgroup
= NULL
;
2257 if (!conf
|| !name
|| !lxcpath
|| pid
<= 0)
2258 return ret_errno(EINVAL
);
2260 ret
= cgroup_attach(conf
, name
, lxcpath
, pid
);
2262 return log_trace(0, "Attached to unified cgroup via command handler");
2263 if (ret
!= -ENOCGROUP2
)
2264 return log_error_errno(ret
, errno
, "Failed to attach to unified cgroup");
2266 /* Fall back to retrieving the path for the unified cgroup. */
2267 cgroup
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2272 path
= make_cgroup_path(h
, cgroup
, NULL
);
2274 unified_fd
= open(path
, O_PATH
| O_DIRECTORY
| O_CLOEXEC
);
2276 return ret_errno(EBADF
);
2278 if (!lxc_list_empty(&conf
->id_map
)) {
2279 struct userns_exec_unified_attach_data args
= {
2281 .unified_fd
= unified_fd
,
2285 ret
= socketpair(PF_LOCAL
, SOCK_STREAM
| SOCK_CLOEXEC
, 0, args
.sk_pair
);
2289 ret
= userns_exec_minimal(conf
,
2290 cgroup_unified_attach_parent_wrapper
,
2292 cgroup_unified_attach_child_wrapper
,
2295 ret
= cgroup_attach_leaf(conf
, unified_fd
, pid
);
2301 __cgfsng_ops
static bool cgfsng_attach(struct cgroup_ops
*ops
,
2302 const struct lxc_conf
*conf
,
2303 const char *name
, const char *lxcpath
,
2307 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
2310 return ret_set_errno(false, ENOENT
);
2312 if (!ops
->hierarchies
)
2315 len
= strnprintf(pidstr
, sizeof(pidstr
), "%d", pid
);
2319 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
2320 __do_free
char *fullpath
= NULL
, *path
= NULL
;
2321 struct hierarchy
*h
= ops
->hierarchies
[i
];
2323 if (h
->fs_type
== UNIFIED_HIERARCHY
) {
2324 ret
= __cg_unified_attach(h
, conf
, name
, lxcpath
, pid
,
2332 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, h
->controllers
[0]);
2337 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, "cgroup.procs");
2338 ret
= lxc_write_to_file(fullpath
, pidstr
, len
, false, 0666);
2340 return log_error_errno(false, errno
, "Failed to attach %d to %s",
2341 (int)pid
, fullpath
);
2347 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2348 * don't have a cgroup_data set up, so we ask the running container through the
2349 * commands API for the cgroup path.
2351 __cgfsng_ops
static int cgfsng_get(struct cgroup_ops
*ops
, const char *filename
,
2352 char *value
, size_t len
, const char *name
,
2353 const char *lxcpath
)
2355 __do_free
char *path
= NULL
;
2356 __do_free
char *controller
= NULL
;
2358 struct hierarchy
*h
;
2362 return ret_set_errno(-1, ENOENT
);
2364 controller
= strdup(filename
);
2366 return ret_errno(ENOMEM
);
2368 p
= strchr(controller
, '.');
2372 path
= lxc_cmd_get_limiting_cgroup_path(name
, lxcpath
, controller
);
2377 h
= get_hierarchy(ops
, controller
);
2379 __do_free
char *fullpath
= NULL
;
2381 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, filename
);
2382 ret
= lxc_read_from_file(fullpath
, value
, len
);
2388 static int device_cgroup_parse_access(struct device_item
*device
, const char *val
)
2390 for (int count
= 0; count
< 3; count
++, val
++) {
2393 device
->access
[count
] = *val
;
2396 device
->access
[count
] = *val
;
2399 device
->access
[count
] = *val
;
2406 return ret_errno(EINVAL
);
2413 static int device_cgroup_rule_parse(struct device_item
*device
, const char *key
,
2419 if (strequal("devices.allow", key
))
2420 device
->allow
= 1; /* allow the device */
2422 device
->allow
= 0; /* deny the device */
2424 if (strequal(val
, "a")) {
2438 device
->type
= *val
;
2451 } else if (isdigit(*val
)) {
2452 memset(temp
, 0, sizeof(temp
));
2453 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2459 ret
= lxc_safe_int(temp
, &device
->major
);
2473 } else if (isdigit(*val
)) {
2474 memset(temp
, 0, sizeof(temp
));
2475 for (count
= 0; count
< sizeof(temp
) - 1; count
++) {
2481 ret
= lxc_safe_int(temp
, &device
->minor
);
2490 return device_cgroup_parse_access(device
, ++val
);
2493 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2494 * don't have a cgroup_data set up, so we ask the running container through the
2495 * commands API for the cgroup path.
2497 __cgfsng_ops
static int cgfsng_set(struct cgroup_ops
*ops
,
2498 const char *key
, const char *value
,
2499 const char *name
, const char *lxcpath
)
2501 __do_free
char *path
= NULL
;
2502 __do_free
char *controller
= NULL
;
2504 struct hierarchy
*h
;
2507 if (!ops
|| is_empty_string(key
) || is_empty_string(value
) ||
2508 is_empty_string(name
) || is_empty_string(lxcpath
))
2509 return ret_errno(EINVAL
);
2511 controller
= strdup(key
);
2513 return ret_errno(ENOMEM
);
2515 p
= strchr(controller
, '.');
2519 if (pure_unified_layout(ops
) && strequal(controller
, "devices")) {
2520 struct device_item device
= {};
2522 ret
= device_cgroup_rule_parse(&device
, key
, value
);
2524 return log_error_errno(-1, EINVAL
, "Failed to parse device string %s=%s",
2527 ret
= lxc_cmd_add_bpf_device_cgroup(name
, lxcpath
, &device
);
2534 path
= lxc_cmd_get_limiting_cgroup_path(name
, lxcpath
, controller
);
2539 h
= get_hierarchy(ops
, controller
);
2541 __do_free
char *fullpath
= NULL
;
2543 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, key
);
2544 ret
= lxc_write_to_file(fullpath
, value
, strlen(value
), false, 0666);
2550 /* take devices cgroup line
2552 * and convert it to a valid
2553 * type major:minor mode
2554 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2557 static int device_cgroup_rule_parse_devpath(struct device_item
*device
,
2558 const char *devpath
)
2560 __do_free
char *path
= NULL
;
2566 path
= strdup(devpath
);
2568 return ret_errno(ENOMEM
);
2571 * Read path followed by mode. Ignore any trailing text.
2572 * A ' # comment' would be legal. Technically other text is not
2573 * legal, we could check for that if we cared to.
2575 for (n_parts
= 1, p
= path
; *p
; p
++) {
2591 return ret_set_errno(-1, EINVAL
);
2595 return ret_errno(EINVAL
);
2597 if (device_cgroup_parse_access(device
, mode
) < 0)
2600 ret
= stat(path
, &sb
);
2602 return ret_set_errno(-1, errno
);
2604 mode_t m
= sb
.st_mode
& S_IFMT
;
2613 return log_error_errno(-1, EINVAL
, "Unsupported device type %i for \"%s\"", m
, path
);
2616 device
->major
= MAJOR(sb
.st_rdev
);
2617 device
->minor
= MINOR(sb
.st_rdev
);
2623 static int convert_devpath(const char *invalue
, char *dest
)
2625 struct device_item device
= {};
2628 ret
= device_cgroup_rule_parse_devpath(&device
, invalue
);
2632 ret
= strnprintf(dest
, 50, "%c %d:%d %s", device
.type
, device
.major
,
2633 device
.minor
, device
.access
);
2635 return log_error_errno(ret
, -ret
,
2636 "Error on configuration value \"%c %d:%d %s\" (max 50 chars)",
2637 device
.type
, device
.major
, device
.minor
,
2643 /* Called from setup_limits - here we have the container's cgroup_data because
2644 * we created the cgroups.
2646 static int cg_legacy_set_data(struct cgroup_ops
*ops
, const char *filename
,
2647 const char *value
, bool is_cpuset
)
2649 __do_free
char *controller
= NULL
;
2651 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2652 char converted_value
[50];
2653 struct hierarchy
*h
;
2655 controller
= strdup(filename
);
2657 return ret_errno(ENOMEM
);
2659 p
= strchr(controller
, '.');
2663 if (strequal("devices.allow", filename
) && value
[0] == '/') {
2666 ret
= convert_devpath(value
, converted_value
);
2669 value
= converted_value
;
2672 h
= get_hierarchy(ops
, controller
);
2674 return log_error_errno(-ENOENT
, ENOENT
, "Failed to setup limits for the \"%s\" controller. The controller seems to be unused by \"cgfsng\" cgroup driver or not enabled on the cgroup hierarchy", controller
);
2677 int ret
= lxc_write_openat(h
->path_con
, filename
, value
, strlen(value
));
2681 return lxc_write_openat(h
->path_lim
, filename
, value
, strlen(value
));
2684 __cgfsng_ops
static bool cgfsng_setup_limits_legacy(struct cgroup_ops
*ops
,
2685 struct lxc_conf
*conf
,
2688 __do_free
struct lxc_list
*sorted_cgroup_settings
= NULL
;
2689 struct lxc_list
*cgroup_settings
= &conf
->cgroup
;
2690 struct lxc_list
*iterator
, *next
;
2691 struct lxc_cgroup
*cg
;
2695 return ret_set_errno(false, ENOENT
);
2698 return ret_set_errno(false, EINVAL
);
2700 cgroup_settings
= &conf
->cgroup
;
2701 if (lxc_list_empty(cgroup_settings
))
2704 if (!ops
->hierarchies
)
2705 return ret_set_errno(false, EINVAL
);
2707 if (pure_unified_layout(ops
))
2708 return log_warn_errno(true, EINVAL
, "Ignoring legacy cgroup limits on pure cgroup2 system");
2710 sorted_cgroup_settings
= sort_cgroup_settings(cgroup_settings
);
2711 if (!sorted_cgroup_settings
)
2714 lxc_list_for_each(iterator
, sorted_cgroup_settings
) {
2715 cg
= iterator
->elem
;
2717 if (do_devices
== strnequal("devices", cg
->subsystem
, 7)) {
2718 if (cg_legacy_set_data(ops
, cg
->subsystem
, cg
->value
, strnequal("cpuset", cg
->subsystem
, 6))) {
2719 if (do_devices
&& (errno
== EACCES
|| errno
== EPERM
)) {
2720 SYSWARN("Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2723 SYSERROR("Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2726 DEBUG("Set controller \"%s\" set to \"%s\"", cg
->subsystem
, cg
->value
);
2731 INFO("Limits for the legacy cgroup hierarchies have been setup");
2733 lxc_list_for_each_safe(iterator
, sorted_cgroup_settings
, next
) {
2734 lxc_list_del(iterator
);
2742 * Some of the parsing logic comes from the original cgroup device v1
2743 * implementation in the kernel.
2745 static int bpf_device_cgroup_prepare(struct cgroup_ops
*ops
,
2746 struct lxc_conf
*conf
, const char *key
,
2749 struct device_item device_item
= {};
2752 if (strequal("devices.allow", key
) && abspath(val
))
2753 ret
= device_cgroup_rule_parse_devpath(&device_item
, val
);
2755 ret
= device_cgroup_rule_parse(&device_item
, key
, val
);
2757 return syserrno_set(EINVAL
, "Failed to parse device rule %s=%s", key
, val
);
2760 * Note that bpf_list_add_device() returns 1 if it altered the device
2761 * list and 0 if it didn't; both return values indicate success.
2762 * Only a negative return value indicates an error.
2764 ret
= bpf_list_add_device(&conf
->bpf_devices
, &device_item
);
2771 __cgfsng_ops
static bool cgfsng_setup_limits(struct cgroup_ops
*ops
,
2772 struct lxc_handler
*handler
)
2774 struct lxc_list
*cgroup_settings
, *iterator
;
2775 struct hierarchy
*h
;
2776 struct lxc_conf
*conf
;
2779 return ret_set_errno(false, ENOENT
);
2781 if (!ops
->hierarchies
)
2784 if (!ops
->container_cgroup
)
2785 return ret_set_errno(false, EINVAL
);
2787 if (!handler
|| !handler
->conf
)
2788 return ret_set_errno(false, EINVAL
);
2789 conf
= handler
->conf
;
2791 cgroup_settings
= &conf
->cgroup2
;
2792 if (lxc_list_empty(cgroup_settings
))
2795 if (!pure_unified_layout(ops
))
2796 return log_warn_errno(true, EINVAL
, "Ignoring cgroup2 limits on legacy cgroup system");
2802 lxc_list_for_each (iterator
, cgroup_settings
) {
2803 struct lxc_cgroup
*cg
= iterator
->elem
;
2806 if (strnequal("devices", cg
->subsystem
, 7))
2807 ret
= bpf_device_cgroup_prepare(ops
, conf
, cg
->subsystem
, cg
->value
);
2809 ret
= lxc_write_openat(h
->path_lim
, cg
->subsystem
, cg
->value
, strlen(cg
->value
));
2811 return log_error_errno(false, errno
, "Failed to set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2813 TRACE("Set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2816 return log_info(true, "Limits for the unified cgroup hierarchy have been setup");
2819 __cgfsng_ops
static bool cgfsng_devices_activate(struct cgroup_ops
*ops
, struct lxc_handler
*handler
)
2821 struct lxc_conf
*conf
;
2822 struct hierarchy
*unified
;
2825 return ret_set_errno(false, ENOENT
);
2827 if (!ops
->hierarchies
)
2830 if (!ops
->container_cgroup
)
2831 return ret_set_errno(false, EEXIST
);
2833 if (!handler
|| !handler
->conf
)
2834 return ret_set_errno(false, EINVAL
);
2835 conf
= handler
->conf
;
2837 unified
= ops
->unified
;
2838 if (!unified
|| !device_utility_controller(unified
) ||
2839 !unified
->path_con
||
2840 lxc_list_empty(&(conf
->bpf_devices
).device_item
))
2843 return bpf_cgroup_devices_attach(ops
, &conf
->bpf_devices
);
2846 static bool __cgfsng_delegate_controllers(struct cgroup_ops
*ops
, const char *cgroup
)
2848 __do_close
int dfd_final
= -EBADF
;
2849 __do_free
char *add_controllers
= NULL
, *copy
= NULL
;
2850 size_t full_len
= 0;
2851 struct hierarchy
*unified
;
2856 if (!ops
->hierarchies
|| !pure_unified_layout(ops
))
2859 unified
= ops
->unified
;
2860 if (!unified
->controllers
[0])
2863 /* For now we simply enable all controllers that we have detected by
2864 * creating a string like "+memory +pids +cpu +io".
2865 * TODO: In the near future we might want to support "-<controller>"
2866 * etc. but whether supporting semantics like this make sense will need
2869 for (it
= unified
->controllers
; it
&& *it
; it
++) {
2870 full_len
+= strlen(*it
) + 2;
2871 add_controllers
= must_realloc(add_controllers
, full_len
+ 1);
2873 if (unified
->controllers
[0] == *it
)
2874 add_controllers
[0] = '\0';
2876 (void)strlcat(add_controllers
, "+", full_len
+ 1);
2877 (void)strlcat(add_controllers
, *it
, full_len
+ 1);
2879 if ((it
+ 1) && *(it
+ 1))
2880 (void)strlcat(add_controllers
, " ", full_len
+ 1);
2883 copy
= strdup(cgroup
);
2888 * Placing the write to cgroup.subtree_control before the open() is
2889 * intentional because of the cgroup2 delegation model. It enforces
2890 * that leaf cgroups don't have any controllers enabled for delegation.
2892 dfd_cur
= unified
->dfd_base
;
2893 lxc_iterate_parts(cur
, copy
, "/") {
2895 * Even though we vetted the paths when we parsed the config
2896 * we're paranoid here and check that the path is neither
2897 * absolute nor walks upwards.
2900 return syserrno_set(-EINVAL
, "No absolute paths allowed");
2902 if (strnequal(cur
, "..", STRLITERALLEN("..")))
2903 return syserrno_set(-EINVAL
, "No upward walking paths allowed");
2905 ret
= lxc_writeat(dfd_cur
, "cgroup.subtree_control", add_controllers
, full_len
);
2907 return syserrno(-errno
, "Could not enable \"%s\" controllers in the unified cgroup %d", add_controllers
, dfd_cur
);
2909 TRACE("Enabled \"%s\" controllers in the unified cgroup %d", add_controllers
, dfd_cur
);
2911 dfd_final
= open_at(dfd_cur
, cur
, PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_BENEATH
, 0);
2913 return syserrno(-errno
, "Fail to open directory %d(%s)", dfd_cur
, cur
);
2914 if (dfd_cur
!= unified
->dfd_base
)
2917 * Leave dfd_final pointing to the last fd we opened so
2918 * it will be automatically zapped if we return early.
2920 dfd_cur
= dfd_final
;
2926 __cgfsng_ops
static bool cgfsng_monitor_delegate_controllers(struct cgroup_ops
*ops
)
2929 return ret_set_errno(false, ENOENT
);
2931 return __cgfsng_delegate_controllers(ops
, ops
->monitor_cgroup
);
2934 __cgfsng_ops
static bool cgfsng_payload_delegate_controllers(struct cgroup_ops
*ops
)
2937 return ret_set_errno(false, ENOENT
);
2939 return __cgfsng_delegate_controllers(ops
, ops
->container_cgroup
);
2942 static inline bool unified_cgroup(const char *line
)
2944 return *line
== '0';
2947 static inline char *current_unified_cgroup(bool relative
, char *line
)
2949 char *current_cgroup
;
2951 line
+= STRLITERALLEN("0::");
2954 return ERR_PTR(-EINVAL
);
2956 /* remove init.scope */
2958 line
= prune_init_scope(line
);
2960 /* create a relative path */
2963 current_cgroup
= strdup(line
);
2964 if (!current_cgroup
)
2965 return ERR_PTR(-ENOMEM
);
2967 return current_cgroup
;
2970 static inline const char *unprefix(const char *controllers
)
2972 if (strnequal(controllers
, "name=", STRLITERALLEN("name=")))
2973 return controllers
+ STRLITERALLEN("name=");
2977 static int __list_cgroup_delegate(char ***delegate
)
2979 __do_free
char **list
= NULL
;
2980 __do_free
char *buf
= NULL
;
2981 char *standard
[] = {
2984 "cgroup.subtree_control",
2991 buf
= read_file_at(-EBADF
, "/sys/kernel/cgroup/delegate", PROTECT_OPEN
, 0);
2993 for (char **p
= standard
; p
&& *p
; p
++) {
2994 ret
= list_add_string(&list
, *p
);
2999 *delegate
= move_ptr(list
);
3000 return syswarn(0, "Failed to read /sys/kernel/cgroup/delegate");
3003 lxc_iterate_parts(token
, buf
, " \t\n") {
3005 * We always need to chown this for both cgroup and
3008 if (strequal(token
, "cgroup.procs"))
3011 ret
= list_add_string(&list
, token
);
3016 *delegate
= move_ptr(list
);
3020 static bool unified_hierarchy_delegated(int dfd_base
, char ***ret_files
)
3022 __do_free_string_list
char **list
= NULL
;
3025 ret
= __list_cgroup_delegate(&list
);
3027 return syserrno(ret
, "Failed to determine unified cgroup delegation requirements");
3029 for (char *const *s
= list
; s
&& *s
; s
++) {
3030 if (!faccessat(dfd_base
, *s
, W_OK
, 0) || errno
== ENOENT
)
3033 return sysinfo(false, "The %s file is not writable, skipping unified hierarchy", *s
);
3036 *ret_files
= move_ptr(list
);
3040 static bool legacy_hierarchy_delegated(int dfd_base
)
3042 if (faccessat(dfd_base
, "cgroup.procs", W_OK
, 0) && errno
!= ENOENT
)
3043 return sysinfo(false, "The cgroup.procs file is not writable, skipping legacy hierarchy");
3048 static int __initialize_cgroups(struct cgroup_ops
*ops
, bool relative
,
3051 __do_free
char *cgroup_info
= NULL
;
3055 * Root spawned containers escape the current cgroup, so use init's
3056 * cgroups as our base in that case.
3058 if (!relative
&& (geteuid() == 0))
3059 cgroup_info
= read_file_at(-EBADF
, "/proc/1/cgroup", PROTECT_OPEN
, 0);
3061 cgroup_info
= read_file_at(-EBADF
, "/proc/self/cgroup", PROTECT_OPEN
, 0);
3063 return ret_errno(ENOMEM
);
3065 lxc_iterate_parts(it
, cgroup_info
, "\n") {
3066 __do_close
int dfd_base
= -EBADF
, dfd_mnt
= -EBADF
;
3067 __do_free
char *controllers
= NULL
, *current_cgroup
= NULL
;
3068 __do_free_string_list
char **controller_list
= NULL
,
3073 /* Handle the unified cgroup hierarchy. */
3075 if (unified_cgroup(line
)) {
3078 type
= UNIFIED_HIERARCHY
;
3080 current_cgroup
= current_unified_cgroup(relative
, line
);
3081 if (IS_ERR(current_cgroup
))
3082 return PTR_ERR(current_cgroup
);
3084 if (unified_cgroup_fd(ops
->dfd_mnt
)) {
3085 dfd_mnt
= dup_cloexec(ops
->dfd_mnt
);
3088 dfd_mnt
= open_at(ops
->dfd_mnt
,
3090 PROTECT_OPATH_DIRECTORY
,
3091 PROTECT_LOOKUP_ABSOLUTE_XDEV
, 0);
3092 unified_mnt
= "unified";
3095 if (errno
!= ENOENT
)
3096 return syserrno(-errno
, "Failed to open %d/unified", ops
->dfd_mnt
);
3098 SYSTRACE("Unified cgroup not mounted");
3103 if (!is_empty_string(current_cgroup
)) {
3104 dfd_base
= open_at(dfd_mnt
, current_cgroup
,
3105 PROTECT_OPATH_DIRECTORY
,
3106 PROTECT_LOOKUP_BENEATH_XDEV
, 0);
3108 return syserrno(-errno
, "Failed to open %d/%s", dfd_mnt
, current_cgroup
);
3112 if (!unified_hierarchy_delegated(dfd
, &delegate
))
3115 controller_list
= unified_controllers(dfd
, "cgroup.controllers");
3116 if (!controller_list
) {
3117 TRACE("No controllers are enabled for delegation in the unified hierarchy");
3118 controller_list
= list_new();
3119 if (!controller_list
)
3120 return syserrno(-ENOMEM
, "Failed to create empty controller list");
3123 controllers
= strdup(unified_mnt
);
3125 return ret_errno(ENOMEM
);
3127 char *__controllers
, *__current_cgroup
;
3129 type
= LEGACY_HIERARCHY
;
3131 __controllers
= strchr(line
, ':');
3133 return ret_errno(EINVAL
);
3136 __current_cgroup
= strchr(__controllers
, ':');
3137 if (!__current_cgroup
)
3138 return ret_errno(EINVAL
);
3139 *__current_cgroup
= '\0';
3142 controllers
= strdup(unprefix(__controllers
));
3144 return ret_errno(ENOMEM
);
3146 dfd_mnt
= open_at(ops
->dfd_mnt
,
3147 controllers
, PROTECT_OPATH_DIRECTORY
,
3148 PROTECT_LOOKUP_ABSOLUTE_XDEV
, 0);
3150 if (errno
!= ENOENT
)
3151 return syserrno(-errno
, "Failed to open %d/%s",
3152 ops
->dfd_mnt
, controllers
);
3154 SYSTRACE("%s not mounted", controllers
);
3159 if (!abspath(__current_cgroup
))
3160 return ret_errno(EINVAL
);
3162 /* remove init.scope */
3164 __current_cgroup
= prune_init_scope(__current_cgroup
);
3166 /* create a relative path */
3167 __current_cgroup
= deabs(__current_cgroup
);
3169 current_cgroup
= strdup(__current_cgroup
);
3170 if (!current_cgroup
)
3171 return ret_errno(ENOMEM
);
3173 if (!is_empty_string(current_cgroup
)) {
3174 dfd_base
= open_at(dfd_mnt
, current_cgroup
,
3175 PROTECT_OPATH_DIRECTORY
,
3176 PROTECT_LOOKUP_BENEATH_XDEV
, 0);
3178 return syserrno(-errno
, "Failed to open %d/%s",
3179 dfd_mnt
, current_cgroup
);
3183 if (!legacy_hierarchy_delegated(dfd
))
3187 * We intentionally pass __current_cgroup here and not
3188 * controllers because we would otherwise chop the
3191 controller_list
= list_add_controllers(__controllers
);
3192 if (!controller_list
)
3193 return syserrno(-ENOMEM
, "Failed to create controller list from %s", __controllers
);
3195 if (skip_hierarchy(ops
, controller_list
))
3198 ops
->cgroup_layout
= CGROUP_LAYOUT_LEGACY
;
3201 ret
= cgroup_hierarchy_add(ops
, dfd_mnt
, controllers
, dfd
,
3202 current_cgroup
, controller_list
, type
);
3204 return syserrno(ret
, "Failed to add %s hierarchy", controllers
);
3206 /* Transfer ownership. */
3209 move_ptr(current_cgroup
);
3210 move_ptr(controllers
);
3211 move_ptr(controller_list
);
3212 if (type
== UNIFIED_HIERARCHY
)
3213 ops
->unified
->delegate
= move_ptr(delegate
);
3216 /* determine cgroup layout */
3218 if (ops
->cgroup_layout
== CGROUP_LAYOUT_LEGACY
) {
3219 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
3221 if (bpf_devices_cgroup_supported())
3222 ops
->unified
->utilities
|= DEVICES_CONTROLLER
;
3223 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
3227 if (!controllers_available(ops
))
3228 return syserrno_set(-ENOENT
, "One or more requested controllers unavailable or not delegated");
3233 static int initialize_cgroups(struct cgroup_ops
*ops
, struct lxc_conf
*conf
)
3235 __do_close
int dfd
= -EBADF
;
3237 const char *controllers_use
;
3239 if (ops
->dfd_mnt
>= 0)
3240 return ret_errno(EBUSY
);
3243 * I don't see the need for allowing symlinks here. If users want to
3244 * have their hierarchy available in different locations I strongly
3245 * suggest bind-mounts.
3247 dfd
= open_at(-EBADF
, DEFAULT_CGROUP_MOUNTPOINT
,
3248 PROTECT_OPATH_DIRECTORY
, PROTECT_LOOKUP_ABSOLUTE_XDEV
, 0);
3250 return syserrno(-errno
, "Failed to open " DEFAULT_CGROUP_MOUNTPOINT
);
3252 controllers_use
= lxc_global_config_value("lxc.cgroup.use");
3253 if (controllers_use
) {
3254 __do_free
char *dup
= NULL
;
3257 dup
= strdup(controllers_use
);
3261 lxc_iterate_parts(it
, dup
, ",") {
3262 ret
= list_add_string(&ops
->cgroup_use
, it
);
3269 * Keep dfd referenced by the cleanup function and actually move the fd
3270 * once we know the initialization succeeded. So if we fail we clean up
3275 ret
= __initialize_cgroups(ops
, conf
->cgroup_meta
.relative
, !lxc_list_empty(&conf
->id_map
));
3277 return syserrno(ret
, "Failed to initialize cgroups");
3279 /* Transfer ownership to cgroup_ops. */
3284 __cgfsng_ops
static int cgfsng_data_init(struct cgroup_ops
*ops
)
3286 const char *cgroup_pattern
;
3289 return ret_set_errno(-1, ENOENT
);
3291 /* copy system-wide cgroup information */
3292 cgroup_pattern
= lxc_global_config_value("lxc.cgroup.pattern");
3293 if (cgroup_pattern
&& !strequal(cgroup_pattern
, "")) {
3294 ops
->cgroup_pattern
= strdup(cgroup_pattern
);
3295 if (!ops
->cgroup_pattern
)
3296 return ret_errno(ENOMEM
);
3302 struct cgroup_ops
*cgroup_ops_init(struct lxc_conf
*conf
)
3304 __do_free
struct cgroup_ops
*cgfsng_ops
= NULL
;
3306 cgfsng_ops
= zalloc(sizeof(struct cgroup_ops
));
3308 return ret_set_errno(NULL
, ENOMEM
);
3310 cgfsng_ops
->cgroup_layout
= CGROUP_LAYOUT_UNKNOWN
;
3311 cgfsng_ops
->dfd_mnt
= -EBADF
;
3313 if (initialize_cgroups(cgfsng_ops
, conf
))
3316 cgfsng_ops
->data_init
= cgfsng_data_init
;
3317 cgfsng_ops
->payload_destroy
= cgfsng_payload_destroy
;
3318 cgfsng_ops
->monitor_destroy
= cgfsng_monitor_destroy
;
3319 cgfsng_ops
->monitor_create
= cgfsng_monitor_create
;
3320 cgfsng_ops
->monitor_enter
= cgfsng_monitor_enter
;
3321 cgfsng_ops
->monitor_delegate_controllers
= cgfsng_monitor_delegate_controllers
;
3322 cgfsng_ops
->payload_delegate_controllers
= cgfsng_payload_delegate_controllers
;
3323 cgfsng_ops
->payload_create
= cgfsng_payload_create
;
3324 cgfsng_ops
->payload_enter
= cgfsng_payload_enter
;
3325 cgfsng_ops
->finalize
= cgfsng_finalize
;
3326 cgfsng_ops
->get_cgroup
= cgfsng_get_cgroup
;
3327 cgfsng_ops
->get
= cgfsng_get
;
3328 cgfsng_ops
->set
= cgfsng_set
;
3329 cgfsng_ops
->freeze
= cgfsng_freeze
;
3330 cgfsng_ops
->unfreeze
= cgfsng_unfreeze
;
3331 cgfsng_ops
->setup_limits_legacy
= cgfsng_setup_limits_legacy
;
3332 cgfsng_ops
->setup_limits
= cgfsng_setup_limits
;
3333 cgfsng_ops
->driver
= "cgfsng";
3334 cgfsng_ops
->version
= "1.0.0";
3335 cgfsng_ops
->attach
= cgfsng_attach
;
3336 cgfsng_ops
->chown
= cgfsng_chown
;
3337 cgfsng_ops
->mount
= cgfsng_mount
;
3338 cgfsng_ops
->devices_activate
= cgfsng_devices_activate
;
3339 cgfsng_ops
->get_limiting_cgroup
= cgfsng_get_limiting_cgroup
;
3341 cgfsng_ops
->criu_escape
= cgfsng_criu_escape
;
3342 cgfsng_ops
->criu_num_hierarchies
= cgfsng_criu_num_hierarchies
;
3343 cgfsng_ops
->criu_get_hierarchies
= cgfsng_criu_get_hierarchies
;
3345 return move_ptr(cgfsng_ops
);
3348 int cgroup_attach(const struct lxc_conf
*conf
, const char *name
,
3349 const char *lxcpath
, pid_t pid
)
3351 __do_close
int unified_fd
= -EBADF
;
3354 if (!conf
|| is_empty_string(name
) || is_empty_string(lxcpath
) || pid
<= 0)
3355 return ret_errno(EINVAL
);
3357 unified_fd
= lxc_cmd_get_cgroup2_fd(name
, lxcpath
);
3359 return ret_errno(ENOCGROUP2
);
3361 if (!lxc_list_empty(&conf
->id_map
)) {
3362 struct userns_exec_unified_attach_data args
= {
3364 .unified_fd
= unified_fd
,
3368 ret
= socketpair(PF_LOCAL
, SOCK_STREAM
| SOCK_CLOEXEC
, 0, args
.sk_pair
);
3372 ret
= userns_exec_minimal(conf
,
3373 cgroup_unified_attach_parent_wrapper
,
3375 cgroup_unified_attach_child_wrapper
,
3378 ret
= cgroup_attach_leaf(conf
, unified_fd
, pid
);
3384 /* Connects to command socket therefore isn't callable from command handler. */
3385 int cgroup_get(const char *name
, const char *lxcpath
,
3386 const char *filename
, char *buf
, size_t len
)
3388 __do_close
int unified_fd
= -EBADF
;
3391 if (is_empty_string(filename
) || is_empty_string(name
) ||
3392 is_empty_string(lxcpath
))
3393 return ret_errno(EINVAL
);
3395 if ((buf
&& !len
) || (len
&& !buf
))
3396 return ret_errno(EINVAL
);
3398 unified_fd
= lxc_cmd_get_limiting_cgroup2_fd(name
, lxcpath
);
3400 return ret_errno(ENOCGROUP2
);
3402 ret
= lxc_read_try_buf_at(unified_fd
, filename
, buf
, len
);
3404 SYSERROR("Failed to read cgroup value");
3409 /* Connects to command socket therefore isn't callable from command handler. */
3410 int cgroup_set(const char *name
, const char *lxcpath
,
3411 const char *filename
, const char *value
)
3413 __do_close
int unified_fd
= -EBADF
;
3416 if (is_empty_string(filename
) || is_empty_string(value
) ||
3417 is_empty_string(name
) || is_empty_string(lxcpath
))
3418 return ret_errno(EINVAL
);
3420 unified_fd
= lxc_cmd_get_limiting_cgroup2_fd(name
, lxcpath
);
3422 return ret_errno(ENOCGROUP2
);
3424 if (strnequal(filename
, "devices.", STRLITERALLEN("devices."))) {
3425 struct device_item device
= {};
3427 ret
= device_cgroup_rule_parse(&device
, filename
, value
);
3429 return log_error_errno(-1, EINVAL
, "Failed to parse device string %s=%s", filename
, value
);
3431 ret
= lxc_cmd_add_bpf_device_cgroup(name
, lxcpath
, &device
);
3433 ret
= lxc_writeat(unified_fd
, filename
, value
, strlen(value
));
3439 static int do_cgroup_freeze(int unified_fd
,
3440 const char *state_string
,
3443 const char *epoll_error
,
3444 const char *wait_error
)
3446 __do_close
int events_fd
= -EBADF
;
3447 call_cleaner(lxc_mainloop_close
) struct lxc_epoll_descr
*descr_ptr
= NULL
;
3449 struct lxc_epoll_descr descr
= {};
3452 ret
= lxc_mainloop_open(&descr
);
3454 return log_error_errno(-1, errno
, "%s", epoll_error
);
3456 /* automatically cleaned up now */
3459 events_fd
= open_at(unified_fd
, "cgroup.events", PROTECT_OPEN
, PROTECT_LOOKUP_BENEATH
, 0);
3461 return log_error_errno(-errno
, errno
, "Failed to open cgroup.events file");
3463 ret
= lxc_mainloop_add_handler_events(&descr
, events_fd
, EPOLLPRI
, freezer_cgroup_events_cb
, INT_TO_PTR(state_num
));
3465 return log_error_errno(-1, errno
, "Failed to add cgroup.events fd handler to mainloop");
3468 ret
= lxc_writeat(unified_fd
, "cgroup.freeze", state_string
, 1);
3470 return log_error_errno(-1, errno
, "Failed to open cgroup.freeze file");
3473 ret
= lxc_mainloop(&descr
, timeout
);
3475 return log_error_errno(-1, errno
, "%s", wait_error
);
3478 return log_trace(0, "Container now %s", (state_num
== 1) ? "frozen" : "unfrozen");
3481 static inline int __cgroup_freeze(int unified_fd
, int timeout
)
3483 return do_cgroup_freeze(unified_fd
, "1", 1, timeout
,
3484 "Failed to create epoll instance to wait for container freeze",
3485 "Failed to wait for container to be frozen");
3488 int cgroup_freeze(const char *name
, const char *lxcpath
, int timeout
)
3490 __do_close
int unified_fd
= -EBADF
;
3493 if (is_empty_string(name
) || is_empty_string(lxcpath
))
3494 return ret_errno(EINVAL
);
3496 unified_fd
= lxc_cmd_get_limiting_cgroup2_fd(name
, lxcpath
);
3498 return ret_errno(ENOCGROUP2
);
3500 lxc_cmd_notify_state_listeners(name
, lxcpath
, FREEZING
);
3501 ret
= __cgroup_freeze(unified_fd
, timeout
);
3502 lxc_cmd_notify_state_listeners(name
, lxcpath
, !ret
? FROZEN
: RUNNING
);
3506 int __cgroup_unfreeze(int unified_fd
, int timeout
)
3508 return do_cgroup_freeze(unified_fd
, "0", 0, timeout
,
3509 "Failed to create epoll instance to wait for container freeze",
3510 "Failed to wait for container to be frozen");
3513 int cgroup_unfreeze(const char *name
, const char *lxcpath
, int timeout
)
3515 __do_close
int unified_fd
= -EBADF
;
3518 if (is_empty_string(name
) || is_empty_string(lxcpath
))
3519 return ret_errno(EINVAL
);
3521 unified_fd
= lxc_cmd_get_limiting_cgroup2_fd(name
, lxcpath
);
3523 return ret_errno(ENOCGROUP2
);
3525 lxc_cmd_notify_state_listeners(name
, lxcpath
, THAWED
);
3526 ret
= __cgroup_unfreeze(unified_fd
, timeout
);
3527 lxc_cmd_notify_state_listeners(name
, lxcpath
, !ret
? RUNNING
: FROZEN
);