/* lxc: linux Container library
 *
 * Copyright © 2016 Canonical Ltd.
 *
 * Authors:
 * Serge Hallyn <serge.hallyn@ubuntu.com>
 * Christian Brauner <christian.brauner@ubuntu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
 * cgroup backend. The original cgfs.c was designed to be as flexible
 * as possible. It would try to find cgroup filesystems no matter where
 * or how you had them mounted, and deduce the most usable mount for
 * each controller.
 *
 * This new implementation assumes that cgroup filesystems are mounted
 * under /sys/fs/cgroup/clist where clist is either the controller, or
 * a comma-separated list of controllers.
 */
44 #include <linux/kdev_t.h>
45 #include <linux/types.h>
50 #include <sys/types.h>
55 #include "cgroup_utils.h"
61 #include "memory_utils.h"
62 #include "storage/storage.h"
66 #include "include/strlcpy.h"
70 #include "include/strlcat.h"
73 lxc_log_define(cgfsng
, cgroup
);
75 static void free_string_list(char **clist
)
82 for (i
= 0; clist
[i
]; i
++)
88 /* Given a pointer to a null-terminated array of pointers, realloc to add one
89 * entry, and point the new entry to NULL. Do not fail. Return the index to the
90 * second-to-last entry - that is, the one which is now available for use
91 * (keeping the list null-terminated).
93 static int append_null_to_list(void ***list
)
98 for (; (*list
)[newentry
]; newentry
++)
101 *list
= must_realloc(*list
, (newentry
+ 2) * sizeof(void **));
102 (*list
)[newentry
+ 1] = NULL
;
106 /* Given a null-terminated array of strings, check whether @entry is one of the
109 static bool string_in_list(char **list
, const char *entry
)
116 for (i
= 0; list
[i
]; i
++)
117 if (strcmp(list
[i
], entry
) == 0)
123 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
124 * "name=systemd". Do not fail.
126 static char *cg_legacy_must_prefix_named(char *entry
)
132 prefixed
= must_realloc(NULL
, len
+ 6);
134 memcpy(prefixed
, "name=", STRLITERALLEN("name="));
135 memcpy(prefixed
+ STRLITERALLEN("name="), entry
, len
);
136 prefixed
[len
+ 5] = '\0';
141 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
144 * We also handle named subsystems here. Any controller which is not a kernel
145 * subsystem, we prefix "name=". Any which is both a kernel and named subsystem,
146 * we refuse to use because we're not sure which we have here.
147 * (TODO: We could work around this in some cases by just remounting to be
148 * unambiguous, or by comparing mountpoint contents with current cgroup.)
150 * The last entry will always be NULL.
152 static void must_append_controller(char **klist
, char **nlist
, char ***clist
,
158 if (string_in_list(klist
, entry
) && string_in_list(nlist
, entry
)) {
159 ERROR("Refusing to use ambiguous controller \"%s\"", entry
);
160 ERROR("It is both a named and kernel subsystem");
164 newentry
= append_null_to_list((void ***)clist
);
166 if (strncmp(entry
, "name=", 5) == 0)
167 copy
= must_copy_string(entry
);
168 else if (string_in_list(klist
, entry
))
169 copy
= must_copy_string(entry
);
171 copy
= cg_legacy_must_prefix_named(entry
);
173 (*clist
)[newentry
] = copy
;
176 /* Given a handler's cgroup data, return the struct hierarchy for the controller
177 * @c, or NULL if there is none.
179 struct hierarchy
*get_hierarchy(struct cgroup_ops
*ops
, const char *controller
)
185 if (!ops
->hierarchies
) {
186 TRACE("There are no useable cgroup controllers");
190 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
192 /* This is the empty unified hierarchy. */
193 if (ops
->hierarchies
[i
]->controllers
&&
194 !ops
->hierarchies
[i
]->controllers
[0])
195 return ops
->hierarchies
[i
];
200 if (string_in_list(ops
->hierarchies
[i
]->controllers
, controller
))
201 return ops
->hierarchies
[i
];
205 WARN("There is no useable %s controller", controller
);
207 WARN("There is no empty unified cgroup hierarchy");
212 #define BATCH_SIZE 50
213 static void batch_realloc(char **mem
, size_t oldlen
, size_t newlen
)
215 int newbatches
= (newlen
/ BATCH_SIZE
) + 1;
216 int oldbatches
= (oldlen
/ BATCH_SIZE
) + 1;
218 if (!*mem
|| newbatches
> oldbatches
) {
219 *mem
= must_realloc(*mem
, newbatches
* BATCH_SIZE
);
223 static void append_line(char **dest
, size_t oldlen
, char *new, size_t newlen
)
225 size_t full
= oldlen
+ newlen
;
227 batch_realloc(dest
, oldlen
, full
+ 1);
229 memcpy(*dest
+ oldlen
, new, newlen
+ 1);
232 /* Slurp in a whole file */
233 static char *read_file(const char *fnam
)
235 __do_free
char *line
= NULL
;
236 __do_fclose
FILE *f
= NULL
;
239 size_t len
= 0, fulllen
= 0;
241 f
= fopen(fnam
, "r");
244 while ((linelen
= getline(&line
, &len
, f
)) != -1) {
245 append_line(&buf
, fulllen
, line
, linelen
);
251 /* Taken over modified from the kernel sources. */
/* Taken over modified from the kernel sources. */
#define NBITS 32 /* bits in uint32_t */
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)

/* Set bit @bit in the bit array @bitarr.
 *
 * Note: the shifted constant must be unsigned. With the original
 * "1 << (bit % NBITS)", bit % 32 == 31 left-shifts into the sign bit of
 * a 32-bit int, which is undefined behavior; "1U" keeps the shift in
 * unsigned arithmetic and matches the uint32_t element type.
 */
static void set_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] |= (1U << (bit % NBITS));
}

/* Clear bit @bit in the bit array @bitarr. */
static void clear_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] &= ~(1U << (bit % NBITS));
}

/* Return true if bit @bit is set in the bit array @bitarr. */
static bool is_set(unsigned bit, uint32_t *bitarr)
{
	return (bitarr[bit / NBITS] & (1U << (bit % NBITS))) != 0;
}
271 /* Create cpumask from cpulist aka turn:
279 static uint32_t *lxc_cpumask(char *buf
, size_t nbits
)
285 arrlen
= BITS_TO_LONGS(nbits
);
286 bitarr
= calloc(arrlen
, sizeof(uint32_t));
290 lxc_iterate_parts(token
, buf
, ",") {
295 start
= strtoul(token
, NULL
, 0);
297 range
= strchr(token
, '-');
299 end
= strtoul(range
+ 1, NULL
, 0);
301 if (!(start
<= end
)) {
312 set_bit(start
++, bitarr
);
318 /* Turn cpumask into simple, comma-separated cpulist. */
319 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr
, size_t nbits
)
323 char **cpulist
= NULL
;
324 char numstr
[INTTYPE_TO_STRLEN(size_t)] = {0};
326 for (i
= 0; i
<= nbits
; i
++) {
327 if (!is_set(i
, bitarr
))
330 ret
= snprintf(numstr
, sizeof(numstr
), "%zu", i
);
331 if (ret
< 0 || (size_t)ret
>= sizeof(numstr
)) {
332 lxc_free_array((void **)cpulist
, free
);
336 ret
= lxc_append_string(&cpulist
, numstr
);
338 lxc_free_array((void **)cpulist
, free
);
346 return lxc_string_join(",", (const char **)cpulist
, false);
349 static ssize_t
get_max_cpus(char *cpulist
)
352 char *maxcpus
= cpulist
;
355 c1
= strrchr(maxcpus
, ',');
359 c2
= strrchr(maxcpus
, '-');
373 cpus
= strtoul(c1
, NULL
, 0);
380 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
381 #define __OFFLINE_CPUS "/sys/devices/system/cpu/offline"
382 static bool cg_legacy_filter_and_set_cpus(char *path
, bool am_initialized
)
384 __do_free
char *cpulist
= NULL
, *fpath
= NULL
, *isolcpus
= NULL
,
385 *offlinecpus
= NULL
, *posscpus
= NULL
;
386 __do_free
uint32_t *isolmask
= NULL
, *offlinemask
= NULL
,
392 ssize_t maxisol
= 0, maxoffline
= 0, maxposs
= 0;
393 bool bret
= false, flipped_bit
= false;
395 lastslash
= strrchr(path
, '/');
397 ERROR("Failed to detect \"/\" in \"%s\"", path
);
402 fpath
= must_make_path(path
, "cpuset.cpus", NULL
);
403 posscpus
= read_file(fpath
);
405 SYSERROR("Failed to read file \"%s\"", fpath
);
409 /* Get maximum number of cpus found in possible cpuset. */
410 maxposs
= get_max_cpus(posscpus
);
411 if (maxposs
< 0 || maxposs
>= INT_MAX
- 1)
414 if (file_exists(__ISOL_CPUS
)) {
415 isolcpus
= read_file(__ISOL_CPUS
);
417 SYSERROR("Failed to read file \"%s\"", __ISOL_CPUS
);
421 if (isdigit(isolcpus
[0])) {
422 /* Get maximum number of cpus found in isolated cpuset. */
423 maxisol
= get_max_cpus(isolcpus
);
424 if (maxisol
< 0 || maxisol
>= INT_MAX
- 1)
428 if (maxposs
< maxisol
)
432 TRACE("The path \""__ISOL_CPUS
"\" to read isolated cpus from does not exist");
435 if (file_exists(__OFFLINE_CPUS
)) {
436 offlinecpus
= read_file(__OFFLINE_CPUS
);
438 SYSERROR("Failed to read file \"%s\"", __OFFLINE_CPUS
);
442 if (isdigit(offlinecpus
[0])) {
443 /* Get maximum number of cpus found in offline cpuset. */
444 maxoffline
= get_max_cpus(offlinecpus
);
445 if (maxoffline
< 0 || maxoffline
>= INT_MAX
- 1)
449 if (maxposs
< maxoffline
)
450 maxposs
= maxoffline
;
453 TRACE("The path \""__OFFLINE_CPUS
"\" to read offline cpus from does not exist");
456 if ((maxisol
== 0) && (maxoffline
== 0))
459 possmask
= lxc_cpumask(posscpus
, maxposs
);
461 ERROR("Failed to create cpumask for possible cpus");
466 isolmask
= lxc_cpumask(isolcpus
, maxposs
);
468 ERROR("Failed to create cpumask for isolated cpus");
473 if (maxoffline
> 0) {
474 offlinemask
= lxc_cpumask(offlinecpus
, maxposs
);
476 ERROR("Failed to create cpumask for offline cpus");
481 for (i
= 0; i
<= maxposs
; i
++) {
482 if ((isolmask
&& !is_set(i
, isolmask
)) ||
483 (offlinemask
&& !is_set(i
, offlinemask
)) ||
484 !is_set(i
, possmask
))
488 clear_bit(i
, possmask
);
492 DEBUG("No isolated or offline cpus present in cpuset");
495 DEBUG("Removed isolated or offline cpus from cpuset");
497 cpulist
= lxc_cpumask_to_cpulist(possmask
, maxposs
);
499 ERROR("Failed to create cpu list");
504 if (!am_initialized
) {
506 fpath
= must_make_path(path
, "cpuset.cpus", NULL
);
507 ret
= lxc_write_to_file(fpath
, cpulist
, strlen(cpulist
), false,
509 if (cpulist
== posscpus
)
512 SYSERROR("Failed to write cpu list to \"%s\"", fpath
);
516 TRACE("Copied cpu settings of parent cgroup");
522 /* Copy contents of parent(@path)/@file to @path/@file */
523 static bool copy_parent_file(char *path
, char *file
)
525 __do_free
char *child_path
= NULL
, *parent_path
= NULL
, *value
= NULL
;
529 char *lastslash
= NULL
;
531 lastslash
= strrchr(path
, '/');
533 ERROR("Failed to detect \"/\" in \"%s\"", path
);
538 parent_path
= must_make_path(path
, file
, NULL
);
539 len
= lxc_read_from_file(parent_path
, NULL
, 0);
541 SYSERROR("Failed to determine buffer size");
545 value
= must_realloc(NULL
, len
+ 1);
546 ret
= lxc_read_from_file(parent_path
, value
, len
);
548 SYSERROR("Failed to read from parent file \"%s\"", parent_path
);
553 child_path
= must_make_path(path
, file
, NULL
);
554 ret
= lxc_write_to_file(child_path
, value
, len
, false, 0666);
556 SYSERROR("Failed to write \"%s\" to file \"%s\"", value
, child_path
);
560 /* Initialize the cpuset hierarchy in first directory of @gname and set
561 * cgroup.clone_children so that children inherit settings. Since the
562 * h->base_path is populated by init or ourselves, we know it is already
565 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy
*h
, char *cgname
)
567 __do_free
char *cgpath
= NULL
, *clonechildrenpath
= NULL
;
572 if (!string_in_list(h
->controllers
, "cpuset"))
577 slash
= strchr(cgname
, '/');
581 cgpath
= must_make_path(h
->mountpoint
, h
->container_base_path
, cgname
, NULL
);
585 ret
= mkdir(cgpath
, 0755);
587 if (errno
!= EEXIST
) {
588 SYSERROR("Failed to create directory \"%s\"", cgpath
);
593 clonechildrenpath
= must_make_path(cgpath
, "cgroup.clone_children", NULL
);
594 /* unified hierarchy doesn't have clone_children */
595 if (!file_exists(clonechildrenpath
))
598 ret
= lxc_read_from_file(clonechildrenpath
, &v
, 1);
600 SYSERROR("Failed to read file \"%s\"", clonechildrenpath
);
604 /* Make sure any isolated cpus are removed from cpuset.cpus. */
605 if (!cg_legacy_filter_and_set_cpus(cgpath
, v
== '1')) {
606 SYSERROR("Failed to remove isolated cpus");
610 /* Already set for us by someone else. */
612 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
616 /* copy parent's settings */
617 if (!copy_parent_file(cgpath
, "cpuset.mems")) {
618 SYSERROR("Failed to copy \"cpuset.mems\" settings");
622 ret
= lxc_write_to_file(clonechildrenpath
, "1", 1, false, 0666);
624 /* Set clone_children so children inherit our settings */
625 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath
);
632 /* Given two null-terminated lists of strings, return true if any string is in
635 static bool controller_lists_intersect(char **l1
, char **l2
)
642 for (i
= 0; l1
[i
]; i
++) {
643 if (string_in_list(l2
, l1
[i
]))
650 /* For a null-terminated list of controllers @clist, return true if any of those
651 * controllers is already listed the null-terminated list of hierarchies @hlist.
652 * Realistically, if one is present, all must be present.
654 static bool controller_list_is_dup(struct hierarchy
**hlist
, char **clist
)
661 for (i
= 0; hlist
[i
]; i
++)
662 if (controller_lists_intersect(hlist
[i
]->controllers
, clist
))
668 /* Return true if the controller @entry is found in the null-terminated list of
669 * hierarchies @hlist.
671 static bool controller_found(struct hierarchy
**hlist
, char *entry
)
678 for (i
= 0; hlist
[i
]; i
++)
679 if (string_in_list(hlist
[i
]->controllers
, entry
))
685 /* Return true if all of the controllers which we require have been found. The
686 * required list is freezer and anything in lxc.cgroup.use.
688 static bool all_controllers_found(struct cgroup_ops
*ops
)
691 struct hierarchy
**hlist
= ops
->hierarchies
;
693 if (!ops
->cgroup_use
)
696 for (cur
= ops
->cgroup_use
; cur
&& *cur
; cur
++)
697 if (!controller_found(hlist
, *cur
)) {
698 ERROR("No %s controller mountpoint found", *cur
);
705 /* Get the controllers from a mountinfo line There are other ways we could get
706 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
707 * could parse the mount options. But we simply assume that the mountpoint must
708 * be /sys/fs/cgroup/controller-list
710 static char **cg_hybrid_get_controllers(char **klist
, char **nlist
, char *line
,
713 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
714 * for legacy hierarchies.
718 char *p
= line
, *sep
= ",";
721 for (i
= 0; i
< 4; i
++) {
728 /* Note, if we change how mountinfo works, then our caller will need to
729 * verify /sys/fs/cgroup/ in this field.
731 if (strncmp(p
, "/sys/fs/cgroup/", 15) != 0) {
732 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p
);
739 ERROR("Corrupt mountinfo");
744 if (type
== CGROUP_SUPER_MAGIC
) {
745 __do_free
char *dup
= NULL
;
747 /* strdup() here for v1 hierarchies. Otherwise
748 * lxc_iterate_parts() will destroy mountpoints such as
749 * "/sys/fs/cgroup/cpu,cpuacct".
751 dup
= must_copy_string(p
);
755 lxc_iterate_parts (tok
, dup
, sep
)
756 must_append_controller(klist
, nlist
, &aret
, tok
);
763 static char **cg_unified_make_empty_controller(void)
768 newentry
= append_null_to_list((void ***)&aret
);
769 aret
[newentry
] = NULL
;
773 static char **cg_unified_get_controllers(const char *file
)
775 __do_free
char *buf
= NULL
;
780 buf
= read_file(file
);
784 lxc_iterate_parts(tok
, buf
, sep
) {
788 newentry
= append_null_to_list((void ***)&aret
);
789 copy
= must_copy_string(tok
);
790 aret
[newentry
] = copy
;
796 static struct hierarchy
*add_hierarchy(struct hierarchy
***h
, char **clist
, char *mountpoint
,
797 char *container_base_path
, int type
)
799 struct hierarchy
*new;
802 new = must_realloc(NULL
, sizeof(*new));
803 new->controllers
= clist
;
804 new->mountpoint
= mountpoint
;
805 new->container_base_path
= container_base_path
;
806 new->container_full_path
= NULL
;
807 new->monitor_full_path
= NULL
;
809 new->cgroup2_chown
= NULL
;
811 newentry
= append_null_to_list((void ***)h
);
812 (*h
)[newentry
] = new;
816 /* Get a copy of the mountpoint from @line, which is a line from
817 * /proc/self/mountinfo.
819 static char *cg_hybrid_get_mountpoint(char *line
)
824 char *p
= line
, *sret
= NULL
;
826 for (i
= 0; i
< 4; i
++) {
833 if (strncmp(p
, "/sys/fs/cgroup/", 15) != 0)
836 p2
= strchr(p
+ 15, ' ');
842 sret
= must_realloc(NULL
, len
+ 1);
843 memcpy(sret
, p
, len
);
848 /* Given a multi-line string, return a null-terminated copy of the current line. */
849 static char *copy_to_eol(char *p
)
851 char *p2
= strchr(p
, '\n'), *sret
;
858 sret
= must_realloc(NULL
, len
+ 1);
859 memcpy(sret
, p
, len
);
864 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
865 * /proc/self/cgroup file. Check whether controller c is present.
867 static bool controller_in_clist(char *cgline
, char *c
)
869 __do_free
char *tmp
= NULL
;
873 eol
= strchr(cgline
, ':');
878 tmp
= must_realloc(NULL
, len
+ 1);
879 memcpy(tmp
, cgline
, len
);
882 lxc_iterate_parts(tok
, tmp
, ",")
883 if (strcmp(tok
, c
) == 0)
889 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
892 static char *cg_hybrid_get_current_cgroup(char *basecginfo
, char *controller
,
895 char *p
= basecginfo
;
898 bool is_cgv2_base_cgroup
= false;
900 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
901 if ((type
== CGROUP2_SUPER_MAGIC
) && (*p
== '0'))
902 is_cgv2_base_cgroup
= true;
909 if (is_cgv2_base_cgroup
|| (controller
&& controller_in_clist(p
, controller
))) {
914 return copy_to_eol(p
);
924 static void must_append_string(char ***list
, char *entry
)
929 newentry
= append_null_to_list((void ***)list
);
930 copy
= must_copy_string(entry
);
931 (*list
)[newentry
] = copy
;
934 static int get_existing_subsystems(char ***klist
, char ***nlist
)
936 __do_free
char *line
= NULL
;
937 __do_fclose
FILE *f
= NULL
;
940 f
= fopen("/proc/self/cgroup", "r");
944 while (getline(&line
, &len
, f
) != -1) {
946 p
= strchr(line
, ':');
955 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
956 * contains an entry of the form:
960 * In this case we use "cgroup2" as controller name.
963 must_append_string(klist
, "cgroup2");
967 lxc_iterate_parts(tok
, p
, ",") {
968 if (strncmp(tok
, "name=", 5) == 0)
969 must_append_string(nlist
, tok
);
971 must_append_string(klist
, tok
);
978 static void trim(char *s
)
983 while ((len
> 1) && (s
[len
- 1] == '\n'))
987 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops
*ops
)
990 struct hierarchy
**it
;
992 if (!ops
->hierarchies
) {
993 TRACE(" No hierarchies found");
997 TRACE(" Hierarchies:");
998 for (i
= 0, it
= ops
->hierarchies
; it
&& *it
; it
++, i
++) {
1002 TRACE(" %d: base_cgroup: %s", i
, (*it
)->container_base_path
? (*it
)->container_base_path
: "(null)");
1003 TRACE(" mountpoint: %s", (*it
)->mountpoint
? (*it
)->mountpoint
: "(null)");
1004 TRACE(" controllers:");
1005 for (j
= 0, cit
= (*it
)->controllers
; cit
&& *cit
; cit
++, j
++)
1006 TRACE(" %d: %s", j
, *cit
);
1010 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo
, char **klist
,
1016 TRACE("basecginfo is:");
1017 TRACE("%s", basecginfo
);
1019 for (k
= 0, it
= klist
; it
&& *it
; it
++, k
++)
1020 TRACE("kernel subsystem %d: %s", k
, *it
);
1022 for (k
= 0, it
= nlist
; it
&& *it
; it
++, k
++)
1023 TRACE("named subsystem %d: %s", k
, *it
);
1026 static int cgroup_rmdir(struct hierarchy
**hierarchies
,
1027 const char *container_cgroup
)
1031 if (!container_cgroup
|| !hierarchies
)
1034 for (i
= 0; hierarchies
[i
]; i
++) {
1036 struct hierarchy
*h
= hierarchies
[i
];
1038 if (!h
->container_full_path
)
1041 ret
= recursive_destroy(h
->container_full_path
);
1043 WARN("Failed to destroy \"%s\"", h
->container_full_path
);
1045 free(h
->container_full_path
);
1046 h
->container_full_path
= NULL
;
1052 struct generic_userns_exec_data
{
1053 struct hierarchy
**hierarchies
;
1054 const char *container_cgroup
;
1055 struct lxc_conf
*conf
;
1056 uid_t origuid
; /* target uid in parent namespace */
1060 static int cgroup_rmdir_wrapper(void *data
)
1063 struct generic_userns_exec_data
*arg
= data
;
1064 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
1065 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
1067 ret
= setresgid(nsgid
, nsgid
, nsgid
);
1069 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid
,
1070 (int)nsgid
, (int)nsgid
);
1074 ret
= setresuid(nsuid
, nsuid
, nsuid
);
1076 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid
,
1077 (int)nsuid
, (int)nsuid
);
1081 ret
= setgroups(0, NULL
);
1082 if (ret
< 0 && errno
!= EPERM
) {
1083 SYSERROR("Failed to setgroups(0, NULL)");
1087 return cgroup_rmdir(arg
->hierarchies
, arg
->container_cgroup
);
1090 __cgfsng_ops
static void cgfsng_payload_destroy(struct cgroup_ops
*ops
,
1091 struct lxc_handler
*handler
)
1094 struct generic_userns_exec_data wrap
;
1096 if (!ops
->hierarchies
)
1100 wrap
.container_cgroup
= ops
->container_cgroup
;
1101 wrap
.hierarchies
= ops
->hierarchies
;
1102 wrap
.conf
= handler
->conf
;
1104 if (handler
->conf
&& !lxc_list_empty(&handler
->conf
->id_map
))
1105 ret
= userns_exec_1(handler
->conf
, cgroup_rmdir_wrapper
, &wrap
,
1106 "cgroup_rmdir_wrapper");
1108 ret
= cgroup_rmdir(ops
->hierarchies
, ops
->container_cgroup
);
1110 WARN("Failed to destroy cgroups");
1115 __cgfsng_ops
static void cgfsng_monitor_destroy(struct cgroup_ops
*ops
,
1116 struct lxc_handler
*handler
)
1119 struct lxc_conf
*conf
= handler
->conf
;
1120 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1122 if (!ops
->hierarchies
)
1125 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", handler
->monitor_pid
);
1126 if (len
< 0 || (size_t)len
>= sizeof(pidstr
))
1129 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1130 __do_free
char *pivot_path
= NULL
;
1133 char pivot_cgroup
[] = PIVOT_CGROUP
;
1134 struct hierarchy
*h
= ops
->hierarchies
[i
];
1136 if (!h
->monitor_full_path
)
1139 if (conf
&& conf
->cgroup_meta
.dir
)
1140 pivot_path
= must_make_path(h
->mountpoint
,
1141 h
->container_base_path
,
1142 conf
->cgroup_meta
.dir
,
1144 "cgroup.procs", NULL
);
1146 pivot_path
= must_make_path(h
->mountpoint
,
1147 h
->container_base_path
,
1149 "cgroup.procs", NULL
);
1151 chop
= strrchr(pivot_path
, '/');
1156 * Make sure not to pass in the ro string literal PIVOT_CGROUP
1159 if (!cg_legacy_handle_cpuset_hierarchy(h
, pivot_cgroup
)) {
1160 WARN("Failed to handle legacy cpuset controller");
1164 ret
= mkdir_p(pivot_path
, 0755);
1165 if (ret
< 0 && errno
!= EEXIST
) {
1166 SYSWARN("Failed to create cgroup \"%s\"\n", pivot_path
);
1173 /* Move ourselves into the pivot cgroup to delete our own
1176 ret
= lxc_write_to_file(pivot_path
, pidstr
, len
, false, 0666);
1178 SYSWARN("Failed to move monitor %s to \"%s\"\n", pidstr
, pivot_path
);
1182 ret
= recursive_destroy(h
->monitor_full_path
);
1184 WARN("Failed to destroy \"%s\"", h
->monitor_full_path
);
1188 static bool cg_unified_create_cgroup(struct hierarchy
*h
, char *cgname
)
1190 __do_free
char *add_controllers
= NULL
, *cgroup
= NULL
;
1191 size_t i
, parts_len
;
1193 size_t full_len
= 0;
1194 char **parts
= NULL
;
1197 if (h
->version
!= CGROUP2_SUPER_MAGIC
)
1200 if (!h
->controllers
)
1203 /* For now we simply enable all controllers that we have detected by
1204 * creating a string like "+memory +pids +cpu +io".
1205 * TODO: In the near future we might want to support "-<controller>"
1206 * etc. but whether supporting semantics like this make sense will need
1209 for (it
= h
->controllers
; it
&& *it
; it
++) {
1210 full_len
+= strlen(*it
) + 2;
1211 add_controllers
= must_realloc(add_controllers
, full_len
+ 1);
1213 if (h
->controllers
[0] == *it
)
1214 add_controllers
[0] = '\0';
1216 (void)strlcat(add_controllers
, "+", full_len
+ 1);
1217 (void)strlcat(add_controllers
, *it
, full_len
+ 1);
1219 if ((it
+ 1) && *(it
+ 1))
1220 (void)strlcat(add_controllers
, " ", full_len
+ 1);
1223 parts
= lxc_string_split(cgname
, '/');
1227 parts_len
= lxc_array_len((void **)parts
);
1231 cgroup
= must_make_path(h
->mountpoint
, h
->container_base_path
, NULL
);
1232 for (i
= 0; i
< parts_len
; i
++) {
1234 __do_free
char *target
= NULL
;
1236 cgroup
= must_append_path(cgroup
, parts
[i
], NULL
);
1237 target
= must_make_path(cgroup
, "cgroup.subtree_control", NULL
);
1238 ret
= lxc_write_to_file(target
, add_controllers
, full_len
, false, 0666);
1240 SYSERROR("Could not enable \"%s\" controllers in the "
1241 "unified cgroup \"%s\"", add_controllers
, cgroup
);
1249 lxc_free_array((void **)parts
, free
);
1253 static int mkdir_eexist_on_last(const char *dir
, mode_t mode
)
1255 const char *tmp
= dir
;
1256 const char *orig
= dir
;
1259 orig_len
= strlen(dir
);
1261 __do_free
char *makeme
;
1265 dir
= tmp
+ strspn(tmp
, "/");
1266 tmp
= dir
+ strcspn(dir
, "/");
1269 cur_len
= dir
- orig
;
1270 makeme
= strndup(orig
, cur_len
);
1274 ret
= mkdir(makeme
, mode
);
1276 if ((errno
!= EEXIST
) || (orig_len
== cur_len
)) {
1277 SYSERROR("Failed to create directory \"%s\"", makeme
);
1281 } while (tmp
!= dir
);
1286 static bool monitor_create_path_for_hierarchy(struct hierarchy
*h
, char *cgname
)
1290 if (!cg_legacy_handle_cpuset_hierarchy(h
, cgname
)) {
1291 ERROR("Failed to handle legacy cpuset controller");
1295 h
->monitor_full_path
= must_make_path(h
->mountpoint
, h
->container_base_path
, cgname
, NULL
);
1296 ret
= mkdir_eexist_on_last(h
->monitor_full_path
, 0755);
1298 ERROR("Failed to create cgroup \"%s\"", h
->monitor_full_path
);
1302 return cg_unified_create_cgroup(h
, cgname
);
1305 static bool container_create_path_for_hierarchy(struct hierarchy
*h
, char *cgname
)
1309 if (!cg_legacy_handle_cpuset_hierarchy(h
, cgname
)) {
1310 ERROR("Failed to handle legacy cpuset controller");
1314 h
->container_full_path
= must_make_path(h
->mountpoint
, h
->container_base_path
, cgname
, NULL
);
1315 ret
= mkdir_eexist_on_last(h
->container_full_path
, 0755);
1317 ERROR("Failed to create cgroup \"%s\"", h
->container_full_path
);
1321 return cg_unified_create_cgroup(h
, cgname
);
1324 static void remove_path_for_hierarchy(struct hierarchy
*h
, char *cgname
, bool monitor
)
1330 full_path
= h
->monitor_full_path
;
1332 full_path
= h
->container_full_path
;
1334 ret
= rmdir(full_path
);
1336 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", full_path
);
1341 h
->monitor_full_path
= NULL
;
1343 h
->container_full_path
= NULL
;
1346 __cgfsng_ops
static inline bool cgfsng_monitor_create(struct cgroup_ops
*ops
,
1347 struct lxc_handler
*handler
)
1349 __do_free
char *monitor_cgroup
= NULL
;
1353 struct lxc_conf
*conf
= handler
->conf
;
1358 if (!ops
->hierarchies
)
1361 if (conf
->cgroup_meta
.dir
)
1362 tmp
= lxc_string_join("/",
1363 (const char *[]){conf
->cgroup_meta
.dir
,
1364 ops
->monitor_pattern
,
1365 handler
->name
, NULL
},
1368 tmp
= must_make_path(ops
->monitor_pattern
, handler
->name
, NULL
);
1372 len
= strlen(tmp
) + 5; /* leave room for -NNN\0 */
1373 monitor_cgroup
= must_realloc(tmp
, len
);
1374 offset
= monitor_cgroup
+ len
- 5;
1379 int ret
= snprintf(offset
, 5, "-%d", idx
);
1380 if (ret
< 0 || (size_t)ret
>= 5)
1384 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1385 if (!monitor_create_path_for_hierarchy(ops
->hierarchies
[i
],
1387 ERROR("Failed to create cgroup \"%s\"",
1388 ops
->hierarchies
[i
]->monitor_full_path
);
1389 for (int j
= 0; j
< i
; j
++)
1390 remove_path_for_hierarchy(ops
->hierarchies
[j
],
1398 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000);
1403 INFO("The monitor process uses \"%s\" as cgroup", monitor_cgroup
);
1407 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1408 * next cgroup_pattern-1, -2, ..., -999.
1410 __cgfsng_ops
static inline bool cgfsng_payload_create(struct cgroup_ops
*ops
,
1411 struct lxc_handler
*handler
)
1413 __do_free
char *container_cgroup
= NULL
, *tmp
= NULL
;
1418 struct lxc_conf
*conf
= handler
->conf
;
1420 if (ops
->container_cgroup
)
1426 if (!ops
->hierarchies
)
1429 if (conf
->cgroup_meta
.dir
)
1430 tmp
= lxc_string_join("/", (const char *[]){conf
->cgroup_meta
.dir
, handler
->name
, NULL
}, false);
1432 tmp
= lxc_string_replace("%n", handler
->name
, ops
->cgroup_pattern
);
1434 ERROR("Failed expanding cgroup name pattern");
1438 len
= strlen(tmp
) + 5; /* leave room for -NNN\0 */
1439 container_cgroup
= must_realloc(NULL
, len
);
1440 (void)strlcpy(container_cgroup
, tmp
, len
);
1441 offset
= container_cgroup
+ len
- 5;
1445 int ret
= snprintf(offset
, 5, "-%d", idx
);
1446 if (ret
< 0 || (size_t)ret
>= 5)
1450 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1451 if (!container_create_path_for_hierarchy(ops
->hierarchies
[i
],
1452 container_cgroup
)) {
1453 ERROR("Failed to create cgroup \"%s\"",
1454 ops
->hierarchies
[i
]->container_full_path
);
1455 for (int j
= 0; j
< i
; j
++)
1456 remove_path_for_hierarchy(ops
->hierarchies
[j
],
1463 } while (ops
->hierarchies
[i
] && idx
> 0 && idx
< 1000);
1468 INFO("The container process uses \"%s\" as cgroup", container_cgroup
);
1469 ops
->container_cgroup
= move_ptr(container_cgroup
);
1473 __cgfsng_ops
static bool __do_cgroup_enter(struct cgroup_ops
*ops
, pid_t pid
,
1477 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
1479 if (!ops
->hierarchies
)
1482 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", pid
);
1483 if (len
< 0 || (size_t)len
>= sizeof(pidstr
))
1486 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
1488 __do_free
char *path
= NULL
;
1491 path
= must_make_path(ops
->hierarchies
[i
]->monitor_full_path
,
1492 "cgroup.procs", NULL
);
1494 path
= must_make_path(ops
->hierarchies
[i
]->container_full_path
,
1495 "cgroup.procs", NULL
);
1496 ret
= lxc_write_to_file(path
, pidstr
, len
, false, 0666);
1498 SYSERROR("Failed to enter cgroup \"%s\"", path
);
1506 __cgfsng_ops
static bool cgfsng_monitor_enter(struct cgroup_ops
*ops
, pid_t pid
)
1508 return __do_cgroup_enter(ops
, pid
, true);
1511 static bool cgfsng_payload_enter(struct cgroup_ops
*ops
, pid_t pid
)
1513 return __do_cgroup_enter(ops
, pid
, false);
1516 static int chowmod(char *path
, uid_t chown_uid
, gid_t chown_gid
,
1521 ret
= chown(path
, chown_uid
, chown_gid
);
1523 SYSWARN("Failed to chown(%s, %d, %d)", path
, (int)chown_uid
, (int)chown_gid
);
1527 ret
= chmod(path
, chmod_mode
);
1529 SYSWARN("Failed to chmod(%s, %d)", path
, (int)chmod_mode
);
1536 /* chgrp the container cgroups to container group. We leave
1537 * the container owner as cgroup owner. So we must make the
1538 * directories 775 so that the container can create sub-cgroups.
1540 * Also chown the tasks and cgroup.procs files. Those may not
1541 * exist depending on kernel version.
1543 static int chown_cgroup_wrapper(void *data
)
1547 struct generic_userns_exec_data
*arg
= data
;
1548 uid_t nsuid
= (arg
->conf
->root_nsuid_map
!= NULL
) ? 0 : arg
->conf
->init_uid
;
1549 gid_t nsgid
= (arg
->conf
->root_nsgid_map
!= NULL
) ? 0 : arg
->conf
->init_gid
;
1551 ret
= setresgid(nsgid
, nsgid
, nsgid
);
1553 SYSERROR("Failed to setresgid(%d, %d, %d)",
1554 (int)nsgid
, (int)nsgid
, (int)nsgid
);
1558 ret
= setresuid(nsuid
, nsuid
, nsuid
);
1560 SYSERROR("Failed to setresuid(%d, %d, %d)",
1561 (int)nsuid
, (int)nsuid
, (int)nsuid
);
1565 ret
= setgroups(0, NULL
);
1566 if (ret
< 0 && errno
!= EPERM
) {
1567 SYSERROR("Failed to setgroups(0, NULL)");
1571 destuid
= get_ns_uid(arg
->origuid
);
1572 if (destuid
== LXC_INVALID_UID
)
1575 for (i
= 0; arg
->hierarchies
[i
]; i
++) {
1576 __do_free
char *fullpath
= NULL
;
1577 char *path
= arg
->hierarchies
[i
]->container_full_path
;
1579 ret
= chowmod(path
, destuid
, nsgid
, 0775);
1583 /* Failures to chown() these are inconvenient but not
1584 * detrimental We leave these owned by the container launcher,
1585 * so that container root can write to the files to attach. We
1586 * chmod() them 664 so that container systemd can write to the
1587 * files (which systemd in wily insists on doing).
1590 if (arg
->hierarchies
[i
]->version
== CGROUP_SUPER_MAGIC
) {
1591 fullpath
= must_make_path(path
, "tasks", NULL
);
1592 (void)chowmod(fullpath
, destuid
, nsgid
, 0664);
1595 fullpath
= must_make_path(path
, "cgroup.procs", NULL
);
1596 (void)chowmod(fullpath
, destuid
, nsgid
, 0664);
1598 if (arg
->hierarchies
[i
]->version
!= CGROUP2_SUPER_MAGIC
)
1601 for (char **p
= arg
->hierarchies
[i
]->cgroup2_chown
; p
&& *p
; p
++) {
1602 fullpath
= must_make_path(path
, *p
, NULL
);
1603 (void)chowmod(fullpath
, destuid
, nsgid
, 0664);
1610 __cgfsng_ops
static bool cgfsng_chown(struct cgroup_ops
*ops
,
1611 struct lxc_conf
*conf
)
1613 struct generic_userns_exec_data wrap
;
1615 if (lxc_list_empty(&conf
->id_map
))
1618 if (!ops
->hierarchies
)
1621 wrap
.origuid
= geteuid();
1623 wrap
.hierarchies
= ops
->hierarchies
;
1626 if (userns_exec_1(conf
, chown_cgroup_wrapper
, &wrap
,
1627 "chown_cgroup_wrapper") < 0) {
1628 ERROR("Error requesting cgroup chown in new user namespace");
1635 /* cgroup-full:* is done, no need to create subdirs */
1636 static bool cg_mount_needs_subdirs(int type
)
1638 if (type
>= LXC_AUTO_CGROUP_FULL_RO
)
1644 /* After $rootfs/sys/fs/container/controller/the/cg/path has been created,
1645 * remount controller ro if needed and bindmount the cgroupfs onto
1646 * control/the/cg/path.
1648 static int cg_legacy_mount_controllers(int type
, struct hierarchy
*h
,
1649 char *controllerpath
, char *cgpath
,
1650 const char *container_cgroup
)
1652 __do_free
char *sourcepath
= NULL
;
1653 int ret
, remount_flags
;
1654 int flags
= MS_BIND
;
1656 if (type
== LXC_AUTO_CGROUP_RO
|| type
== LXC_AUTO_CGROUP_MIXED
) {
1657 ret
= mount(controllerpath
, controllerpath
, "cgroup", MS_BIND
, NULL
);
1659 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1660 controllerpath
, controllerpath
);
1664 remount_flags
= add_required_remount_flags(controllerpath
,
1666 flags
| MS_REMOUNT
);
1667 ret
= mount(controllerpath
, controllerpath
, "cgroup",
1668 remount_flags
| MS_REMOUNT
| MS_BIND
| MS_RDONLY
,
1671 SYSERROR("Failed to remount \"%s\" ro", controllerpath
);
1675 INFO("Remounted %s read-only", controllerpath
);
1678 sourcepath
= must_make_path(h
->mountpoint
, h
->container_base_path
,
1679 container_cgroup
, NULL
);
1680 if (type
== LXC_AUTO_CGROUP_RO
)
1683 ret
= mount(sourcepath
, cgpath
, "cgroup", flags
, NULL
);
1685 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h
->controllers
[0], cgpath
);
1688 INFO("Mounted \"%s\" onto \"%s\"", h
->controllers
[0], cgpath
);
1690 if (flags
& MS_RDONLY
) {
1691 remount_flags
= add_required_remount_flags(sourcepath
, cgpath
,
1692 flags
| MS_REMOUNT
);
1693 ret
= mount(sourcepath
, cgpath
, "cgroup", remount_flags
, NULL
);
1695 SYSERROR("Failed to remount \"%s\" ro", cgpath
);
1698 INFO("Remounted %s read-only", cgpath
);
1701 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath
);
1705 /* __cg_mount_direct
1707 * Mount cgroup hierarchies directly without using bind-mounts. The main
1708 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1709 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1711 static int __cg_mount_direct(int type
, struct hierarchy
*h
,
1712 const char *controllerpath
)
1715 __do_free
char *controllers
= NULL
;
1716 char *fstype
= "cgroup2";
1717 unsigned long flags
= 0;
1722 flags
|= MS_RELATIME
;
1724 if (type
== LXC_AUTO_CGROUP_RO
|| type
== LXC_AUTO_CGROUP_FULL_RO
)
1727 if (h
->version
!= CGROUP2_SUPER_MAGIC
) {
1728 controllers
= lxc_string_join(",", (const char **)h
->controllers
, false);
1734 ret
= mount("cgroup", controllerpath
, fstype
, flags
, controllers
);
1736 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath
, fstype
);
1740 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath
, fstype
);
/* Mount a hierarchy for a container running in a cgroup namespace: a direct
 * (non-bind) mount via __cg_mount_direct() is sufficient in that case.
 */
static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
					       const char *controllerpath)
{
	return __cg_mount_direct(type, h, controllerpath);
}
1750 static inline int cg_mount_cgroup_full(int type
, struct hierarchy
*h
,
1751 const char *controllerpath
)
1753 if (type
< LXC_AUTO_CGROUP_FULL_RO
|| type
> LXC_AUTO_CGROUP_FULL_MIXED
)
1756 return __cg_mount_direct(type
, h
, controllerpath
);
1759 __cgfsng_ops
static bool cgfsng_mount(struct cgroup_ops
*ops
,
1760 struct lxc_handler
*handler
,
1761 const char *root
, int type
)
1763 __do_free
char *tmpfspath
= NULL
;
1765 bool has_cgns
= false, retval
= false, wants_force_mount
= false;
1767 if (!ops
->hierarchies
)
1770 if ((type
& LXC_AUTO_CGROUP_MASK
) == 0)
1773 if (type
& LXC_AUTO_CGROUP_FORCE
) {
1774 type
&= ~LXC_AUTO_CGROUP_FORCE
;
1775 wants_force_mount
= true;
1778 if (!wants_force_mount
){
1779 if (!lxc_list_empty(&handler
->conf
->keepcaps
))
1780 wants_force_mount
= !in_caplist(CAP_SYS_ADMIN
, &handler
->conf
->keepcaps
);
1782 wants_force_mount
= in_caplist(CAP_SYS_ADMIN
, &handler
->conf
->caps
);
1785 has_cgns
= cgns_supported();
1786 if (has_cgns
&& !wants_force_mount
)
1789 if (type
== LXC_AUTO_CGROUP_NOSPEC
)
1790 type
= LXC_AUTO_CGROUP_MIXED
;
1791 else if (type
== LXC_AUTO_CGROUP_FULL_NOSPEC
)
1792 type
= LXC_AUTO_CGROUP_FULL_MIXED
;
1795 tmpfspath
= must_make_path(root
, "/sys/fs/cgroup", NULL
);
1796 ret
= safe_mount(NULL
, tmpfspath
, "tmpfs",
1797 MS_NOSUID
| MS_NODEV
| MS_NOEXEC
| MS_RELATIME
,
1798 "size=10240k,mode=755", root
);
1802 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1803 __do_free
char *controllerpath
= NULL
, *path2
= NULL
;
1804 struct hierarchy
*h
= ops
->hierarchies
[i
];
1805 char *controller
= strrchr(h
->mountpoint
, '/');
1811 controllerpath
= must_make_path(tmpfspath
, controller
, NULL
);
1812 if (dir_exists(controllerpath
))
1815 ret
= mkdir(controllerpath
, 0755);
1817 SYSERROR("Error creating cgroup path: %s", controllerpath
);
1821 if (has_cgns
&& wants_force_mount
) {
1822 /* If cgroup namespaces are supported but the container
1823 * will not have CAP_SYS_ADMIN after it has started we
1824 * need to mount the cgroups manually.
1826 ret
= cg_mount_in_cgroup_namespace(type
, h
, controllerpath
);
1833 ret
= cg_mount_cgroup_full(type
, h
, controllerpath
);
1837 if (!cg_mount_needs_subdirs(type
))
1840 path2
= must_make_path(controllerpath
, h
->container_base_path
,
1841 ops
->container_cgroup
, NULL
);
1842 ret
= mkdir_p(path2
, 0755);
1846 ret
= cg_legacy_mount_controllers(type
, h
, controllerpath
,
1847 path2
, ops
->container_cgroup
);
1857 static int recursive_count_nrtasks(char *dirname
)
1859 __do_free
char *path
= NULL
;
1860 __do_closedir
DIR *dir
= NULL
;
1861 struct dirent
*direntp
;
1864 dir
= opendir(dirname
);
1868 while ((direntp
= readdir(dir
))) {
1871 if (!strcmp(direntp
->d_name
, ".") ||
1872 !strcmp(direntp
->d_name
, ".."))
1875 path
= must_make_path(dirname
, direntp
->d_name
, NULL
);
1877 if (lstat(path
, &mystat
))
1880 if (!S_ISDIR(mystat
.st_mode
))
1883 count
+= recursive_count_nrtasks(path
);
1886 path
= must_make_path(dirname
, "cgroup.procs", NULL
);
1887 ret
= lxc_count_file_lines(path
);
1894 __cgfsng_ops
static int cgfsng_nrtasks(struct cgroup_ops
*ops
)
1896 __do_free
char *path
= NULL
;
1899 if (!ops
->container_cgroup
|| !ops
->hierarchies
)
1902 path
= must_make_path(ops
->hierarchies
[0]->container_full_path
, NULL
);
1903 count
= recursive_count_nrtasks(path
);
1907 /* Only root needs to escape to the cgroup of its init. */
1908 __cgfsng_ops
static bool cgfsng_escape(const struct cgroup_ops
*ops
,
1909 struct lxc_conf
*conf
)
1913 if (conf
->cgroup_meta
.relative
|| geteuid() || !ops
->hierarchies
)
1916 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
1918 __do_free
char *fullpath
= NULL
;
1920 fullpath
= must_make_path(ops
->hierarchies
[i
]->mountpoint
,
1921 ops
->hierarchies
[i
]->container_base_path
,
1922 "cgroup.procs", NULL
);
1923 ret
= lxc_write_to_file(fullpath
, "0", 2, false, 0666);
1925 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath
);
1933 __cgfsng_ops
static int cgfsng_num_hierarchies(struct cgroup_ops
*ops
)
1937 if (!ops
->hierarchies
)
1940 for (; ops
->hierarchies
[i
]; i
++)
1946 __cgfsng_ops
static bool cgfsng_get_hierarchies(struct cgroup_ops
*ops
, int n
, char ***out
)
1950 if (!ops
->hierarchies
)
1953 /* sanity check n */
1954 for (i
= 0; i
< n
; i
++)
1955 if (!ops
->hierarchies
[i
])
1958 *out
= ops
->hierarchies
[i
]->controllers
;
1963 #define THAWED "THAWED"
1964 #define THAWED_LEN (strlen(THAWED))
1966 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
1969 __cgfsng_ops
static bool cgfsng_unfreeze(struct cgroup_ops
*ops
)
1972 __do_free
char *fullpath
= NULL
;
1973 struct hierarchy
*h
;
1975 h
= get_hierarchy(ops
, "freezer");
1979 fullpath
= must_make_path(h
->container_full_path
, "freezer.state", NULL
);
1980 ret
= lxc_write_to_file(fullpath
, THAWED
, THAWED_LEN
, false, 0666);
1987 __cgfsng_ops
static const char *cgfsng_get_cgroup(struct cgroup_ops
*ops
,
1988 const char *controller
)
1990 struct hierarchy
*h
;
1992 h
= get_hierarchy(ops
, controller
);
1994 WARN("Failed to find hierarchy for controller \"%s\"",
1995 controller
? controller
: "(null)");
1999 return h
->container_full_path
? h
->container_full_path
+ strlen(h
->mountpoint
) : NULL
;
2002 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2003 * which must be freed by the caller.
2005 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy
*h
,
2007 const char *filename
)
2009 return must_make_path(h
->mountpoint
, inpath
, filename
, NULL
);
2012 /* Technically, we're always at a delegation boundary here (This is especially
2013 * true when cgroup namespaces are available.). The reasoning is that in order
2014 * for us to have been able to start a container in the first place the root
2015 * cgroup must have been a leaf node. Now, either the container's init system
2016 * has populated the cgroup and kept it as a leaf node or it has created
2017 * subtrees. In the former case we will simply attach to the leaf node we
2018 * created when we started the container in the latter case we create our own
2019 * cgroup for the attaching process.
2021 static int __cg_unified_attach(const struct hierarchy
*h
, const char *name
,
2022 const char *lxcpath
, const char *pidstr
,
2023 size_t pidstr_len
, const char *controller
)
2025 __do_free
char *base_path
= NULL
, *container_cgroup
= NULL
,
2029 int fret
= -1, idx
= 0;
2031 container_cgroup
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2033 if (!container_cgroup
)
2036 base_path
= must_make_path(h
->mountpoint
, container_cgroup
, NULL
);
2037 full_path
= must_make_path(base_path
, "cgroup.procs", NULL
);
2038 /* cgroup is populated */
2039 ret
= lxc_write_to_file(full_path
, pidstr
, pidstr_len
, false, 0666);
2040 if (ret
< 0 && errno
!= EBUSY
)
2046 len
= strlen(base_path
) + STRLITERALLEN("/lxc-1000") +
2047 STRLITERALLEN("/cgroup-procs");
2048 full_path
= must_realloc(NULL
, len
+ 1);
2051 ret
= snprintf(full_path
, len
+ 1, "%s/lxc-%d",
2054 ret
= snprintf(full_path
, len
+ 1, "%s/lxc", base_path
);
2055 if (ret
< 0 || (size_t)ret
>= len
+ 1)
2058 ret
= mkdir_p(full_path
, 0755);
2059 if (ret
< 0 && errno
!= EEXIST
)
2062 (void)strlcat(full_path
, "/cgroup.procs", len
+ 1);
2063 ret
= lxc_write_to_file(full_path
, pidstr
, len
, false, 0666);
2067 /* this is a non-leaf node */
2072 } while (idx
< 1000);
2082 __cgfsng_ops
static bool cgfsng_attach(struct cgroup_ops
*ops
, const char *name
,
2083 const char *lxcpath
, pid_t pid
)
2086 char pidstr
[INTTYPE_TO_STRLEN(pid_t
)];
2088 if (!ops
->hierarchies
)
2091 len
= snprintf(pidstr
, sizeof(pidstr
), "%d", pid
);
2092 if (len
< 0 || (size_t)len
>= sizeof(pidstr
))
2095 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
2096 __do_free
char *path
= NULL
;
2097 char *fullpath
= NULL
;
2098 struct hierarchy
*h
= ops
->hierarchies
[i
];
2100 if (h
->version
== CGROUP2_SUPER_MAGIC
) {
2101 ret
= __cg_unified_attach(h
, name
, lxcpath
, pidstr
, len
,
2109 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, h
->controllers
[0]);
2114 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, "cgroup.procs");
2115 ret
= lxc_write_to_file(fullpath
, pidstr
, len
, false, 0666);
2117 SYSERROR("Failed to attach %d to %s", (int)pid
, fullpath
);
2125 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2126 * don't have a cgroup_data set up, so we ask the running container through the
2127 * commands API for the cgroup path.
2129 __cgfsng_ops
static int cgfsng_get(struct cgroup_ops
*ops
, const char *filename
,
2130 char *value
, size_t len
, const char *name
,
2131 const char *lxcpath
)
2133 __do_free
char *path
= NULL
;
2134 __do_free
char *controller
= NULL
;
2136 struct hierarchy
*h
;
2139 controller
= must_copy_string(filename
);
2140 p
= strchr(controller
, '.');
2144 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2149 h
= get_hierarchy(ops
, controller
);
2151 __do_free
char *fullpath
= NULL
;
2153 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, filename
);
2154 ret
= lxc_read_from_file(fullpath
, value
, len
);
2160 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2161 * don't have a cgroup_data set up, so we ask the running container through the
2162 * commands API for the cgroup path.
2164 __cgfsng_ops
static int cgfsng_set(struct cgroup_ops
*ops
,
2165 const char *filename
, const char *value
,
2166 const char *name
, const char *lxcpath
)
2168 __do_free
char *path
= NULL
;
2169 __do_free
char *controller
= NULL
;
2171 struct hierarchy
*h
;
2174 controller
= must_copy_string(filename
);
2175 p
= strchr(controller
, '.');
2179 path
= lxc_cmd_get_cgroup_path(name
, lxcpath
, controller
);
2184 h
= get_hierarchy(ops
, controller
);
2186 __do_free
char *fullpath
= NULL
;
2188 fullpath
= build_full_cgpath_from_monitorpath(h
, path
, filename
);
2189 ret
= lxc_write_to_file(fullpath
, value
, strlen(value
), false, 0666);
2195 /* take devices cgroup line
2197 * and convert it to a valid
2198 * type major:minor mode
2199 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2202 static int convert_devpath(const char *invalue
, char *dest
)
2204 __do_free
char *path
= NULL
;
2207 unsigned long minor
, major
;
2212 path
= must_copy_string(invalue
);
2214 /* Read path followed by mode. Ignore any trailing text.
2215 * A ' # comment' would be legal. Technically other text is not
2216 * legal, we could check for that if we cared to.
2218 for (n_parts
= 1, p
= path
; *p
; p
++) {
2240 ret
= stat(path
, &sb
);
2244 mode_t m
= sb
.st_mode
& S_IFMT
;
2253 ERROR("Unsupported device type %i for \"%s\"", m
, path
);
2258 major
= MAJOR(sb
.st_rdev
);
2259 minor
= MINOR(sb
.st_rdev
);
2260 ret
= snprintf(dest
, 50, "%c %lu:%lu %s", type
, major
, minor
, mode
);
2261 if (ret
< 0 || ret
>= 50) {
2262 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2263 "chars)", type
, major
, minor
, mode
);
2264 ret
= -ENAMETOOLONG
;
2273 /* Called from setup_limits - here we have the container's cgroup_data because
2274 * we created the cgroups.
2276 static int cg_legacy_set_data(struct cgroup_ops
*ops
, const char *filename
,
2279 __do_free
char *controller
= NULL
;
2280 __do_free
char *fullpath
= NULL
;
2282 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2283 char converted_value
[50];
2284 struct hierarchy
*h
;
2287 controller
= must_copy_string(filename
);
2288 p
= strchr(controller
, '.');
2292 if (strcmp("devices.allow", filename
) == 0 && value
[0] == '/') {
2293 ret
= convert_devpath(value
, converted_value
);
2296 value
= converted_value
;
2299 h
= get_hierarchy(ops
, controller
);
2301 ERROR("Failed to setup limits for the \"%s\" controller. "
2302 "The controller seems to be unused by \"cgfsng\" cgroup "
2303 "driver or not enabled on the cgroup hierarchy",
2309 fullpath
= must_make_path(h
->container_full_path
, filename
, NULL
);
2310 ret
= lxc_write_to_file(fullpath
, value
, strlen(value
), false, 0666);
2314 static bool __cg_legacy_setup_limits(struct cgroup_ops
*ops
,
2315 struct lxc_list
*cgroup_settings
,
2318 __do_free
struct lxc_list
*sorted_cgroup_settings
= NULL
;
2319 struct lxc_list
*iterator
, *next
;
2320 struct lxc_cgroup
*cg
;
2323 if (lxc_list_empty(cgroup_settings
))
2326 if (!ops
->hierarchies
)
2329 sorted_cgroup_settings
= sort_cgroup_settings(cgroup_settings
);
2330 if (!sorted_cgroup_settings
)
2333 lxc_list_for_each(iterator
, sorted_cgroup_settings
) {
2334 cg
= iterator
->elem
;
2336 if (do_devices
== !strncmp("devices", cg
->subsystem
, 7)) {
2337 if (cg_legacy_set_data(ops
, cg
->subsystem
, cg
->value
)) {
2338 if (do_devices
&& (errno
== EACCES
|| errno
== EPERM
)) {
2339 WARN("Failed to set \"%s\" to \"%s\"",
2340 cg
->subsystem
, cg
->value
);
2343 WARN("Failed to set \"%s\" to \"%s\"",
2344 cg
->subsystem
, cg
->value
);
2347 DEBUG("Set controller \"%s\" set to \"%s\"",
2348 cg
->subsystem
, cg
->value
);
2353 INFO("Limits for the legacy cgroup hierarchies have been setup");
2355 lxc_list_for_each_safe(iterator
, sorted_cgroup_settings
, next
) {
2356 lxc_list_del(iterator
);
2363 static bool __cg_unified_setup_limits(struct cgroup_ops
*ops
,
2364 struct lxc_list
*cgroup_settings
)
2366 struct lxc_list
*iterator
;
2367 struct hierarchy
*h
= ops
->unified
;
2369 if (lxc_list_empty(cgroup_settings
))
2375 lxc_list_for_each(iterator
, cgroup_settings
) {
2376 __do_free
char *fullpath
= NULL
;
2378 struct lxc_cgroup
*cg
= iterator
->elem
;
2380 fullpath
= must_make_path(h
->container_full_path
, cg
->subsystem
, NULL
);
2381 ret
= lxc_write_to_file(fullpath
, cg
->value
, strlen(cg
->value
), false, 0666);
2383 SYSERROR("Failed to set \"%s\" to \"%s\"",
2384 cg
->subsystem
, cg
->value
);
2387 TRACE("Set \"%s\" to \"%s\"", cg
->subsystem
, cg
->value
);
2390 INFO("Limits for the unified cgroup hierarchy have been setup");
2394 __cgfsng_ops
static bool cgfsng_setup_limits(struct cgroup_ops
*ops
,
2395 struct lxc_conf
*conf
,
2400 bret
= __cg_legacy_setup_limits(ops
, &conf
->cgroup
, do_devices
);
2404 return __cg_unified_setup_limits(ops
, &conf
->cgroup2
);
2407 static bool cgroup_use_wants_controllers(const struct cgroup_ops
*ops
,
2410 char **cur_ctrl
, **cur_use
;
2412 if (!ops
->cgroup_use
)
2415 for (cur_ctrl
= controllers
; cur_ctrl
&& *cur_ctrl
; cur_ctrl
++) {
2418 for (cur_use
= ops
->cgroup_use
; cur_use
&& *cur_use
; cur_use
++) {
2419 if (strcmp(*cur_use
, *cur_ctrl
) != 0)
2435 static void cg_unified_delegate(char ***delegate
)
2437 __do_free
char *tmp
= NULL
;
2439 char *standard
[] = {"cgroup.subtree_control", "cgroup.threads", NULL
};
2441 tmp
= read_file("/sys/kernel/cgroup/delegate");
2443 for (char **p
= standard
; p
&& *p
; p
++) {
2444 idx
= append_null_to_list((void ***)delegate
);
2445 (*delegate
)[idx
] = must_copy_string(*p
);
2449 lxc_iterate_parts (token
, tmp
, " \t\n") {
2451 * We always need to chown this for both cgroup and
2454 if (strcmp(token
, "cgroup.procs") == 0)
2457 idx
= append_null_to_list((void ***)delegate
);
2458 (*delegate
)[idx
] = must_copy_string(token
);
2463 /* At startup, parse_hierarchies finds all the info we need about cgroup
2464 * mountpoints and current cgroups, and stores it in @d.
2466 static bool cg_hybrid_init(struct cgroup_ops
*ops
, bool relative
,
2469 __do_free
char *basecginfo
= NULL
;
2470 __do_free
char *line
= NULL
;
2471 __do_fclose
FILE *f
= NULL
;
2474 char **klist
= NULL
, **nlist
= NULL
;
2476 /* Root spawned containers escape the current cgroup, so use init's
2477 * cgroups as our base in that case.
2479 if (!relative
&& (geteuid() == 0))
2480 basecginfo
= read_file("/proc/1/cgroup");
2482 basecginfo
= read_file("/proc/self/cgroup");
2486 ret
= get_existing_subsystems(&klist
, &nlist
);
2488 ERROR("Failed to retrieve available legacy cgroup controllers");
2492 f
= fopen("/proc/self/mountinfo", "r");
2494 ERROR("Failed to open \"/proc/self/mountinfo\"");
2498 lxc_cgfsng_print_basecg_debuginfo(basecginfo
, klist
, nlist
);
2500 while (getline(&line
, &len
, f
) != -1) {
2503 struct hierarchy
*new;
2504 char *base_cgroup
= NULL
, *mountpoint
= NULL
;
2505 char **controller_list
= NULL
;
2507 type
= get_cgroup_version(line
);
2511 if (type
== CGROUP2_SUPER_MAGIC
&& ops
->unified
)
2514 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNKNOWN
) {
2515 if (type
== CGROUP2_SUPER_MAGIC
)
2516 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
2517 else if (type
== CGROUP_SUPER_MAGIC
)
2518 ops
->cgroup_layout
= CGROUP_LAYOUT_LEGACY
;
2519 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
) {
2520 if (type
== CGROUP_SUPER_MAGIC
)
2521 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
2522 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_LEGACY
) {
2523 if (type
== CGROUP2_SUPER_MAGIC
)
2524 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
2527 controller_list
= cg_hybrid_get_controllers(klist
, nlist
, line
, type
);
2528 if (!controller_list
&& type
== CGROUP_SUPER_MAGIC
)
2531 if (type
== CGROUP_SUPER_MAGIC
)
2532 if (controller_list_is_dup(ops
->hierarchies
, controller_list
))
2535 mountpoint
= cg_hybrid_get_mountpoint(line
);
2537 ERROR("Failed parsing mountpoint from \"%s\"", line
);
2541 if (type
== CGROUP_SUPER_MAGIC
)
2542 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, controller_list
[0], CGROUP_SUPER_MAGIC
);
2544 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, NULL
, CGROUP2_SUPER_MAGIC
);
2546 ERROR("Failed to find current cgroup");
2551 prune_init_scope(base_cgroup
);
2552 if (type
== CGROUP2_SUPER_MAGIC
)
2553 writeable
= test_writeable_v2(mountpoint
, base_cgroup
);
2555 writeable
= test_writeable_v1(mountpoint
, base_cgroup
);
2559 if (type
== CGROUP2_SUPER_MAGIC
) {
2560 char *cgv2_ctrl_path
;
2562 cgv2_ctrl_path
= must_make_path(mountpoint
, base_cgroup
,
2563 "cgroup.controllers",
2566 controller_list
= cg_unified_get_controllers(cgv2_ctrl_path
);
2567 free(cgv2_ctrl_path
);
2568 if (!controller_list
) {
2569 controller_list
= cg_unified_make_empty_controller();
2570 TRACE("No controllers are enabled for "
2571 "delegation in the unified hierarchy");
2575 /* Exclude all controllers that cgroup use does not want. */
2576 if (!cgroup_use_wants_controllers(ops
, controller_list
))
2579 new = add_hierarchy(&ops
->hierarchies
, controller_list
, mountpoint
, base_cgroup
, type
);
2580 if (type
== CGROUP2_SUPER_MAGIC
&& !ops
->unified
) {
2582 cg_unified_delegate(&new->cgroup2_chown
);
2589 free_string_list(controller_list
);
2594 free_string_list(klist
);
2595 free_string_list(nlist
);
2597 TRACE("Writable cgroup hierarchies:");
2598 lxc_cgfsng_print_hierarchies(ops
);
2600 /* verify that all controllers in cgroup.use and all crucial
2601 * controllers are accounted for
2603 if (!all_controllers_found(ops
))
2609 static int cg_is_pure_unified(void)
2615 ret
= statfs("/sys/fs/cgroup", &fs
);
2619 if (is_fs_type(&fs
, CGROUP2_SUPER_MAGIC
))
2620 return CGROUP2_SUPER_MAGIC
;
2625 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
2626 static char *cg_unified_get_current_cgroup(bool relative
)
2628 __do_free
char *basecginfo
= NULL
;
2632 if (!relative
&& (geteuid() == 0))
2633 basecginfo
= read_file("/proc/1/cgroup");
2635 basecginfo
= read_file("/proc/self/cgroup");
2639 base_cgroup
= strstr(basecginfo
, "0::/");
2641 goto cleanup_on_err
;
2643 base_cgroup
= base_cgroup
+ 3;
2644 copy
= copy_to_eol(base_cgroup
);
2646 goto cleanup_on_err
;
2655 static int cg_unified_init(struct cgroup_ops
*ops
, bool relative
,
2658 __do_free
char *subtree_path
= NULL
;
2662 struct hierarchy
*new;
2663 char *base_cgroup
= NULL
;
2665 ret
= cg_is_pure_unified();
2666 if (ret
== -ENOMEDIUM
)
2669 if (ret
!= CGROUP2_SUPER_MAGIC
)
2672 base_cgroup
= cg_unified_get_current_cgroup(relative
);
2675 prune_init_scope(base_cgroup
);
2677 /* We assume that we have already been given controllers to delegate
2678 * further down the hierarchy. If not it is up to the user to delegate
2681 mountpoint
= must_copy_string("/sys/fs/cgroup");
2682 subtree_path
= must_make_path(mountpoint
, base_cgroup
,
2683 "cgroup.subtree_control", NULL
);
2684 delegatable
= cg_unified_get_controllers(subtree_path
);
2686 delegatable
= cg_unified_make_empty_controller();
2687 if (!delegatable
[0])
2688 TRACE("No controllers are enabled for delegation");
2690 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2691 * we should verify here. The reason I'm not doing it right is that I'm
2692 * not convinced that lxc.cgroup.use will be the future since it is a
2693 * global property. I much rather have an option that lets you request
2694 * controllers per container.
2697 new = add_hierarchy(&ops
->hierarchies
, delegatable
, mountpoint
, base_cgroup
, CGROUP2_SUPER_MAGIC
);
2699 cg_unified_delegate(&new->cgroup2_chown
);
2701 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
2703 return CGROUP2_SUPER_MAGIC
;
2706 static bool cg_init(struct cgroup_ops
*ops
, struct lxc_conf
*conf
)
2710 bool relative
= conf
->cgroup_meta
.relative
;
2712 tmp
= lxc_global_config_value("lxc.cgroup.use");
2714 __do_free
char *pin
= NULL
;
2717 pin
= must_copy_string(tmp
);
2720 lxc_iterate_parts(cur
, chop
, ",")
2721 must_append_string(&ops
->cgroup_use
, cur
);
2724 ret
= cg_unified_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
2728 if (ret
== CGROUP2_SUPER_MAGIC
)
2731 return cg_hybrid_init(ops
, relative
, !lxc_list_empty(&conf
->id_map
));
2734 __cgfsng_ops
static bool cgfsng_data_init(struct cgroup_ops
*ops
)
2736 const char *cgroup_pattern
;
2738 /* copy system-wide cgroup information */
2739 cgroup_pattern
= lxc_global_config_value("lxc.cgroup.pattern");
2740 if (!cgroup_pattern
) {
2741 /* lxc.cgroup.pattern is only NULL on error. */
2742 ERROR("Failed to retrieve cgroup pattern");
2745 ops
->cgroup_pattern
= must_copy_string(cgroup_pattern
);
2746 ops
->monitor_pattern
= MONITOR_CGROUP
;
2751 struct cgroup_ops
*cgfsng_ops_init(struct lxc_conf
*conf
)
2753 struct cgroup_ops
*cgfsng_ops
;
2755 cgfsng_ops
= malloc(sizeof(struct cgroup_ops
));
2759 memset(cgfsng_ops
, 0, sizeof(struct cgroup_ops
));
2760 cgfsng_ops
->cgroup_layout
= CGROUP_LAYOUT_UNKNOWN
;
2762 if (!cg_init(cgfsng_ops
, conf
)) {
2767 cgfsng_ops
->data_init
= cgfsng_data_init
;
2768 cgfsng_ops
->payload_destroy
= cgfsng_payload_destroy
;
2769 cgfsng_ops
->monitor_destroy
= cgfsng_monitor_destroy
;
2770 cgfsng_ops
->monitor_create
= cgfsng_monitor_create
;
2771 cgfsng_ops
->monitor_enter
= cgfsng_monitor_enter
;
2772 cgfsng_ops
->payload_create
= cgfsng_payload_create
;
2773 cgfsng_ops
->payload_enter
= cgfsng_payload_enter
;
2774 cgfsng_ops
->escape
= cgfsng_escape
;
2775 cgfsng_ops
->num_hierarchies
= cgfsng_num_hierarchies
;
2776 cgfsng_ops
->get_hierarchies
= cgfsng_get_hierarchies
;
2777 cgfsng_ops
->get_cgroup
= cgfsng_get_cgroup
;
2778 cgfsng_ops
->get
= cgfsng_get
;
2779 cgfsng_ops
->set
= cgfsng_set
;
2780 cgfsng_ops
->unfreeze
= cgfsng_unfreeze
;
2781 cgfsng_ops
->setup_limits
= cgfsng_setup_limits
;
2782 cgfsng_ops
->driver
= "cgfsng";
2783 cgfsng_ops
->version
= "1.0.0";
2784 cgfsng_ops
->attach
= cgfsng_attach
;
2785 cgfsng_ops
->chown
= cgfsng_chown
;
2786 cgfsng_ops
->mount
= cgfsng_mount
;
2787 cgfsng_ops
->nrtasks
= cgfsng_nrtasks
;