1 /* SPDX-License-Identifier: LGPL-2.1+ */
4 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
5 * cgroup backend. The original cgfs.c was designed to be as flexible
6 * as possible. It would try to find cgroup filesystems no matter where
7 * or how you had them mounted, and deduce the most usable mount for
10 * This new implementation assumes that cgroup filesystems are mounted
11 * under /sys/fs/cgroup/clist where clist is either the controller, or
12 * a comma-separated list of controllers.
19 #ifndef FUSE_USE_VERSION
20 #define FUSE_USE_VERSION 26
23 #define _FILE_OFFSET_BITS 64
29 #include <linux/kdev_t.h>
30 #include <linux/types.h>
37 #include <sys/mount.h>
38 #include <sys/types.h>
41 #include "../config.h"
43 #include "../memory_utils.h"
45 #include "cgroup2_devices.h"
46 #include "cgroup_utils.h"
/* free_string_list: release a NULL-terminated array of strings.
 * NOTE(review): only fragments of the body survive this extraction (the loop
 * header over clist[i]); presumably each element and then the array itself
 * are freed — confirm against the complete source. */
48 static void free_string_list(char **clist
)
55 for (i
= 0; clist
[i
]; i
++)
61 /* Given a pointer to a null-terminated array of pointers, realloc to add one
62 * entry, and point the new entry to NULL. Do not fail. Return the index to the
63 * second-to-last entry - that is, the one which is now available for use
64 * (keeping the list null-terminated).
/* append_null_to_list: grow the NULL-terminated pointer array *list by one
 * slot. Walks to the current terminator, must_realloc()s to newentry + 2
 * entries, and re-terminates at newentry + 1. Per the comment above, it does
 * not fail (must_realloc aborts on OOM) and returns the index of the slot now
 * available for use. */
66 static int append_null_to_list(void ***list
)
/* Find the index of the current NULL terminator. */
71 for (; (*list
)[newentry
]; newentry
++)
74 *list
= must_realloc(*list
, (newentry
+ 2) * sizeof(void **));
/* Keep the list NULL-terminated; slot [newentry] is for the caller. */
75 (*list
)[newentry
+ 1] = NULL
;
79 /* Given a null-terminated array of strings, check whether @entry is one of the
/* string_in_list: return true if @entry exactly matches (strcmp) one of the
 * strings in the NULL-terminated array @list.
 * NOTE(review): the return statements are elided in this extraction. */
82 static bool string_in_list(char **list
, const char *entry
)
89 for (i
= 0; list
[i
]; i
++)
90 if (strcmp(list
[i
], entry
) == 0)
96 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
97 * "name=systemd". Do not fail.
/* cg_legacy_must_prefix_named: return a fresh allocation holding
 * "name=" + @entry (e.g. "systemd" -> "name=systemd"). Does not fail
 * (must_realloc aborts on OOM). Caller owns the returned string.
 * NOTE(review): len presumably comes from strlen(entry) on an elided line;
 * the buffer is len + 6 = len + STRLITERALLEN("name=") + 1 for the NUL. */
99 static char *cg_legacy_must_prefix_named(char *entry
)
105 prefixed
= must_realloc(NULL
, len
+ 6);
107 memcpy(prefixed
, "name=", STRLITERALLEN("name="));
108 memcpy(prefixed
+ STRLITERALLEN("name="), entry
, len
);
/* len + 5 == len + STRLITERALLEN("name="): terminate the result. */
109 prefixed
[len
+ 5] = '\0';
114 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
117 * We also handle named subsystems here. Any controller which is not a kernel
118 * subsystem, we prefix "name=". Any which is both a kernel and named subsystem,
119 * we refuse to use because we're not sure which we have here.
120 * (TODO: We could work around this in some cases by just remounting to be
121 * unambiguous, or by comparing mountpoint contents with current cgroup.)
123 * The last entry will always be NULL.
/* must_append_controller: append controller @entry to *@clist (growing it via
 * append_null_to_list). Entries that are neither in the kernel-subsystem list
 * @klist nor already "name="-prefixed are treated as named subsystems and get
 * the "name=" prefix. An entry present in BOTH @klist and @nlist is ambiguous
 * and is refused (see the block comment above this function). */
125 static void must_append_controller(char **klist
, char **nlist
, char ***clist
,
/* Ambiguous: both a kernel and a named subsystem — refuse it. */
131 if (string_in_list(klist
, entry
) && string_in_list(nlist
, entry
))
134 newentry
= append_null_to_list((void ***)clist
);
/* Already explicitly named, or a known kernel subsystem: copy verbatim. */
136 if (strncmp(entry
, "name=", 5) == 0)
137 copy
= must_copy_string(entry
);
138 else if (string_in_list(klist
, entry
))
139 copy
= must_copy_string(entry
);
/* Otherwise it is a named subsystem: prefix with "name=". */
141 copy
= cg_legacy_must_prefix_named(entry
);
143 (*clist
)[newentry
] = copy
;
146 /* Given a handler's cgroup data, return the struct hierarchy for the controller
147 * @c, or NULL if there is none.
/* cgfsng_get_hierarchy: look up the struct hierarchy handling @controller in
 * ops->hierarchies, or NULL-equivalent failure if there is none (per the
 * comment above this function).
 * Special cases visible below: a hierarchy whose controllers array exists but
 * is empty is the empty unified hierarchy; on a pure unified layout a request
 * for "devices" is answered via the bpf device controller flag.
 * NOTE(review): several branches (the elided lines around 151-158, 165,
 * 169-172) are missing from this extraction; the exact failure paths cannot
 * be confirmed here. */
149 static struct hierarchy
*cgfsng_get_hierarchy(struct cgroup_ops
*ops
,
150 const char *controller
)
/* No hierarchies were discovered at init time. */
156 if (!ops
->hierarchies
)
159 for (i
= 0; ops
->hierarchies
[i
]; i
++) {
161 /* This is the empty unified hierarchy. */
162 if (ops
->hierarchies
[i
]->controllers
&&
163 !ops
->hierarchies
[i
]->controllers
[0])
164 return ops
->hierarchies
[i
];
/* Pure cgroup2: the devices controller is implemented via bpf. */
166 } else if (pure_unified_layout(ops
) &&
167 strcmp(controller
, "devices") == 0) {
168 if (ops
->unified
->bpf_device_controller
)
/* Normal case: the hierarchy whose controller list contains @controller. */
173 if (string_in_list(ops
->hierarchies
[i
]->controllers
, controller
))
174 return ops
->hierarchies
[i
];
/* get_hierarchy: thin inline wrapper around cgfsng_get_hierarchy(); this is
 * the function installed as ops->get_hierarchy in cgfsng_ops_init(). */
180 static inline struct hierarchy
*get_hierarchy(struct cgroup_ops
*ops
,
181 const char *controller
)
183 return cgfsng_get_hierarchy(ops
, controller
);
186 /* Given two null-terminated lists of strings, return true if any string is in
/* controller_lists_intersect: true if any string of the NULL-terminated list
 * @l1 also appears in the NULL-terminated list @l2 (see comment above).
 * NOTE(review): the return statements are elided in this extraction. */
189 static bool controller_lists_intersect(char **l1
, char **l2
)
196 for (i
= 0; l1
[i
]; i
++) {
197 if (string_in_list(l2
, l1
[i
]))
204 /* For a null-terminated list of controllers @clist, return true if any of those
205 * controllers is already listed in the null-terminated list of hierarchies @hlist.
206 * Realistically, if one is present, all must be present.
/* controller_list_is_dup: true if any controller in @clist already appears in
 * the controller list of some hierarchy in the NULL-terminated @hlist
 * (realistically all-or-none; see comment above this function).
 * NOTE(review): the return statements are elided in this extraction. */
208 static bool controller_list_is_dup(struct hierarchy
**hlist
, char **clist
)
215 for (i
= 0; hlist
[i
]; i
++)
216 if (controller_lists_intersect(hlist
[i
]->controllers
, clist
))
222 /* Get the controllers from a mountinfo line. There are other ways we could get
223 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
224 * could parse the mount options. But we simply assume that the mountpoint must
225 * be /sys/fs/cgroup/controller-list
/* cg_hybrid_get_controllers: parse the controller list out of one
 * /proc/self/mountinfo @line (field 5, expected to be
 * /sys/fs/cgroup/<comma-list>; see comment above). For legacy (v1) mounts the
 * list is split on ',' and each token appended via must_append_controller();
 * a strdup'd copy of the raw list is handed back through @controllers.
 * NOTE(review): the skip-to-field loop body, return statements and the
 * non-CGROUP_SUPER_MAGIC path are elided in this extraction. */
227 static char **cg_hybrid_get_controllers(char **klist
, char **nlist
, char *line
,
228 int type
, char **controllers
)
230 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
231 * for legacy hierarchies.
235 char *p
= line
, *sep
= ",";
/* Skip ahead to the mountpoint field. */
238 for (i
= 0; i
< 4; i
++) {
245 /* Note, if we change how mountinfo works, then our caller will need to
246 * verify /sys/fs/cgroup/ in this field.
248 if (strncmp(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15) != 0)
257 if (type
== CGROUP_SUPER_MAGIC
) {
258 __do_free
char *dup
= NULL
;
260 /* strdup() here for v1 hierarchies. Otherwise
261 * lxc_iterate_parts() will destroy mountpoints such as
262 * "/sys/fs/cgroup/cpu,cpuacct".
264 dup
= must_copy_string(p
);
268 lxc_iterate_parts (tok
, dup
, sep
)
269 must_append_controller(klist
, nlist
, &aret
, tok
);
/* Transfer ownership of the raw list copy to the caller. */
270 *controllers
= move_ptr(dup
);
/* cg_unified_make_empty_controller: return a freshly allocated, empty
 * (immediately NULL-terminated) controller list — used when cgroup2's
 * cgroup.controllers file yields no controllers. */
277 static char **cg_unified_make_empty_controller(void)
282 newentry
= append_null_to_list((void ***)&aret
);
283 aret
[newentry
] = NULL
;
/* cg_unified_get_controllers: read a cgroup2 "cgroup.controllers" file at
 * @file and return its space-separated entries as a freshly allocated
 * NULL-terminated string array (built via append_null_to_list /
 * must_copy_string).
 * NOTE(review): sep's definition and the failure/return paths are elided in
 * this extraction. */
287 static char **cg_unified_get_controllers(const char *file
)
289 __do_free
char *buf
= NULL
;
294 buf
= read_file(file
);
298 lxc_iterate_parts(tok
, buf
, sep
) {
302 newentry
= append_null_to_list((void ***)&aret
);
303 copy
= must_copy_string(tok
);
304 aret
[newentry
] = copy
;
/* add_hierarchy: allocate a new struct hierarchy, take ownership of @clist,
 * @mountpoint and @base_path by storing the pointers directly, and append it
 * to the NULL-terminated array *@h (grown via append_null_to_list).
 * NOTE(review): the lines storing @type and the return of the new entry are
 * elided in this extraction — presumably it returns `new`. */
310 static struct hierarchy
*add_hierarchy(struct hierarchy
***h
, char **clist
, char *mountpoint
,
311 char *base_path
, int type
)
313 struct hierarchy
*new;
316 new = zalloc(sizeof(*new));
/* Ownership of these buffers transfers to the hierarchy. */
317 new->controllers
= clist
;
318 new->mountpoint
= mountpoint
;
319 new->base_path
= base_path
;
322 newentry
= append_null_to_list((void ***)h
);
323 (*h
)[newentry
] = new;
327 /* Get a copy of the mountpoint from @line, which is a line from
328 * /proc/self/mountinfo.
/* cg_hybrid_get_mountpoint: extract the mountpoint field from a
 * /proc/self/mountinfo @line, but only if it lives under
 * DEFAULT_CGROUP_MOUNTPOINT ("/sys/fs/cgroup/", 15 bytes). Returns a fresh
 * NUL-terminated copy, or NULL-equivalent failure otherwise.
 * NOTE(review): the field-skipping loop body, len computation and final
 * termination/return are elided in this extraction. */
330 static char *cg_hybrid_get_mountpoint(char *line
)
335 char *p
= line
, *sret
= NULL
;
/* Skip ahead to the mountpoint field. */
337 for (i
= 0; i
< 4; i
++) {
344 if (strncmp(p
, DEFAULT_CGROUP_MOUNTPOINT
"/", 15) != 0)
/* The mountpoint ends at the next space. */
347 p2
= strchr(p
+ 15, ' ');
353 sret
= must_realloc(NULL
, len
+ 1);
354 memcpy(sret
, p
, len
);
/* must_append_string: append a copy of @entry to the NULL-terminated string
 * array *@list, growing it via append_null_to_list. Does not fail (the
 * must_* helpers abort on OOM). */
359 static void must_append_string(char ***list
, char *entry
)
364 newentry
= append_null_to_list((void ***)list
);
365 copy
= must_copy_string(entry
);
366 (*list
)[newentry
] = copy
;
/* get_existing_subsystems: parse /proc/self/cgroup and fill @klist with
 * kernel controller names and @nlist with "name=" (named) controllers. A
 * cgroup v2 membership line is recorded in @klist under the synthetic name
 * "cgroup2" (see the comment mid-function). Lines are read with getline(3);
 * the comma-separated controller field is split with lxc_iterate_parts().
 * NOTE(review): the error returns, the v2-detection condition and the final
 * return value are elided in this extraction. */
369 static int get_existing_subsystems(char ***klist
, char ***nlist
)
371 __do_free
char *line
= NULL
;
372 __do_fclose
FILE *f
= NULL
;
375 f
= fopen("/proc/self/cgroup", "re");
379 while (getline(&line
, &len
, f
) != -1) {
/* Each line looks like "N:controller-list:path"; find the first ':'. */
381 p
= strchr(line
, ':');
390 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
391 * contains an entry of the form:
395 * In this case we use "cgroup2" as controller name.
398 must_append_string(klist
, "cgroup2");
402 lxc_iterate_parts(tok
, p
, ",") {
/* Named subsystems go to @nlist, kernel subsystems to @klist. */
403 if (strncmp(tok
, "name=", 5) == 0)
404 must_append_string(nlist
, tok
);
406 must_append_string(klist
, tok
);
/* trim: strip trailing newline characters from @s in place.
 * NOTE(review): the len initialization and the truncation inside the loop are
 * elided in this extraction. */
413 static void trim(char *s
)
418 while ((len
> 1) && (s
[len
- 1] == '\n'))
424 * Mount cgroup hierarchies directly without using bind-mounts. The main
425 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
426 * cgroups for the LXC_AUTO_CGROUP_FULL option.
/* __cg_mount_direct: mount hierarchy @h directly at @controllerpath without
 * bind-mounts (see the comment above this function). Defaults to fstype
 * "cgroup2" with MS_RELATIME; for a v1 hierarchy the controller list is
 * joined with ',' and passed as mount data (fstype presumably switched to
 * "cgroup" on an elided line — confirm against the full source). Returns the
 * mount(2) result handling, whose tail is elided here. */
428 static int __cg_mount_direct(struct hierarchy
*h
, const char *controllerpath
)
430 __do_free
char *controllers
= NULL
;
431 char *fstype
= "cgroup2";
432 unsigned long flags
= 0;
438 flags
|= MS_RELATIME
;
/* Legacy (v1) hierarchy: pass "ctrl1,ctrl2,..." as mount data. */
440 if (h
->version
!= CGROUP2_SUPER_MAGIC
) {
441 controllers
= lxc_string_join(",", (const char **)h
->controllers
, false);
447 ret
= mount("cgroup", controllerpath
, fstype
, flags
, controllers
);
/* cg_mount_cgroup_full: convenience wrapper for the LXC_AUTO_CGROUP_FULL
 * style mount; simply forwards to __cg_mount_direct(). */
454 static inline int cg_mount_cgroup_full(struct hierarchy
*h
,
455 const char *controllerpath
)
457 return __cg_mount_direct(h
, controllerpath
)
;
/* cgfsng_mount: mount the cgroup tree under @root. On a pure unified layout
 * this is a single cgroup2 mount at <root>/sys/fs/cgroup; otherwise a tmpfs
 * is mounted there (nosuid,nodev,noexec,relatime, size=10240k,mode=755) and
 * one subdirectory per discovered hierarchy is created and mounted.
 * Returns true on success.
 * NOTE(review): several guard conditions, the early-exit when !hierarchies,
 * and the on_error/cleanup paths are elided in this extraction. */
460 static bool cgfsng_mount(struct cgroup_ops
*ops
, const char *root
)
462 __do_free
char *cgroup_root
= NULL
;
467 return ret_set_errno(false, ENOENT
);
469 if (!ops
->hierarchies
)
472 cgroup_root
= must_make_path(root
, DEFAULT_CGROUP_MOUNTPOINT
, NULL
);
/* Pure cgroup2: one direct mount covers everything. */
473 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
)
474 return cg_mount_cgroup_full(ops
->unified
, cgroup_root
) == 0;
/* Legacy/hybrid: tmpfs scaffold first, then per-controller mounts. */
477 ret
= safe_mount(NULL
, cgroup_root
, "tmpfs",
478 MS_NOSUID
| MS_NODEV
| MS_NOEXEC
| MS_RELATIME
,
479 "size=10240k,mode=755", root
);
483 for (int i
= 0; ops
->hierarchies
[i
]; i
++) {
484 __do_free
char *controllerpath
= NULL
;
485 struct hierarchy
*h
= ops
->hierarchies
[i
];
/* The directory name is the last component of the host mountpoint. */
486 char *controller
= strrchr(h
->mountpoint
, '/');
492 controllerpath
= must_make_path(cgroup_root
, controller
, NULL
);
493 if (dir_exists(controllerpath
))
496 ret
= mkdir(controllerpath
, 0755);
498 log_error_errno(goto on_error
, errno
,
499 "Error creating cgroup path: %s",
502 ret
= cg_mount_cgroup_full( h
, controllerpath
);
/* cgfsng_num_hierarchies: count the entries of ops->hierarchies; -1/ENOENT on
 * the elided guard condition.
 * NOTE(review): the counter declaration and final return are elided in this
 * extraction. */
512 static int cgfsng_num_hierarchies(struct cgroup_ops
*ops
)
517 return ret_set_errno(-1, ENOENT
);
519 if (!ops
->hierarchies
)
522 for (; ops
->hierarchies
[i
]; i
++)
/* cgfsng_get_hierarchies: store the controller list of the @n-th hierarchy in
 * *@out. Walks the first @n entries to verify the index is in range,
 * returning false/ENOENT if the array is shorter than requested.
 * NOTE(review): the initial guard conditions and the success return are
 * elided in this extraction. */
528 static bool cgfsng_get_hierarchies(struct cgroup_ops
*ops
, int n
, char ***out
)
533 return ret_set_errno(false, ENOENT
);
535 if (!ops
->hierarchies
)
/* Ensure index n exists before dereferencing it. */
539 for (i
= 0; i
< n
; i
++)
540 if (!ops
->hierarchies
[i
])
541 return ret_set_errno(false, ENOENT
);
543 *out
= ops
->hierarchies
[i
]->controllers
;
/* cgfsng_get: generic read of @file under @cgroup on the hierarchy that
 * handles @controller. The contents are returned through *@value (caller
 * frees); returns true iff the read produced a non-NULL buffer.
 * dot_or_empty() presumably turns a relative @cgroup into "./" — confirm in
 * cgroup_utils. */
548 static bool cgfsng_get(struct cgroup_ops
*ops
, const char *controller
,
549 const char *cgroup
, const char *file
, char **value
)
551 __do_free
char *path
= NULL
;
554 h
= ops
->get_hierarchy(ops
, controller
);
558 path
= must_make_path(dot_or_empty(cgroup
), cgroup
, file
, NULL
);
/* Read relative to the hierarchy's cached directory fd. */
559 *value
= readat_file(h
->fd
, path
);
560 return *value
!= NULL
;
/* cgfsng_get_memory: read memory-controller file @file for @cgroup into
 * *@value, translating cgroup2 file names to their legacy v1 equivalents when
 * the memory hierarchy is not unified:
 *   memory.max          -> memory.limit_in_bytes
 *   memory.swap.max     -> memory.memsw.limit_in_bytes
 *   memory.swap.current -> memory.memsw.usage_in_bytes
 *   memory.current      -> memory.usage_in_bytes
 * ret is set to CGROUP_SUPER_MAGIC or CGROUP2_SUPER_MAGIC accordingly; the
 * final return (elided here) presumably reports that version on success. */
563 static int cgfsng_get_memory(struct cgroup_ops
*ops
, const char *cgroup
,
564 const char *file
, char **value
)
566 __do_free
char *path
= NULL
;
570 h
= ops
->get_hierarchy(ops
, "memory");
/* Legacy hierarchy: map unified file names onto v1 names. */
574 if (!is_unified_hierarchy(h
)) {
575 if (strcmp(file
, "memory.max") == 0)
576 file
= "memory.limit_in_bytes";
577 else if (strcmp(file
, "memory.swap.max") == 0)
578 file
= "memory.memsw.limit_in_bytes";
579 else if (strcmp(file
, "memory.swap.current") == 0)
580 file
= "memory.memsw.usage_in_bytes";
581 else if (strcmp(file
, "memory.current") == 0)
582 file
= "memory.usage_in_bytes";
583 ret
= CGROUP_SUPER_MAGIC
;
585 ret
= CGROUP2_SUPER_MAGIC
;
588 path
= must_make_path(dot_or_empty(cgroup
), cgroup
, file
, NULL
);
589 *value
= readat_file(h
->fd
, path
);
/* cgfsng_get_memory_stats_fd: open @cgroup's "memory.stat" file on the memory
 * hierarchy and return the fd (O_RDONLY|O_CLOEXEC|O_NOFOLLOW), opened
 * relative to the hierarchy's cached directory fd. Caller closes the fd. */
596 static int cgfsng_get_memory_stats_fd(struct cgroup_ops
*ops
, const char *cgroup
)
598 __do_free
char *path
= NULL
;
601 h
= ops
->get_hierarchy(ops
, "memory");
605 path
= must_make_path(dot_or_empty(cgroup
), cgroup
, "memory.stat", NULL
);
606 return openat(h
->fd
, path
, O_RDONLY
| O_CLOEXEC
| O_NOFOLLOW
);
/* cgfsng_get_memory_current: read "memory.current" (v1: usage_in_bytes, via
 * the name translation in cgfsng_get_memory) for @cgroup into *@value. */
609 static int cgfsng_get_memory_current(struct cgroup_ops
*ops
, const char *cgroup
,
612 return cgfsng_get_memory(ops
, cgroup
, "memory.current", value
);
/* cgfsng_get_memory_swap_current: read "memory.swap.current" (v1:
 * memsw.usage_in_bytes) for @cgroup into *@value. */
615 static int cgfsng_get_memory_swap_current(struct cgroup_ops
*ops
,
616 const char *cgroup
, char **value
)
618 return cgfsng_get_memory(ops
, cgroup
, "memory.swap.current", value
);
/* cgfsng_get_memory_max: read "memory.max" (v1: limit_in_bytes) for @cgroup
 * into *@value. */
621 static int cgfsng_get_memory_max(struct cgroup_ops
*ops
, const char *cgroup
,
624 return cgfsng_get_memory(ops
, cgroup
, "memory.max", value
);
/* cgfsng_get_memory_swap_max: read "memory.swap.max" (v1:
 * memsw.limit_in_bytes) for @cgroup into *@value. */
627 static int cgfsng_get_memory_swap_max(struct cgroup_ops
*ops
,
628 const char *cgroup
, char **value
)
630 return cgfsng_get_memory(ops
, cgroup
, "memory.swap.max", value
);
/* cgfsng_get_memory_stats: read "memory.stat" for @cgroup into *@value. */
633 static int cgfsng_get_memory_stats(struct cgroup_ops
*ops
, const char *cgroup
,
636 return cgfsng_get_memory(ops
, cgroup
, "memory.stat", value
);
/* readat_cpuset: read "cpuset.cpus" relative to @cgroup_fd, falling back to
 * "cpuset.cpus.effective"; return the first non-empty result (ownership
 * transferred via move_ptr). The elided tail presumably returns NULL when
 * both files are empty/unreadable. */
639 static char *readat_cpuset(int cgroup_fd
)
641 __do_free
char *val
= NULL
;
643 val
= readat_file(cgroup_fd
, "cpuset.cpus");
644 if (val
&& strcmp(val
, "") != 0)
645 return move_ptr(val
);
/* Fall back to the effective mask. */
648 val
= readat_file(cgroup_fd
, "cpuset.cpus.effective");
649 if (val
&& strcmp(val
, "") != 0)
650 return move_ptr(val
);
/* cgfsng_get_cpuset_cpus: return the cpuset cpu mask for @cgroup. Opens the
 * cgroup directory on the cpuset hierarchy and reads cpuset.cpus{,.effective}
 * via readat_cpuset(); if both are empty, walks up parent directories ("../")
 * until a cgroup with a non-empty mask is found (see the comment in the
 * body). ret records CGROUP_SUPER_MAGIC vs CGROUP2_SUPER_MAGIC.
 * NOTE(review): the output parameter, loop structure, and returns are elided
 * in this extraction.
 * NOTE(review): the fprintf(stderr, "2222: ...") below looks like leftover
 * debug output — consider replacing it with the project's logging macro. */
655 static int cgfsng_get_cpuset_cpus(struct cgroup_ops
*ops
, const char *cgroup
,
658 __do_close_prot_errno
int cgroup_fd
= -EBADF
;
659 __do_free
char *path
= NULL
;
664 h
= ops
->get_hierarchy(ops
, "cpuset");
668 if (!is_unified_hierarchy(h
))
669 ret
= CGROUP_SUPER_MAGIC
;
671 ret
= CGROUP2_SUPER_MAGIC
;
674 path
= must_make_path(dot_or_empty(cgroup
), cgroup
, NULL
);
675 cgroup_fd
= openat_safe(h
->fd
, path
);
679 v
= readat_cpuset(cgroup_fd
);
686 * cpuset.cpus and cpuset.cpus.effective are empty so we need to look
687 * the nearest ancestor with a non-empty cpuset.cpus{.effective} file.
692 fd
= openat_safe(cgroup_fd
, "../");
/* Stop once we leave the cgroup filesystem. */
693 if (fd
< 0 || !is_cgroup_fd(fd
)) {
694 fprintf(stderr
, "2222: %s\n", strerror(errno
));
698 close_prot_errno_replace(cgroup_fd
, fd
);
700 v
= readat_cpuset(fd
);
/* cgfsng_get_io: read blkio/io file @file for @cgroup into *@value on the
 * hierarchy handling "blkio". ret records whether the hierarchy is legacy
 * (CGROUP_SUPER_MAGIC) or unified (CGROUP2_SUPER_MAGIC); the visible error
 * path converts errno via ret_errno().
 * NOTE(review): guards and the success return are elided in this
 * extraction. */
710 static int cgfsng_get_io(struct cgroup_ops
*ops
, const char *cgroup
,
711 const char *file
, char **value
)
713 __do_free
char *path
= NULL
;
717 h
= ops
->get_hierarchy(ops
, "blkio");
721 if (!is_unified_hierarchy(h
))
722 ret
= CGROUP_SUPER_MAGIC
;
724 ret
= CGROUP2_SUPER_MAGIC
;
726 path
= must_make_path(dot_or_empty(cgroup
), cgroup
, file
, NULL
);
727 *value
= readat_file(h
->fd
, path
);
731 return ret_errno(errno
);
/* cgfsng_get_io_service_bytes: read blkio.io_service_bytes_recursive for
 * @cgroup into *@value. */
737 static int cgfsng_get_io_service_bytes(struct cgroup_ops
*ops
,
738 const char *cgroup
, char **value
)
740 return cgfsng_get_io(ops
, cgroup
, "blkio.io_service_bytes_recursive", value
);
/* cgfsng_get_io_service_time: read blkio.io_service_time_recursive for
 * @cgroup into *@value. */
743 static int cgfsng_get_io_service_time(struct cgroup_ops
*ops
,
744 const char *cgroup
, char **value
)
746 return cgfsng_get_io(ops
, cgroup
, "blkio.io_service_time_recursive", value
);
/* cgfsng_get_io_serviced: read blkio.io_serviced_recursive for @cgroup into
 * *@value. */
749 static int cgfsng_get_io_serviced(struct cgroup_ops
*ops
, const char *cgroup
,
752 return cgfsng_get_io(ops
, cgroup
, "blkio.io_serviced_recursive", value
);
/* cgfsng_get_io_merged: read blkio.io_merged_recursive for @cgroup into
 * *@value. */
755 static int cgfsng_get_io_merged(struct cgroup_ops
*ops
, const char *cgroup
,
758 return cgfsng_get_io(ops
, cgroup
, "blkio.io_merged_recursive", value
);
/* cgfsng_get_io_wait_time: read blkio.io_wait_time_recursive for @cgroup into
 * *@value. */
761 static int cgfsng_get_io_wait_time(struct cgroup_ops
*ops
, const char *cgroup
,
764 return cgfsng_get_io(ops
, cgroup
, "blkio.io_wait_time_recursive", value
);
/* cgfsng_can_use_cpuview: report whether the per-container cpu view can be
 * used. Requires either a pure unified layout, or legacy (non-unified) "cpu"
 * AND "cpuacct" hierarchies both present.
 * NOTE(review): the return statements are elided in this extraction. */
767 static bool cgfsng_can_use_cpuview(struct cgroup_ops
*ops
)
769 struct hierarchy
*cpu
, *cpuacct
;
771 if (pure_unified_layout(ops
))
774 cpu
= ops
->get_hierarchy(ops
, "cpu");
775 if (!cpu
|| is_unified_hierarchy(cpu
))
778 cpuacct
= ops
->get_hierarchy(ops
, "cpuacct");
779 if (!cpuacct
|| is_unified_hierarchy(cpuacct
))
785 /* At startup, parse_hierarchies finds all the info we need about cgroup
786 * mountpoints and current cgroups, and stores it in @d.
/* cg_hybrid_init: discover all mounted cgroup hierarchies for a legacy or
 * hybrid layout (see the comment above this function). Reads
 * /proc/1/cgroup as the base cgroup info, collects existing kernel/named
 * subsystems via get_existing_subsystems(), then walks
 * /proc/self/mountinfo line by line: classifies each mount with
 * get_cgroup_version(), updates ops->cgroup_layout
 * (UNKNOWN -> UNIFIED/LEGACY, and either + the other kind -> HYBRID),
 * extracts the controller list and mountpoint, resolves the current base
 * cgroup, and registers each new hierarchy via add_hierarchy().
 * NOTE(review): many guard conditions, the `next:` cleanup label, and the
 * final return are elided in this extraction. */
788 static int cg_hybrid_init(struct cgroup_ops
*ops
)
790 __do_free
char *basecginfo
= NULL
;
791 __do_free
char *line
= NULL
;
792 __do_fclose
FILE *f
= NULL
;
795 char **klist
= NULL
, **nlist
= NULL
;
797 /* Root spawned containers escape the current cgroup, so use init's
798 * cgroups as our base in that case.
800 basecginfo
= read_file("/proc/1/cgroup");
802 return ret_set_errno(-1, ENOMEM
);
804 ret
= get_existing_subsystems(&klist
, &nlist
);
806 return log_error_errno(-1, errno
, "Failed to retrieve available legacy cgroup controllers");
808 f
= fopen("/proc/self/mountinfo", "re");
810 return log_error_errno(-1, errno
, "Failed to open \"/proc/self/mountinfo\"");
812 while (getline(&line
, &len
, f
) != -1) {
814 struct hierarchy
*new;
815 char *base_cgroup
= NULL
, *mountpoint
= NULL
;
816 char **controller_list
= NULL
;
817 __do_free
char *controllers
= NULL
;
/* Classify this mount as cgroup v1 or v2. */
819 type
= get_cgroup_version(line
);
/* Only one unified hierarchy can exist; skip duplicates. */
823 if (type
== CGROUP2_SUPER_MAGIC
&& ops
->unified
)
/* Track the overall layout: seeing both kinds makes it hybrid. */
826 if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNKNOWN
) {
827 if (type
== CGROUP2_SUPER_MAGIC
)
828 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
829 else if (type
== CGROUP_SUPER_MAGIC
)
830 ops
->cgroup_layout
= CGROUP_LAYOUT_LEGACY
;
831 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_UNIFIED
) {
832 if (type
== CGROUP_SUPER_MAGIC
)
833 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
834 } else if (ops
->cgroup_layout
== CGROUP_LAYOUT_LEGACY
) {
835 if (type
== CGROUP2_SUPER_MAGIC
)
836 ops
->cgroup_layout
= CGROUP_LAYOUT_HYBRID
;
839 controller_list
= cg_hybrid_get_controllers(klist
, nlist
, line
,
841 if (!controller_list
&& type
== CGROUP_SUPER_MAGIC
)
/* A v1 hierarchy whose controllers are already registered is a dup. */
844 if (type
== CGROUP_SUPER_MAGIC
)
845 if (controller_list_is_dup(ops
->hierarchies
, controller_list
))
846 ret_set_errno(goto next
, EEXIST
);
848 mountpoint
= cg_hybrid_get_mountpoint(line
);
850 log_error_errno(goto next
, EINVAL
, "Failed parsing mountpoint from \"%s\"", line
);
/* Resolve where we currently sit inside this hierarchy. */
852 if (type
== CGROUP_SUPER_MAGIC
)
853 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, controller_list
[0], CGROUP_SUPER_MAGIC
);
855 base_cgroup
= cg_hybrid_get_current_cgroup(basecginfo
, NULL
, CGROUP2_SUPER_MAGIC
);
857 log_error_errno(goto next
, EINVAL
, "Failed to find current cgroup %s", mountpoint
);
860 prune_init_scope(base_cgroup
);
/* cgroup2: the real controller list comes from cgroup.controllers. */
862 if (type
== CGROUP2_SUPER_MAGIC
) {
863 char *cgv2_ctrl_path
;
865 cgv2_ctrl_path
= must_make_path(mountpoint
, base_cgroup
,
866 "cgroup.controllers",
869 controller_list
= cg_unified_get_controllers(cgv2_ctrl_path
);
870 free(cgv2_ctrl_path
);
871 if (!controller_list
)
872 controller_list
= cg_unified_make_empty_controller();
875 new = add_hierarchy(&ops
->hierarchies
, controller_list
, mountpoint
, base_cgroup
, type
);
876 new->__controllers
= move_ptr(controllers
);
877 if (type
== CGROUP2_SUPER_MAGIC
&& !ops
->unified
)
883 free_string_list(controller_list
);
888 free_string_list(klist
);
889 free_string_list(nlist
);
/* cg_unified_init: initialize @ops for a pure cgroup2 (unified) layout.
 * Checks via unified_cgroup_hierarchy() that the system really is cgroup2
 * (-ENOMEDIUM and non-CGROUP2 results bail out), resolves init's current
 * cgroup, prunes the init scope, reads the delegatable controllers from
 * cgroup.controllers (falling back to an empty list), registers the single
 * unified hierarchy, marks bpf device-controller support when available, and
 * returns CGROUP2_SUPER_MAGIC on success. */
894 static int cg_unified_init(struct cgroup_ops
*ops
)
896 __do_free
char *subtree_path
= NULL
;
900 struct hierarchy
*new;
901 char *base_cgroup
= NULL
;
903 ret
= unified_cgroup_hierarchy();
904 if (ret
== -ENOMEDIUM
)
905 return ret_errno(ENOMEDIUM
);
907 if (ret
!= CGROUP2_SUPER_MAGIC
)
910 base_cgroup
= cg_unified_get_current_cgroup(1);
912 return ret_errno(EINVAL
);
913 prune_init_scope(base_cgroup
);
916 * We assume that the cgroup we're currently in has been delegated to
917 * us and we are free to further delegate all of the controllers listed
918 * in cgroup.controllers further down the hierarchy.
920 mountpoint
= must_copy_string(DEFAULT_CGROUP_MOUNTPOINT
);
921 subtree_path
= must_make_path(mountpoint
, base_cgroup
, "cgroup.controllers", NULL
);
922 delegatable
= cg_unified_get_controllers(subtree_path
);
/* No readable controller list: register an empty one. */
924 delegatable
= cg_unified_make_empty_controller();
926 /* TODO: If the user requested specific controllers via lxc.cgroup.use
927 * we should verify here. The reason I'm not doing it right is that I'm
928 * not convinced that lxc.cgroup.use will be the future since it is a
929 * global property. I much rather have an option that lets you request
930 * controllers per container.
933 new = add_hierarchy(&ops
->hierarchies
, delegatable
, mountpoint
, base_cgroup
, CGROUP2_SUPER_MAGIC
);
/* The devices controller on cgroup2 is implemented via bpf. */
935 if (bpf_devices_cgroup_supported())
936 new->bpf_device_controller
= 1;
938 ops
->cgroup_layout
= CGROUP_LAYOUT_UNIFIED
;
940 return CGROUP2_SUPER_MAGIC
;
/* cg_init: try the pure unified setup first; if the system is not pure
 * cgroup2 (cg_unified_init did not report CGROUP2_SUPER_MAGIC), fall back to
 * the hybrid/legacy discovery in cg_hybrid_init().
 * NOTE(review): the success return for the unified case is elided in this
 * extraction. */
943 static int cg_init(struct cgroup_ops
*ops
)
947 ret
= cg_unified_init(ops
);
951 if (ret
== CGROUP2_SUPER_MAGIC
)
954 return cg_hybrid_init(ops
);
/* cgfsng_ops_init: allocate and populate the cgfsng cgroup_ops vtable.
 * Zero-initializes the struct, runs cg_init() to discover hierarchies
 * (failure path elided here), installs every operation implemented in this
 * file, and transfers ownership of the struct to the caller via move_ptr
 * (which cancels the __do_free cleanup). Returns NULL/ENOMEM on allocation
 * failure. */
957 struct cgroup_ops
*cgfsng_ops_init(void)
959 __do_free
struct cgroup_ops
*cgfsng_ops
= NULL
;
961 cgfsng_ops
= malloc(sizeof(struct cgroup_ops
));
963 return ret_set_errno(NULL
, ENOMEM
);
965 memset(cgfsng_ops
, 0, sizeof(struct cgroup_ops
));
966 cgfsng_ops
->cgroup_layout
= CGROUP_LAYOUT_UNKNOWN
;
968 if (cg_init(cgfsng_ops
))
/* Core operations. */
971 cgfsng_ops
->num_hierarchies
= cgfsng_num_hierarchies
;
972 cgfsng_ops
->get
= cgfsng_get
;
973 cgfsng_ops
->get_hierarchies
= cgfsng_get_hierarchies
;
974 cgfsng_ops
->get_hierarchy
= get_hierarchy
;
975 cgfsng_ops
->driver
= "cgfsng";
976 cgfsng_ops
->version
= "1.0.0";
977 cgfsng_ops
->mount
= cgfsng_mount
;
/* Memory-controller operations. */
980 cgfsng_ops
->get_memory_stats_fd
= cgfsng_get_memory_stats_fd
;
981 cgfsng_ops
->get_memory_stats
= cgfsng_get_memory_stats
;
982 cgfsng_ops
->get_memory_max
= cgfsng_get_memory_max
;
983 cgfsng_ops
->get_memory_swap_max
= cgfsng_get_memory_swap_max
;
984 cgfsng_ops
->get_memory_current
= cgfsng_get_memory_current
;
985 cgfsng_ops
->get_memory_swap_current
= cgfsng_get_memory_swap_current
;
/* Cpuset operations. */
988 cgfsng_ops
->get_cpuset_cpus
= cgfsng_get_cpuset_cpus
;
989 cgfsng_ops
->can_use_cpuview
= cgfsng_can_use_cpuview
;
/* Block-I/O operations. */
992 cgfsng_ops
->get_io_service_bytes
= cgfsng_get_io_service_bytes
;
993 cgfsng_ops
->get_io_service_time
= cgfsng_get_io_service_time
;
994 cgfsng_ops
->get_io_serviced
= cgfsng_get_io_serviced
;
995 cgfsng_ops
->get_io_merged
= cgfsng_get_io_merged
;
996 cgfsng_ops
->get_io_wait_time
= cgfsng_get_io_wait_time
;
999 return move_ptr(cgfsng_ops
);