1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 /*
4 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
5 * cgroup backend. The original cgfs.c was designed to be as flexible
6 * as possible. It would try to find cgroup filesystems no matter where
7 * or how you had them mounted, and deduce the most usable mount for
8 * each controller.
9 *
10 * This new implementation assumes that cgroup filesystems are mounted
11 * under /sys/fs/cgroup/clist where clist is either the controller, or
12 * a comma-separated list of controllers.
13 */
14
15 #ifndef _GNU_SOURCE
16 #define _GNU_SOURCE 1
17 #endif
18 #include <ctype.h>
19 #include <dirent.h>
20 #include <errno.h>
21 #include <grp.h>
22 #include <linux/kdev_t.h>
23 #include <linux/types.h>
24 #include <poll.h>
25 #include <signal.h>
26 #include <stdint.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/types.h>
31 #include <unistd.h>
32
33 #include "af_unix.h"
34 #include "caps.h"
35 #include "cgroup.h"
36 #include "cgroup2_devices.h"
37 #include "cgroup_utils.h"
38 #include "commands.h"
39 #include "conf.h"
40 #include "config.h"
41 #include "log.h"
42 #include "macro.h"
43 #include "mainloop.h"
44 #include "memory_utils.h"
45 #include "storage/storage.h"
46 #include "utils.h"
47
48 #ifndef HAVE_STRLCPY
49 #include "include/strlcpy.h"
50 #endif
51
52 #ifndef HAVE_STRLCAT
53 #include "include/strlcat.h"
54 #endif
55
56 lxc_log_define(cgfsng, cgroup);
57
58 /* Given a pointer to a null-terminated array of pointers, realloc to add one
59 * entry, and point the new entry to NULL. Do not fail. Return the index to the
60 * second-to-last entry - that is, the one which is now available for use
61 * (keeping the list null-terminated).
62 */
63 static int append_null_to_list(void ***list)
64 {
65 int newentry = 0;
66
67 if (*list)
68 for (; (*list)[newentry]; newentry++)
69 ;
70
71 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
72 (*list)[newentry + 1] = NULL;
73 return newentry;
74 }
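/* Illustrative sketch (not part of the original file): growing a
 * NULL-terminated string list with append_null_to_list(); the variable
 * names are hypothetical:
 *
 *   char **list = NULL;
 *   int idx;
 *
 *   idx = append_null_to_list((void ***)&list); // idx == 0
 *   list[idx] = must_copy_string("cpu");
 *   idx = append_null_to_list((void ***)&list); // idx == 1
 *   list[idx] = must_copy_string("memory");
 *   // list is now { "cpu", "memory", NULL }
 */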
75
76 /* Given a null-terminated array of strings, check whether @entry is one of the
77 * strings.
78 */
79 static bool string_in_list(char **list, const char *entry)
80 {
81 if (!list)
82 return false;
83
84 for (int i = 0; list[i]; i++)
85 if (strcmp(list[i], entry) == 0)
86 return true;
87
88 return false;
89 }
90
91 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
92 * "name=systemd". Do not fail.
93 */
94 static char *cg_legacy_must_prefix_named(char *entry)
95 {
96 size_t len;
97 char *prefixed;
98
99 len = strlen(entry);
100 prefixed = must_realloc(NULL, len + 6);
101
102 memcpy(prefixed, "name=", STRLITERALLEN("name="));
103 memcpy(prefixed + STRLITERALLEN("name="), entry, len);
104 prefixed[len + 5] = '\0';
105
106 return prefixed;
107 }
108
109 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
110 * we are called.
111 *
112 * We also handle named subsystems here. Any controller that is not a kernel
113 * subsystem gets a "name=" prefix. Any controller that is both a kernel and a
114 * named subsystem we refuse to use, since we cannot be sure which one it is.
115 * (TODO: We could work around this in some cases by just remounting to be
116 * unambiguous, or by comparing mountpoint contents with current cgroup.)
117 *
118 * The last entry will always be NULL.
119 */
120 static void must_append_controller(char **klist, char **nlist, char ***clist,
121 char *entry)
122 {
123 int newentry;
124 char *copy;
125
126 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
127 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
128 ERROR("It is both a named and kernel subsystem");
129 return;
130 }
131
132 newentry = append_null_to_list((void ***)clist);
133
134 if (strncmp(entry, "name=", 5) == 0)
135 copy = must_copy_string(entry);
136 else if (string_in_list(klist, entry))
137 copy = must_copy_string(entry);
138 else
139 copy = cg_legacy_must_prefix_named(entry);
140
141 (*clist)[newentry] = copy;
142 }
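/* Sketch of the naming rules above (illustrative, hypothetical values):
 *
 *   char *klist[] = { "cpu", "cpuset", NULL }; // kernel subsystems
 *   char *nlist[] = { "systemd", NULL };       // named subsystems
 *   char **clist = NULL;
 *
 *   must_append_controller(klist, nlist, &clist, "cpu");     // kept as "cpu"
 *   must_append_controller(klist, nlist, &clist, "systemd"); // becomes "name=systemd"
 *   // an entry present in both klist and nlist would be refused
 */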
143
144 /* Given a handler's cgroup data, return the struct hierarchy for the controller
145 * @controller, or NULL if there is none.
146 */
147 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *controller)
148 {
149 if (!ops->hierarchies)
150 return log_trace_errno(NULL, errno, "There are no usable cgroup controllers");
151
152 for (int i = 0; ops->hierarchies[i]; i++) {
153 if (!controller) {
154 /* This is the empty unified hierarchy. */
155 if (ops->hierarchies[i]->controllers &&
156 !ops->hierarchies[i]->controllers[0])
157 return ops->hierarchies[i];
158 continue;
159 } else if (pure_unified_layout(ops) &&
160 strcmp(controller, "devices") == 0) {
161 if (ops->unified->bpf_device_controller)
162 return ops->unified;
163 break;
164 }
165
166 if (string_in_list(ops->hierarchies[i]->controllers, controller))
167 return ops->hierarchies[i];
168 }
169
170 if (controller)
171 WARN("There is no usable %s controller", controller);
172 else
173 WARN("There is no empty unified cgroup hierarchy");
174
175 return ret_set_errno(NULL, ENOENT);
176 }
177
178 #define BATCH_SIZE 50
179 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
180 {
181 int newbatches = (newlen / BATCH_SIZE) + 1;
182 int oldbatches = (oldlen / BATCH_SIZE) + 1;
183
184 if (!*mem || newbatches > oldbatches)
185 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
186 }
187
188 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
189 {
190 size_t full = oldlen + newlen;
191
192 batch_realloc(dest, oldlen, full + 1);
193
194 memcpy(*dest + oldlen, new, newlen + 1);
195 }
196
197 /* Slurp in a whole file */
198 static char *read_file(const char *fnam)
199 {
200 __do_free char *buf = NULL, *line = NULL;
201 __do_fclose FILE *f = NULL;
202 size_t len = 0, fulllen = 0;
203 int linelen;
204
205 f = fopen(fnam, "re");
206 if (!f)
207 return NULL;
208
209 while ((linelen = getline(&line, &len, f)) != -1) {
210 append_line(&buf, fulllen, line, linelen);
211 fulllen += linelen;
212 }
213
214 return move_ptr(buf);
215 }
216
217 /* Taken over and modified from the kernel sources. */
218 #define NBITS 32 /* bits in uint32_t */
219 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
220 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
221
222 static void set_bit(unsigned bit, uint32_t *bitarr)
223 {
224 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
225 }
226
227 static void clear_bit(unsigned bit, uint32_t *bitarr)
228 {
229 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
230 }
231
232 static bool is_set(unsigned bit, uint32_t *bitarr)
233 {
234 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
235 }
236
237 /* Create cpumask from cpulist aka turn:
238 *
239 * 0,2-3
240 *
241 * into bit array
242 *
243 * 1 0 1 1
244 */
245 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
246 {
247 __do_free uint32_t *bitarr = NULL;
248 char *token;
249 size_t arrlen;
250
251 arrlen = BITS_TO_LONGS(nbits);
252 bitarr = calloc(arrlen, sizeof(uint32_t));
253 if (!bitarr)
254 return ret_set_errno(NULL, ENOMEM);
255
256 lxc_iterate_parts(token, buf, ",") {
257 errno = 0;
258 unsigned end, start;
259 char *range;
260
261 start = strtoul(token, NULL, 0);
262 end = start;
263 range = strchr(token, '-');
264 if (range)
265 end = strtoul(range + 1, NULL, 0);
266
267 if (!(start <= end))
268 return ret_set_errno(NULL, EINVAL);
269
270 if (end >= nbits)
271 return ret_set_errno(NULL, EINVAL);
272
273 while (start <= end)
274 set_bit(start++, bitarr);
275 }
276
277 return move_ptr(bitarr);
278 }
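/* Example (illustrative only): parsing "0,2-3" with nbits = 8 sets bits
 * 0, 2 and 3. lxc_iterate_parts() modifies its buffer, so a writable
 * copy must be passed:
 *
 *   char buf[] = "0,2-3";
 *   __do_free uint32_t *mask = lxc_cpumask(buf, 8);
 *   // mask[0] == 0xd; is_set(0, mask), is_set(2, mask) and
 *   // is_set(3, mask) are true, is_set(1, mask) is false
 */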
279
280 /* Turn cpumask into simple, comma-separated cpulist. */
281 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
282 {
283 __do_free_string_list char **cpulist = NULL;
284 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
285 int ret;
286
287 for (size_t i = 0; i < nbits; i++) {
288 if (!is_set(i, bitarr))
289 continue;
290
291 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
292 if (ret < 0 || (size_t)ret >= sizeof(numstr))
293 return NULL;
294
295 ret = lxc_append_string(&cpulist, numstr);
296 if (ret < 0)
297 return ret_set_errno(NULL, ENOMEM);
298 }
299
300 if (!cpulist)
301 return ret_set_errno(NULL, ENOMEM);
302
303 return lxc_string_join(",", (const char **)cpulist, false);
304 }
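/* Round-trip sketch (illustrative only): serializing the mask from the
 * example above emits every set bit as its own entry; ranges are not
 * re-created:
 *
 *   __do_free char *list = lxc_cpumask_to_cpulist(mask, 8);
 *   // list == "0,2,3"
 */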
305
306 static ssize_t get_max_cpus(char *cpulist)
307 {
308 char *c1, *c2;
309 char *maxcpus = cpulist;
310 size_t cpus = 0;
311
312 c1 = strrchr(maxcpus, ',');
313 if (c1)
314 c1++;
315
316 c2 = strrchr(maxcpus, '-');
317 if (c2)
318 c2++;
319
320 if (!c1 && !c2)
321 c1 = maxcpus;
322 else if (c1 > c2)
323 c2 = c1;
324 else if (c1 < c2)
325 c1 = c2;
326 else if (!c1 && c2)
327 c1 = c2;
328
329 errno = 0;
330 cpus = strtoul(c1, NULL, 0);
331 if (errno != 0)
332 return -1;
333
334 return cpus;
335 }
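/* Examples (illustrative only): get_max_cpus() inspects the last range
 * or single entry of a cpulist and returns the highest cpu number:
 *
 *   get_max_cpus("0,2-3"); // 3, taken from "2-3"
 *   get_max_cpus("0-7");   // 7
 *   get_max_cpus("5");     // 5
 */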
336
337 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
338 #define __OFFLINE_CPUS "/sys/devices/system/cpu/offline"
339 static bool cg_legacy_filter_and_set_cpus(const char *parent_cgroup,
340 char *child_cgroup, bool am_initialized)
341 {
342 __do_free char *cpulist = NULL, *fpath = NULL, *isolcpus = NULL,
343 *offlinecpus = NULL, *posscpus = NULL;
344 __do_free uint32_t *isolmask = NULL, *offlinemask = NULL,
345 *possmask = NULL;
346 int ret;
347 ssize_t i;
348 ssize_t maxisol = 0, maxoffline = 0, maxposs = 0;
349 bool flipped_bit = false;
350
351 fpath = must_make_path(parent_cgroup, "cpuset.cpus", NULL);
352 posscpus = read_file(fpath);
353 if (!posscpus)
354 return log_error_errno(false, errno, "Failed to read file \"%s\"", fpath);
355
356 /* Get maximum number of cpus found in possible cpuset. */
357 maxposs = get_max_cpus(posscpus);
358 if (maxposs < 0 || maxposs >= INT_MAX - 1)
359 return false;
360
361 if (file_exists(__ISOL_CPUS)) {
362 isolcpus = read_file(__ISOL_CPUS);
363 if (!isolcpus)
364 return log_error_errno(false, errno, "Failed to read file \"%s\"", __ISOL_CPUS);
365
366 if (isdigit(isolcpus[0])) {
367 /* Get maximum number of cpus found in isolated cpuset. */
368 maxisol = get_max_cpus(isolcpus);
369 if (maxisol < 0 || maxisol >= INT_MAX - 1)
370 return false;
371 }
372
373 if (maxposs < maxisol)
374 maxposs = maxisol;
375 maxposs++;
376 } else {
377 TRACE("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
378 }
379
380 if (file_exists(__OFFLINE_CPUS)) {
381 offlinecpus = read_file(__OFFLINE_CPUS);
382 if (!offlinecpus)
383 return log_error_errno(false, errno, "Failed to read file \"%s\"", __OFFLINE_CPUS);
384
385 if (isdigit(offlinecpus[0])) {
386 /* Get maximum number of cpus found in offline cpuset. */
387 maxoffline = get_max_cpus(offlinecpus);
388 if (maxoffline < 0 || maxoffline >= INT_MAX - 1)
389 return false;
390 }
391
392 if (maxposs < maxoffline)
393 maxposs = maxoffline;
394 maxposs++;
395 } else {
396 TRACE("The path \""__OFFLINE_CPUS"\" to read offline cpus from does not exist");
397 }
398
399 if ((maxisol == 0) && (maxoffline == 0)) {
400 cpulist = move_ptr(posscpus);
401 goto copy_parent;
402 }
403
404 possmask = lxc_cpumask(posscpus, maxposs);
405 if (!possmask)
406 return log_error_errno(false, errno, "Failed to create cpumask for possible cpus");
407
408 if (maxisol > 0) {
409 isolmask = lxc_cpumask(isolcpus, maxposs);
410 if (!isolmask)
411 return log_error_errno(false, errno, "Failed to create cpumask for isolated cpus");
412 }
413
414 if (maxoffline > 0) {
415 offlinemask = lxc_cpumask(offlinecpus, maxposs);
416 if (!offlinemask)
417 return log_error_errno(false, errno, "Failed to create cpumask for offline cpus");
418 }
419
420 for (i = 0; i < maxposs; i++) {
421 if ((isolmask && !is_set(i, isolmask)) ||
422 (offlinemask && !is_set(i, offlinemask)) ||
423 !is_set(i, possmask))
424 continue;
425
426 flipped_bit = true;
427 clear_bit(i, possmask);
428 }
429
430 if (flipped_bit) {
431 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
432 TRACE("Removed isolated or offline cpus from cpuset");
433 } else {
434 cpulist = move_ptr(posscpus);
435 TRACE("No isolated or offline cpus present in cpuset");
436 }
437 if (!cpulist)
438 return log_error_errno(false, errno, "Failed to create cpu list");
439
440 copy_parent:
441 if (!am_initialized) {
442 ret = lxc_write_openat(child_cgroup, "cpuset.cpus", cpulist, strlen(cpulist));
443 if (ret < 0)
444 return log_error_errno(false,
445 errno, "Failed to write cpu list to \"%s/cpuset.cpus\"",
446 child_cgroup);
447
448 TRACE("Copied cpu settings of parent cgroup");
449 }
450
451 return true;
452 }
453
454 /* Copy contents of parent(@path)/@file to @path/@file */
455 static bool copy_parent_file(const char *parent_cgroup,
456 const char *child_cgroup, const char *file)
457 {
458 __do_free char *parent_file = NULL, *value = NULL;
459 int len = 0;
460 int ret;
461
462 parent_file = must_make_path(parent_cgroup, file, NULL);
463 len = lxc_read_from_file(parent_file, NULL, 0);
464 if (len <= 0)
465 return log_error_errno(false, errno, "Failed to determine buffer size");
466
467 value = must_realloc(NULL, len + 1);
468 value[len] = '\0';
469 ret = lxc_read_from_file(parent_file, value, len);
470 if (ret != len)
471 return log_error_errno(false, errno, "Failed to read from parent file \"%s\"", parent_file);
472
473 ret = lxc_write_openat(child_cgroup, file, value, len);
474 if (ret < 0 && errno != EACCES)
475 return log_error_errno(false, errno, "Failed to write \"%s\" to file \"%s/%s\"",
476 value, child_cgroup, file);
477 return true;
478 }
479
480 static inline bool is_unified_hierarchy(const struct hierarchy *h)
481 {
482 return h->version == CGROUP2_SUPER_MAGIC;
483 }
484
485 /*
486 * Initialize the cpuset hierarchy in the first directory of @cgroup_leaf and
487 * set cgroup.clone_children so that children inherit settings. Since the
488 * h->base_path is populated by init or ourselves, we know it is already
489 * initialized.
490 *
491 * Returns -1 on error, 0 when we didn't create a cgroup, 1 if we created a
492 * cgroup.
493 */
494 static int cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h,
495 const char *cgroup_leaf)
496 {
497 __do_free char *parent_cgroup = NULL, *child_cgroup = NULL, *dup = NULL;
498 __do_close int cgroup_fd = -EBADF;
499 int fret = -1;
500 int ret;
501 char v;
502 char *leaf, *slash;
503
504 if (is_unified_hierarchy(h))
505 return 0;
506
507 if (!string_in_list(h->controllers, "cpuset"))
508 return 0;
509
510 if (!cgroup_leaf)
511 return ret_set_errno(-1, EINVAL);
512
513 dup = strdup(cgroup_leaf);
514 if (!dup)
515 return ret_set_errno(-1, ENOMEM);
516
517 parent_cgroup = must_make_path(h->mountpoint, h->container_base_path, NULL);
518
519 leaf = dup;
520 leaf += strspn(leaf, "/");
521 slash = strchr(leaf, '/');
522 if (slash)
523 *slash = '\0';
524 child_cgroup = must_make_path(parent_cgroup, leaf, NULL);
525 if (slash)
526 *slash = '/';
527
528 fret = 1;
529 ret = mkdir(child_cgroup, 0755);
530 if (ret < 0) {
531 if (errno != EEXIST)
532 return log_error_errno(-1, errno, "Failed to create directory \"%s\"", child_cgroup);
533
534 fret = 0;
535 }
536
537 cgroup_fd = lxc_open_dirfd(child_cgroup);
538 if (cgroup_fd < 0)
539 return -1;
540
541 ret = lxc_readat(cgroup_fd, "cgroup.clone_children", &v, 1);
542 if (ret < 0)
543 return log_error_errno(-1, errno, "Failed to read file \"%s/cgroup.clone_children\"", child_cgroup);
544
545 /* Make sure any isolated cpus are removed from cpuset.cpus. */
546 if (!cg_legacy_filter_and_set_cpus(parent_cgroup, child_cgroup, v == '1'))
547 return log_error_errno(-1, errno, "Failed to remove isolated cpus");
548
549 /* Already set for us by someone else. */
550 if (v == '1')
551 TRACE("\"cgroup.clone_children\" was already set to \"1\"");
552
553 /* copy parent's settings */
554 if (!copy_parent_file(parent_cgroup, child_cgroup, "cpuset.mems"))
555 return log_error_errno(-1, errno, "Failed to copy \"cpuset.mems\" settings");
556
557 /* Set clone_children so children inherit our settings */
558 ret = lxc_writeat(cgroup_fd, "cgroup.clone_children", "1", 1);
559 if (ret < 0)
560 return log_error_errno(-1, errno, "Failed to write 1 to \"%s/cgroup.clone_children\"", child_cgroup);
561
562 return fret;
563 }
564
565 /* Given two null-terminated lists of strings, return true if any string is in
566 * both.
567 */
568 static bool controller_lists_intersect(char **l1, char **l2)
569 {
570 if (!l1 || !l2)
571 return false;
572
573 for (int i = 0; l1[i]; i++)
574 if (string_in_list(l2, l1[i]))
575 return true;
576
577 return false;
578 }
579
580 /* For a null-terminated list of controllers @clist, return true if any of those
581 * controllers is already listed in the null-terminated list of hierarchies @hlist.
582 * Realistically, if one is present, all must be present.
583 */
584 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
585 {
586 if (!hlist)
587 return false;
588
589 for (int i = 0; hlist[i]; i++)
590 if (controller_lists_intersect(hlist[i]->controllers, clist))
591 return true;
592
593 return false;
594 }
595
596 /* Return true if the controller @entry is found in the null-terminated list of
597 * hierarchies @hlist.
598 */
599 static bool controller_found(struct hierarchy **hlist, char *entry)
600 {
601 if (!hlist)
602 return false;
603
604 for (int i = 0; hlist[i]; i++)
605 if (string_in_list(hlist[i]->controllers, entry))
606 return true;
607
608 return false;
609 }
610
611 /* Return true if all of the controllers which we require have been found. The
612 * required list is freezer and anything in lxc.cgroup.use.
613 */
614 static bool all_controllers_found(struct cgroup_ops *ops)
615 {
616 struct hierarchy **hlist;
617
618 if (!ops->cgroup_use)
619 return true;
620
621 hlist = ops->hierarchies;
622 for (char **cur = ops->cgroup_use; cur && *cur; cur++)
623 if (!controller_found(hlist, *cur))
624 return log_error(false, "No %s controller mountpoint found", *cur);
625
626 return true;
627 }
628
629 /* Get the controllers from a mountinfo line. There are other ways we could get
630 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
631 * could parse the mount options. But we simply assume that the mountpoint must
632 * be /sys/fs/cgroup/controller-list.
633 */
634 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
635 int type)
636 {
637 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
638 * for legacy hierarchies.
639 */
640 __do_free_string_list char **aret = NULL;
641 int i;
642 char *p2, *tok;
643 char *p = line, *sep = ",";
644
645 for (i = 0; i < 4; i++) {
646 p = strchr(p, ' ');
647 if (!p)
648 return NULL;
649 p++;
650 }
651
652 /* Note, if we change how mountinfo works, then our caller will need to
653 * verify /sys/fs/cgroup/ in this field.
654 */
655 if (strncmp(p, DEFAULT_CGROUP_MOUNTPOINT "/", 15) != 0)
656 return log_error(NULL, "Found hierarchy not under " DEFAULT_CGROUP_MOUNTPOINT ": \"%s\"", p);
657
658 p += 15;
659 p2 = strchr(p, ' ');
660 if (!p2)
661 return log_error(NULL, "Corrupt mountinfo");
662 *p2 = '\0';
663
664 if (type == CGROUP_SUPER_MAGIC) {
665 __do_free char *dup = NULL;
666
667 /* strdup() here for v1 hierarchies. Otherwise
668 * lxc_iterate_parts() will destroy mountpoints such as
669 * "/sys/fs/cgroup/cpu,cpuacct".
670 */
671 dup = must_copy_string(p);
672 if (!dup)
673 return NULL;
674
675 lxc_iterate_parts(tok, dup, sep)
676 must_append_controller(klist, nlist, &aret, tok);
677 }
678 *p2 = ' ';
679
680 return move_ptr(aret);
681 }
682
683 static char **cg_unified_make_empty_controller(void)
684 {
685 __do_free_string_list char **aret = NULL;
686 int newentry;
687
688 newentry = append_null_to_list((void ***)&aret);
689 aret[newentry] = NULL;
690 return move_ptr(aret);
691 }
692
693 static char **cg_unified_get_controllers(const char *file)
694 {
695 __do_free char *buf = NULL;
696 __do_free_string_list char **aret = NULL;
697 char *sep = " \t\n";
698 char *tok;
699
700 buf = read_file(file);
701 if (!buf)
702 return NULL;
703
704 lxc_iterate_parts(tok, buf, sep) {
705 int newentry;
706 char *copy;
707
708 newentry = append_null_to_list((void ***)&aret);
709 copy = must_copy_string(tok);
710 aret[newentry] = copy;
711 }
712
713 return move_ptr(aret);
714 }
715
716 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
717 char *container_base_path, int type)
718 {
719 struct hierarchy *new;
720 int newentry;
721
722 new = zalloc(sizeof(*new));
723 new->controllers = clist;
724 new->mountpoint = mountpoint;
725 new->container_base_path = container_base_path;
726 new->version = type;
727 new->cgfd_con = -EBADF;
728 new->cgfd_limit = -EBADF;
729 new->cgfd_mon = -EBADF;
730
731 newentry = append_null_to_list((void ***)h);
732 (*h)[newentry] = new;
733 return new;
734 }
735
736 /* Get a copy of the mountpoint from @line, which is a line from
737 * /proc/self/mountinfo.
738 */
739 static char *cg_hybrid_get_mountpoint(char *line)
740 {
741 char *p = line, *sret = NULL;
742 size_t len;
743 char *p2;
744
745 for (int i = 0; i < 4; i++) {
746 p = strchr(p, ' ');
747 if (!p)
748 return NULL;
749 p++;
750 }
751
752 if (strncmp(p, DEFAULT_CGROUP_MOUNTPOINT "/", 15) != 0)
753 return NULL;
754
755 p2 = strchr(p + 15, ' ');
756 if (!p2)
757 return NULL;
758 *p2 = '\0';
759
760 len = strlen(p);
761 sret = must_realloc(NULL, len + 1);
762 memcpy(sret, p, len);
763 sret[len] = '\0';
764
765 return sret;
766 }
767
768 /* Given a multi-line string, return a null-terminated copy of the current line. */
769 static char *copy_to_eol(char *p)
770 {
771 char *p2, *sret;
772 size_t len;
773
774 p2 = strchr(p, '\n');
775 if (!p2)
776 return NULL;
777
778 len = p2 - p;
779 sret = must_realloc(NULL, len + 1);
780 memcpy(sret, p, len);
781 sret[len] = '\0';
782
783 return sret;
784 }
785
786 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
787 * /proc/self/cgroup file. Check whether controller c is present.
788 */
789 static bool controller_in_clist(char *cgline, char *c)
790 {
791 __do_free char *tmp = NULL;
792 char *tok, *eol;
793 size_t len;
794
795 eol = strchr(cgline, ':');
796 if (!eol)
797 return false;
798
799 len = eol - cgline;
800 tmp = must_realloc(NULL, len + 1);
801 memcpy(tmp, cgline, len);
802 tmp[len] = '\0';
803
804 lxc_iterate_parts(tok, tmp, ",")
805 if (strcmp(tok, c) == 0)
806 return true;
807
808 return false;
809 }
810
811 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
812 * @controller.
813 */
814 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
815 int type)
816 {
817 char *p = basecginfo;
818
819 for (;;) {
820 bool is_cgv2_base_cgroup = false;
821
822 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
823 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
824 is_cgv2_base_cgroup = true;
825
826 p = strchr(p, ':');
827 if (!p)
828 return NULL;
829 p++;
830
831 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
832 p = strchr(p, ':');
833 if (!p)
834 return NULL;
835 p++;
836 return copy_to_eol(p);
837 }
838
839 p = strchr(p, '\n');
840 if (!p)
841 return NULL;
842 p++;
843 }
844 }
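/* Example (illustrative only): with @basecginfo containing
 *
 *   12:cpu,cpuacct:/lxc/f1
 *   1:name=systemd:/init.scope
 *   0::/init.scope
 *
 * cg_hybrid_get_current_cgroup(basecginfo, "cpu", CGROUP_SUPER_MAGIC)
 * returns "/lxc/f1", while asking for type CGROUP2_SUPER_MAGIC returns
 * "/init.scope" from the "0::" entry.
 */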
845
846 static void must_append_string(char ***list, char *entry)
847 {
848 int newentry;
849 char *copy;
850
851 newentry = append_null_to_list((void ***)list);
852 copy = must_copy_string(entry);
853 (*list)[newentry] = copy;
854 }
855
856 static int get_existing_subsystems(char ***klist, char ***nlist)
857 {
858 __do_free char *line = NULL;
859 __do_fclose FILE *f = NULL;
860 size_t len = 0;
861
862 f = fopen("/proc/self/cgroup", "re");
863 if (!f)
864 return -1;
865
866 while (getline(&line, &len, f) != -1) {
867 char *p, *p2, *tok;
868 p = strchr(line, ':');
869 if (!p)
870 continue;
871 p++;
872 p2 = strchr(p, ':');
873 if (!p2)
874 continue;
875 *p2 = '\0';
876
877 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
878 * contains an entry of the form:
879 *
880 * 0::/some/path
881 *
882 * In this case we use "cgroup2" as controller name.
883 */
884 if ((p2 - p) == 0) {
885 must_append_string(klist, "cgroup2");
886 continue;
887 }
888
889 lxc_iterate_parts(tok, p, ",") {
890 if (strncmp(tok, "name=", 5) == 0)
891 must_append_string(nlist, tok);
892 else
893 must_append_string(klist, tok);
894 }
895 }
896
897 return 0;
898 }
899
900 static char *trim(char *s)
901 {
902 size_t len;
903
904 len = strlen(s);
905 while ((len > 0) && (s[len - 1] == '\n'))
906 s[--len] = '\0';
907
908 return s;
909 }
910
911 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
912 {
913 int i;
914 struct hierarchy **it;
915
916 if (!ops->hierarchies) {
917 TRACE(" No hierarchies found");
918 return;
919 }
920
921 TRACE(" Hierarchies:");
922 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
923 int j;
924 char **cit;
925
926 TRACE(" %d: base_cgroup: %s", i, (*it)->container_base_path ? (*it)->container_base_path : "(null)");
927 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
928 TRACE(" controllers:");
929 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
930 TRACE(" %d: %s", j, *cit);
931 }
932 }
933
934 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
935 char **nlist)
936 {
937 int k;
938 char **it;
939
940 TRACE("basecginfo is:");
941 TRACE("%s", basecginfo);
942
943 for (k = 0, it = klist; it && *it; it++, k++)
944 TRACE("kernel subsystem %d: %s", k, *it);
945
946 for (k = 0, it = nlist; it && *it; it++, k++)
947 TRACE("named subsystem %d: %s", k, *it);
948 }
949
950 static int cgroup_tree_remove(struct hierarchy **hierarchies,
951 const char *container_cgroup)
952 {
953 if (!container_cgroup || !hierarchies)
954 return 0;
955
956 for (int i = 0; hierarchies[i]; i++) {
957 struct hierarchy *h = hierarchies[i];
958 int ret;
959
960 if (!h->container_limit_path)
961 continue;
962
963 ret = lxc_rm_rf(h->container_limit_path);
964 if (ret < 0)
965 WARN("Failed to destroy \"%s\"", h->container_limit_path);
966
967 if (h->container_limit_path != h->container_full_path)
968 free_disarm(h->container_limit_path);
969 free_disarm(h->container_full_path);
970 }
971
972 return 0;
973 }
974
975 struct generic_userns_exec_data {
976 struct hierarchy **hierarchies;
977 const char *container_cgroup;
978 struct lxc_conf *conf;
979 uid_t origuid; /* target uid in parent namespace */
980 char *path;
981 };
982
983 static int cgroup_tree_remove_wrapper(void *data)
984 {
985 struct generic_userns_exec_data *arg = data;
986 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
987 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
988 int ret;
989
990 if (!lxc_setgroups(0, NULL) && errno != EPERM)
991 return log_error_errno(-1, errno, "Failed to setgroups(0, NULL)");
992
993 ret = setresgid(nsgid, nsgid, nsgid);
994 if (ret < 0)
995 return log_error_errno(-1, errno, "Failed to setresgid(%d, %d, %d)",
996 (int)nsgid, (int)nsgid, (int)nsgid);
997
998 ret = setresuid(nsuid, nsuid, nsuid);
999 if (ret < 0)
1000 return log_error_errno(-1, errno, "Failed to setresuid(%d, %d, %d)",
1001 (int)nsuid, (int)nsuid, (int)nsuid);
1002
1003 return cgroup_tree_remove(arg->hierarchies, arg->container_cgroup);
1004 }
1005
1006 __cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
1007 struct lxc_handler *handler)
1008 {
1009 int ret;
1010
1011 if (!ops) {
1012 ERROR("Called with uninitialized cgroup operations");
1013 return;
1014 }
1015
1016 if (!ops->hierarchies)
1017 return;
1018
1019 if (!handler) {
1020 ERROR("Called with uninitialized handler");
1021 return;
1022 }
1023
1024 if (!handler->conf) {
1025 ERROR("Called with uninitialized conf");
1026 return;
1027 }
1028
1029 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
1030 ret = bpf_program_cgroup_detach(handler->conf->cgroup2_devices);
1031 if (ret < 0)
1032 WARN("Failed to detach bpf program from cgroup");
1033 #endif
1034
1035 if (handler->conf && !lxc_list_empty(&handler->conf->id_map)) {
1036 struct generic_userns_exec_data wrap = {
1037 .conf = handler->conf,
1038 .container_cgroup = ops->container_cgroup,
1039 .hierarchies = ops->hierarchies,
1040 .origuid = 0,
1041 };
1042 ret = userns_exec_1(handler->conf, cgroup_tree_remove_wrapper,
1043 &wrap, "cgroup_tree_remove_wrapper");
1044 } else {
1045 ret = cgroup_tree_remove(ops->hierarchies, ops->container_cgroup);
1046 }
1047 if (ret < 0)
1048 SYSWARN("Failed to destroy cgroups");
1049 }
1050
1051 __cgfsng_ops static void cgfsng_monitor_destroy(struct cgroup_ops *ops,
1052 struct lxc_handler *handler)
1053 {
1054 int len;
1055 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1056 const struct lxc_conf *conf;
1057
1058 if (!ops) {
1059 ERROR("Called with uninitialized cgroup operations");
1060 return;
1061 }
1062
1063 if (!ops->hierarchies)
1064 return;
1065
1066 if (!handler) {
1067 ERROR("Called with uninitialized handler");
1068 return;
1069 }
1070
1071 if (!handler->conf) {
1072 ERROR("Called with uninitialized conf");
1073 return;
1074 }
1075 conf = handler->conf;
1076
1077 len = snprintf(pidstr, sizeof(pidstr), "%d", handler->monitor_pid);
1078 if (len < 0 || (size_t)len >= sizeof(pidstr))
1079 return;
1080
1081 for (int i = 0; ops->hierarchies[i]; i++) {
1082 __do_free char *pivot_path = NULL;
1083 struct hierarchy *h = ops->hierarchies[i];
1084 int ret;
1085
1086 if (!h->monitor_full_path)
1087 continue;
1088
1089 /* Monitor might have died before we entered the cgroup. */
1090 if (handler->monitor_pid <= 0) {
1091 WARN("No valid monitor process found while destroying cgroups");
1092 goto try_lxc_rm_rf;
1093 }
1094
1095 if (conf && conf->cgroup_meta.monitor_dir)
1096 pivot_path = must_make_path(h->mountpoint,
1097 h->container_base_path,
1098 conf->cgroup_meta.monitor_dir,
1099 CGROUP_PIVOT, NULL);
1100 else if (conf && conf->cgroup_meta.dir)
1101 pivot_path = must_make_path(h->mountpoint,
1102 h->container_base_path,
1103 conf->cgroup_meta.dir,
1104 CGROUP_PIVOT, NULL);
1105 else
1106 pivot_path = must_make_path(h->mountpoint,
1107 h->container_base_path,
1108 CGROUP_PIVOT, NULL);
1109
1110 ret = mkdir_p(pivot_path, 0755);
1111 if (ret < 0 && errno != EEXIST) {
1112 ERROR("Failed to create %s", pivot_path);
1113 goto try_lxc_rm_rf;
1114 }
1115
1116 ret = lxc_write_openat(pivot_path, "cgroup.procs", pidstr, len);
1117 if (ret != 0) {
1118 SYSWARN("Failed to move monitor %s to \"%s\"", pidstr, pivot_path);
1119 continue;
1120 }
1121
1122 try_lxc_rm_rf:
1123 ret = lxc_rm_rf(h->monitor_full_path);
1124 if (ret < 0)
1125 WARN("Failed to destroy \"%s\"", h->monitor_full_path);
1126 }
1127 }
1128
1129 static int mkdir_eexist_on_last(const char *dir, mode_t mode)
1130 {
1131 const char *tmp = dir;
1132 const char *orig = dir;
1133 size_t orig_len;
1134
1135 orig_len = strlen(dir);
1136 do {
1137 __do_free char *makeme = NULL;
1138 int ret;
1139 size_t cur_len;
1140
1141 dir = tmp + strspn(tmp, "/");
1142 tmp = dir + strcspn(dir, "/");
1143
1144 cur_len = dir - orig;
1145 makeme = strndup(orig, cur_len);
1146 if (!makeme)
1147 return ret_set_errno(-1, ENOMEM);
1148
1149 ret = mkdir(makeme, mode);
1150 if (ret < 0 && ((errno != EEXIST) || (orig_len == cur_len)))
1151 return log_error_errno(-1, errno, "Failed to create directory \"%s\"", makeme);
1152 } while (tmp != dir);
1153
1154 return 0;
1155 }
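/* Illustrative behaviour (hypothetical path): calling
 *
 *   mkdir_eexist_on_last("/sys/fs/cgroup/cpuset/lxc/f1", 0755);
 *
 * tolerates EEXIST for every prefix ("/sys", "/sys/fs", ...) but treats
 * EEXIST on the final component "f1" as an error, so callers can detect
 * that the cgroup they wanted to create already existed.
 */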
1156
1157 static bool cgroup_tree_create(struct cgroup_ops *ops, struct lxc_conf *conf,
1158 struct hierarchy *h, const char *cgroup_tree,
1159 const char *cgroup_leaf, bool payload,
1160 const char *cgroup_limit_dir)
1161 {
1162 __do_free char *path = NULL, *limit_path = NULL;
1163 int ret, ret_cpuset;
1164
1165 path = must_make_path(h->mountpoint, h->container_base_path, cgroup_leaf, NULL);
1166 if (dir_exists(path))
1167 return log_warn_errno(false, errno, "The %s cgroup already existed", path);
1168
1169 ret_cpuset = cg_legacy_handle_cpuset_hierarchy(h, cgroup_leaf);
1170 if (ret_cpuset < 0)
1171 return log_error_errno(false, errno, "Failed to handle legacy cpuset controller");
1172
1173 if (payload && cgroup_limit_dir) {
1174 /* With isolation both parts must not already exist. */
1175 limit_path = must_make_path(h->mountpoint,
1176 h->container_base_path,
1177 cgroup_limit_dir, NULL);
1178
1179 ret = mkdir_eexist_on_last(limit_path, 0755);
1180 if (ret < 0)
1181 return log_error_errno(false, errno,
1182 "Failed to create %s limiting cgroup",
1183 limit_path);
1184
1185 h->cgfd_limit = lxc_open_dirfd(limit_path);
1186 if (h->cgfd_limit < 0)
1187 return log_error_errno(false, errno,
1188 "Failed to open %s", path);
1189 h->container_limit_path = move_ptr(limit_path);
1190
1191 /*
1192 * With isolation the legacy devices cgroup needs to be
1193 * initialized early, as it typically contains an 'a' (all)
1194 * line, which is not possible once a subdirectory has been
1195 * created.
1196 */
1197 if (string_in_list(h->controllers, "devices") &&
1198 !ops->setup_limits_legacy(ops, conf, true))
1199 return log_error(false, "Failed to setup legacy device limits");
1200 }
1201
1202 ret = mkdir_eexist_on_last(path, 0755);
1203 if (ret < 0) {
1204 /*
1205 * This is the cpuset controller and
1206 * cg_legacy_handle_cpuset_hierarchy() has created our target
1207 * directory for us to ensure correct initialization.
1208 */
1209 if (ret_cpuset != 1 || cgroup_tree)
1210 return log_error_errno(false, errno, "Failed to create %s cgroup", path);
1211 }
1212
1213 if (payload) {
1214 h->cgfd_con = lxc_open_dirfd(path);
1215 if (h->cgfd_con < 0)
1216 return log_error_errno(false, errno, "Failed to open %s", path);
1217 h->container_full_path = move_ptr(path);
1218 if (h->cgfd_limit < 0)
1219 h->cgfd_limit = h->cgfd_con;
1220 if (!h->container_limit_path)
1221 h->container_limit_path = h->container_full_path;
1222 } else {
1223 h->cgfd_mon = lxc_open_dirfd(path);
1224 if (h->cgfd_mon < 0)
1225 return log_error_errno(false, errno, "Failed to open %s", path);
1226 h->monitor_full_path = move_ptr(path);
1227 }
1228
1229 return true;
1230 }
1231
1232 static void cgroup_tree_leaf_remove(struct hierarchy *h, bool payload)
1233 {
1234 __do_free char *full_path = NULL, *__limit_path = NULL;
1235 char *limit_path = NULL;
1236
1237 if (payload) {
1238 __lxc_unused __do_close int fd = move_fd(h->cgfd_con);
1239 full_path = move_ptr(h->container_full_path);
1240 limit_path = move_ptr(h->container_limit_path);
1241 if (limit_path != full_path)
1242 __limit_path = limit_path;
1243 } else {
1244 __lxc_unused __do_close int fd = move_fd(h->cgfd_mon);
1245 full_path = move_ptr(h->monitor_full_path);
1246 }
1247
1248 if (full_path && rmdir(full_path))
1249 SYSWARN("Failed to rmdir(\"%s\") cgroup", full_path);
1250 if (limit_path && rmdir(limit_path))
1251 SYSWARN("Failed to rmdir(\"%s\") cgroup", limit_path);
1252 }
1253
1254 /*
1255 * Check that lxc.cgroup.dir is not set together with the more specific
1256 * monitor/container directory options, and that the monitor and container
1257 * directories are either both set or both unset.
1258 * Returns true on success, false on error.
1259 */
1260 static bool check_cgroup_dir_config(struct lxc_conf *conf)
1261 {
1262 const char *monitor_dir = conf->cgroup_meta.monitor_dir,
1263 *container_dir = conf->cgroup_meta.container_dir,
1264 *namespace_dir = conf->cgroup_meta.namespace_dir;
1265
1266 /* none of the new options are set, all is fine */
1267 if (!monitor_dir && !container_dir && !namespace_dir)
1268 return true;
1269
1270 /* Some are set, make sure lxc.cgroup.dir is not also set. */
1271 if (conf->cgroup_meta.dir)
1272 return log_error_errno(false, EINVAL,
1273 "lxc.cgroup.dir conflicts with lxc.cgroup.dir.payload/monitor");
1274
1275 /* make sure both monitor and payload are set */
1276 if (!monitor_dir || !container_dir)
1277 return log_error_errno(false, EINVAL,
1278 "lxc.cgroup.dir.payload and lxc.cgroup.dir.monitor must both be set");
1279
1280 /* namespace_dir may be empty */
1281 return true;
1282 }
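/* Illustrative sketch of the rules above, in terms of the conf fields
 * (the values are hypothetical):
 *
 *   // valid: monitor and container both set, namespace dir optional
 *   // cgroup_meta.monitor_dir   = "lxc.monitor/f1"
 *   // cgroup_meta.container_dir = "lxc.payload/f1"
 *   // cgroup_meta.namespace_dir = "ns"
 *
 *   // invalid: cgroup_meta.dir set together with any of the above
 */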
1283
1284 __cgfsng_ops static inline bool cgfsng_monitor_create(struct cgroup_ops *ops,
1285 struct lxc_handler *handler)
1286 {
1287 __do_free char *monitor_cgroup = NULL, *__cgroup_tree = NULL;
1288 const char *cgroup_tree;
1289 int idx = 0;
1290 int i;
1291 size_t len;
1292 char *suffix = NULL;
1293 struct lxc_conf *conf;
1294
1295 if (!ops)
1296 return ret_set_errno(false, ENOENT);
1297
1298 if (!ops->hierarchies)
1299 return true;
1300
1301 if (ops->monitor_cgroup)
1302 return ret_set_errno(false, EEXIST);
1303
1304 if (!handler || !handler->conf)
1305 return ret_set_errno(false, EINVAL);
1306
1307 conf = handler->conf;
1308
1309 if (!check_cgroup_dir_config(conf))
1310 return false;
1311
1312 if (conf->cgroup_meta.monitor_dir) {
1313 cgroup_tree = NULL;
1314 monitor_cgroup = strdup(conf->cgroup_meta.monitor_dir);
1315 } else if (conf->cgroup_meta.dir) {
1316 cgroup_tree = conf->cgroup_meta.dir;
1317 monitor_cgroup = must_concat(&len, conf->cgroup_meta.dir, "/",
1318 DEFAULT_MONITOR_CGROUP_PREFIX,
1319 handler->name,
1320 CGROUP_CREATE_RETRY, NULL);
1321 } else if (ops->cgroup_pattern) {
1322 __cgroup_tree = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1323 if (!__cgroup_tree)
1324 return ret_set_errno(false, ENOMEM);
1325
1326 cgroup_tree = __cgroup_tree;
1327 monitor_cgroup = must_concat(&len, cgroup_tree, "/",
1328 DEFAULT_MONITOR_CGROUP,
1329 CGROUP_CREATE_RETRY, NULL);
1330 } else {
1331 cgroup_tree = NULL;
1332 monitor_cgroup = must_concat(&len, DEFAULT_MONITOR_CGROUP_PREFIX,
1333 handler->name,
1334 CGROUP_CREATE_RETRY, NULL);
1335 }
1336 if (!monitor_cgroup)
1337 return ret_set_errno(false, ENOMEM);
1338
1339 if (!conf->cgroup_meta.monitor_dir) {
1340 suffix = monitor_cgroup + len - CGROUP_CREATE_RETRY_LEN;
1341 *suffix = '\0';
1342 }
1343 do {
1344 if (idx && suffix)
1345 sprintf(suffix, "-%d", idx);
1346
1347 for (i = 0; ops->hierarchies[i]; i++) {
1348 if (cgroup_tree_create(ops, handler->conf,
1349 ops->hierarchies[i], cgroup_tree,
1350 monitor_cgroup, false, NULL))
1351 continue;
1352
1353 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->monitor_full_path ?: "(null)");
1354 for (int j = 0; j < i; j++)
1355 cgroup_tree_leaf_remove(ops->hierarchies[j], false);
1356
1357 idx++;
1358 break;
1359 }
1360 } while (ops->hierarchies[i] && idx > 0 && idx < 1000 && suffix);
1361
1362 if (idx == 1000 || (!suffix && idx != 0))
1363 return ret_set_errno(false, ERANGE);
1364
1365 ops->monitor_cgroup = move_ptr(monitor_cgroup);
1366 return log_info(true, "The monitor process uses \"%s\" as cgroup", ops->monitor_cgroup);
1367 }
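/* Naming sketch (illustrative): for a container "f1" without any
 * cgroup directory overrides the monitor cgroup is
 * DEFAULT_MONITOR_CGROUP_PREFIX "f1"; on a collision the retry suffix
 * "-1", "-2", ... is appended, giving up with ERANGE after "-999".
 */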
1368
1369 /*
1370 * Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1371 * next cgroup_pattern-1, -2, ..., -999.
1372 */
1373 __cgfsng_ops static inline bool cgfsng_payload_create(struct cgroup_ops *ops,
1374 struct lxc_handler *handler)
1375 {
1376 __do_free char *container_cgroup = NULL,
1377 *__cgroup_tree = NULL,
1378 *limiting_cgroup = NULL;
1379 const char *cgroup_tree;
1380 int idx = 0;
1381 int i;
1382 size_t len;
1383 char *suffix = NULL;
1384 struct lxc_conf *conf;
1385
1386 if (!ops)
1387 return ret_set_errno(false, ENOENT);
1388
1389 if (!ops->hierarchies)
1390 return true;
1391
1392 if (ops->container_cgroup)
1393 return ret_set_errno(false, EEXIST);
1394
1395 if (!handler || !handler->conf)
1396 return ret_set_errno(false, EINVAL);
1397
1398 conf = handler->conf;
1399
1400 if (!check_cgroup_dir_config(conf))
1401 return false;
1402
1403 if (conf->cgroup_meta.container_dir) {
1404 cgroup_tree = NULL;
1405
1406 limiting_cgroup = strdup(conf->cgroup_meta.container_dir);
1407 if (!limiting_cgroup)
1408 return ret_set_errno(false, ENOMEM);
1409
1410 if (conf->cgroup_meta.namespace_dir) {
1411 container_cgroup = must_make_path(limiting_cgroup,
1412 conf->cgroup_meta.namespace_dir,
1413 NULL);
1414 } else {
1415 /* explicit paths but without isolation */
1416 container_cgroup = move_ptr(limiting_cgroup);
1417 }
1418 } else if (conf->cgroup_meta.dir) {
1419 cgroup_tree = conf->cgroup_meta.dir;
1420 container_cgroup = must_concat(&len, cgroup_tree, "/",
1421 DEFAULT_PAYLOAD_CGROUP_PREFIX,
1422 handler->name,
1423 CGROUP_CREATE_RETRY, NULL);
1424 } else if (ops->cgroup_pattern) {
1425 __cgroup_tree = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1426 if (!__cgroup_tree)
1427 return ret_set_errno(false, ENOMEM);
1428
1429 cgroup_tree = __cgroup_tree;
1430 container_cgroup = must_concat(&len, cgroup_tree, "/",
1431 DEFAULT_PAYLOAD_CGROUP,
1432 CGROUP_CREATE_RETRY, NULL);
1433 } else {
1434 cgroup_tree = NULL;
1435 container_cgroup = must_concat(&len, DEFAULT_PAYLOAD_CGROUP_PREFIX,
1436 handler->name,
1437 CGROUP_CREATE_RETRY, NULL);
1438 }
1439 if (!container_cgroup)
1440 return ret_set_errno(false, ENOMEM);
1441
1442 if (!conf->cgroup_meta.container_dir) {
1443 suffix = container_cgroup + len - CGROUP_CREATE_RETRY_LEN;
1444 *suffix = '\0';
1445 }
1446 do {
1447 if (idx && suffix)
1448 sprintf(suffix, "-%d", idx);
1449
1450 for (i = 0; ops->hierarchies[i]; i++) {
1451 if (cgroup_tree_create(ops, handler->conf,
1452 ops->hierarchies[i], cgroup_tree,
1453 container_cgroup, true,
1454 limiting_cgroup))
1455 continue;
1456
1457 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->container_full_path ?: "(null)");
1458 for (int j = 0; j < i; j++)
1459 cgroup_tree_leaf_remove(ops->hierarchies[j], true);
1460
1461 idx++;
1462 break;
1463 }
1464 } while (ops->hierarchies[i] && idx > 0 && idx < 1000 && suffix);
1465
1466 if (idx == 1000 || (!suffix && idx != 0))
1467 return ret_set_errno(false, ERANGE);
1468
1469 ops->container_cgroup = move_ptr(container_cgroup);
1470 INFO("The container process uses \"%s\" as cgroup", ops->container_cgroup);
1471 return true;
1472 }
1473
1474 __cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops,
1475 struct lxc_handler *handler)
1476 {
1477 int monitor_len, transient_len = 0;
1478 char monitor[INTTYPE_TO_STRLEN(pid_t)],
1479 transient[INTTYPE_TO_STRLEN(pid_t)];
1480
1481 if (!ops)
1482 return ret_set_errno(false, ENOENT);
1483
1484 if (!ops->hierarchies)
1485 return true;
1486
1487 if (!ops->monitor_cgroup)
1488 return ret_set_errno(false, ENOENT);
1489
1490 if (!handler || !handler->conf)
1491 return ret_set_errno(false, EINVAL);
1492
1493 monitor_len = snprintf(monitor, sizeof(monitor), "%d", handler->monitor_pid);
1494 if (handler->transient_pid > 0)
1495 transient_len = snprintf(transient, sizeof(transient), "%d", handler->transient_pid);
1496
1497 for (int i = 0; ops->hierarchies[i]; i++) {
1498 struct hierarchy *h = ops->hierarchies[i];
1499 int ret;
1500
1501 ret = lxc_writeat(h->cgfd_mon, "cgroup.procs", monitor, monitor_len);
1502 if (ret)
1503 return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->monitor_full_path);
1504
1505 if (handler->transient_pid <= 0)
1506 continue;
1507
1508 ret = lxc_writeat(h->cgfd_mon, "cgroup.procs", transient, transient_len);
1509 if (ret)
1510 return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->monitor_full_path);
1511
1512 /*
1513 * we don't keep the fds for non-unified hierarchies around
1514 * mainly because we don't make use of them anymore after the
1515 * core cgroup setup is done but also because there are quite a
1516 * lot of them.
1517 */
1518 if (!is_unified_hierarchy(h))
1519 close_prot_errno_disarm(h->cgfd_mon);
1520 }
1521 handler->transient_pid = -1;
1522
1523 return true;
1524 }
1525
1526 __cgfsng_ops static bool cgfsng_payload_enter(struct cgroup_ops *ops,
1527 struct lxc_handler *handler)
1528 {
1529 int len;
1530 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1531
1532 if (!ops)
1533 return ret_set_errno(false, ENOENT);
1534
1535 if (!ops->hierarchies)
1536 return true;
1537
1538 if (!ops->container_cgroup)
1539 return ret_set_errno(false, ENOENT);
1540
1541 if (!handler || !handler->conf)
1542 return ret_set_errno(false, EINVAL);
1543
1544 len = snprintf(pidstr, sizeof(pidstr), "%d", handler->pid);
1545
1546 for (int i = 0; ops->hierarchies[i]; i++) {
1547 struct hierarchy *h = ops->hierarchies[i];
1548 int ret;
1549
1550 ret = lxc_writeat(h->cgfd_con, "cgroup.procs", pidstr, len);
1551 if (ret != 0)
1552 return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->container_full_path);
1553 }
1554
1555 return true;
1556 }
1557
1558 static int fchowmodat(int dirfd, const char *path, uid_t chown_uid,
1559 gid_t chown_gid, mode_t chmod_mode)
1560 {
1561 int ret;
1562
1563 ret = fchownat(dirfd, path, chown_uid, chown_gid,
1564 AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW);
1565 if (ret < 0)
1566 return log_warn_errno(-1,
1567 errno, "Failed to fchownat(%d, %s, %d, %d, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )",
1568 dirfd, path, (int)chown_uid,
1569 (int)chown_gid);
1570
1571 ret = fchmodat(dirfd, (*path != '\0') ? path : ".", chmod_mode, 0);
1572 if (ret < 0)
1573 return log_warn_errno(-1, errno, "Failed to fchmodat(%d, %s, %d, AT_SYMLINK_NOFOLLOW)",
1574 dirfd, path, (int)chmod_mode);
1575
1576 return 0;
1577 }
1578
1579 /* chgrp the container cgroups to container group. We leave
1580 * the container owner as cgroup owner. So we must make the
1581 * directories 775 so that the container can create sub-cgroups.
1582 *
1583 * Also chown the tasks and cgroup.procs files. Those may not
1584 * exist depending on kernel version.
1585 */
1586 static int chown_cgroup_wrapper(void *data)
1587 {
1588 int ret;
1589 uid_t destuid;
1590 struct generic_userns_exec_data *arg = data;
1591 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1592 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1593
1594 if (!lxc_setgroups(0, NULL) && errno != EPERM)
1595 return log_error_errno(-1, errno, "Failed to setgroups(0, NULL)");
1596
1597 ret = setresgid(nsgid, nsgid, nsgid);
1598 if (ret < 0)
1599 return log_error_errno(-1, errno, "Failed to setresgid(%d, %d, %d)",
1600 (int)nsgid, (int)nsgid, (int)nsgid);
1601
1602 ret = setresuid(nsuid, nsuid, nsuid);
1603 if (ret < 0)
1604 return log_error_errno(-1, errno, "Failed to setresuid(%d, %d, %d)",
1605 (int)nsuid, (int)nsuid, (int)nsuid);
1606
1607 destuid = get_ns_uid(arg->origuid);
1608 if (destuid == LXC_INVALID_UID)
1609 destuid = 0;
1610
1611 for (int i = 0; arg->hierarchies[i]; i++) {
1612 int dirfd = arg->hierarchies[i]->cgfd_con;
1613
1614 (void)fchowmodat(dirfd, "", destuid, nsgid, 0775);
1615
1616 /*
1617 * Failures to chown() these are inconvenient but not
1618 * detrimental. We leave these owned by the container launcher,
1619 * so that container root can write to the files to attach. We
1620 * chmod() them 664 so that container systemd can write to the
1621 * files (which systemd in wily insists on doing).
1622 */
1623
1624 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC)
1625 (void)fchowmodat(dirfd, "tasks", destuid, nsgid, 0664);
1626
1627 (void)fchowmodat(dirfd, "cgroup.procs", destuid, nsgid, 0664);
1628
1629 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1630 continue;
1631
1632 for (char **p = arg->hierarchies[i]->cgroup2_chown; p && *p; p++)
1633 (void)fchowmodat(dirfd, *p, destuid, nsgid, 0664);
1634 }
1635
1636 return 0;
1637 }
1638
1639 __cgfsng_ops static bool cgfsng_chown(struct cgroup_ops *ops,
1640 struct lxc_conf *conf)
1641 {
1642 struct generic_userns_exec_data wrap;
1643
1644 if (!ops)
1645 return ret_set_errno(false, ENOENT);
1646
1647 if (!ops->hierarchies)
1648 return true;
1649
1650 if (!ops->container_cgroup)
1651 return ret_set_errno(false, ENOENT);
1652
1653 if (!conf)
1654 return ret_set_errno(false, EINVAL);
1655
1656 if (lxc_list_empty(&conf->id_map))
1657 return true;
1658
1659 wrap.origuid = geteuid();
1660 wrap.path = NULL;
1661 wrap.hierarchies = ops->hierarchies;
1662 wrap.conf = conf;
1663
1664 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap, "chown_cgroup_wrapper") < 0)
1665 return log_error_errno(false, errno, "Error requesting cgroup chown in new user namespace");
1666
1667 return true;
1668 }
1669
1670 __cgfsng_ops void cgfsng_payload_finalize(struct cgroup_ops *ops)
1671 {
1672 if (!ops)
1673 return;
1674
1675 if (!ops->hierarchies)
1676 return;
1677
1678 for (int i = 0; ops->hierarchies[i]; i++) {
1679 struct hierarchy *h = ops->hierarchies[i];
1680 /*
1681 * we don't keep the fds for non-unified hierarchies around
1682 * mainly because we don't make use of them anymore after the
1683 * core cgroup setup is done but also because there are quite a
1684 * lot of them.
1685 */
1686 if (!is_unified_hierarchy(h))
1687 close_prot_errno_disarm(h->cgfd_con);
1688 }
1689 }
1690
1691 /* cgroup-full:* is done, no need to create subdirs */
1692 static inline bool cg_mount_needs_subdirs(int type)
1693 {
1694 return !(type >= LXC_AUTO_CGROUP_FULL_RO);
1695 }
1696
1697 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1698 * remount the controller ro if needed and bind mount the cgroupfs onto
1699 * controller/the/cg/path.
1700 */
1701 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1702 char *controllerpath, char *cgpath,
1703 const char *container_cgroup)
1704 {
1705 __do_free char *sourcepath = NULL;
1706 int ret, remount_flags;
1707 int flags = MS_BIND;
1708
1709 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1710 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1711 if (ret < 0)
1712 return log_error_errno(-1, errno, "Failed to bind mount \"%s\" onto \"%s\"",
1713 controllerpath, controllerpath);
1714
1715 remount_flags = add_required_remount_flags(controllerpath,
1716 controllerpath,
1717 flags | MS_REMOUNT);
1718 ret = mount(controllerpath, controllerpath, "cgroup",
1719 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1720 NULL);
1721 if (ret < 0)
1722 return log_error_errno(-1, errno, "Failed to remount \"%s\" ro", controllerpath);
1723
1724 INFO("Remounted %s read-only", controllerpath);
1725 }
1726
1727 sourcepath = must_make_path(h->mountpoint, h->container_base_path,
1728 container_cgroup, NULL);
1729 if (type == LXC_AUTO_CGROUP_RO)
1730 flags |= MS_RDONLY;
1731
1732 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1733 if (ret < 0)
1734 return log_error_errno(-1, errno, "Failed to mount \"%s\" onto \"%s\"",
1735 h->controllers[0], cgpath);
1736 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1737
1738 if (flags & MS_RDONLY) {
1739 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1740 flags | MS_REMOUNT);
1741 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1742 if (ret < 0)
1743 return log_error_errno(-1, errno, "Failed to remount \"%s\" ro", cgpath);
1744 INFO("Remounted %s read-only", cgpath);
1745 }
1746
1747 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1748 return 0;
1749 }
1750
1751 /* __cg_mount_direct
1752 *
1753 * Mount cgroup hierarchies directly without using bind-mounts. The main
1754 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1755 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1756 */
1757 static int __cg_mount_direct(int type, struct hierarchy *h,
1758 const char *controllerpath)
1759 {
1760 __do_free char *controllers = NULL;
1761 char *fstype = "cgroup2";
1762 unsigned long flags = 0;
1763 int ret;
1764
1765 flags |= MS_NOSUID;
1766 flags |= MS_NOEXEC;
1767 flags |= MS_NODEV;
1768 flags |= MS_RELATIME;
1769
1770 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1771 flags |= MS_RDONLY;
1772
1773 if (h->version != CGROUP2_SUPER_MAGIC) {
1774 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1775 if (!controllers)
1776 return -ENOMEM;
1777 fstype = "cgroup";
1778 }
1779
1780 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1781 if (ret < 0)
1782 return log_error_errno(-1, errno, "Failed to mount \"%s\" with cgroup filesystem type %s",
1783 controllerpath, fstype);
1784
1785 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1786 return 0;
1787 }
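/* Example (illustrative only): for the unified hierarchy the call above
 * boils down to
 *
 *   mount("cgroup", path, "cgroup2",
 *         MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_RELATIME, NULL);
 *
 * while a legacy "cpu,cpuacct" hierarchy is mounted with fstype
 * "cgroup" and the joined controller list as mount data:
 *
 *   mount("cgroup", path, "cgroup", flags, "cpu,cpuacct");
 */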
1788
1789 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1790 const char *controllerpath)
1791 {
1792 return __cg_mount_direct(type, h, controllerpath);
1793 }
1794
1795 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1796 const char *controllerpath)
1797 {
1798 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1799 return 0;
1800
1801 return __cg_mount_direct(type, h, controllerpath);
1802 }
1803
1804 __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
1805 struct lxc_handler *handler,
1806 const char *root, int type)
1807 {
1808 __do_free char *cgroup_root = NULL;
1809 bool has_cgns = false, wants_force_mount = false;
1810 int ret;
1811
1812 if (!ops)
1813 return ret_set_errno(false, ENOENT);
1814
1815 if (!ops->hierarchies)
1816 return true;
1817
1818 if (!handler || !handler->conf)
1819 return ret_set_errno(false, EINVAL);
1820
1821 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1822 return true;
1823
1824 if (type & LXC_AUTO_CGROUP_FORCE) {
1825 type &= ~LXC_AUTO_CGROUP_FORCE;
1826 wants_force_mount = true;
1827 }
1828
1829 if (!wants_force_mount) {
1830 if (!lxc_list_empty(&handler->conf->keepcaps))
1831 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1832 else
1833 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1834
1835 /*
1836 * Most recent distro versions ship init systems that do
1837 * support cgroup2 but do not mount it by default unless
1838 * explicitly told to, even if the host is cgroup2-only. That
1839 * means they will often fail to boot. Fix this by pre-mounting
1840 * cgroup2 by default. We will likely need to keep doing this
1841 * for a few years until all distros have switched over to
1842 * cgroup2, at which point we can safely assume that their init
1843 * systems will mount it themselves.
1844 */
1845 if (pure_unified_layout(ops))
1846 wants_force_mount = true;
1847 }
1848
1849 has_cgns = cgns_supported();
1850 if (has_cgns && !wants_force_mount)
1851 return true;
1852
1853 if (type == LXC_AUTO_CGROUP_NOSPEC)
1854 type = LXC_AUTO_CGROUP_MIXED;
1855 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1856 type = LXC_AUTO_CGROUP_FULL_MIXED;
1857
1858 cgroup_root = must_make_path(root, DEFAULT_CGROUP_MOUNTPOINT, NULL);
1859 if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
1860 if (has_cgns && wants_force_mount) {
1861 /*
1862 * If cgroup namespaces are supported but the container
1863 * will not have CAP_SYS_ADMIN after it has started we
1864 * need to mount the cgroups manually.
1865 */
1866 return cg_mount_in_cgroup_namespace(type, ops->unified, cgroup_root) == 0;
1867 }
1868
1869 return cg_mount_cgroup_full(type, ops->unified, cgroup_root) == 0;
1870 }
1871
1872 /* mount tmpfs */
1873 ret = safe_mount(NULL, cgroup_root, "tmpfs",
1874 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1875 "size=10240k,mode=755", root);
1876 if (ret < 0)
1877 return false;
1878
1879 for (int i = 0; ops->hierarchies[i]; i++) {
1880 __do_free char *controllerpath = NULL, *path2 = NULL;
1881 struct hierarchy *h = ops->hierarchies[i];
1882 char *controller = strrchr(h->mountpoint, '/');
1883
1884 if (!controller)
1885 continue;
1886 controller++;
1887
1888 controllerpath = must_make_path(cgroup_root, controller, NULL);
1889 if (dir_exists(controllerpath))
1890 continue;
1891
1892 ret = mkdir(controllerpath, 0755);
1893 if (ret < 0)
1894 return log_error_errno(false, errno, "Error creating cgroup path: %s", controllerpath);
1895
1896 if (has_cgns && wants_force_mount) {
1897 /* If cgroup namespaces are supported but the container
1898 * will not have CAP_SYS_ADMIN after it has started we
1899 * need to mount the cgroups manually.
1900 */
1901 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1902 if (ret < 0)
1903 return false;
1904
1905 continue;
1906 }
1907
1908 ret = cg_mount_cgroup_full(type, h, controllerpath);
1909 if (ret < 0)
1910 return false;
1911
1912 if (!cg_mount_needs_subdirs(type))
1913 continue;
1914
1915 path2 = must_make_path(controllerpath, h->container_base_path,
1916 ops->container_cgroup, NULL);
1917 ret = mkdir_p(path2, 0755);
1918 if (ret < 0)
1919 return false;
1920
1921 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1922 path2, ops->container_cgroup);
1923 if (ret < 0)
1924 return false;
1925 }
1926
1927 return true;
1928 }
1929
1930 /* Only root needs to escape to the cgroup of its init. */
1931 __cgfsng_ops static bool cgfsng_escape(const struct cgroup_ops *ops,
1932 struct lxc_conf *conf)
1933 {
1934 if (!ops)
1935 return ret_set_errno(false, ENOENT);
1936
1937 if (!ops->hierarchies)
1938 return true;
1939
1940 if (!conf)
1941 return ret_set_errno(false, EINVAL);
1942
1943 if (conf->cgroup_meta.relative || geteuid())
1944 return true;
1945
1946 for (int i = 0; ops->hierarchies[i]; i++) {
1947 __do_free char *fullpath = NULL;
1948 int ret;
1949
1950 fullpath =
1951 must_make_path(ops->hierarchies[i]->mountpoint,
1952 ops->hierarchies[i]->container_base_path,
1953 "cgroup.procs", NULL);
1954 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1955 if (ret != 0)
1956 return log_error_errno(false, errno, "Failed to escape to cgroup \"%s\"", fullpath);
1957 }
1958
1959 return true;
1960 }
1961
1962 __cgfsng_ops static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1963 {
1964 int i = 0;
1965
1966 if (!ops)
1967 return ret_set_errno(-1, ENOENT);
1968
1969 if (!ops->hierarchies)
1970 return 0;
1971
1972 for (; ops->hierarchies[i]; i++)
1973 ;
1974
1975 return i;
1976 }
1977
1978 __cgfsng_ops static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n,
1979 char ***out)
1980 {
1981 int i;
1982
1983 if (!ops)
1984 return ret_set_errno(false, ENOENT);
1985
1986 if (!ops->hierarchies)
1987 return ret_set_errno(false, ENOENT);
1988
1989 /* sanity check that hierarchies 0 through n are all populated */
1990 for (i = 0; i <= n; i++)
1991 if (!ops->hierarchies[i])
1992 return ret_set_errno(false, ENOENT);
1993 
1994 *out = ops->hierarchies[n]->controllers;
1995
1996 return true;
1997 }
1998
1999 static int cg_legacy_freeze(struct cgroup_ops *ops)
2000 {
2001 struct hierarchy *h;
2002
2003 h = get_hierarchy(ops, "freezer");
2004 if (!h)
2005 return ret_set_errno(-1, ENOENT);
2006
2007 return lxc_write_openat(h->container_full_path, "freezer.state",
2008 "FROZEN", STRLITERALLEN("FROZEN"));
2009 }
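
/*
 * Illustrative note (not part of the original code): on legacy cgroup v1 the
 * freezer controller is driven through the "freezer.state" file, e.g.:
 *
 *	echo FROZEN > /sys/fs/cgroup/freezer/lxc/c1/freezer.state
 *	cat /sys/fs/cgroup/freezer/lxc/c1/freezer.state
 *	FREEZING	(transitional)
 *	FROZEN		(final)
 *
 * Unlike cg_unified_freeze() below, this helper does not wait for the
 * transition to complete; the kernel may report FREEZING for a while. The
 * path above is an assumed example layout.
 */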
2010
2011 static int freezer_cgroup_events_cb(int fd, uint32_t events, void *cbdata,
2012 struct lxc_epoll_descr *descr)
2013 {
2014 __do_close int duped_fd = -EBADF;
2015 __do_free char *line = NULL;
2016 __do_fclose FILE *f = NULL;
2017 int state = PTR_TO_INT(cbdata);
2018 size_t len = 0;
2019 const char *state_string;
2020
2021 duped_fd = dup(fd);
2022 if (duped_fd < 0)
2023 return LXC_MAINLOOP_ERROR;
2024
2025 if (lseek(duped_fd, 0, SEEK_SET) < 0)
2026 return LXC_MAINLOOP_ERROR;
2027
2028 f = fdopen(duped_fd, "re");
2029 if (!f)
2030 return LXC_MAINLOOP_ERROR;
2031 move_fd(duped_fd);
2032
2033 if (state == 1)
2034 state_string = "frozen 1";
2035 else
2036 state_string = "frozen 0";
2037
2038 while (getline(&line, &len, f) != -1)
2039 if (strncmp(line, state_string, STRLITERALLEN("frozen") + 2) == 0)
2040 return LXC_MAINLOOP_CLOSE;
2041
2042 return LXC_MAINLOOP_CONTINUE;
2043 }
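
/*
 * For illustration (assumed example contents): a cgroup2 "cgroup.events"
 * file holds newline-separated key/value pairs, e.g.:
 *
 *	populated 1
 *	frozen 0
 *
 * The callback above rereads the file on every epoll wakeup and compares
 * STRLITERALLEN("frozen") + 2 bytes so that the whole "frozen 0"/"frozen 1"
 * token is matched, digit included.
 */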
2044
2045 static int cg_unified_freeze(struct cgroup_ops *ops, int timeout)
2046 {
2047 __do_close int fd = -EBADF;
2048 call_cleaner(lxc_mainloop_close) struct lxc_epoll_descr *descr_ptr = NULL;
2049 int ret;
2050 struct lxc_epoll_descr descr;
2051 struct hierarchy *h;
2052
2053 h = ops->unified;
2054 if (!h)
2055 return ret_set_errno(-1, ENOENT);
2056
2057 if (!h->container_full_path)
2058 return ret_set_errno(-1, EEXIST);
2059
2060 if (timeout != 0) {
2061 __do_free char *events_file = NULL;
2062
2063 events_file = must_make_path(h->container_full_path, "cgroup.events", NULL);
2064 fd = open(events_file, O_RDONLY | O_CLOEXEC);
2065 if (fd < 0)
2066 return log_error_errno(-1, errno, "Failed to open cgroup.events file");
2067
2068 ret = lxc_mainloop_open(&descr);
2069 if (ret)
2070 return log_error_errno(-1, errno, "Failed to create epoll instance to wait for container freeze");
2071
2072 /* automatically cleaned up now */
2073 descr_ptr = &descr;
2074
2075 ret = lxc_mainloop_add_handler(&descr, fd, freezer_cgroup_events_cb, INT_TO_PTR((int){1}));
2076 if (ret < 0)
2077 return log_error_errno(-1, errno, "Failed to add cgroup.events fd handler to mainloop");
2078 }
2079
2080 ret = lxc_write_openat(h->container_full_path, "cgroup.freeze", "1", 1);
2081 if (ret < 0)
2082 return log_error_errno(-1, errno, "Failed to open cgroup.freeze file");
2083
2084 if (timeout != 0 && lxc_mainloop(&descr, timeout))
2085 return log_error_errno(-1, errno, "Failed to wait for container to be frozen");
2086
2087 return 0;
2088 }
2089
2090 __cgfsng_ops static int cgfsng_freeze(struct cgroup_ops *ops, int timeout)
2091 {
2092 if (!ops->hierarchies)
2093 return ret_set_errno(-1, ENOENT);
2094
2095 if (ops->cgroup_layout != CGROUP_LAYOUT_UNIFIED)
2096 return cg_legacy_freeze(ops);
2097
2098 return cg_unified_freeze(ops, timeout);
2099 }
2100
2101 static int cg_legacy_unfreeze(struct cgroup_ops *ops)
2102 {
2103 struct hierarchy *h;
2104
2105 h = get_hierarchy(ops, "freezer");
2106 if (!h)
2107 return ret_set_errno(-1, ENOENT);
2108
2109 return lxc_write_openat(h->container_full_path, "freezer.state",
2110 "THAWED", STRLITERALLEN("THAWED"));
2111 }
2112
2113 static int cg_unified_unfreeze(struct cgroup_ops *ops, int timeout)
2114 {
2115 __do_close int fd = -EBADF;
2116 call_cleaner(lxc_mainloop_close) struct lxc_epoll_descr *descr_ptr = NULL;
2117 int ret;
2118 struct lxc_epoll_descr descr;
2119 struct hierarchy *h;
2120
2121 h = ops->unified;
2122 if (!h)
2123 return ret_set_errno(-1, ENOENT);
2124
2125 if (!h->container_full_path)
2126 return ret_set_errno(-1, EEXIST);
2127
2128 if (timeout != 0) {
2129 __do_free char *events_file = NULL;
2130
2131 events_file = must_make_path(h->container_full_path, "cgroup.events", NULL);
2132 fd = open(events_file, O_RDONLY | O_CLOEXEC);
2133 if (fd < 0)
2134 return log_error_errno(-1, errno, "Failed to open cgroup.events file");
2135
2136 ret = lxc_mainloop_open(&descr);
2137 if (ret)
2138 return log_error_errno(-1, errno, "Failed to create epoll instance to wait for container unfreeze");
2139
2140 /* automatically cleaned up now */
2141 descr_ptr = &descr;
2142
2143 ret = lxc_mainloop_add_handler(&descr, fd, freezer_cgroup_events_cb, INT_TO_PTR((int){0}));
2144 if (ret < 0)
2145 return log_error_errno(-1, errno, "Failed to add cgroup.events fd handler to mainloop");
2146 }
2147
2148 ret = lxc_write_openat(h->container_full_path, "cgroup.freeze", "0", 1);
2149 if (ret < 0)
2150 return log_error_errno(-1, errno, "Failed to open cgroup.freeze file");
2151
2152 if (timeout != 0 && lxc_mainloop(&descr, timeout))
2153 return log_error_errno(-1, errno, "Failed to wait for container to be unfrozen");
2154
2155 return 0;
2156 }
2157
2158 __cgfsng_ops static int cgfsng_unfreeze(struct cgroup_ops *ops, int timeout)
2159 {
2160 if (!ops->hierarchies)
2161 return ret_set_errno(-1, ENOENT);
2162
2163 if (ops->cgroup_layout != CGROUP_LAYOUT_UNIFIED)
2164 return cg_legacy_unfreeze(ops);
2165
2166 return cg_unified_unfreeze(ops, timeout);
2167 }
2168
2169 static const char *cgfsng_get_cgroup_do(struct cgroup_ops *ops,
2170 const char *controller, bool limiting)
2171 {
2172 struct hierarchy *h;
2173
2174 h = get_hierarchy(ops, controller);
2175 if (!h)
2176 return log_warn_errno(NULL, ENOENT, "Failed to find hierarchy for controller \"%s\"",
2177 controller ? controller : "(null)");
2178
2179 if (limiting)
2180 return h->container_limit_path
2181 ? h->container_limit_path + strlen(h->mountpoint)
2182 : NULL;
2183
2184 return h->container_full_path
2185 ? h->container_full_path + strlen(h->mountpoint)
2186 : NULL;
2187 }
2188
2189 __cgfsng_ops static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
2190 const char *controller)
2191 {
2192 return cgfsng_get_cgroup_do(ops, controller, false);
2193 }
2194
2195 __cgfsng_ops static const char *cgfsng_get_limiting_cgroup(struct cgroup_ops *ops,
2196 const char *controller)
2197 {
2198 return cgfsng_get_cgroup_do(ops, controller, true);
2199 }
2200
2201 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2202 * which must be freed by the caller.
2203 */
2204 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
2205 const char *inpath,
2206 const char *filename)
2207 {
2208 return must_make_path(h->mountpoint, inpath, filename, NULL);
2209 }
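
/*
 * Example (assumed values): with h->mountpoint "/sys/fs/cgroup/memory",
 * inpath "/lxc/c1" as returned by the command API, and filename
 * "cgroup.procs", this yields
 *
 *	/sys/fs/cgroup/memory/lxc/c1/cgroup.procs
 */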
2210
2211 static int cgroup_attach_leaf(const struct lxc_conf *conf, int unified_fd, pid_t pid)
2212 {
2213 int idx = 1;
2214 int ret;
2215 char pidstr[INTTYPE_TO_STRLEN(int64_t) + 1];
2216 size_t pidstr_len;
2217
2218 /* Create leaf cgroup. */
2219 ret = mkdirat(unified_fd, ".lxc", 0755);
2220 if (ret < 0 && errno != EEXIST)
2221 return log_error_errno(-1, errno, "Failed to create leaf cgroup \".lxc\"");
2222
2223 pidstr_len = sprintf(pidstr, INT64_FMT, (int64_t)pid);
2224 ret = lxc_writeat(unified_fd, ".lxc/cgroup.procs", pidstr, pidstr_len);
2225 if (ret < 0)
2226 ret = lxc_writeat(unified_fd, "cgroup.procs", pidstr, pidstr_len);
2227 if (ret == 0)
2228 return 0;
2229
2230 /* this is a non-leaf node */
2231 if (errno != EBUSY)
2232 return log_error_errno(-1, errno, "Failed to attach to unified cgroup");
2233
2234 do {
2235 bool rm = false;
2236 char attach_cgroup[STRLITERALLEN(".lxc-1000/cgroup.procs") + 1];
2237 char *slash;
2238
2239 ret = snprintf(attach_cgroup, sizeof(attach_cgroup), ".lxc-%d/cgroup.procs", idx);
2240 if (ret < 0 || (size_t)ret >= sizeof(attach_cgroup))
2241 return ret_errno(EIO);
2242
2243 slash = &attach_cgroup[ret] - STRLITERALLEN("/cgroup.procs");
2244 *slash = '\0';
2245
2246 ret = mkdirat(unified_fd, attach_cgroup, 0755);
2247 if (ret < 0 && errno != EEXIST)
2248 return log_error_errno(-1, errno, "Failed to create cgroup %s", attach_cgroup);
2249 if (ret == 0)
2250 rm = true;
2251
2252 *slash = '/';
2253
2254 ret = lxc_writeat(unified_fd, attach_cgroup, pidstr, pidstr_len);
2255 if (ret == 0)
2256 return 0;
2257
2258 if (rm && unlinkat(unified_fd, attach_cgroup, AT_REMOVEDIR))
2259 SYSERROR("Failed to remove cgroup \"%d(%s)\"", unified_fd, attach_cgroup);
2260
2261 /* this is a non-leaf node */
2262 if (errno != EBUSY)
2263 return log_error_errno(-1, errno, "Failed to attach to unified cgroup");
2264
2265 idx++;
2266 } while (idx < 1000);
2267
2268 return log_error_errno(-1, errno, "Failed to attach to unified cgroup");
2269 }
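
/*
 * Sketch of the retry scheme above (the names are the real ones, the
 * sequence is illustrative): attaching first tries the ".lxc" leaf, then the
 * cgroup itself, and finally probes numbered leaves:
 *
 *	.lxc/cgroup.procs
 *	cgroup.procs
 *	.lxc-1/cgroup.procs
 *	.lxc-2/cgroup.procs
 *	...
 *
 * EBUSY from the kernel typically means the target is a non-leaf cgroup2
 * directory with controllers enabled in cgroup.subtree_control, so a deeper
 * leaf has to be created before a process may be attached.
 */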
2270
2271 static int cgroup_attach_create_leaf(const struct lxc_conf *conf,
2272 int unified_fd, int *sk_fd)
2273 {
2274 __do_close int sk = *sk_fd, target_fd0 = -EBADF, target_fd1 = -EBADF;
2275 int target_fds[2];
2276 ssize_t ret;
2277
2278 /* Create leaf cgroup. */
2279 ret = mkdirat(unified_fd, ".lxc", 0755);
2280 if (ret < 0 && errno != EEXIST)
2281 return log_error_errno(-1, errno, "Failed to create leaf cgroup \".lxc\"");
2282
2283 target_fd0 = openat(unified_fd, ".lxc/cgroup.procs", O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
2284 if (target_fd0 < 0)
2285 return log_error_errno(-errno, errno, "Failed to open \".lxc/cgroup.procs\"");
2286 target_fds[0] = target_fd0;
2287
2288 target_fd1 = openat(unified_fd, "cgroup.procs", O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
2289 if (target_fd1 < 0)
2290 return log_error_errno(-errno, errno, "Failed to open \"cgroup.procs\"");
2291 target_fds[1] = target_fd1;
2292
2293 ret = lxc_abstract_unix_send_fds(sk, target_fds, 2, NULL, 0);
2294 if (ret <= 0)
2295 return log_error_errno(-errno, errno, "Failed to send \".lxc/cgroup.procs\" fds %d and %d",
2296 target_fd0, target_fd1);
2297
2298 return log_debug(0, "Sent target cgroup fds %d and %d", target_fd0, target_fd1);
2299 }
2300
2301 static int cgroup_attach_move_into_leaf(const struct lxc_conf *conf,
2302 int *sk_fd, pid_t pid)
2303 {
2304 __do_close int sk = *sk_fd, target_fd0 = -EBADF, target_fd1 = -EBADF;
2305 int target_fds[2];
2306 char pidstr[INTTYPE_TO_STRLEN(int64_t) + 1];
2307 size_t pidstr_len;
2308 ssize_t ret;
2309
2310 ret = lxc_abstract_unix_recv_fds(sk, target_fds, 2, NULL, 0);
2311 if (ret <= 0)
2312 return log_error_errno(-1, errno, "Failed to receive target cgroup fd");
2313 target_fd0 = target_fds[0];
2314 target_fd1 = target_fds[1];
2315
2316 pidstr_len = sprintf(pidstr, INT64_FMT, (int64_t)pid);
2317
2318 ret = lxc_write_nointr(target_fd0, pidstr, pidstr_len);
2319 if (ret > 0 && (size_t)ret == pidstr_len)
2320 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd0);
2321
2322 ret = lxc_write_nointr(target_fd1, pidstr, pidstr_len);
2323 if (ret > 0 && (size_t)ret == pidstr_len)
2324 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd1);
2325
2326 return log_debug_errno(-1, errno, "Failed to move process into target cgroup via fd %d and %d",
2327 target_fd0, target_fd1);
2328 }
2329
2330 struct userns_exec_unified_attach_data {
2331 const struct lxc_conf *conf;
2332 int unified_fd;
2333 int sk_pair[2];
2334 pid_t pid;
2335 };
2336
2337 static int cgroup_unified_attach_child_wrapper(void *data)
2338 {
2339 struct userns_exec_unified_attach_data *args = data;
2340
2341 if (!args->conf || args->unified_fd < 0 || args->pid <= 0 ||
2342 args->sk_pair[0] < 0 || args->sk_pair[1] < 0)
2343 return ret_errno(EINVAL);
2344
2345 close_prot_errno_disarm(args->sk_pair[0]);
2346 return cgroup_attach_create_leaf(args->conf, args->unified_fd,
2347 &args->sk_pair[1]);
2348 }
2349
2350 static int cgroup_unified_attach_parent_wrapper(void *data)
2351 {
2352 struct userns_exec_unified_attach_data *args = data;
2353
2354 if (!args->conf || args->unified_fd < 0 || args->pid <= 0 ||
2355 args->sk_pair[0] < 0 || args->sk_pair[1] < 0)
2356 return ret_errno(EINVAL);
2357
2358 close_prot_errno_disarm(args->sk_pair[1]);
2359 return cgroup_attach_move_into_leaf(args->conf, &args->sk_pair[0],
2360 args->pid);
2361 }
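
/*
 * How the two wrappers above cooperate (illustrative summary): the caller
 * creates a socketpair and hands both ends to userns_exec_minimal(). The
 * child wrapper runs inside the container's user namespace, where it opens
 * the ".lxc/cgroup.procs" and "cgroup.procs" fds with the mapped user's
 * credentials and sends them over the socket; the parent wrapper, running in
 * the original namespaces, receives those fds and writes the pid into
 * whichever one accepts it. The permission checks thus happen on open, with
 * the container-side credentials.
 */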
2362
2363 int cgroup_attach(const struct lxc_conf *conf, const char *name,
2364 const char *lxcpath, pid_t pid)
2365 {
2366 __do_close int unified_fd = -EBADF;
2367 int ret;
2368
2369 if (!conf || !name || !lxcpath || pid <= 0)
2370 return ret_errno(EINVAL);
2371
2372 unified_fd = lxc_cmd_get_cgroup2_fd(name, lxcpath);
2373 if (unified_fd < 0)
2374 return ret_errno(EBADF);
2375
2376 if (!lxc_list_empty(&conf->id_map)) {
2377 struct userns_exec_unified_attach_data args = {
2378 .conf = conf,
2379 .unified_fd = unified_fd,
2380 .pid = pid,
2381 };
2382
2383 ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, args.sk_pair);
2384 if (ret < 0)
2385 return -errno;
2386
2387 ret = userns_exec_minimal(conf,
2388 cgroup_unified_attach_parent_wrapper,
2389 &args,
2390 cgroup_unified_attach_child_wrapper,
2391 &args);
2392 } else {
2393 ret = cgroup_attach_leaf(conf, unified_fd, pid);
2394 }
2395
2396 return ret;
2397 }
2398
2399 /* Technically, we're always at a delegation boundary here (this is especially
2400  * true when cgroup namespaces are available). The reasoning is that in order
2401  * for us to have been able to start a container in the first place, the root
2402  * cgroup must have been a leaf node. Now, either the container's init system
2403  * has populated the cgroup and kept it as a leaf node, or it has created
2404  * subtrees. In the former case we simply attach to the leaf node we created
2405  * when we started the container; in the latter case we create our own cgroup
2406  * for the attaching process.
2407  */
2408 static int __cg_unified_attach(const struct hierarchy *h,
2409 const struct lxc_conf *conf, const char *name,
2410 const char *lxcpath, pid_t pid,
2411 const char *controller)
2412 {
2413 __do_close int unified_fd = -EBADF;
2414 __do_free char *path = NULL, *cgroup = NULL;
2415 int ret;
2416
2417 if (!conf || !name || !lxcpath || pid <= 0)
2418 return ret_errno(EINVAL);
2419
2420 ret = cgroup_attach(conf, name, lxcpath, pid);
2421 if (ret == 0)
2422 return log_trace(0, "Attached to unified cgroup via command handler");
2423 if (ret != -EBADF)
2424 return log_error_errno(ret, errno, "Failed to attach to unified cgroup");
2425
2426 /* Fall back to retrieving the path for the unified cgroup. */
2427 cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2428 /* not running */
2429 if (!cgroup)
2430 return 0;
2431
2432 path = must_make_path(h->mountpoint, cgroup, NULL);
2433
2434 unified_fd = open(path, O_PATH | O_DIRECTORY | O_CLOEXEC);
2435 if (unified_fd < 0)
2436 return ret_errno(EBADF);
2437
2438 if (!lxc_list_empty(&conf->id_map)) {
2439 struct userns_exec_unified_attach_data args = {
2440 .conf = conf,
2441 .unified_fd = unified_fd,
2442 .pid = pid,
2443 };
2444
2445 ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, args.sk_pair);
2446 if (ret < 0)
2447 return -errno;
2448
2449 ret = userns_exec_minimal(conf,
2450 cgroup_unified_attach_parent_wrapper,
2451 &args,
2452 cgroup_unified_attach_child_wrapper,
2453 &args);
2454 } else {
2455 ret = cgroup_attach_leaf(conf, unified_fd, pid);
2456 }
2457
2458 return ret;
2459 }
2460
2461 __cgfsng_ops static bool cgfsng_attach(struct cgroup_ops *ops,
2462 const struct lxc_conf *conf,
2463 const char *name, const char *lxcpath,
2464 pid_t pid)
2465 {
2466 int len, ret;
2467 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
2468
2469 if (!ops)
2470 return ret_set_errno(false, ENOENT);
2471
2472 if (!ops->hierarchies)
2473 return true;
2474
2475 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
2476 if (len < 0 || (size_t)len >= sizeof(pidstr))
2477 return false;
2478
2479 for (int i = 0; ops->hierarchies[i]; i++) {
2480 __do_free char *fullpath = NULL, *path = NULL;
2481 struct hierarchy *h = ops->hierarchies[i];
2482
2483 if (h->version == CGROUP2_SUPER_MAGIC) {
2484 ret = __cg_unified_attach(h, conf, name, lxcpath, pid,
2485 h->controllers[0]);
2486 if (ret < 0)
2487 return false;
2488
2489 continue;
2490 }
2491
2492 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
2493 /* not running */
2494 if (!path)
2495 return false;
2496
2497 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2498 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2499 if (ret < 0)
2500 return log_error_errno(false, errno, "Failed to attach %d to %s",
2501 (int)pid, fullpath);
2502 }
2503
2504 return true;
2505 }
2506
2507 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2508 * don't have a cgroup_data set up, so we ask the running container through the
2509 * commands API for the cgroup path.
2510 */
2511 __cgfsng_ops static int cgfsng_get(struct cgroup_ops *ops, const char *filename,
2512 char *value, size_t len, const char *name,
2513 const char *lxcpath)
2514 {
2515 __do_free char *path = NULL;
2516 __do_free char *controller = NULL;
2517 char *p;
2518 struct hierarchy *h;
2519 int ret = -1;
2520
2521 if (!ops)
2522 return ret_set_errno(-1, ENOENT);
2523
2524 controller = must_copy_string(filename);
2525 p = strchr(controller, '.');
2526 if (p)
2527 *p = '\0';
2528
2529 path = lxc_cmd_get_limiting_cgroup_path(name, lxcpath, controller);
2530 /* not running */
2531 if (!path)
2532 return -1;
2533
2534 h = get_hierarchy(ops, controller);
2535 if (h) {
2536 __do_free char *fullpath = NULL;
2537
2538 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2539 ret = lxc_read_from_file(fullpath, value, len);
2540 }
2541
2542 return ret;
2543 }
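
/*
 * Usage sketch (hypothetical values): a caller such as lxc-cgroup asking for
 * "memory.limit_in_bytes" ends up here via ops->get(). The controller name
 * is everything before the first '.', so
 *
 *	char buf[64];
 *	ops->get(ops, "memory.limit_in_bytes", buf, sizeof(buf), "c1", lxcpath);
 *
 * asks the running container "c1" for its memory cgroup path and reads the
 * file from under that hierarchy's mountpoint.
 */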
2544
2545 static int device_cgroup_parse_access(struct device_item *device, const char *val)
2546 {
2547 for (int count = 0; count < 3; count++, val++) {
2548 switch (*val) {
2549 case 'r':
2550 device->access[count] = *val;
2551 break;
2552 case 'w':
2553 device->access[count] = *val;
2554 break;
2555 case 'm':
2556 device->access[count] = *val;
2557 break;
2558 case '\n':
2559 case '\0':
2560 count = 3;
2561 break;
2562 default:
2563 return ret_errno(EINVAL);
2564 }
2565 }
2566
2567 return 0;
2568 }
2569
2570 static int device_cgroup_rule_parse(struct device_item *device, const char *key,
2571 const char *val)
2572 {
2573 int count, ret;
2574 char temp[50];
2575
2576 if (strcmp("devices.allow", key) == 0)
2577 device->allow = 1;
2578 else
2579 device->allow = 0;
2580
2581 if (strcmp(val, "a") == 0) {
2582 /* global rule */
2583 device->type = 'a';
2584 device->major = -1;
2585 device->minor = -1;
2586 device->global_rule = device->allow
2587 ? LXC_BPF_DEVICE_CGROUP_BLACKLIST
2588 : LXC_BPF_DEVICE_CGROUP_WHITELIST;
2589 device->allow = -1;
2590 return 0;
2591 }
2592
2593 /* local rule */
2594 device->global_rule = LXC_BPF_DEVICE_CGROUP_LOCAL_RULE;
2595
2596 switch (*val) {
2597 case 'a':
2598 __fallthrough;
2599 case 'b':
2600 __fallthrough;
2601 case 'c':
2602 device->type = *val;
2603 break;
2604 default:
2605 return -1;
2606 }
2607
2608 val++;
2609 if (!isspace(*val))
2610 return -1;
2611 val++;
2612 if (*val == '*') {
2613 device->major = -1;
2614 val++;
2615 } else if (isdigit(*val)) {
2616 memset(temp, 0, sizeof(temp));
2617 for (count = 0; count < sizeof(temp) - 1; count++) {
2618 temp[count] = *val;
2619 val++;
2620 if (!isdigit(*val))
2621 break;
2622 }
2623 ret = lxc_safe_int(temp, &device->major);
2624 if (ret)
2625 return -1;
2626 } else {
2627 return -1;
2628 }
2629 if (*val != ':')
2630 return -1;
2631 val++;
2632
2633 /* read minor */
2634 if (*val == '*') {
2635 device->minor = -1;
2636 val++;
2637 } else if (isdigit(*val)) {
2638 memset(temp, 0, sizeof(temp));
2639 for (count = 0; count < sizeof(temp) - 1; count++) {
2640 temp[count] = *val;
2641 val++;
2642 if (!isdigit(*val))
2643 break;
2644 }
2645 ret = lxc_safe_int(temp, &device->minor);
2646 if (ret)
2647 return -1;
2648 } else {
2649 return -1;
2650 }
2651 if (!isspace(*val))
2652 return -1;
2653
2654 return device_cgroup_parse_access(device, ++val);
2655 }
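
/*
 * Worked example (assumed input): parsing key "devices.allow" with value
 * "c 188:* rw" fills in:
 *
 *	device->allow       = 1
 *	device->type        = 'c'
 *	device->major       = 188
 *	device->minor       = -1
 *	device->access      = "rw"
 *	device->global_rule = LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
 *
 * while the bare value "a" is treated as a global default rule rather than
 * a per-device rule.
 */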
2656
2657 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2658 * don't have a cgroup_data set up, so we ask the running container through the
2659 * commands API for the cgroup path.
2660 */
2661 __cgfsng_ops static int cgfsng_set(struct cgroup_ops *ops,
2662 const char *key, const char *value,
2663 const char *name, const char *lxcpath)
2664 {
2665 __do_free char *path = NULL;
2666 __do_free char *controller = NULL;
2667 char *p;
2668 struct hierarchy *h;
2669 int ret = -1;
2670
2671 if (!ops)
2672 return ret_set_errno(-1, ENOENT);
2673
2674 controller = must_copy_string(key);
2675 p = strchr(controller, '.');
2676 if (p)
2677 *p = '\0';
2678
2679 if (pure_unified_layout(ops) && strcmp(controller, "devices") == 0) {
2680 struct device_item device = {0};
2681
2682 ret = device_cgroup_rule_parse(&device, key, value);
2683 if (ret < 0)
2684 return log_error_errno(-1, EINVAL, "Failed to parse device string %s=%s",
2685 key, value);
2686
2687 ret = lxc_cmd_add_bpf_device_cgroup(name, lxcpath, &device);
2688 if (ret < 0)
2689 return -1;
2690
2691 return 0;
2692 }
2693
2694 path = lxc_cmd_get_limiting_cgroup_path(name, lxcpath, controller);
2695 /* not running */
2696 if (!path)
2697 return -1;
2698
2699 h = get_hierarchy(ops, controller);
2700 if (h) {
2701 __do_free char *fullpath = NULL;
2702
2703 fullpath = build_full_cgpath_from_monitorpath(h, path, key);
2704 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2705 }
2706
2707 return ret;
2708 }
2709
2710 /* Take a devices cgroup line of the form
2711  *    /dev/foo rwx
2712  * and parse it into a struct device_item describing the equivalent
2713  *    type major:minor mode
2714  * rule. Return <0 on error. (convert_devpath() below renders such a rule into
2715  * a preallocated buffer long enough to hold the output.)
2716  */
2717 static int device_cgroup_rule_parse_devpath(struct device_item *device,
2718 const char *devpath)
2719 {
2720 __do_free char *path = NULL;
2721 char *mode = NULL;
2722 int n_parts, ret;
2723 char *p;
2724 struct stat sb;
2725
2726 path = must_copy_string(devpath);
2727
2728 /*
2729 * Read path followed by mode. Ignore any trailing text.
2730 * A ' # comment' would be legal. Technically other text is not
2731 * legal, we could check for that if we cared to.
2732 */
2733 for (n_parts = 1, p = path; *p; p++) {
2734 if (*p != ' ')
2735 continue;
2736 *p = '\0';
2737
2738 if (n_parts != 1)
2739 break;
2740 p++;
2741 n_parts++;
2742
2743 while (*p == ' ')
2744 p++;
2745
2746 mode = p;
2747
2748 if (*p == '\0')
2749 return ret_set_errno(-1, EINVAL);
2750 }
2751
2752 if (!mode)
2753 return ret_errno(EINVAL);
2754
2755 if (device_cgroup_parse_access(device, mode) < 0)
2756 return -1;
2757
2758 if (n_parts == 1)
2759 return ret_set_errno(-1, EINVAL);
2760
2761 ret = stat(path, &sb);
2762 if (ret < 0)
2763 return ret_set_errno(-1, errno);
2764
2765 mode_t m = sb.st_mode & S_IFMT;
2766 switch (m) {
2767 case S_IFBLK:
2768 device->type = 'b';
2769 break;
2770 case S_IFCHR:
2771 device->type = 'c';
2772 break;
2773 default:
2774 return log_error_errno(-1, EINVAL, "Unsupported device type %i for \"%s\"", m, path);
2775 }
2776
2777 device->major = MAJOR(sb.st_rdev);
2778 device->minor = MINOR(sb.st_rdev);
2779 device->allow = 1;
2780 device->global_rule = LXC_BPF_DEVICE_CGROUP_LOCAL_RULE;
2781
2782 return 0;
2783 }
2784
2785 static int convert_devpath(const char *invalue, char *dest)
2786 {
2787 struct device_item device = {0};
2788 int ret;
2789
2790 ret = device_cgroup_rule_parse_devpath(&device, invalue);
2791 if (ret < 0)
2792 return -1;
2793
2794 ret = snprintf(dest, 50, "%c %d:%d %s", device.type, device.major,
2795 device.minor, device.access);
2796 if (ret < 0 || ret >= 50)
2797 return log_error_errno(-1, ENAMETOOLONG, "Error on configuration value \"%c %d:%d %s\" (max 50 chars)",
2798 device.type, device.major, device.minor, device.access);
2799
2800 return 0;
2801 }
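
/*
 * Worked example (device numbers assumed): for the line "/dev/fuse rwm",
 * device_cgroup_rule_parse_devpath() stats /dev/fuse, sees a character
 * device, and convert_devpath() renders the rule as
 *
 *	c 10:229 rwm
 *
 * which is the format the legacy devices controller expects in
 * devices.allow/devices.deny.
 */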
2802
2803 /* Called from setup_limits - here we have the container's cgroup_data because
2804 * we created the cgroups.
2805 */
2806 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2807 const char *value, bool is_cpuset)
2808 {
2809 __do_free char *controller = NULL;
2810 char *p;
2811 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2812 char converted_value[50];
2813 struct hierarchy *h;
2814
2815 controller = must_copy_string(filename);
2816 p = strchr(controller, '.');
2817 if (p)
2818 *p = '\0';
2819
2820 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2821 int ret;
2822
2823 ret = convert_devpath(value, converted_value);
2824 if (ret < 0)
2825 return ret;
2826 value = converted_value;
2827 }
2828
2829 h = get_hierarchy(ops, controller);
2830 if (!h)
2831 return log_error_errno(-ENOENT, ENOENT, "Failed to setup limits for the \"%s\" controller. The controller seems to be unused by \"cgfsng\" cgroup driver or not enabled on the cgroup hierarchy", controller);
2832
2833 if (is_cpuset) {
2834 int ret = lxc_write_openat(h->container_full_path, filename, value, strlen(value));
2835 if (ret)
2836 return ret;
2837 }
2838 return lxc_write_openat(h->container_limit_path, filename, value, strlen(value));
2839 }
2840
2841 __cgfsng_ops static bool cgfsng_setup_limits_legacy(struct cgroup_ops *ops,
2842 struct lxc_conf *conf,
2843 bool do_devices)
2844 {
2845 __do_free struct lxc_list *sorted_cgroup_settings = NULL;
2846 struct lxc_list *cgroup_settings = &conf->cgroup;
2847 struct lxc_list *iterator, *next;
2848 struct lxc_cgroup *cg;
2849 bool ret = false;
2850
2851 if (!ops)
2852 return ret_set_errno(false, ENOENT);
2853
2854 if (!conf)
2855 return ret_set_errno(false, EINVAL);
2856
2857 cgroup_settings = &conf->cgroup;
2858 if (lxc_list_empty(cgroup_settings))
2859 return true;
2860
2861 if (!ops->hierarchies)
2862 return ret_set_errno(false, EINVAL);
2863
2864 if (pure_unified_layout(ops))
2865 return log_warn_errno(true, EINVAL, "Ignoring legacy cgroup limits on pure cgroup2 system");
2866
2867 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2868 if (!sorted_cgroup_settings)
2869 return false;
2870
2871 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2872 cg = iterator->elem;
2873
2874 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2875 if (cg_legacy_set_data(ops, cg->subsystem, cg->value, strncmp("cpuset", cg->subsystem, 6) == 0)) {
2876 if (do_devices && (errno == EACCES || errno == EPERM)) {
2877 SYSWARN("Failed to set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2878 continue;
2879 }
2880 SYSERROR("Failed to set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2881 goto out;
2882 }
2883 DEBUG("Set controller \"%s\" to \"%s\"", cg->subsystem, cg->value);
2884 }
2885 }
2886
2887 ret = true;
2888 INFO("Limits for the legacy cgroup hierarchies have been set up");
2889 out:
2890 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2891 lxc_list_del(iterator);
2892 free(iterator);
2893 }
2894
2895 return ret;
2896 }
2897
2898 /*
2899 * Some of the parsing logic comes from the original cgroup device v1
2900 * implementation in the kernel.
2901 */
2902 static int bpf_device_cgroup_prepare(struct cgroup_ops *ops,
2903 struct lxc_conf *conf, const char *key,
2904 const char *val)
2905 {
2906 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
2907 struct device_item device_item = {0};
2908 int ret;
2909
2910 if (strcmp("devices.allow", key) == 0 && *val == '/')
2911 ret = device_cgroup_rule_parse_devpath(&device_item, val);
2912 else
2913 ret = device_cgroup_rule_parse(&device_item, key, val);
2914 if (ret < 0)
2915 return log_error_errno(-1, EINVAL, "Failed to parse device string %s=%s", key, val);
2916
2917 ret = bpf_list_add_device(conf, &device_item);
2918 if (ret < 0)
2919 return -1;
2920 #endif
2921 return 0;
2922 }
2923
2924 __cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
2925 struct lxc_handler *handler)
2926 {
2927 struct lxc_list *cgroup_settings, *iterator;
2928 struct hierarchy *h;
2929 struct lxc_conf *conf;
2930
2931 if (!ops)
2932 return ret_set_errno(false, ENOENT);
2933
2934 if (!ops->hierarchies)
2935 return true;
2936
2937 if (!ops->container_cgroup)
2938 return ret_set_errno(false, EINVAL);
2939
2940 if (!handler || !handler->conf)
2941 return ret_set_errno(false, EINVAL);
2942 conf = handler->conf;
2943
2944 cgroup_settings = &conf->cgroup2;
2945 if (lxc_list_empty(cgroup_settings))
2946 return true;
2947
2948 if (!pure_unified_layout(ops))
2949 return log_warn_errno(true, EINVAL, "Ignoring cgroup2 limits on legacy cgroup system");
2950
2951 if (!ops->unified)
2952 return false;
2953 h = ops->unified;
2954
2955 lxc_list_for_each (iterator, cgroup_settings) {
2956 struct lxc_cgroup *cg = iterator->elem;
2957 int ret;
2958
2959 if (strncmp("devices", cg->subsystem, 7) == 0) {
2960 ret = bpf_device_cgroup_prepare(ops, conf, cg->subsystem,
2961 cg->value);
2962 } else {
2963 ret = lxc_write_openat(h->container_limit_path,
2964 cg->subsystem, cg->value,
2965 strlen(cg->value));
2966 if (ret < 0)
2967 return log_error_errno(false, errno, "Failed to set \"%s\" to \"%s\"",
2968 cg->subsystem, cg->value);
2969 }
2970 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2971 }
2972
2973 return log_info(true, "Limits for the unified cgroup hierarchy have been set up");
2974 }
2975
2976 __cgfsng_ops bool cgfsng_devices_activate(struct cgroup_ops *ops,
2977 struct lxc_handler *handler)
2978 {
2979 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
2980 __do_bpf_program_free struct bpf_program *devices = NULL;
2981 int ret;
2982 struct lxc_conf *conf;
2983 struct hierarchy *unified;
2984 struct lxc_list *it;
2985 struct bpf_program *devices_old;
2986
2987 if (!ops)
2988 return ret_set_errno(false, ENOENT);
2989
2990 if (!ops->hierarchies)
2991 return true;
2992
2993 if (!ops->container_cgroup)
2994 return ret_set_errno(false, EEXIST);
2995
2996 if (!handler || !handler->conf)
2997 return ret_set_errno(false, EINVAL);
2998 conf = handler->conf;
2999
3000 unified = ops->unified;
3001 if (!unified || !unified->bpf_device_controller ||
3002 !unified->container_full_path || lxc_list_empty(&conf->devices))
3003 return true;
3004
3005 devices = bpf_program_new(BPF_PROG_TYPE_CGROUP_DEVICE);
3006 if (!devices)
3007 return log_error_errno(false, ENOMEM, "Failed to create new bpf program");
3008
3009 ret = bpf_program_init(devices);
3010 if (ret)
3011 return log_error_errno(false, ENOMEM, "Failed to initialize bpf program");
3012
3013 lxc_list_for_each(it, &conf->devices) {
3014 struct device_item *cur = it->elem;
3015
3016 ret = bpf_program_append_device(devices, cur);
3017 if (ret)
3018 return log_error_errno(false, ENOMEM, "Failed to add new rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
3019 cur->type,
3020 cur->major,
3021 cur->minor,
3022 cur->access,
3023 cur->allow,
3024 cur->global_rule);
3025 TRACE("Added rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
3026 cur->type,
3027 cur->major,
3028 cur->minor,
3029 cur->access,
3030 cur->allow,
3031 cur->global_rule);
3032 }
3033
3034 ret = bpf_program_finalize(devices);
3035 if (ret)
3036 return log_error_errno(false, ENOMEM, "Failed to finalize bpf program");
3037
3038 ret = bpf_program_cgroup_attach(devices, BPF_CGROUP_DEVICE,
3039 unified->container_limit_path,
3040 BPF_F_ALLOW_MULTI);
3041 if (ret)
3042 return log_error_errno(false, ENOMEM, "Failed to attach bpf program");
3043
3044 /* Replace old bpf program. */
3045 devices_old = move_ptr(conf->cgroup2_devices);
3046 conf->cgroup2_devices = move_ptr(devices);
3047 devices = move_ptr(devices_old);
3048 #endif
3049 return true;
3050 }
3051
3052 bool __cgfsng_delegate_controllers(struct cgroup_ops *ops, const char *cgroup)
3053 {
3054 __do_free char *add_controllers = NULL, *base_path = NULL;
3055 __do_free_string_list char **parts = NULL;
3056 struct hierarchy *unified = ops->unified;
3057 ssize_t parts_len;
3058 char **it;
3059 size_t full_len = 0;
3060
3061 if (!ops->hierarchies || !pure_unified_layout(ops) ||
3062 !unified->controllers[0])
3063 return true;
3064
3065 /* For now we simply enable all controllers that we have detected by
3066 * creating a string like "+memory +pids +cpu +io".
3067 * TODO: In the near future we might want to support "-<controller>"
3068 * etc. but whether supporting semantics like this make sense will need
3069 * some thinking.
3070 */
3071 for (it = unified->controllers; it && *it; it++) {
3072 full_len += strlen(*it) + 2;
3073 add_controllers = must_realloc(add_controllers, full_len + 1);
3074
3075 if (unified->controllers[0] == *it)
3076 add_controllers[0] = '\0';
3077
3078 (void)strlcat(add_controllers, "+", full_len + 1);
3079 (void)strlcat(add_controllers, *it, full_len + 1);
3080
3081 if (*(it + 1))
3082 (void)strlcat(add_controllers, " ", full_len + 1);
3083 }
3084
3085 parts = lxc_string_split(cgroup, '/');
3086 if (!parts)
3087 return false;
3088
3089 parts_len = lxc_array_len((void **)parts);
3090 if (parts_len > 0)
3091 parts_len--;
3092
3093 base_path = must_make_path(unified->mountpoint, unified->container_base_path, NULL);
3094 for (ssize_t i = -1; i < parts_len; i++) {
3095 int ret;
3096 __do_free char *target = NULL;
3097
3098 if (i >= 0)
3099 base_path = must_append_path(base_path, parts[i], NULL);
3100 target = must_make_path(base_path, "cgroup.subtree_control", NULL);
3101 ret = lxc_writeat(-1, target, add_controllers, full_len);
3102 if (ret < 0)
3103 return log_error_errno(false, errno, "Could not enable \"%s\" controllers in the unified cgroup \"%s\"",
3104 add_controllers, target);
3105 TRACE("Enable \"%s\" controllers in the unified cgroup \"%s\"", add_controllers, target);
3106 }
3107
3108 return true;
3109 }
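
/*
 * Illustrative walk-through (controller set assumed): with detected
 * controllers cpuset, memory and pids, the string built above is
 *
 *	+cpuset +memory +pids
 *
 * and it is written into "cgroup.subtree_control" of the base path and of
 * every ancestor component of @cgroup. The final component itself is
 * skipped (parts_len is decremented) because controllers only need to be
 * enabled in the parents for the new cgroup to see them.
 */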
3110
3111 __cgfsng_ops bool cgfsng_monitor_delegate_controllers(struct cgroup_ops *ops)
3112 {
3113 if (!ops)
3114 return ret_set_errno(false, ENOENT);
3115
3116 return __cgfsng_delegate_controllers(ops, ops->monitor_cgroup);
3117 }
3118
3119 __cgfsng_ops bool cgfsng_payload_delegate_controllers(struct cgroup_ops *ops)
3120 {
3121 if (!ops)
3122 return ret_set_errno(false, ENOENT);
3123
3124 return __cgfsng_delegate_controllers(ops, ops->container_cgroup);
3125 }
3126
3127 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
3128 char **controllers)
3129 {
3130 if (!ops->cgroup_use)
3131 return true;
3132
3133 for (char **cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
3134 bool found = false;
3135
3136 for (char **cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
3137 if (strcmp(*cur_use, *cur_ctrl) != 0)
3138 continue;
3139
3140 found = true;
3141 break;
3142 }
3143
3144 if (found)
3145 continue;
3146
3147 return false;
3148 }
3149
3150 return true;
3151 }
3152
3153 static void cg_unified_delegate(char ***delegate)
3154 {
3155 __do_free char *buf = NULL;
3156 char *standard[] = {"cgroup.subtree_control", "cgroup.threads", NULL};
3157 char *token;
3158 int idx;
3159
3160 buf = read_file("/sys/kernel/cgroup/delegate");
3161 if (!buf) {
3162 for (char **p = standard; p && *p; p++) {
3163 idx = append_null_to_list((void ***)delegate);
3164 (*delegate)[idx] = must_copy_string(*p);
3165 }
3166 SYSWARN("Failed to read /sys/kernel/cgroup/delegate");
3167 return;
3168 }
3169
3170 lxc_iterate_parts(token, buf, " \t\n") {
3171 /*
3172 * We always need to chown this for both cgroup and
3173 * cgroup2.
3174 */
3175 if (strcmp(token, "cgroup.procs") == 0)
3176 continue;
3177
3178 idx = append_null_to_list((void ***)delegate);
3179 (*delegate)[idx] = must_copy_string(token);
3180 }
3181 }
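
/*
 * For reference (typical contents, may vary by kernel version):
 * /sys/kernel/cgroup/delegate lists the files a delegating manager must
 * chown, e.g.:
 *
 *	cgroup.procs
 *	cgroup.threads
 *	cgroup.subtree_control
 *
 * "cgroup.procs" is skipped above because it is chowned unconditionally for
 * both cgroup and cgroup2.
 */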
3182
3183 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
3184  * mountpoints and current cgroups, and stores it in @ops.
3185  */
3186 static int cg_hybrid_init(struct cgroup_ops *ops, bool relative, bool unprivileged)
3187 {
3188 __do_free char *basecginfo = NULL, *line = NULL;
3189 __do_free_string_list char **klist = NULL, **nlist = NULL;
3190 __do_fclose FILE *f = NULL;
3191 int ret;
3192 size_t len = 0;
3193
3194 /* Root spawned containers escape the current cgroup, so use init's
3195 * cgroups as our base in that case.
3196 */
3197 if (!relative && (geteuid() == 0))
3198 basecginfo = read_file("/proc/1/cgroup");
3199 else
3200 basecginfo = read_file("/proc/self/cgroup");
3201 if (!basecginfo)
3202 return ret_set_errno(-1, ENOMEM);
3203
3204 ret = get_existing_subsystems(&klist, &nlist);
3205 if (ret < 0)
3206 return log_error_errno(-1, errno, "Failed to retrieve available legacy cgroup controllers");
3207
3208 f = fopen("/proc/self/mountinfo", "re");
3209 if (!f)
3210 return log_error_errno(-1, errno, "Failed to open \"/proc/self/mountinfo\"");
3211
3212 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
3213
3214 while (getline(&line, &len, f) != -1) {
3215 __do_free char *base_cgroup = NULL, *mountpoint = NULL;
3216 __do_free_string_list char **controller_list = NULL;
3217 int type;
3218 bool writeable;
3219 struct hierarchy *new;
3220
3221 type = get_cgroup_version(line);
3222 if (type == 0)
3223 continue;
3224
3225 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
3226 continue;
3227
3228 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
3229 if (type == CGROUP2_SUPER_MAGIC)
3230 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
3231 else if (type == CGROUP_SUPER_MAGIC)
3232 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
3233 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
3234 if (type == CGROUP_SUPER_MAGIC)
3235 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
3236 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
3237 if (type == CGROUP2_SUPER_MAGIC)
3238 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
3239 }
3240
3241 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
3242 if (!controller_list && type == CGROUP_SUPER_MAGIC)
3243 continue;
3244
3245 if (type == CGROUP_SUPER_MAGIC)
3246 if (controller_list_is_dup(ops->hierarchies, controller_list)) {
3247 TRACE("Skipping duplicating controller");
3248 continue;
3249 }
3250
3251 mountpoint = cg_hybrid_get_mountpoint(line);
3252 if (!mountpoint) {
3253 ERROR("Failed parsing mountpoint from \"%s\"", line);
3254 continue;
3255 }
3256
3257 if (type == CGROUP_SUPER_MAGIC)
3258 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
3259 else
3260 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
3261 if (!base_cgroup) {
3262 ERROR("Failed to find current cgroup");
3263 continue;
3264 }
3265
3266 trim(base_cgroup);
3267 prune_init_scope(base_cgroup);
3268 if (type == CGROUP2_SUPER_MAGIC)
3269 writeable = test_writeable_v2(mountpoint, base_cgroup);
3270 else
3271 writeable = test_writeable_v1(mountpoint, base_cgroup);
3272 if (!writeable) {
3273 TRACE("The %s group is not writeable", base_cgroup);
3274 continue;
3275 }
3276
3277 if (type == CGROUP2_SUPER_MAGIC) {
3278 char *cgv2_ctrl_path;
3279
3280 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
3281 "cgroup.controllers",
3282 NULL);
3283
3284 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
3285 free(cgv2_ctrl_path);
3286 if (!controller_list) {
3287 controller_list = cg_unified_make_empty_controller();
3288 TRACE("No controllers are enabled for "
3289 "delegation in the unified hierarchy");
3290 }
3291 }
3292
3293 /* Exclude all controllers that cgroup use does not want. */
3294 if (!cgroup_use_wants_controllers(ops, controller_list)) {
3295 TRACE("Skipping controller");
3296 continue;
3297 }
3298
3299 new = add_hierarchy(&ops->hierarchies, move_ptr(controller_list), move_ptr(mountpoint), move_ptr(base_cgroup), type);
3300 if (type == CGROUP2_SUPER_MAGIC && !ops->unified) {
3301 if (unprivileged)
3302 cg_unified_delegate(&new->cgroup2_chown);
3303 ops->unified = new;
3304 }
3305 }
3306
3307 TRACE("Writable cgroup hierarchies:");
3308 lxc_cgfsng_print_hierarchies(ops);
3309
3310 /* verify that all controllers in cgroup.use and all crucial
3311 * controllers are accounted for
3312 */
3313 if (!all_controllers_found(ops))
3314 return log_error_errno(-1, ENOENT, "Failed to find all required controllers");
3315
3316 return 0;
3317 }
3318
3319 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
3320 static char *cg_unified_get_current_cgroup(bool relative)
3321 {
3322 __do_free char *basecginfo = NULL;
3323 char *copy;
3324 char *base_cgroup;
3325
3326 if (!relative && (geteuid() == 0))
3327 basecginfo = read_file("/proc/1/cgroup");
3328 else
3329 basecginfo = read_file("/proc/self/cgroup");
3330 if (!basecginfo)
3331 return NULL;
3332
3333 base_cgroup = strstr(basecginfo, "0::/");
3334 if (!base_cgroup)
3335 return NULL;
3336
3337 base_cgroup = base_cgroup + 3;
3338 copy = copy_to_eol(base_cgroup);
3339 if (!copy)
3340 return NULL;
3341
3342 return trim(copy);
3343 }
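
/*
 * Example (assumed content): on a cgroup2 host /proc/self/cgroup contains a
 * single entry such as
 *
 *	0::/user.slice/user-1000.slice/session-1.scope
 *
 * The "0::/" anchor is located, the leading "0::" is skipped (base_cgroup +
 * 3 leaves the string starting at '/'), and the rest of the line is copied
 * and trimmed.
 */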
3344
3345 static int cg_unified_init(struct cgroup_ops *ops, bool relative,
3346 bool unprivileged)
3347 {
3348 __do_free char *subtree_path = NULL;
3349 int ret;
3350 char *mountpoint;
3351 char **delegatable;
3352 struct hierarchy *new;
3353 char *base_cgroup = NULL;
3354
3355 ret = unified_cgroup_hierarchy();
3356 if (ret == -ENOMEDIUM)
3357 return ret_errno(ENOMEDIUM);
3358
3359 if (ret != CGROUP2_SUPER_MAGIC)
3360 return 0;
3361
3362 base_cgroup = cg_unified_get_current_cgroup(relative);
3363 if (!base_cgroup)
3364 return ret_errno(EINVAL);
3365 if (!relative)
3366 prune_init_scope(base_cgroup);
3367
3368 /*
3369  * We assume that the cgroup we're currently in has been delegated to
3370  * us and that we are free to further delegate all of the controllers
3371  * listed in cgroup.controllers down the hierarchy.
3372  */
3373 mountpoint = must_copy_string(DEFAULT_CGROUP_MOUNTPOINT);
3374 subtree_path = must_make_path(mountpoint, base_cgroup, "cgroup.controllers", NULL);
3375 delegatable = cg_unified_get_controllers(subtree_path);
3376 if (!delegatable)
3377 delegatable = cg_unified_make_empty_controller();
3378 if (!delegatable[0])
3379 TRACE("No controllers are enabled for delegation");
3380
3381 /* TODO: If the user requested specific controllers via lxc.cgroup.use
3382  * we should verify them here. The reason I'm not doing that right now
3383  * is that I'm not convinced lxc.cgroup.use is the future, since it is
3384  * a global property. I would much rather have an option that lets you
3385  * request controllers per container.
3386  */
3387
3388 new = add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
3389 if (unprivileged)
3390 cg_unified_delegate(&new->cgroup2_chown);
3391
3392 if (bpf_devices_cgroup_supported())
3393 new->bpf_device_controller = 1;
3394
3395 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
3396 ops->unified = new;
3397
3398 return CGROUP2_SUPER_MAGIC;
3399 }
3400
3401 static int cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
3402 {
3403 int ret;
3404 const char *tmp;
3405 bool relative = conf->cgroup_meta.relative;
3406
3407 tmp = lxc_global_config_value("lxc.cgroup.use");
3408 if (tmp) {
3409 __do_free char *pin = NULL;
3410 char *chop, *cur;
3411
3412 pin = must_copy_string(tmp);
3413 chop = pin;
3414
3415 lxc_iterate_parts(cur, chop, ",")
3416 must_append_string(&ops->cgroup_use, cur);
3417 }
3418
3419 ret = cg_unified_init(ops, relative, !lxc_list_empty(&conf->id_map));
3420 if (ret < 0)
3421 return -1;
3422
3423 if (ret == CGROUP2_SUPER_MAGIC)
3424 return 0;
3425
3426 return cg_hybrid_init(ops, relative, !lxc_list_empty(&conf->id_map));
3427 }
3428
3429 __cgfsng_ops static int cgfsng_data_init(struct cgroup_ops *ops)
3430 {
3431 const char *cgroup_pattern;
3432
3433 if (!ops)
3434 return ret_set_errno(-1, ENOENT);
3435
3436 /* copy system-wide cgroup information */
3437 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
3438 if (cgroup_pattern && strcmp(cgroup_pattern, "") != 0)
3439 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
3440
3441 return 0;
3442 }
3443
3444 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
3445 {
3446 __do_free struct cgroup_ops *cgfsng_ops = NULL;
3447
3448 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
3449 if (!cgfsng_ops)
3450 return ret_set_errno(NULL, ENOMEM);
3451
3452 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
3453 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
3454
3455 if (cg_init(cgfsng_ops, conf))
3456 return NULL;
3457
3458 cgfsng_ops->data_init = cgfsng_data_init;
3459 cgfsng_ops->payload_destroy = cgfsng_payload_destroy;
3460 cgfsng_ops->monitor_destroy = cgfsng_monitor_destroy;
3461 cgfsng_ops->monitor_create = cgfsng_monitor_create;
3462 cgfsng_ops->monitor_enter = cgfsng_monitor_enter;
3463 cgfsng_ops->monitor_delegate_controllers = cgfsng_monitor_delegate_controllers;
3464 cgfsng_ops->payload_delegate_controllers = cgfsng_payload_delegate_controllers;
3465 cgfsng_ops->payload_create = cgfsng_payload_create;
3466 cgfsng_ops->payload_enter = cgfsng_payload_enter;
3467 cgfsng_ops->payload_finalize = cgfsng_payload_finalize;
3468 cgfsng_ops->escape = cgfsng_escape;
3469 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
3470 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
3471 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
3472 cgfsng_ops->get = cgfsng_get;
3473 cgfsng_ops->set = cgfsng_set;
3474 cgfsng_ops->freeze = cgfsng_freeze;
3475 cgfsng_ops->unfreeze = cgfsng_unfreeze;
3476 cgfsng_ops->setup_limits_legacy = cgfsng_setup_limits_legacy;
3477 cgfsng_ops->setup_limits = cgfsng_setup_limits;
3478 cgfsng_ops->driver = "cgfsng";
3479 cgfsng_ops->version = "1.0.0";
3480 cgfsng_ops->attach = cgfsng_attach;
3481 cgfsng_ops->chown = cgfsng_chown;
3482 cgfsng_ops->mount = cgfsng_mount;
3483 cgfsng_ops->devices_activate = cgfsng_devices_activate;
3484 cgfsng_ops->get_limiting_cgroup = cgfsng_get_limiting_cgroup;
3485
3486 return move_ptr(cgfsng_ops);
3487 }