/* SPDX-License-Identifier: LGPL-2.1+ */

/*
 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
 * cgroup backend. The original cgfs.c was designed to be as flexible
 * as possible. It would try to find cgroup filesystems no matter where
 * or how you had them mounted, and deduce the most usable mount for
 * each controller.
 *
 * This new implementation assumes that cgroup filesystems are mounted
 * under /sys/fs/cgroup/clist where clist is either the controller, or
 * a comma-separated list of controllers.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <grp.h>
#include <linux/kdev_t.h>
#include <linux/types.h>
#include <poll.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <unistd.h>

#include "af_unix.h"
#include "caps.h"
#include "cgroup.h"
#include "cgroup2_devices.h"
#include "cgroup_utils.h"
#include "commands.h"
#include "conf.h"
#include "config.h"
#include "log.h"
#include "macro.h"
#include "mainloop.h"
#include "memory_utils.h"
#include "storage/storage.h"
#include "utils.h"

#ifndef HAVE_STRLCPY
#include "include/strlcpy.h"
#endif

#ifndef HAVE_STRLCAT
#include "include/strlcat.h"
#endif

lxc_log_define(cgfsng, cgroup);

/* Given a pointer to a null-terminated array of pointers, realloc to add one
 * entry, and point the new entry to NULL. Do not fail. Return the index to the
 * second-to-last entry - that is, the one which is now available for use
 * (keeping the list null-terminated).
 */
static int append_null_to_list(void ***list)
{
	int newentry = 0;

	if (*list)
		for (; (*list)[newentry]; newentry++)
			;

	*list = must_realloc(*list, (newentry + 2) * sizeof(void **));
	(*list)[newentry + 1] = NULL;
	return newentry;
}

/* Given a null-terminated array of strings, check whether @entry is one of the
 * strings.
 */
static bool string_in_list(char **list, const char *entry)
{
	if (!list)
		return false;

	for (int i = 0; list[i]; i++)
		if (strcmp(list[i], entry) == 0)
			return true;

	return false;
}

/* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
 * "name=systemd". Do not fail.
 */
static char *cg_legacy_must_prefix_named(char *entry)
{
	size_t len;
	char *prefixed;

	len = strlen(entry);
	prefixed = must_realloc(NULL, len + 6);

	memcpy(prefixed, "name=", STRLITERALLEN("name="));
	memcpy(prefixed + STRLITERALLEN("name="), entry, len);
	prefixed[len + 5] = '\0';

	return prefixed;
}

/* Append an entry to the clist. Do not fail. @clist must be NULL the first time
 * we are called.
 *
 * We also handle named subsystems here. Any controller which is not a kernel
 * subsystem, we prefix "name=". Any which is both a kernel and named subsystem,
 * we refuse to use because we're not sure which we have here.
 * (TODO: We could work around this in some cases by just remounting to be
 * unambiguous, or by comparing mountpoint contents with current cgroup.)
 *
 * The last entry will always be NULL.
 */
static void must_append_controller(char **klist, char **nlist, char ***clist,
				   char *entry)
{
	int newentry;
	char *copy;

	if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
		ERROR("Refusing to use ambiguous controller \"%s\"", entry);
		ERROR("It is both a named and kernel subsystem");
		return;
	}

	newentry = append_null_to_list((void ***)clist);

	if (strncmp(entry, "name=", 5) == 0)
		copy = must_copy_string(entry);
	else if (string_in_list(klist, entry))
		copy = must_copy_string(entry);
	else
		copy = cg_legacy_must_prefix_named(entry);

	(*clist)[newentry] = copy;
}

/* Given a handler's cgroup data, return the struct hierarchy for the controller
 * @c, or NULL if there is none.
 */
static struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *controller)
{
	if (!ops->hierarchies)
		return log_trace_errno(NULL, errno, "There are no useable cgroup controllers");

	for (int i = 0; ops->hierarchies[i]; i++) {
		if (!controller) {
			/* This is the empty unified hierarchy. */
			if (ops->hierarchies[i]->controllers &&
			    !ops->hierarchies[i]->controllers[0])
				return ops->hierarchies[i];
			continue;
		} else if (pure_unified_layout(ops) &&
			   strcmp(controller, "devices") == 0) {
			if (ops->unified->bpf_device_controller)
				return ops->unified;
			break;
		}

		if (string_in_list(ops->hierarchies[i]->controllers, controller))
			return ops->hierarchies[i];
	}

	if (controller)
		WARN("There is no useable %s controller", controller);
	else
		WARN("There is no empty unified cgroup hierarchy");

	return ret_set_errno(NULL, ENOENT);
}

#define BATCH_SIZE 50
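
/* Grow *@mem in BATCH_SIZE steps so that read_file() below does not have to
 * call realloc() once per appended line; the buffer is only reallocated when
 * the new length crosses a batch boundary.
 */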
static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
{
	int newbatches = (newlen / BATCH_SIZE) + 1;
	int oldbatches = (oldlen / BATCH_SIZE) + 1;

	if (!*mem || newbatches > oldbatches)
		*mem = must_realloc(*mem, newbatches * BATCH_SIZE);
}

static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
{
	size_t full = oldlen + newlen;

	batch_realloc(dest, oldlen, full + 1);

	memcpy(*dest + oldlen, new, newlen + 1);
}

/* Slurp in a whole file */
static char *read_file(const char *fnam)
{
	__do_free char *buf = NULL, *line = NULL;
	__do_fclose FILE *f = NULL;
	size_t len = 0, fulllen = 0;
	int linelen;

	f = fopen(fnam, "re");
	if (!f)
		return NULL;

	while ((linelen = getline(&line, &len, f)) != -1) {
		append_line(&buf, fulllen, line, linelen);
		fulllen += linelen;
	}

	return move_ptr(buf);
}

/* Taken over modified from the kernel sources. */
#define NBITS 32 /* bits in uint32_t */
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
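
/* Note the unit is uint32_t words, not longs, despite the macro name: e.g.
 * BITS_TO_LONGS(33) is 2, since bits 0-31 fit in the first word and bit 32
 * needs a second one.
 */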

static void set_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] |= (1 << (bit % NBITS));
}

static void clear_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
}

static bool is_set(unsigned bit, uint32_t *bitarr)
{
	return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
}

/* Create cpumask from cpulist aka turn:
 *
 *	0,2-3
 *
 * into bit array
 *
 *	1 0 1 1
 */
static uint32_t *lxc_cpumask(char *buf, size_t nbits)
{
	__do_free uint32_t *bitarr = NULL;
	char *token;
	size_t arrlen;

	arrlen = BITS_TO_LONGS(nbits);
	bitarr = calloc(arrlen, sizeof(uint32_t));
	if (!bitarr)
		return ret_set_errno(NULL, ENOMEM);

	lxc_iterate_parts(token, buf, ",") {
		errno = 0;
		unsigned end, start;
		char *range;

		start = strtoul(token, NULL, 0);
		end = start;
		range = strchr(token, '-');
		if (range)
			end = strtoul(range + 1, NULL, 0);

		if (!(start <= end))
			return ret_set_errno(NULL, EINVAL);

		if (end >= nbits)
			return ret_set_errno(NULL, EINVAL);

		while (start <= end)
			set_bit(start++, bitarr);
	}

	return move_ptr(bitarr);
}

/* Turn cpumask into simple, comma-separated cpulist. */
static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
{
	__do_free_string_list char **cpulist = NULL;
	char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
	int ret;

	for (size_t i = 0; i <= nbits; i++) {
		if (!is_set(i, bitarr))
			continue;

		ret = snprintf(numstr, sizeof(numstr), "%zu", i);
		if (ret < 0 || (size_t)ret >= sizeof(numstr))
			return NULL;

		ret = lxc_append_string(&cpulist, numstr);
		if (ret < 0)
			return ret_set_errno(NULL, ENOMEM);
	}

	if (!cpulist)
		return ret_set_errno(NULL, ENOMEM);

	return lxc_string_join(",", (const char **)cpulist, false);
}

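/* Return the highest cpu number mentioned in @cpulist by looking at the text
 * after the last ',' and the last '-'. For example, for "0-3,7" the last
 * token is "7", so 7 is returned. This relies on cpulists being sorted, which
 * is how the kernel emits them.
 */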
static ssize_t get_max_cpus(char *cpulist)
{
	char *c1, *c2;
	char *maxcpus = cpulist;
	size_t cpus = 0;

	c1 = strrchr(maxcpus, ',');
	if (c1)
		c1++;

	c2 = strrchr(maxcpus, '-');
	if (c2)
		c2++;

	if (!c1 && !c2)
		c1 = maxcpus;
	else if (c1 > c2)
		c2 = c1;
	else if (c1 < c2)
		c1 = c2;
	else if (!c1 && c2)
		c1 = c2;

	errno = 0;
	cpus = strtoul(c1, NULL, 0);
	if (errno != 0)
		return -1;

	return cpus;
}

#define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
#define __OFFLINE_CPUS "/sys/devices/system/cpu/offline"
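
/* Read the parent cgroup's cpuset.cpus, drop any cpus the kernel reports as
 * isolated or offline, and - unless cgroup.clone_children already initialized
 * the child cgroup for us (@am_initialized) - write the filtered list to the
 * child cgroup's cpuset.cpus.
 */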
static bool cg_legacy_filter_and_set_cpus(const char *parent_cgroup,
					  char *child_cgroup, bool am_initialized)
{
	__do_free char *cpulist = NULL, *fpath = NULL, *isolcpus = NULL,
		       *offlinecpus = NULL, *posscpus = NULL;
	__do_free uint32_t *isolmask = NULL, *offlinemask = NULL,
			   *possmask = NULL;
	int ret;
	ssize_t i;
	ssize_t maxisol = 0, maxoffline = 0, maxposs = 0;
	bool flipped_bit = false;

	fpath = must_make_path(parent_cgroup, "cpuset.cpus", NULL);
	posscpus = read_file(fpath);
	if (!posscpus)
		return log_error_errno(false, errno, "Failed to read file \"%s\"", fpath);

	/* Get maximum number of cpus found in possible cpuset. */
	maxposs = get_max_cpus(posscpus);
	if (maxposs < 0 || maxposs >= INT_MAX - 1)
		return false;

	if (file_exists(__ISOL_CPUS)) {
		isolcpus = read_file(__ISOL_CPUS);
		if (!isolcpus)
			return log_error_errno(false, errno, "Failed to read file \"%s\"", __ISOL_CPUS);

		if (isdigit(isolcpus[0])) {
			/* Get maximum number of cpus found in isolated cpuset. */
			maxisol = get_max_cpus(isolcpus);
			if (maxisol < 0 || maxisol >= INT_MAX - 1)
				return false;
		}

		if (maxposs < maxisol)
			maxposs = maxisol;
		maxposs++;
	} else {
		TRACE("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
	}

	if (file_exists(__OFFLINE_CPUS)) {
		offlinecpus = read_file(__OFFLINE_CPUS);
		if (!offlinecpus)
			return log_error_errno(false, errno, "Failed to read file \"%s\"", __OFFLINE_CPUS);

		if (isdigit(offlinecpus[0])) {
			/* Get maximum number of cpus found in offline cpuset. */
			maxoffline = get_max_cpus(offlinecpus);
			if (maxoffline < 0 || maxoffline >= INT_MAX - 1)
				return false;
		}

		if (maxposs < maxoffline)
			maxposs = maxoffline;
		maxposs++;
	} else {
		TRACE("The path \""__OFFLINE_CPUS"\" to read offline cpus from does not exist");
	}

	if ((maxisol == 0) && (maxoffline == 0)) {
		cpulist = move_ptr(posscpus);
		goto copy_parent;
	}

	possmask = lxc_cpumask(posscpus, maxposs);
	if (!possmask)
		return log_error_errno(false, errno, "Failed to create cpumask for possible cpus");

	if (maxisol > 0) {
		isolmask = lxc_cpumask(isolcpus, maxposs);
		if (!isolmask)
			return log_error_errno(false, errno, "Failed to create cpumask for isolated cpus");
	}

	if (maxoffline > 0) {
		offlinemask = lxc_cpumask(offlinecpus, maxposs);
		if (!offlinemask)
			return log_error_errno(false, errno, "Failed to create cpumask for offline cpus");
	}

	for (i = 0; i <= maxposs; i++) {
		if ((isolmask && !is_set(i, isolmask)) ||
		    (offlinemask && !is_set(i, offlinemask)) ||
		    !is_set(i, possmask))
			continue;

		flipped_bit = true;
		clear_bit(i, possmask);
	}

	if (!flipped_bit) {
		cpulist = move_ptr(posscpus);
		TRACE("No isolated or offline cpus present in cpuset");
	} else {
		cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
		TRACE("Removed isolated or offline cpus from cpuset");
	}
	if (!cpulist)
		return log_error_errno(false, errno, "Failed to create cpu list");

copy_parent:
	if (!am_initialized) {
		ret = lxc_write_openat(child_cgroup, "cpuset.cpus", cpulist, strlen(cpulist));
		if (ret < 0)
			return log_error_errno(false,
					       errno, "Failed to write cpu list to \"%s/cpuset.cpus\"",
					       child_cgroup);

		TRACE("Copied cpu settings of parent cgroup");
	}

	return true;
}

/* Copy contents of parent(@path)/@file to @path/@file */
static bool copy_parent_file(const char *parent_cgroup,
			     const char *child_cgroup, const char *file)
{
	__do_free char *parent_file = NULL, *value = NULL;
	int len = 0;
	int ret;

	parent_file = must_make_path(parent_cgroup, file, NULL);
	len = lxc_read_from_file(parent_file, NULL, 0);
	if (len <= 0)
		return log_error_errno(false, errno, "Failed to determine buffer size");

	value = must_realloc(NULL, len + 1);
	value[len] = '\0';
	ret = lxc_read_from_file(parent_file, value, len);
	if (ret != len)
		return log_error_errno(false, errno, "Failed to read from parent file \"%s\"", parent_file);

	ret = lxc_write_openat(child_cgroup, file, value, len);
	if (ret < 0 && errno != EACCES)
		return log_error_errno(false, errno, "Failed to write \"%s\" to file \"%s/%s\"",
				       value, child_cgroup, file);
	return true;
}

static inline bool is_unified_hierarchy(const struct hierarchy *h)
{
	return h->version == CGROUP2_SUPER_MAGIC;
}

/*
 * Initialize the cpuset hierarchy in the first directory of @cgroup_leaf and
 * set cgroup.clone_children so that children inherit settings. Since the
 * h->base_path is populated by init or ourselves, we know it is already
 * initialized.
 *
 * Returns -1 on error, 0 when we didn't create a cgroup, and 1 if we created
 * one.
 */
static int cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h,
					     const char *cgroup_leaf)
{
	__do_free char *parent_cgroup = NULL, *child_cgroup = NULL, *dup = NULL;
	__do_close int cgroup_fd = -EBADF;
	int fret = -1;
	int ret;
	char v;
	char *leaf, *slash;

	if (is_unified_hierarchy(h))
		return 0;

	if (!string_in_list(h->controllers, "cpuset"))
		return 0;

	if (!cgroup_leaf)
		return ret_set_errno(-1, EINVAL);

	dup = strdup(cgroup_leaf);
	if (!dup)
		return ret_set_errno(-1, ENOMEM);

	parent_cgroup = must_make_path(h->mountpoint, h->container_base_path, NULL);

	leaf = dup;
	leaf += strspn(leaf, "/");
	slash = strchr(leaf, '/');
	if (slash)
		*slash = '\0';
	child_cgroup = must_make_path(parent_cgroup, leaf, NULL);
	if (slash)
		*slash = '/';

	fret = 1;
	ret = mkdir(child_cgroup, 0755);
	if (ret < 0) {
		if (errno != EEXIST)
			return log_error_errno(-1, errno, "Failed to create directory \"%s\"", child_cgroup);

		fret = 0;
	}

	cgroup_fd = lxc_open_dirfd(child_cgroup);
	if (cgroup_fd < 0)
		return -1;

	ret = lxc_readat(cgroup_fd, "cgroup.clone_children", &v, 1);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to read file \"%s/cgroup.clone_children\"", child_cgroup);

	/* Make sure any isolated cpus are removed from cpuset.cpus. */
	if (!cg_legacy_filter_and_set_cpus(parent_cgroup, child_cgroup, v == '1'))
		return log_error_errno(-1, errno, "Failed to remove isolated cpus");

	/* Already set for us by someone else. */
	if (v == '1')
		TRACE("\"cgroup.clone_children\" was already set to \"1\"");

	/* copy parent's settings */
	if (!copy_parent_file(parent_cgroup, child_cgroup, "cpuset.mems"))
		return log_error_errno(-1, errno, "Failed to copy \"cpuset.mems\" settings");

	/* Set clone_children so children inherit our settings */
	ret = lxc_writeat(cgroup_fd, "cgroup.clone_children", "1", 1);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to write 1 to \"%s/cgroup.clone_children\"", child_cgroup);

	return fret;
}

/* Given two null-terminated lists of strings, return true if any string is in
 * both.
 */
static bool controller_lists_intersect(char **l1, char **l2)
{
	if (!l1 || !l2)
		return false;

	for (int i = 0; l1[i]; i++)
		if (string_in_list(l2, l1[i]))
			return true;

	return false;
}

/* For a null-terminated list of controllers @clist, return true if any of
 * those controllers is already listed in the null-terminated list of
 * hierarchies @hlist. Realistically, if one is present, all must be present.
 */
static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
{
	if (!hlist)
		return false;

	for (int i = 0; hlist[i]; i++)
		if (controller_lists_intersect(hlist[i]->controllers, clist))
			return true;

	return false;
}

/* Return true if the controller @entry is found in the null-terminated list of
 * hierarchies @hlist.
 */
static bool controller_found(struct hierarchy **hlist, char *entry)
{
	if (!hlist)
		return false;

	for (int i = 0; hlist[i]; i++)
		if (string_in_list(hlist[i]->controllers, entry))
			return true;

	return false;
}

/* Return true if all of the controllers which we require have been found. The
 * required list is freezer and anything in lxc.cgroup.use.
 */
static bool all_controllers_found(struct cgroup_ops *ops)
{
	struct hierarchy **hlist;

	if (!ops->cgroup_use)
		return true;

	hlist = ops->hierarchies;
	for (char **cur = ops->cgroup_use; cur && *cur; cur++)
		if (!controller_found(hlist, *cur))
			return log_error(false, "No %s controller mountpoint found", *cur);

	return true;
}

/* Get the controllers from a mountinfo line. There are other ways we could get
 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
 * could parse the mount options. But we simply assume that the mountpoint must
 * be /sys/fs/cgroup/controller-list.
 */
static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
					int type)
{
	/* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
	 * for legacy hierarchies.
	 */
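	/* An illustrative (made-up) mountinfo line for such a hierarchy:
	 *
	 *   32 23 0:27 / /sys/fs/cgroup/cpu,cpuacct rw - cgroup cgroup rw,cpu,cpuacct
	 *
	 * Skipping four spaces below leaves @p at the mountpoint field.
	 */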
	__do_free_string_list char **aret = NULL;
	int i;
	char *p2, *tok;
	char *p = line, *sep = ",";

	for (i = 0; i < 4; i++) {
		p = strchr(p, ' ');
		if (!p)
			return NULL;
		p++;
	}

	/* Note, if we change how mountinfo works, then our caller will need to
	 * verify /sys/fs/cgroup/ in this field.
	 */
	if (strncmp(p, DEFAULT_CGROUP_MOUNTPOINT "/", 15) != 0)
		return log_warn(NULL, "Found hierarchy not under " DEFAULT_CGROUP_MOUNTPOINT ": \"%s\"", p);

	p += 15;
	p2 = strchr(p, ' ');
	if (!p2)
		return log_error(NULL, "Corrupt mountinfo");
	*p2 = '\0';

	if (type == CGROUP_SUPER_MAGIC) {
		__do_free char *dup = NULL;

		/* strdup() here for v1 hierarchies. Otherwise
		 * lxc_iterate_parts() will destroy mountpoints such as
		 * "/sys/fs/cgroup/cpu,cpuacct".
		 */
		dup = must_copy_string(p);
		if (!dup)
			return NULL;

		lxc_iterate_parts(tok, dup, sep)
			must_append_controller(klist, nlist, &aret, tok);
	}
	*p2 = ' ';

	return move_ptr(aret);
}

static char **cg_unified_make_empty_controller(void)
{
	__do_free_string_list char **aret = NULL;
	int newentry;

	newentry = append_null_to_list((void ***)&aret);
	aret[newentry] = NULL;
	return move_ptr(aret);
}

static char **cg_unified_get_controllers(const char *file)
{
	__do_free char *buf = NULL;
	__do_free_string_list char **aret = NULL;
	char *sep = " \t\n";
	char *tok;

	buf = read_file(file);
	if (!buf)
		return NULL;

	lxc_iterate_parts(tok, buf, sep) {
		int newentry;
		char *copy;

		newentry = append_null_to_list((void ***)&aret);
		copy = must_copy_string(tok);
		aret[newentry] = copy;
	}

	return move_ptr(aret);
}

static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
				       char *container_base_path, int type)
{
	struct hierarchy *new;
	int newentry;

	new = zalloc(sizeof(*new));
	new->controllers = clist;
	new->mountpoint = mountpoint;
	new->container_base_path = container_base_path;
	new->version = type;
	new->cgfd_con = -EBADF;
	new->cgfd_limit = -EBADF;
	new->cgfd_mon = -EBADF;

	newentry = append_null_to_list((void ***)h);
	(*h)[newentry] = new;
	return new;
}

/* Get a copy of the mountpoint from @line, which is a line from
 * /proc/self/mountinfo.
 */
static char *cg_hybrid_get_mountpoint(char *line)
{
	char *p = line, *sret = NULL;
	size_t len;
	char *p2;

	for (int i = 0; i < 4; i++) {
		p = strchr(p, ' ');
		if (!p)
			return NULL;
		p++;
	}

	if (strncmp(p, DEFAULT_CGROUP_MOUNTPOINT "/", 15) != 0)
		return NULL;

	p2 = strchr(p + 15, ' ');
	if (!p2)
		return NULL;
	*p2 = '\0';

	len = strlen(p);
	sret = must_realloc(NULL, len + 1);
	memcpy(sret, p, len);
	sret[len] = '\0';

	return sret;
}

/* Given a multi-line string, return a null-terminated copy of the current line. */
static char *copy_to_eol(char *p)
{
	char *p2, *sret;
	size_t len;

	p2 = strchr(p, '\n');
	if (!p2)
		return NULL;

	len = p2 - p;
	sret = must_realloc(NULL, len + 1);
	memcpy(sret, p, len);
	sret[len] = '\0';

	return sret;
}

/* cgline: pointer to character after the first ':' in a line in a \n-terminated
 * /proc/self/cgroup file. Check whether controller c is present.
 */
static bool controller_in_clist(char *cgline, char *c)
{
	__do_free char *tmp = NULL;
	char *tok, *eol;
	size_t len;

	eol = strchr(cgline, ':');
	if (!eol)
		return false;

	len = eol - cgline;
	tmp = must_realloc(NULL, len + 1);
	memcpy(tmp, cgline, len);
	tmp[len] = '\0';

	lxc_iterate_parts(tok, tmp, ",")
		if (strcmp(tok, c) == 0)
			return true;

	return false;
}

/* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
 * @controller.
 */
static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
					  int type)
{
	char *p = basecginfo;

	for (;;) {
		bool is_cgv2_base_cgroup = false;

		/* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
		if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
			is_cgv2_base_cgroup = true;

		p = strchr(p, ':');
		if (!p)
			return NULL;
		p++;

		if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
			p = strchr(p, ':');
			if (!p)
				return NULL;
			p++;
			return copy_to_eol(p);
		}

		p = strchr(p, '\n');
		if (!p)
			return NULL;
		p++;
	}
}

static void must_append_string(char ***list, char *entry)
{
	int newentry;
	char *copy;

	newentry = append_null_to_list((void ***)list);
	copy = must_copy_string(entry);
	(*list)[newentry] = copy;
}

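/* Parse /proc/self/cgroup and sort the mounted subsystems into kernel
 * controllers (@klist) and named controllers (@nlist). For example, a line
 * like "3:cpu,cpuacct:/" contributes "cpu" and "cpuacct" to @klist, whereas
 * "5:name=systemd:/" contributes "name=systemd" to @nlist.
 */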
static int get_existing_subsystems(char ***klist, char ***nlist)
{
	__do_free char *line = NULL;
	__do_fclose FILE *f = NULL;
	size_t len = 0;

	f = fopen("/proc/self/cgroup", "re");
	if (!f)
		return -1;

	while (getline(&line, &len, f) != -1) {
		char *p, *p2, *tok;
		p = strchr(line, ':');
		if (!p)
			continue;
		p++;
		p2 = strchr(p, ':');
		if (!p2)
			continue;
		*p2 = '\0';

		/* If the kernel has cgroup v2 support, then /proc/self/cgroup
		 * contains an entry of the form:
		 *
		 *	0::/some/path
		 *
		 * In this case we use "cgroup2" as controller name.
		 */
		if ((p2 - p) == 0) {
			must_append_string(klist, "cgroup2");
			continue;
		}

		lxc_iterate_parts(tok, p, ",") {
			if (strncmp(tok, "name=", 5) == 0)
				must_append_string(nlist, tok);
			else
				must_append_string(klist, tok);
		}
	}

	return 0;
}

static char *trim(char *s)
{
	size_t len;

	len = strlen(s);
	while ((len > 1) && (s[len - 1] == '\n'))
		s[--len] = '\0';

	return s;
}

static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
{
	int i;
	struct hierarchy **it;

	if (!ops->hierarchies) {
		TRACE("  No hierarchies found");
		return;
	}

	TRACE("  Hierarchies:");
	for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
		int j;
		char **cit;

		TRACE("  %d: base_cgroup: %s", i, (*it)->container_base_path ? (*it)->container_base_path : "(null)");
		TRACE("      mountpoint:  %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
		TRACE("      controllers:");
		for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
			TRACE("      %d: %s", j, *cit);
	}
}

static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
					      char **nlist)
{
	int k;
	char **it;

	TRACE("basecginfo is:");
	TRACE("%s", basecginfo);

	for (k = 0, it = klist; it && *it; it++, k++)
		TRACE("kernel subsystem %d: %s", k, *it);

	for (k = 0, it = nlist; it && *it; it++, k++)
		TRACE("named subsystem %d: %s", k, *it);
}

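/* Recursively destroy the container's limit cgroup - which is the same as the
 * payload cgroup unless cgroup dir isolation is used - in every hierarchy and
 * clear the cached paths.
 */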
static int cgroup_tree_remove(struct hierarchy **hierarchies, const char *container_cgroup)
{
	if (!container_cgroup || !hierarchies)
		return 0;

	for (int i = 0; hierarchies[i]; i++) {
		struct hierarchy *h = hierarchies[i];
		int ret;

		if (!h->container_limit_path)
			continue;

		ret = lxc_rm_rf(h->container_limit_path);
		if (ret < 0)
			WARN("Failed to destroy \"%s\"", h->container_limit_path);

		if (h->container_limit_path != h->container_full_path)
			free_disarm(h->container_limit_path);
		free_disarm(h->container_full_path);
	}

	return 0;
}

struct generic_userns_exec_data {
	struct hierarchy **hierarchies;
	const char *container_cgroup;
	struct lxc_conf *conf;
	uid_t origuid; /* target uid in parent namespace */
	char *path;
};

static int cgroup_tree_remove_wrapper(void *data)
{
	struct generic_userns_exec_data *arg = data;
	uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
	gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
	int ret;

	if (!lxc_setgroups(0, NULL) && errno != EPERM)
		return log_error_errno(-1, errno, "Failed to setgroups(0, NULL)");

	ret = setresgid(nsgid, nsgid, nsgid);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to setresgid(%d, %d, %d)",
				       (int)nsgid, (int)nsgid, (int)nsgid);

	ret = setresuid(nsuid, nsuid, nsuid);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to setresuid(%d, %d, %d)",
				       (int)nsuid, (int)nsuid, (int)nsuid);

	return cgroup_tree_remove(arg->hierarchies, arg->container_cgroup);
}

__cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
						struct lxc_handler *handler)
{
	int ret;

	if (!ops) {
		ERROR("Called with uninitialized cgroup operations");
		return;
	}

	if (!ops->hierarchies)
		return;

	if (!handler) {
		ERROR("Called with uninitialized handler");
		return;
	}

	if (!handler->conf) {
		ERROR("Called with uninitialized conf");
		return;
	}

#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
	ret = bpf_program_cgroup_detach(handler->cgroup_ops->cgroup2_devices);
	if (ret < 0)
		WARN("Failed to detach bpf program from cgroup");
#endif

	if (handler->conf && !lxc_list_empty(&handler->conf->id_map)) {
		struct generic_userns_exec_data wrap = {
			.conf = handler->conf,
			.container_cgroup = ops->container_cgroup,
			.hierarchies = ops->hierarchies,
			.origuid = 0,
		};
		ret = userns_exec_1(handler->conf, cgroup_tree_remove_wrapper,
				    &wrap, "cgroup_tree_remove_wrapper");
	} else {
		ret = cgroup_tree_remove(ops->hierarchies, ops->container_cgroup);
	}
	if (ret < 0)
		SYSWARN("Failed to destroy cgroups");
}

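/* Tear down the monitor cgroup. Since the monitor process is still running
 * inside it, it is first moved into a pivot cgroup; only then can its old
 * cgroup be removed with lxc_rm_rf().
 */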
__cgfsng_ops static void cgfsng_monitor_destroy(struct cgroup_ops *ops,
						struct lxc_handler *handler)
{
	int len;
	char pidstr[INTTYPE_TO_STRLEN(pid_t)];
	const struct lxc_conf *conf;

	if (!ops) {
		ERROR("Called with uninitialized cgroup operations");
		return;
	}

	if (!ops->hierarchies)
		return;

	if (!handler) {
		ERROR("Called with uninitialized handler");
		return;
	}

	if (!handler->conf) {
		ERROR("Called with uninitialized conf");
		return;
	}
	conf = handler->conf;

	len = snprintf(pidstr, sizeof(pidstr), "%d", handler->monitor_pid);
	if (len < 0 || (size_t)len >= sizeof(pidstr))
		return;

	for (int i = 0; ops->hierarchies[i]; i++) {
		__do_free char *pivot_path = NULL;
		struct hierarchy *h = ops->hierarchies[i];
		size_t offset;
		int ret;

		if (!h->monitor_full_path)
			continue;

		/* Monitor might have died before we entered the cgroup. */
		if (handler->monitor_pid <= 0) {
			WARN("No valid monitor process found while destroying cgroups");
			goto try_lxc_rm_rf;
		}

		if (conf && conf->cgroup_meta.monitor_pivot_dir)
			pivot_path = must_make_path(h->mountpoint, h->container_base_path,
						    conf->cgroup_meta.monitor_pivot_dir, CGROUP_PIVOT, NULL);
		else if (conf && conf->cgroup_meta.monitor_dir)
			pivot_path = must_make_path(h->mountpoint, h->container_base_path,
						    conf->cgroup_meta.monitor_dir, CGROUP_PIVOT, NULL);
		else if (conf && conf->cgroup_meta.dir)
			pivot_path = must_make_path(h->mountpoint, h->container_base_path,
						    conf->cgroup_meta.dir, CGROUP_PIVOT, NULL);
		else
			pivot_path = must_make_path(h->mountpoint, h->container_base_path,
						    CGROUP_PIVOT, NULL);

		offset = strlen(h->mountpoint) + strlen(h->container_base_path);

		if (cg_legacy_handle_cpuset_hierarchy(h, pivot_path + offset))
			SYSWARN("Failed to initialize cpuset %s/" CGROUP_PIVOT, pivot_path);

		ret = mkdir_p(pivot_path, 0755);
		if (ret < 0 && errno != EEXIST) {
			ERROR("Failed to create %s", pivot_path);
			goto try_lxc_rm_rf;
		}

		ret = lxc_write_openat(pivot_path, "cgroup.procs", pidstr, len);
		if (ret != 0) {
			SYSWARN("Failed to move monitor %s to \"%s\"", pidstr, pivot_path);
			continue;
		}

try_lxc_rm_rf:
		ret = lxc_rm_rf(h->monitor_full_path);
		if (ret < 0)
			WARN("Failed to destroy \"%s\"", h->monitor_full_path);
	}
}

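/* mkdir -p like helper: create every component of @dir but treat EEXIST as an
 * error only for the last path component, so pre-existing parents are fine
 * while a pre-existing leaf is reported to the caller.
 */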
static int mkdir_eexist_on_last(const char *dir, mode_t mode)
{
	const char *tmp = dir;
	const char *orig = dir;
	size_t orig_len;

	orig_len = strlen(dir);
	do {
		__do_free char *makeme = NULL;
		int ret;
		size_t cur_len;

		dir = tmp + strspn(tmp, "/");
		tmp = dir + strcspn(dir, "/");

		cur_len = dir - orig;
		makeme = strndup(orig, cur_len);
		if (!makeme)
			return ret_set_errno(-1, ENOMEM);

		ret = mkdir(makeme, mode);
		if (ret < 0 && ((errno != EEXIST) || (orig_len == cur_len)))
			return log_warn_errno(-1, errno, "Failed to create directory \"%s\"", makeme);
	} while (tmp != dir);

	return 0;
}

static bool cgroup_tree_create(struct cgroup_ops *ops, struct lxc_conf *conf,
			       struct hierarchy *h, const char *cgroup_tree,
			       const char *cgroup_leaf, bool payload,
			       const char *cgroup_limit_dir)
{
	__do_free char *path = NULL, *limit_path = NULL;
	int ret, ret_cpuset;

	path = must_make_path(h->mountpoint, h->container_base_path, cgroup_leaf, NULL);
	if (dir_exists(path))
		return log_warn_errno(false, errno, "The %s cgroup already existed", path);

	ret_cpuset = cg_legacy_handle_cpuset_hierarchy(h, cgroup_leaf);
	if (ret_cpuset < 0)
		return log_error_errno(false, errno, "Failed to handle legacy cpuset controller");

	if (payload && cgroup_limit_dir) {
		/* with isolation both parts need to not already exist */
		limit_path = must_make_path(h->mountpoint,
					    h->container_base_path,
					    cgroup_limit_dir, NULL);

		ret = mkdir_eexist_on_last(limit_path, 0755);
		if (ret < 0)
			return log_debug_errno(false,
					       errno, "Failed to create %s limiting cgroup",
					       limit_path);

		h->cgfd_limit = lxc_open_dirfd(limit_path);
		if (h->cgfd_limit < 0)
			return log_error_errno(false, errno,
					       "Failed to open %s", path);
		h->container_limit_path = move_ptr(limit_path);

		/*
		 * With isolation the devices legacy cgroup needs to be
		 * initialized early, as it typically contains an 'a' (all)
		 * line, which is not possible once a subdirectory has been
		 * created.
		 */
		if (string_in_list(h->controllers, "devices") &&
		    !ops->setup_limits_legacy(ops, conf, true))
			return log_error(false, "Failed to setup legacy device limits");
	}

	ret = mkdir_eexist_on_last(path, 0755);
	if (ret < 0) {
		/*
		 * This is the cpuset controller and
		 * cg_legacy_handle_cpuset_hierarchy() has created our target
		 * directory for us to ensure correct initialization.
		 */
		if (ret_cpuset != 1 || cgroup_tree)
			return log_debug_errno(false, errno, "Failed to create %s cgroup", path);
	}

	if (payload) {
		h->cgfd_con = lxc_open_dirfd(path);
		if (h->cgfd_con < 0)
			return log_error_errno(false, errno, "Failed to open %s", path);
		h->container_full_path = move_ptr(path);
		if (h->cgfd_limit < 0)
			h->cgfd_limit = h->cgfd_con;
		if (!h->container_limit_path)
			h->container_limit_path = h->container_full_path;
	} else {
		h->cgfd_mon = lxc_open_dirfd(path);
		if (h->cgfd_mon < 0)
			return log_error_errno(false, errno, "Failed to open %s", path);
		h->monitor_full_path = move_ptr(path);
	}

	return true;
}

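/* Undo cgroup_tree_create() for one hierarchy: close the cached payload or
 * monitor dirfd and rmdir() the leaf directory (plus the separate limit
 * directory when cgroup dir isolation is in use).
 */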
static void cgroup_tree_leaf_remove(struct hierarchy *h, bool payload)
{
	__do_free char *full_path = NULL, *__limit_path = NULL;
	char *limit_path = NULL;

	if (payload) {
		__lxc_unused __do_close int fd = move_fd(h->cgfd_con);
		full_path = move_ptr(h->container_full_path);
		limit_path = move_ptr(h->container_limit_path);
		if (limit_path != full_path)
			__limit_path = limit_path;
	} else {
		__lxc_unused __do_close int fd = move_fd(h->cgfd_mon);
		full_path = move_ptr(h->monitor_full_path);
	}

	if (full_path && rmdir(full_path))
		SYSWARN("Failed to rmdir(\"%s\") cgroup", full_path);
	if (limit_path && rmdir(limit_path))
		SYSWARN("Failed to rmdir(\"%s\") cgroup", limit_path);
}

/*
 * Check that lxc.cgroup.dir is not set together with the more specific
 * lxc.cgroup.dir.payload/monitor keys, and that the payload and monitor
 * directories are either both set or both unset.
 *
 * Returns true if the configuration is valid, false otherwise.
 */
static bool check_cgroup_dir_config(struct lxc_conf *conf)
{
	const char *monitor_dir = conf->cgroup_meta.monitor_dir,
		   *container_dir = conf->cgroup_meta.container_dir,
		   *namespace_dir = conf->cgroup_meta.namespace_dir;

	/* none of the new options are set, all is fine */
	if (!monitor_dir && !container_dir && !namespace_dir)
		return true;

	/* some are set, make sure lxc.cgroup.dir is not also set */
	if (conf->cgroup_meta.dir)
		return log_error_errno(false, EINVAL,
				       "lxc.cgroup.dir conflicts with lxc.cgroup.dir.payload/monitor");

	/* make sure both monitor and payload are set */
	if (!monitor_dir || !container_dir)
		return log_error_errno(false, EINVAL,
				       "lxc.cgroup.dir.payload and lxc.cgroup.dir.monitor must both be set");

	/* namespace_dir may be empty */
	return true;
}

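/*
 * Create the monitor cgroup in all hierarchies. The name is derived from
 * lxc.cgroup.dir.monitor, lxc.cgroup.dir, or the cgroup pattern; on a name
 * clash a "-<idx>" suffix is appended and creation is retried up to 1000
 * times.
 */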
__cgfsng_ops static bool cgfsng_monitor_create(struct cgroup_ops *ops, struct lxc_handler *handler)
{
	__do_free char *monitor_cgroup = NULL, *__cgroup_tree = NULL;
	const char *cgroup_tree;
	int idx = 0;
	int i;
	size_t len;
	char *suffix = NULL;
	struct lxc_conf *conf;

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (ops->monitor_cgroup)
		return ret_set_errno(false, EEXIST);

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);

	conf = handler->conf;

	if (!check_cgroup_dir_config(conf))
		return false;

	if (conf->cgroup_meta.monitor_dir) {
		cgroup_tree = NULL;
		monitor_cgroup = strdup(conf->cgroup_meta.monitor_dir);
	} else if (conf->cgroup_meta.dir) {
		cgroup_tree = conf->cgroup_meta.dir;
		monitor_cgroup = must_concat(&len, conf->cgroup_meta.dir, "/",
					     DEFAULT_MONITOR_CGROUP_PREFIX,
					     handler->name,
					     CGROUP_CREATE_RETRY, NULL);
	} else if (ops->cgroup_pattern) {
		__cgroup_tree = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
		if (!__cgroup_tree)
			return ret_set_errno(false, ENOMEM);

		cgroup_tree = __cgroup_tree;
		monitor_cgroup = must_concat(&len, cgroup_tree, "/",
					     DEFAULT_MONITOR_CGROUP,
					     CGROUP_CREATE_RETRY, NULL);
	} else {
		cgroup_tree = NULL;
		monitor_cgroup = must_concat(&len, DEFAULT_MONITOR_CGROUP_PREFIX,
					     handler->name,
					     CGROUP_CREATE_RETRY, NULL);
	}
	if (!monitor_cgroup)
		return ret_set_errno(false, ENOMEM);

	if (!conf->cgroup_meta.monitor_dir) {
		suffix = monitor_cgroup + len - CGROUP_CREATE_RETRY_LEN;
		*suffix = '\0';
	}
	do {
		if (idx && suffix)
			sprintf(suffix, "-%d", idx);

		for (i = 0; ops->hierarchies[i]; i++) {
			if (cgroup_tree_create(ops, handler->conf,
					       ops->hierarchies[i], cgroup_tree,
					       monitor_cgroup, false, NULL))
				continue;

			DEBUG("Failed to create cgroup \"%s\"", ops->hierarchies[i]->monitor_full_path ?: "(null)");
			for (int j = 0; j < i; j++)
				cgroup_tree_leaf_remove(ops->hierarchies[j], false);

			idx++;
			break;
		}
	} while (ops->hierarchies[i] && idx > 0 && idx < 1000 && suffix);

	if (idx == 1000 || (!suffix && idx != 0))
		return log_error_errno(false, ERANGE, "Failed to create monitor cgroup");

	ops->monitor_cgroup = move_ptr(monitor_cgroup);
	return log_info(true, "The monitor process uses \"%s\" as cgroup", ops->monitor_cgroup);
}

/*
 * Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
 * next cgroup_pattern-1, -2, ..., -999.
 */
__cgfsng_ops static bool cgfsng_payload_create(struct cgroup_ops *ops, struct lxc_handler *handler)
{
	__do_free char *container_cgroup = NULL,
		       *__cgroup_tree = NULL,
		       *limiting_cgroup = NULL;
	const char *cgroup_tree;
	int idx = 0;
	int i;
	size_t len;
	char *suffix = NULL;
	struct lxc_conf *conf;

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (ops->container_cgroup)
		return ret_set_errno(false, EEXIST);

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);

	conf = handler->conf;

	if (!check_cgroup_dir_config(conf))
		return false;

	if (conf->cgroup_meta.container_dir) {
		cgroup_tree = NULL;

		limiting_cgroup = strdup(conf->cgroup_meta.container_dir);
		if (!limiting_cgroup)
			return ret_set_errno(false, ENOMEM);

		if (conf->cgroup_meta.namespace_dir) {
			container_cgroup = must_make_path(limiting_cgroup,
							  conf->cgroup_meta.namespace_dir,
							  NULL);
		} else {
			/* explicit paths but without isolation */
			container_cgroup = move_ptr(limiting_cgroup);
		}
	} else if (conf->cgroup_meta.dir) {
		cgroup_tree = conf->cgroup_meta.dir;
		container_cgroup = must_concat(&len, cgroup_tree, "/",
					       DEFAULT_PAYLOAD_CGROUP_PREFIX,
					       handler->name,
					       CGROUP_CREATE_RETRY, NULL);
	} else if (ops->cgroup_pattern) {
		__cgroup_tree = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
		if (!__cgroup_tree)
			return ret_set_errno(false, ENOMEM);

		cgroup_tree = __cgroup_tree;
		container_cgroup = must_concat(&len, cgroup_tree, "/",
					       DEFAULT_PAYLOAD_CGROUP,
					       CGROUP_CREATE_RETRY, NULL);
	} else {
		cgroup_tree = NULL;
		container_cgroup = must_concat(&len, DEFAULT_PAYLOAD_CGROUP_PREFIX,
					       handler->name,
					       CGROUP_CREATE_RETRY, NULL);
	}
	if (!container_cgroup)
		return ret_set_errno(false, ENOMEM);

	if (!conf->cgroup_meta.container_dir) {
		suffix = container_cgroup + len - CGROUP_CREATE_RETRY_LEN;
		*suffix = '\0';
	}
	do {
		if (idx && suffix)
			sprintf(suffix, "-%d", idx);

		for (i = 0; ops->hierarchies[i]; i++) {
			if (cgroup_tree_create(ops, handler->conf,
					       ops->hierarchies[i], cgroup_tree,
					       container_cgroup, true,
					       limiting_cgroup))
				continue;

			DEBUG("Failed to create cgroup \"%s\"", ops->hierarchies[i]->container_full_path ?: "(null)");
			for (int j = 0; j < i; j++)
				cgroup_tree_leaf_remove(ops->hierarchies[j], true);

			idx++;
			break;
		}
	} while (ops->hierarchies[i] && idx > 0 && idx < 1000 && suffix);

	if (idx == 1000 || (!suffix && idx != 0))
		return log_error_errno(false, ERANGE, "Failed to create container cgroup");

	ops->container_cgroup = move_ptr(container_cgroup);
	INFO("The container process uses \"%s\" as cgroup", ops->container_cgroup);
	return true;
}

__cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops,
					      struct lxc_handler *handler)
{
	int monitor_len, transient_len = 0;
	char monitor[INTTYPE_TO_STRLEN(pid_t)],
	     transient[INTTYPE_TO_STRLEN(pid_t)];

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (!ops->monitor_cgroup)
		return ret_set_errno(false, ENOENT);

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);

	monitor_len = snprintf(monitor, sizeof(monitor), "%d", handler->monitor_pid);
	if (handler->transient_pid > 0)
		transient_len = snprintf(transient, sizeof(transient), "%d", handler->transient_pid);

	for (int i = 0; ops->hierarchies[i]; i++) {
		struct hierarchy *h = ops->hierarchies[i];
		int ret;

		ret = lxc_writeat(h->cgfd_mon, "cgroup.procs", monitor, monitor_len);
		if (ret)
			return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->monitor_full_path);

		if (handler->transient_pid <= 0)
			return true;

		ret = lxc_writeat(h->cgfd_mon, "cgroup.procs", transient, transient_len);
		if (ret)
			return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->monitor_full_path);

		/*
		 * we don't keep the fds for non-unified hierarchies around
		 * mainly because we don't make use of them anymore after the
		 * core cgroup setup is done but also because there are quite a
		 * lot of them.
		 */
		if (!is_unified_hierarchy(h))
			close_prot_errno_disarm(h->cgfd_mon);
	}
	handler->transient_pid = -1;

	return true;
}

__cgfsng_ops static bool cgfsng_payload_enter(struct cgroup_ops *ops,
					      struct lxc_handler *handler)
{
	int len;
	char pidstr[INTTYPE_TO_STRLEN(pid_t)];

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (!ops->container_cgroup)
		return ret_set_errno(false, ENOENT);

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);

	len = snprintf(pidstr, sizeof(pidstr), "%d", handler->pid);

	for (int i = 0; ops->hierarchies[i]; i++) {
		struct hierarchy *h = ops->hierarchies[i];
		int ret;

		if (is_unified_hierarchy(h) && handler->clone_flags & CLONE_INTO_CGROUP)
			continue;

		ret = lxc_writeat(h->cgfd_con, "cgroup.procs", pidstr, len);
		if (ret != 0)
			return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->container_full_path);
	}

	return true;
}

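/* Combined fchownat() + fchmodat() helper. An empty @path operates on @dirfd
 * itself: AT_EMPTY_PATH makes fchownat() act on the directory, and the chmod
 * falls back to ".".
 */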
static int fchowmodat(int dirfd, const char *path, uid_t chown_uid,
		      gid_t chown_gid, mode_t chmod_mode)
{
	int ret;

	ret = fchownat(dirfd, path, chown_uid, chown_gid,
		       AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW);
	if (ret < 0)
		return log_warn_errno(-1,
				      errno, "Failed to fchownat(%d, %s, %d, %d, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )",
				      dirfd, path, (int)chown_uid,
				      (int)chown_gid);

	ret = fchmodat(dirfd, (*path != '\0') ? path : ".", chmod_mode, 0);
	if (ret < 0)
		return log_warn_errno(-1, errno, "Failed to fchmodat(%d, %s, %d, AT_SYMLINK_NOFOLLOW)",
				      dirfd, path, (int)chmod_mode);

	return 0;
}

/* chgrp the container cgroups to container group. We leave
 * the container owner as cgroup owner. So we must make the
 * directories 775 so that the container can create sub-cgroups.
 *
 * Also chown the tasks and cgroup.procs files. Those may not
 * exist depending on kernel version.
 */
static int chown_cgroup_wrapper(void *data)
{
	int ret;
	uid_t destuid;
	struct generic_userns_exec_data *arg = data;
	uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
	gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;

	if (!lxc_setgroups(0, NULL) && errno != EPERM)
		return log_error_errno(-1, errno, "Failed to setgroups(0, NULL)");

	ret = setresgid(nsgid, nsgid, nsgid);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to setresgid(%d, %d, %d)",
				       (int)nsgid, (int)nsgid, (int)nsgid);

	ret = setresuid(nsuid, nsuid, nsuid);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to setresuid(%d, %d, %d)",
				       (int)nsuid, (int)nsuid, (int)nsuid);

	destuid = get_ns_uid(arg->origuid);
	if (destuid == LXC_INVALID_UID)
		destuid = 0;

	for (int i = 0; arg->hierarchies[i]; i++) {
		int dirfd = arg->hierarchies[i]->cgfd_con;

		(void)fchowmodat(dirfd, "", destuid, nsgid, 0775);

		/*
		 * Failures to chown() these are inconvenient but not
		 * detrimental. We leave these owned by the container launcher,
		 * so that container root can write to the files to attach. We
		 * chmod() them 664 so that container systemd can write to the
		 * files (which systemd in wily insists on doing).
		 */

		if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC)
			(void)fchowmodat(dirfd, "tasks", destuid, nsgid, 0664);

		(void)fchowmodat(dirfd, "cgroup.procs", destuid, nsgid, 0664);

		if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
			continue;

		for (char **p = arg->hierarchies[i]->cgroup2_chown; p && *p; p++)
			(void)fchowmodat(dirfd, *p, destuid, nsgid, 0664);
	}

	return 0;
}

__cgfsng_ops static bool cgfsng_chown(struct cgroup_ops *ops,
				      struct lxc_conf *conf)
{
	struct generic_userns_exec_data wrap;

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (!ops->container_cgroup)
		return ret_set_errno(false, ENOENT);

	if (!conf)
		return ret_set_errno(false, EINVAL);

	if (lxc_list_empty(&conf->id_map))
		return true;

	wrap.origuid = geteuid();
	wrap.path = NULL;
	wrap.hierarchies = ops->hierarchies;
	wrap.conf = conf;

	if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap, "chown_cgroup_wrapper") < 0)
		return log_error_errno(false, errno, "Error requesting cgroup chown in new user namespace");

	return true;
}

__cgfsng_ops static void cgfsng_payload_finalize(struct cgroup_ops *ops)
{
	if (!ops)
		return;

	if (!ops->hierarchies)
		return;

	for (int i = 0; ops->hierarchies[i]; i++) {
		struct hierarchy *h = ops->hierarchies[i];
		/*
		 * we don't keep the fds for non-unified hierarchies around
		 * mainly because we don't make use of them anymore after the
		 * core cgroup setup is done but also because there are quite a
		 * lot of them.
		 */
		if (!is_unified_hierarchy(h))
			close_prot_errno_disarm(h->cgfd_con);
	}
}

/* cgroup-full:* is done, no need to create subdirs */
static inline bool cg_mount_needs_subdirs(int type)
{
	return !(type >= LXC_AUTO_CGROUP_FULL_RO);
}

/* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
 * remount controller ro if needed and bindmount the cgroupfs onto
 * controller/the/cg/path.
 */
static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
				       char *controllerpath, char *cgpath,
				       const char *container_cgroup)
{
	__do_free char *sourcepath = NULL;
	int ret, remount_flags;
	int flags = MS_BIND;

	if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
		ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
		if (ret < 0)
			return log_error_errno(-1, errno, "Failed to bind mount \"%s\" onto \"%s\"",
					       controllerpath, controllerpath);

		remount_flags = add_required_remount_flags(controllerpath,
							   controllerpath,
							   flags | MS_REMOUNT);
		ret = mount(controllerpath, controllerpath, "cgroup",
			    remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
			    NULL);
		if (ret < 0)
			return log_error_errno(-1, errno, "Failed to remount \"%s\" ro", controllerpath);

		INFO("Remounted %s read-only", controllerpath);
	}

	sourcepath = must_make_path(h->mountpoint, h->container_base_path,
				    container_cgroup, NULL);
	if (type == LXC_AUTO_CGROUP_RO)
		flags |= MS_RDONLY;

	ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to mount \"%s\" onto \"%s\"",
				       h->controllers[0], cgpath);
	INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);

	if (flags & MS_RDONLY) {
		remount_flags = add_required_remount_flags(sourcepath, cgpath,
							   flags | MS_REMOUNT);
		ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
		if (ret < 0)
			return log_error_errno(-1, errno, "Failed to remount \"%s\" ro", cgpath);
		INFO("Remounted %s read-only", cgpath);
	}

	INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
	return 0;
}

/* __cg_mount_direct
 *
 * Mount cgroup hierarchies directly without using bind-mounts. The main
 * use-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
 * cgroups for the LXC_AUTO_CGROUP_FULL option.
 */
static int __cg_mount_direct(int type, struct hierarchy *h,
			     const char *controllerpath)
{
	__do_free char *controllers = NULL;
	char *fstype = "cgroup2";
	unsigned long flags = 0;
	int ret;

	flags |= MS_NOSUID;
	flags |= MS_NOEXEC;
	flags |= MS_NODEV;
	flags |= MS_RELATIME;

	if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
		flags |= MS_RDONLY;

	if (h->version != CGROUP2_SUPER_MAGIC) {
		controllers = lxc_string_join(",", (const char **)h->controllers, false);
		if (!controllers)
			return -ENOMEM;
		fstype = "cgroup";
	}

	ret = mount("cgroup", controllerpath, fstype, flags, controllers);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to mount \"%s\" with cgroup filesystem type %s",
				       controllerpath, fstype);

	DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
	return 0;
}

static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
					       const char *controllerpath)
{
	return __cg_mount_direct(type, h, controllerpath);
}

static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
				       const char *controllerpath)
{
	if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
		return 0;

	return __cg_mount_direct(type, h, controllerpath);
}

__cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
				      struct lxc_handler *handler,
				      const char *root, int type)
{
	__do_free char *cgroup_root = NULL;
	bool has_cgns = false, wants_force_mount = false;
	int ret;

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);

	if ((type & LXC_AUTO_CGROUP_MASK) == 0)
		return true;

	if (type & LXC_AUTO_CGROUP_FORCE) {
		type &= ~LXC_AUTO_CGROUP_FORCE;
		wants_force_mount = true;
	}

	if (!wants_force_mount) {
		if (!lxc_list_empty(&handler->conf->keepcaps))
			wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
		else
			wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);

		/*
		 * Most recent distro versions currently have init systems
		 * that do support cgroup2 but do not mount it by default
		 * unless explicitly told so even if the host is cgroup2 only.
		 * That means they often will fail to boot. Fix this by
		 * pre-mounting cgroup2 by default. We will likely need to be
		 * doing this for a few years until all distros have switched
		 * over to cgroup2 at which point we can safely assume that
		 * their init systems will mount it themselves.
		 */
1850 if (pure_unified_layout(ops))
1851 wants_force_mount = true;
1852 }
1853
1854 has_cgns = cgns_supported();
1855 if (has_cgns && !wants_force_mount)
1856 return true;
1857
1858 if (type == LXC_AUTO_CGROUP_NOSPEC)
1859 type = LXC_AUTO_CGROUP_MIXED;
1860 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1861 type = LXC_AUTO_CGROUP_FULL_MIXED;
1862
1863 cgroup_root = must_make_path(root, DEFAULT_CGROUP_MOUNTPOINT, NULL);
1864 if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
1865 if (has_cgns && wants_force_mount) {
1866 /*
1867 * If cgroup namespaces are supported but the container
1868 * will not have CAP_SYS_ADMIN after it has started we
1869 * need to mount the cgroups manually.
1870 */
1871 return cg_mount_in_cgroup_namespace(type, ops->unified, cgroup_root) == 0;
1872 }
1873
1874 return cg_mount_cgroup_full(type, ops->unified, cgroup_root) == 0;
1875 }
1876
1877 /* mount tmpfs */
1878 ret = safe_mount_beneath(root, NULL, DEFAULT_CGROUP_MOUNTPOINT, "tmpfs",
1879 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1880 "size=10240k,mode=755");
1881 if (ret < 0) {
1882 if (errno != ENOSYS)
1883 return false;
1884
1885 ret = safe_mount(NULL, cgroup_root, "tmpfs",
1886 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1887 "size=10240k,mode=755", root);
1888 }
1889 if (ret < 0)
1890 return false;
1891
1892 for (int i = 0; ops->hierarchies[i]; i++) {
1893 __do_free char *controllerpath = NULL, *path2 = NULL;
1894 struct hierarchy *h = ops->hierarchies[i];
1895 char *controller = strrchr(h->mountpoint, '/');
1896
1897 if (!controller)
1898 continue;
1899 controller++;
1900
1901 controllerpath = must_make_path(cgroup_root, controller, NULL);
1902 if (dir_exists(controllerpath))
1903 continue;
1904
1905 ret = mkdir(controllerpath, 0755);
1906 if (ret < 0)
1907 return log_error_errno(false, errno, "Error creating cgroup path: %s", controllerpath);
1908
1909 if (has_cgns && wants_force_mount) {
1910 /* If cgroup namespaces are supported but the container
1911 * will not have CAP_SYS_ADMIN after it has started we
1912 * need to mount the cgroups manually.
1913 */
1914 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1915 if (ret < 0)
1916 return false;
1917
1918 continue;
1919 }
1920
1921 ret = cg_mount_cgroup_full(type, h, controllerpath);
1922 if (ret < 0)
1923 return false;
1924
1925 if (!cg_mount_needs_subdirs(type))
1926 continue;
1927
1928 path2 = must_make_path(controllerpath, h->container_base_path,
1929 ops->container_cgroup, NULL);
1930 ret = mkdir_p(path2, 0755);
1931 if (ret < 0)
1932 return false;
1933
1934 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1935 path2, ops->container_cgroup);
1936 if (ret < 0)
1937 return false;
1938 }
1939
1940 return true;
1941 }
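
/*
 * For reference (a sketch based on lxc.container.conf(5), not on anything in
 * this file): the @type flags handled above correspond to "lxc.mount.auto"
 * values, e.g. a bare "cgroup" maps to LXC_AUTO_CGROUP_NOSPEC (narrowed to
 * mixed above) and "cgroup:mixed:force" maps to
 * LXC_AUTO_CGROUP_MIXED | LXC_AUTO_CGROUP_FORCE, where the force bit skips
 * the early "cgroup namespaces suffice" return.
 */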
1942
1943 /* Only root needs to escape to the cgroup of its init. */
1944 __cgfsng_ops static bool cgfsng_escape(const struct cgroup_ops *ops,
1945 struct lxc_conf *conf)
1946 {
1947 if (!ops)
1948 return ret_set_errno(false, ENOENT);
1949
1950 if (!ops->hierarchies)
1951 return true;
1952
1953 if (!conf)
1954 return ret_set_errno(false, EINVAL);
1955
1956 if (conf->cgroup_meta.relative || geteuid())
1957 return true;
1958
1959 for (int i = 0; ops->hierarchies[i]; i++) {
1960 __do_free char *fullpath = NULL;
1961 int ret;
1962
1963 fullpath =
1964 must_make_path(ops->hierarchies[i]->mountpoint,
1965 ops->hierarchies[i]->container_base_path,
1966 "cgroup.procs", NULL);
1967 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1968 if (ret != 0)
1969 return log_error_errno(false, errno, "Failed to escape to cgroup \"%s\"", fullpath);
1970 }
1971
1972 return true;
1973 }
1974
1975 __cgfsng_ops static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1976 {
1977 int i = 0;
1978
1979 if (!ops)
1980 return ret_set_errno(-1, ENOENT);
1981
1982 if (!ops->hierarchies)
1983 return 0;
1984
1985 for (; ops->hierarchies[i]; i++)
1986 ;
1987
1988 return i;
1989 }
1990
1991 __cgfsng_ops static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n,
1992 char ***out)
1993 {
1994 int i;
1995
1996 if (!ops)
1997 return ret_set_errno(false, ENOENT);
1998
1999 if (!ops->hierarchies)
2000 return ret_set_errno(false, ENOENT);
2001
2002 /* Make sure all entries up to and including @n exist. */
2003 for (i = 0; i <= n; i++)
2004 if (!ops->hierarchies[i])
2005 return ret_set_errno(false, ENOENT);
2006 
2007 *out = ops->hierarchies[n]->controllers;
2008
2009 return true;
2010 }
2011
2012 static int cg_legacy_freeze(struct cgroup_ops *ops)
2013 {
2014 struct hierarchy *h;
2015
2016 h = get_hierarchy(ops, "freezer");
2017 if (!h)
2018 return ret_set_errno(-1, ENOENT);
2019
2020 return lxc_write_openat(h->container_full_path, "freezer.state",
2021 "FROZEN", STRLITERALLEN("FROZEN"));
2022 }
2023
2024 static int freezer_cgroup_events_cb(int fd, uint32_t events, void *cbdata,
2025 struct lxc_epoll_descr *descr)
2026 {
2027 __do_close int duped_fd = -EBADF;
2028 __do_free char *line = NULL;
2029 __do_fclose FILE *f = NULL;
2030 int state = PTR_TO_INT(cbdata);
2031 size_t len = 0;
2032 const char *state_string;
2033
2034 duped_fd = dup(fd);
2035 if (duped_fd < 0)
2036 return LXC_MAINLOOP_ERROR;
2037
2038 if (lseek(duped_fd, 0, SEEK_SET) < 0)
2039 return LXC_MAINLOOP_ERROR;
2040
2041 f = fdopen(duped_fd, "re");
2042 if (!f)
2043 return LXC_MAINLOOP_ERROR;
2044 move_fd(duped_fd);
2045
2046 if (state == 1)
2047 state_string = "frozen 1";
2048 else
2049 state_string = "frozen 0";
2050
2051 while (getline(&line, &len, f) != -1)
2052 if (strncmp(line, state_string, STRLITERALLEN("frozen") + 2) == 0)
2053 return LXC_MAINLOOP_CLOSE;
2054
2055 return LXC_MAINLOOP_CONTINUE;
2056 }
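
/*
 * For reference, the cgroup.events file polled above contains one
 * "<key> <value>" pair per line; on a recent kernel it looks like:
 *
 *     populated 1
 *     frozen 0
 *
 * Matching STRLITERALLEN("frozen") + 2 bytes therefore compares the key,
 * the separating space, and the one-character value in one go.
 */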
2057
2058 static int cg_unified_freeze_do(struct cgroup_ops *ops, int timeout,
2059 const char *state_string,
2060 int state_num,
2061 const char *epoll_error,
2062 const char *wait_error)
2063 {
2064 __do_close int fd = -EBADF;
2065 call_cleaner(lxc_mainloop_close) struct lxc_epoll_descr *descr_ptr = NULL;
2066 int ret;
2067 struct lxc_epoll_descr descr;
2068 struct hierarchy *h;
2069
2070 h = ops->unified;
2071 if (!h)
2072 return ret_set_errno(-1, ENOENT);
2073
2074 if (!h->container_full_path)
2075 return ret_set_errno(-1, EEXIST);
2076
2077 if (timeout != 0) {
2078 __do_free char *events_file = NULL;
2079
2080 events_file = must_make_path(h->container_full_path, "cgroup.events", NULL);
2081 fd = open(events_file, O_RDONLY | O_CLOEXEC);
2082 if (fd < 0)
2083 return log_error_errno(-1, errno, "Failed to open cgroup.events file");
2084
2085 ret = lxc_mainloop_open(&descr);
2086 if (ret)
2087 return log_error_errno(-1, errno, "%s", epoll_error);
2088
2089 /* automatically cleaned up now */
2090 descr_ptr = &descr;
2091
2092 ret = lxc_mainloop_add_handler_events(&descr, fd, EPOLLPRI, freezer_cgroup_events_cb, INT_TO_PTR(state_num));
2093 if (ret < 0)
2094 return log_error_errno(-1, errno, "Failed to add cgroup.events fd handler to mainloop");
2095 }
2096
2097 ret = lxc_write_openat(h->container_full_path, "cgroup.freeze", state_string, 1);
2098 if (ret < 0)
2099 return log_error_errno(-1, errno, "Failed to open cgroup.freeze file");
2100
2101 if (timeout != 0 && lxc_mainloop(&descr, timeout))
2102 return log_error_errno(-1, errno, "%s", wait_error);
2103
2104 return 0;
2105 }
2106
2107 static int cg_unified_freeze(struct cgroup_ops *ops, int timeout)
2108 {
2109 return cg_unified_freeze_do(ops, timeout, "1", 1,
2110 "Failed to create epoll instance to wait for container freeze",
2111 "Failed to wait for container to be frozen");
2112 }
2113
2114 __cgfsng_ops static int cgfsng_freeze(struct cgroup_ops *ops, int timeout)
2115 {
2116 if (!ops->hierarchies)
2117 return ret_set_errno(-1, ENOENT);
2118
2119 if (ops->cgroup_layout != CGROUP_LAYOUT_UNIFIED)
2120 return cg_legacy_freeze(ops);
2121
2122 return cg_unified_freeze(ops, timeout);
2123 }
2124
2125 static int cg_legacy_unfreeze(struct cgroup_ops *ops)
2126 {
2127 struct hierarchy *h;
2128
2129 h = get_hierarchy(ops, "freezer");
2130 if (!h)
2131 return ret_set_errno(-1, ENOENT);
2132
2133 return lxc_write_openat(h->container_full_path, "freezer.state",
2134 "THAWED", STRLITERALLEN("THAWED"));
2135 }
2136
2137 static int cg_unified_unfreeze(struct cgroup_ops *ops, int timeout)
2138 {
2139 return cg_unified_freeze_do(ops, timeout, "0", 0,
2140 "Failed to create epoll instance to wait for container unfreeze",
2141 "Failed to wait for container to be unfrozen");
2142 }
2143
2144 __cgfsng_ops static int cgfsng_unfreeze(struct cgroup_ops *ops, int timeout)
2145 {
2146 if (!ops->hierarchies)
2147 return ret_set_errno(-1, ENOENT);
2148
2149 if (ops->cgroup_layout != CGROUP_LAYOUT_UNIFIED)
2150 return cg_legacy_unfreeze(ops);
2151
2152 return cg_unified_unfreeze(ops, timeout);
2153 }
2154
2155 static const char *cgfsng_get_cgroup_do(struct cgroup_ops *ops,
2156 const char *controller, bool limiting)
2157 {
2158 struct hierarchy *h;
2159
2160 h = get_hierarchy(ops, controller);
2161 if (!h)
2162 return log_warn_errno(NULL, ENOENT, "Failed to find hierarchy for controller \"%s\"",
2163 controller ? controller : "(null)");
2164
2165 if (limiting)
2166 return h->container_limit_path
2167 ? h->container_limit_path + strlen(h->mountpoint)
2168 : NULL;
2169
2170 return h->container_full_path
2171 ? h->container_full_path + strlen(h->mountpoint)
2172 : NULL;
2173 }
2174
2175 __cgfsng_ops static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
2176 const char *controller)
2177 {
2178 return cgfsng_get_cgroup_do(ops, controller, false);
2179 }
2180
2181 __cgfsng_ops static const char *cgfsng_get_limiting_cgroup(struct cgroup_ops *ops,
2182 const char *controller)
2183 {
2184 return cgfsng_get_cgroup_do(ops, controller, true);
2185 }
2186
2187 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2188 * which must be freed by the caller.
2189 */
2190 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
2191 const char *inpath,
2192 const char *filename)
2193 {
2194 return must_make_path(h->mountpoint, inpath, filename, NULL);
2195 }
2196
2197 static int cgroup_attach_leaf(const struct lxc_conf *conf, int unified_fd, pid_t pid)
2198 {
2199 int idx = 1;
2200 int ret;
2201 char pidstr[INTTYPE_TO_STRLEN(int64_t) + 1];
2202 size_t pidstr_len;
2203
2204 /* Create leaf cgroup. */
2205 ret = mkdirat(unified_fd, ".lxc", 0755);
2206 if (ret < 0 && errno != EEXIST)
2207 return log_error_errno(-1, errno, "Failed to create leaf cgroup \".lxc\"");
2208
2209 pidstr_len = sprintf(pidstr, INT64_FMT, (int64_t)pid);
2210 ret = lxc_writeat(unified_fd, ".lxc/cgroup.procs", pidstr, pidstr_len);
2211 if (ret < 0)
2212 ret = lxc_writeat(unified_fd, "cgroup.procs", pidstr, pidstr_len);
2213 if (ret == 0)
2214 return 0;
2215
2216 /* this is a non-leaf node */
2217 if (errno != EBUSY)
2218 return log_error_errno(-1, errno, "Failed to attach to unified cgroup");
2219
2220 do {
2221 bool rm = false;
2222 char attach_cgroup[STRLITERALLEN(".lxc-/cgroup.procs") + INTTYPE_TO_STRLEN(int) + 1];
2223 char *slash = attach_cgroup;
2224
2225 ret = snprintf(attach_cgroup, sizeof(attach_cgroup), ".lxc-%d/cgroup.procs", idx);
2226 if (ret < 0 || (size_t)ret >= sizeof(attach_cgroup))
2227 return ret_errno(EIO);
2228
2229 /*
2230 * This shouldn't really happen but the compiler might complain
2231 * that a short write would cause a buffer overrun. So be on
2232 * the safe side.
2233 */
2234 if (ret < STRLITERALLEN(".lxc-/cgroup.procs"))
2235 return log_error_errno(-EINVAL, EINVAL, "Unexpected short write would cause buffer-overrun");
2236
2237 slash += (ret - STRLITERALLEN("/cgroup.procs"));
2238 *slash = '\0';
2239
2240 ret = mkdirat(unified_fd, attach_cgroup, 0755);
2241 if (ret < 0 && errno != EEXIST)
2242 return log_error_errno(-1, errno, "Failed to create cgroup %s", attach_cgroup);
2243 if (ret == 0)
2244 rm = true;
2245
2246 *slash = '/';
2247
2248 ret = lxc_writeat(unified_fd, attach_cgroup, pidstr, pidstr_len);
2249 if (ret == 0)
2250 return 0;
2251
2252 if (rm && unlinkat(unified_fd, attach_cgroup, AT_REMOVEDIR))
2253 SYSERROR("Failed to remove cgroup \"%d(%s)\"", unified_fd, attach_cgroup);
2254
2255 /* this is a non-leaf node */
2256 if (errno != EBUSY)
2257 return log_error_errno(-1, errno, "Failed to attach to unified cgroup");
2258
2259 idx++;
2260 } while (idx < 1000);
2261
2262 return log_error_errno(-1, errno, "Failed to attach to unified cgroup");
2263 }
2264
2265 static int cgroup_attach_create_leaf(const struct lxc_conf *conf,
2266 int unified_fd, int *sk_fd)
2267 {
2268 __do_close int sk = *sk_fd, target_fd0 = -EBADF, target_fd1 = -EBADF;
2269 int target_fds[2];
2270 ssize_t ret;
2271
2272 /* Create leaf cgroup. */
2273 ret = mkdirat(unified_fd, ".lxc", 0755);
2274 if (ret < 0 && errno != EEXIST)
2275 return log_error_errno(-1, errno, "Failed to create leaf cgroup \".lxc\"");
2276
2277 target_fd0 = openat(unified_fd, ".lxc/cgroup.procs", O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
2278 if (target_fd0 < 0)
2279 return log_error_errno(-errno, errno, "Failed to open \".lxc/cgroup.procs\"");
2280 target_fds[0] = target_fd0;
2281
2282 target_fd1 = openat(unified_fd, "cgroup.procs", O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
2283 if (target_fd1 < 0)
2284 return log_error_errno(-errno, errno, "Failed to open \"cgroup.procs\"");
2285 target_fds[1] = target_fd1;
2286
2287 ret = lxc_abstract_unix_send_fds(sk, target_fds, 2, NULL, 0);
2288 if (ret <= 0)
2289 return log_error_errno(-errno, errno, "Failed to send \".lxc/cgroup.procs\" fds %d and %d",
2290 target_fd0, target_fd1);
2291
2292 return log_debug(0, "Sent target cgroup fds %d and %d", target_fd0, target_fd1);
2293 }
2294
2295 static int cgroup_attach_move_into_leaf(const struct lxc_conf *conf,
2296 int *sk_fd, pid_t pid)
2297 {
2298 __do_close int sk = *sk_fd, target_fd0 = -EBADF, target_fd1 = -EBADF;
2299 int target_fds[2];
2300 char pidstr[INTTYPE_TO_STRLEN(int64_t) + 1];
2301 size_t pidstr_len;
2302 ssize_t ret;
2303
2304 ret = lxc_abstract_unix_recv_fds(sk, target_fds, 2, NULL, 0);
2305 if (ret <= 0)
2306 return log_error_errno(-1, errno, "Failed to receive target cgroup fd");
2307 target_fd0 = target_fds[0];
2308 target_fd1 = target_fds[1];
2309
2310 pidstr_len = sprintf(pidstr, INT64_FMT, (int64_t)pid);
2311
2312 ret = lxc_write_nointr(target_fd0, pidstr, pidstr_len);
2313 if (ret > 0 && ret == pidstr_len)
2314 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd0);
2315
2316 ret = lxc_write_nointr(target_fd1, pidstr, pidstr_len);
2317 if (ret > 0 && ret == pidstr_len)
2318 return log_debug(0, "Moved process into target cgroup via fd %d", target_fd1);
2319
2320 return log_debug_errno(-1, errno, "Failed to move process into target cgroup via fd %d and %d",
2321 target_fd0, target_fd1);
2322 }
2323
2324 struct userns_exec_unified_attach_data {
2325 const struct lxc_conf *conf;
2326 int unified_fd;
2327 int sk_pair[2];
2328 pid_t pid;
2329 };
2330
2331 static int cgroup_unified_attach_child_wrapper(void *data)
2332 {
2333 struct userns_exec_unified_attach_data *args = data;
2334
2335 if (!args->conf || args->unified_fd < 0 || args->pid <= 0 ||
2336 args->sk_pair[0] < 0 || args->sk_pair[1] < 0)
2337 return ret_errno(EINVAL);
2338
2339 close_prot_errno_disarm(args->sk_pair[0]);
2340 return cgroup_attach_create_leaf(args->conf, args->unified_fd,
2341 &args->sk_pair[1]);
2342 }
2343
2344 static int cgroup_unified_attach_parent_wrapper(void *data)
2345 {
2346 struct userns_exec_unified_attach_data *args = data;
2347
2348 if (!args->conf || args->unified_fd < 0 || args->pid <= 0 ||
2349 args->sk_pair[0] < 0 || args->sk_pair[1] < 0)
2350 return ret_errno(EINVAL);
2351
2352 close_prot_errno_disarm(args->sk_pair[1]);
2353 return cgroup_attach_move_into_leaf(args->conf, &args->sk_pair[0],
2354 args->pid);
2355 }
2356
2357 int cgroup_attach(const struct lxc_conf *conf, const char *name,
2358 const char *lxcpath, pid_t pid)
2359 {
2360 __do_close int unified_fd = -EBADF;
2361 int ret;
2362
2363 if (!conf || !name || !lxcpath || pid <= 0)
2364 return ret_errno(EINVAL);
2365
2366 unified_fd = lxc_cmd_get_cgroup2_fd(name, lxcpath);
2367 if (unified_fd < 0)
2368 return ret_errno(EBADF);
2369
2370 if (!lxc_list_empty(&conf->id_map)) {
2371 struct userns_exec_unified_attach_data args = {
2372 .conf = conf,
2373 .unified_fd = unified_fd,
2374 .pid = pid,
2375 };
2376
2377 ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, args.sk_pair);
2378 if (ret < 0)
2379 return -errno;
2380
2381 ret = userns_exec_minimal(conf,
2382 cgroup_unified_attach_parent_wrapper,
2383 &args,
2384 cgroup_unified_attach_child_wrapper,
2385 &args);
2386 } else {
2387 ret = cgroup_attach_leaf(conf, unified_fd, pid);
2388 }
2389
2390 return ret;
2391 }
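
/*
 * Usage sketch (illustrative only; the container name and lxcpath are made
 * up): moving an already-running process into a container's unified cgroup
 * from outside the container:
 *
 *     int ret;
 *
 *     ret = cgroup_attach(conf, "mycontainer", "/var/lib/lxc", pid);
 *     if (ret == -EBADF)
 *             ; // no cgroup2 fd was available from the command socket;
 *               // callers such as __cg_unified_attach() below then fall
 *               // back to lxc_cmd_get_cgroup_path()
 */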
2392
2393 /* Technically, we're always at a delegation boundary here (this is especially
2394 * true when cgroup namespaces are available). The reasoning is that in order
2395 * for us to have been able to start a container in the first place the root
2396 * cgroup must have been a leaf node. Now, either the container's init system
2397 * has populated the cgroup and kept it as a leaf node, or it has created
2398 * subtrees. In the former case we simply attach to the leaf node we created
2399 * when we started the container; in the latter case we create our own cgroup
2400 * for the attaching process.
2401 */
2402 static int __cg_unified_attach(const struct hierarchy *h,
2403 const struct lxc_conf *conf, const char *name,
2404 const char *lxcpath, pid_t pid,
2405 const char *controller)
2406 {
2407 __do_close int unified_fd = -EBADF;
2408 __do_free char *path = NULL, *cgroup = NULL;
2409 int ret;
2410
2411 if (!conf || !name || !lxcpath || pid <= 0)
2412 return ret_errno(EINVAL);
2413
2414 ret = cgroup_attach(conf, name, lxcpath, pid);
2415 if (ret == 0)
2416 return log_trace(0, "Attached to unified cgroup via command handler");
2417 if (ret != -EBADF)
2418 return log_error_errno(ret, errno, "Failed to attach to unified cgroup");
2419
2420 /* Fall back to retrieving the path for the unified cgroup. */
2421 cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2422 /* not running */
2423 if (!cgroup)
2424 return 0;
2425
2426 path = must_make_path(h->mountpoint, cgroup, NULL);
2427
2428 unified_fd = open(path, O_PATH | O_DIRECTORY | O_CLOEXEC);
2429 if (unified_fd < 0)
2430 return ret_errno(EBADF);
2431
2432 if (!lxc_list_empty(&conf->id_map)) {
2433 struct userns_exec_unified_attach_data args = {
2434 .conf = conf,
2435 .unified_fd = unified_fd,
2436 .pid = pid,
2437 };
2438
2439 ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, args.sk_pair);
2440 if (ret < 0)
2441 return -errno;
2442
2443 ret = userns_exec_minimal(conf,
2444 cgroup_unified_attach_parent_wrapper,
2445 &args,
2446 cgroup_unified_attach_child_wrapper,
2447 &args);
2448 } else {
2449 ret = cgroup_attach_leaf(conf, unified_fd, pid);
2450 }
2451
2452 return ret;
2453 }
2454
2455 __cgfsng_ops static bool cgfsng_attach(struct cgroup_ops *ops,
2456 const struct lxc_conf *conf,
2457 const char *name, const char *lxcpath,
2458 pid_t pid)
2459 {
2460 int len, ret;
2461 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
2462
2463 if (!ops)
2464 return ret_set_errno(false, ENOENT);
2465
2466 if (!ops->hierarchies)
2467 return true;
2468
2469 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
2470 if (len < 0 || (size_t)len >= sizeof(pidstr))
2471 return false;
2472
2473 for (int i = 0; ops->hierarchies[i]; i++) {
2474 __do_free char *fullpath = NULL, *path = NULL;
2475 struct hierarchy *h = ops->hierarchies[i];
2476
2477 if (h->version == CGROUP2_SUPER_MAGIC) {
2478 ret = __cg_unified_attach(h, conf, name, lxcpath, pid,
2479 h->controllers[0]);
2480 if (ret < 0)
2481 return false;
2482
2483 continue;
2484 }
2485
2486 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
2487 /* not running */
2488 if (!path)
2489 return false;
2490
2491 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2492 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2493 if (ret < 0)
2494 return log_error_errno(false, errno, "Failed to attach %d to %s",
2495 (int)pid, fullpath);
2496 }
2497
2498 return true;
2499 }
2500
2501 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2502 * don't have a cgroup_data set up, so we ask the running container through the
2503 * commands API for the cgroup path.
2504 */
2505 __cgfsng_ops static int cgfsng_get(struct cgroup_ops *ops, const char *filename,
2506 char *value, size_t len, const char *name,
2507 const char *lxcpath)
2508 {
2509 __do_free char *path = NULL;
2510 __do_free char *controller = NULL;
2511 char *p;
2512 struct hierarchy *h;
2513 int ret = -1;
2514
2515 if (!ops)
2516 return ret_set_errno(-1, ENOENT);
2517
2518 controller = must_copy_string(filename);
2519 p = strchr(controller, '.');
2520 if (p)
2521 *p = '\0';
2522
2523 path = lxc_cmd_get_limiting_cgroup_path(name, lxcpath, controller);
2524 /* not running */
2525 if (!path)
2526 return -1;
2527
2528 h = get_hierarchy(ops, controller);
2529 if (h) {
2530 __do_free char *fullpath = NULL;
2531
2532 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2533 ret = lxc_read_from_file(fullpath, value, len);
2534 }
2535
2536 return ret;
2537 }
2538
2539 static int device_cgroup_parse_access(struct device_item *device, const char *val)
2540 {
2541 for (int count = 0; count < 3; count++, val++) {
2542 switch (*val) {
2543 case 'r':
2544 device->access[count] = *val;
2545 break;
2546 case 'w':
2547 device->access[count] = *val;
2548 break;
2549 case 'm':
2550 device->access[count] = *val;
2551 break;
2552 case '\n':
2553 case '\0':
2554 count = 3;
2555 break;
2556 default:
2557 return ret_errno(EINVAL);
2558 }
2559 }
2560
2561 return 0;
2562 }
2563
2564 static int device_cgroup_rule_parse(struct device_item *device, const char *key,
2565 const char *val)
2566 {
2567 int count, ret;
2568 char temp[50];
2569
2570 if (strcmp("devices.allow", key) == 0)
2571 device->allow = 1;
2572 else
2573 device->allow = 0;
2574
2575 if (strcmp(val, "a") == 0) {
2576 /* global rule */
2577 device->type = 'a';
2578 device->major = -1;
2579 device->minor = -1;
2580 device->global_rule = device->allow
2581 ? LXC_BPF_DEVICE_CGROUP_DENYLIST
2582 : LXC_BPF_DEVICE_CGROUP_ALLOWLIST;
2583 device->allow = -1;
2584 return 0;
2585 }
2586
2587 /* local rule */
2588 device->global_rule = LXC_BPF_DEVICE_CGROUP_LOCAL_RULE;
2589
2590 switch (*val) {
2591 case 'a':
2592 __fallthrough;
2593 case 'b':
2594 __fallthrough;
2595 case 'c':
2596 device->type = *val;
2597 break;
2598 default:
2599 return -1;
2600 }
2601
2602 val++;
2603 if (!isspace(*val))
2604 return -1;
2605 val++;
2606 if (*val == '*') {
2607 device->major = -1;
2608 val++;
2609 } else if (isdigit(*val)) {
2610 memset(temp, 0, sizeof(temp));
2611 for (count = 0; count < sizeof(temp) - 1; count++) {
2612 temp[count] = *val;
2613 val++;
2614 if (!isdigit(*val))
2615 break;
2616 }
2617 ret = lxc_safe_int(temp, &device->major);
2618 if (ret)
2619 return -1;
2620 } else {
2621 return -1;
2622 }
2623 if (*val != ':')
2624 return -1;
2625 val++;
2626
2627 /* read minor */
2628 if (*val == '*') {
2629 device->minor = -1;
2630 val++;
2631 } else if (isdigit(*val)) {
2632 memset(temp, 0, sizeof(temp));
2633 for (count = 0; count < sizeof(temp) - 1; count++) {
2634 temp[count] = *val;
2635 val++;
2636 if (!isdigit(*val))
2637 break;
2638 }
2639 ret = lxc_safe_int(temp, &device->minor);
2640 if (ret)
2641 return -1;
2642 } else {
2643 return -1;
2644 }
2645 if (!isspace(*val))
2646 return -1;
2647
2648 return device_cgroup_parse_access(device, ++val);
2649 }
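
/*
 * Example (illustrative only): a config line such as
 *
 *     lxc.cgroup2.devices.allow = c 1:3 rwm
 *
 * reaches this parser as key "devices.allow" and val "c 1:3 rwm" and yields
 * roughly:
 *
 *     struct device_item dev = {};
 *     device_cgroup_rule_parse(&dev, "devices.allow", "c 1:3 rwm");
 *     // dev.allow == 1, dev.type == 'c', dev.major == 1, dev.minor == 3,
 *     // dev.access == "rwm",
 *     // dev.global_rule == LXC_BPF_DEVICE_CGROUP_LOCAL_RULE
 */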
2650
2651 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2652 * don't have a cgroup_data set up, so we ask the running container through the
2653 * commands API for the cgroup path.
2654 */
2655 __cgfsng_ops static int cgfsng_set(struct cgroup_ops *ops,
2656 const char *key, const char *value,
2657 const char *name, const char *lxcpath)
2658 {
2659 __do_free char *path = NULL;
2660 __do_free char *controller = NULL;
2661 char *p;
2662 struct hierarchy *h;
2663 int ret = -1;
2664
2665 if (!ops)
2666 return ret_set_errno(-1, ENOENT);
2667
2668 controller = must_copy_string(key);
2669 p = strchr(controller, '.');
2670 if (p)
2671 *p = '\0';
2672
2673 if (pure_unified_layout(ops) && strcmp(controller, "devices") == 0) {
2674 struct device_item device = {};
2675
2676 ret = device_cgroup_rule_parse(&device, key, value);
2677 if (ret < 0)
2678 return log_error_errno(-1, EINVAL, "Failed to parse device string %s=%s",
2679 key, value);
2680
2681 ret = lxc_cmd_add_bpf_device_cgroup(name, lxcpath, &device);
2682 if (ret < 0)
2683 return -1;
2684
2685 return 0;
2686 }
2687
2688 path = lxc_cmd_get_limiting_cgroup_path(name, lxcpath, controller);
2689 /* not running */
2690 if (!path)
2691 return -1;
2692
2693 h = get_hierarchy(ops, controller);
2694 if (h) {
2695 __do_free char *fullpath = NULL;
2696
2697 fullpath = build_full_cgpath_from_monitorpath(h, path, key);
2698 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2699 }
2700
2701 return ret;
2702 }
2703
2704 /* Take a devices cgroup line of the form
2705 * /dev/foo rwx
2706 * and convert it to a valid
2707 * type major:minor mode
2708 * line. Return <0 on error. @dest is a preallocated buffer long enough to
2709 * hold the output.
2710 */
2711 static int device_cgroup_rule_parse_devpath(struct device_item *device,
2712 const char *devpath)
2713 {
2714 __do_free char *path = NULL;
2715 char *mode = NULL;
2716 int n_parts, ret;
2717 char *p;
2718 struct stat sb;
2719
2720 path = must_copy_string(devpath);
2721
2722 /*
2723 * Read path followed by mode. Ignore any trailing text.
2724 * A ' # comment' would be legal. Technically other text is not
2725 * legal, we could check for that if we cared to.
2726 */
2727 for (n_parts = 1, p = path; *p; p++) {
2728 if (*p != ' ')
2729 continue;
2730 *p = '\0';
2731
2732 if (n_parts != 1)
2733 break;
2734 p++;
2735 n_parts++;
2736
2737 while (*p == ' ')
2738 p++;
2739
2740 mode = p;
2741
2742 if (*p == '\0')
2743 return ret_set_errno(-1, EINVAL);
2744 }
2745
2746 if (!mode)
2747 return ret_errno(EINVAL);
2748
2749 if (device_cgroup_parse_access(device, mode) < 0)
2750 return -1;
2751
2752 ret = stat(path, &sb);
2753 if (ret < 0)
2754 return ret_set_errno(-1, errno);
2755
2756 mode_t m = sb.st_mode & S_IFMT;
2757 switch (m) {
2758 case S_IFBLK:
2759 device->type = 'b';
2760 break;
2761 case S_IFCHR:
2762 device->type = 'c';
2763 break;
2764 default:
2765 return log_error_errno(-1, EINVAL, "Unsupported device type %i for \"%s\"", m, path);
2766 }
2767
2768 device->major = MAJOR(sb.st_rdev);
2769 device->minor = MINOR(sb.st_rdev);
2770 device->allow = 1;
2771 device->global_rule = LXC_BPF_DEVICE_CGROUP_LOCAL_RULE;
2772
2773 return 0;
2774 }
2775
2776 static int convert_devpath(const char *invalue, char *dest)
2777 {
2778 struct device_item device = {};
2779 int ret;
2780
2781 ret = device_cgroup_rule_parse_devpath(&device, invalue);
2782 if (ret < 0)
2783 return -1;
2784
2785 ret = snprintf(dest, 50, "%c %d:%d %s", device.type, device.major,
2786 device.minor, device.access);
2787 if (ret < 0 || ret >= 50)
2788 return log_error_errno(-1, ENAMETOOLONG, "Error on configuration value \"%c %d:%d %s\" (max 50 chars)",
2789 device.type, device.major, device.minor, device.access);
2790
2791 return 0;
2792 }
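
/*
 * Example (illustrative only): for the line "/dev/null rwm" this stats
 * /dev/null, a character device with major:minor 1:3, and produces:
 *
 *     char buf[50];
 *     if (convert_devpath("/dev/null rwm", buf) == 0)
 *             ; // buf now holds "c 1:3 rwm"
 */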
2793
2794 /* Called from setup_limits - here we have the container's cgroup_data because
2795 * we created the cgroups.
2796 */
2797 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2798 const char *value, bool is_cpuset)
2799 {
2800 __do_free char *controller = NULL;
2801 char *p;
2802 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2803 char converted_value[50];
2804 struct hierarchy *h;
2805
2806 controller = must_copy_string(filename);
2807 p = strchr(controller, '.');
2808 if (p)
2809 *p = '\0';
2810
2811 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2812 int ret;
2813
2814 ret = convert_devpath(value, converted_value);
2815 if (ret < 0)
2816 return ret;
2817 value = converted_value;
2818 }
2819
2820 h = get_hierarchy(ops, controller);
2821 if (!h)
2822 return log_error_errno(-ENOENT, ENOENT, "Failed to setup limits for the \"%s\" controller. The controller seems to be unused by \"cgfsng\" cgroup driver or not enabled on the cgroup hierarchy", controller);
2823
2824 if (is_cpuset) {
2825 int ret = lxc_write_openat(h->container_full_path, filename, value, strlen(value));
2826 if (ret)
2827 return ret;
2828 }
2829 return lxc_write_openat(h->container_limit_path, filename, value, strlen(value));
2830 }
2831
2832 __cgfsng_ops static bool cgfsng_setup_limits_legacy(struct cgroup_ops *ops,
2833 struct lxc_conf *conf,
2834 bool do_devices)
2835 {
2836 __do_free struct lxc_list *sorted_cgroup_settings = NULL;
2837 struct lxc_list *cgroup_settings = &conf->cgroup;
2838 struct lxc_list *iterator, *next;
2839 struct lxc_cgroup *cg;
2840 bool ret = false;
2841
2842 if (!ops)
2843 return ret_set_errno(false, ENOENT);
2844
2845 if (!conf)
2846 return ret_set_errno(false, EINVAL);
2847
2848 cgroup_settings = &conf->cgroup;
2849 if (lxc_list_empty(cgroup_settings))
2850 return true;
2851
2852 if (!ops->hierarchies)
2853 return ret_set_errno(false, EINVAL);
2854
2855 if (pure_unified_layout(ops))
2856 return log_warn_errno(true, EINVAL, "Ignoring legacy cgroup limits on pure cgroup2 system");
2857
2858 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2859 if (!sorted_cgroup_settings)
2860 return false;
2861
2862 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2863 cg = iterator->elem;
2864
2865 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2866 if (cg_legacy_set_data(ops, cg->subsystem, cg->value, strncmp("cpuset", cg->subsystem, 6) == 0)) {
2867 if (do_devices && (errno == EACCES || errno == EPERM)) {
2868 SYSWARN("Failed to set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2869 continue;
2870 }
2871 SYSERROR("Failed to set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2872 goto out;
2873 }
2874 DEBUG("Set controller \"%s\" set to \"%s\"", cg->subsystem, cg->value);
2875 }
2876 }
2877
2878 ret = true;
2879 INFO("Limits for the legacy cgroup hierarchies have been setup");
2880 out:
2881 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2882 lxc_list_del(iterator);
2883 free(iterator);
2884 }
2885
2886 return ret;
2887 }
2888
2889 /*
2890 * Some of the parsing logic comes from the original cgroup device v1
2891 * implementation in the kernel.
2892 */
2893 static int bpf_device_cgroup_prepare(struct cgroup_ops *ops,
2894 struct lxc_conf *conf, const char *key,
2895 const char *val)
2896 {
2897 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
2898 struct device_item device_item = {};
2899 int ret;
2900
2901 if (strcmp("devices.allow", key) == 0 && *val == '/')
2902 ret = device_cgroup_rule_parse_devpath(&device_item, val);
2903 else
2904 ret = device_cgroup_rule_parse(&device_item, key, val);
2905 if (ret < 0)
2906 return log_error_errno(-1, EINVAL, "Failed to parse device string %s=%s", key, val);
2907
2908 ret = bpf_list_add_device(conf, &device_item);
2909 if (ret < 0)
2910 return -1;
2911 #endif
2912 return 0;
2913 }
2914
2915 __cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
2916 struct lxc_handler *handler)
2917 {
2918 struct lxc_list *cgroup_settings, *iterator;
2919 struct hierarchy *h;
2920 struct lxc_conf *conf;
2921
2922 if (!ops)
2923 return ret_set_errno(false, ENOENT);
2924
2925 if (!ops->hierarchies)
2926 return true;
2927
2928 if (!ops->container_cgroup)
2929 return ret_set_errno(false, EINVAL);
2930
2931 if (!handler || !handler->conf)
2932 return ret_set_errno(false, EINVAL);
2933 conf = handler->conf;
2934
2935 cgroup_settings = &conf->cgroup2;
2936 if (lxc_list_empty(cgroup_settings))
2937 return true;
2938
2939 if (!pure_unified_layout(ops))
2940 return log_warn_errno(true, EINVAL, "Ignoring cgroup2 limits on legacy cgroup system");
2941
2942 if (!ops->unified)
2943 return false;
2944 h = ops->unified;
2945
2946 lxc_list_for_each (iterator, cgroup_settings) {
2947 struct lxc_cgroup *cg = iterator->elem;
2948 int ret;
2949
2950 if (strncmp("devices", cg->subsystem, 7) == 0)
2951 ret = bpf_device_cgroup_prepare(ops, conf, cg->subsystem, cg->value);
2952 else
2953 ret = lxc_write_openat(h->container_limit_path, cg->subsystem, cg->value, strlen(cg->value));
2954 if (ret < 0)
2955 return log_error_errno(false, errno, "Failed to set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2956
2957 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2958 }
2959
2960 return log_info(true, "Limits for the unified cgroup hierarchy have been setup");
2961 }
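
/*
 * Example (illustrative only): a config line such as
 *
 *     lxc.cgroup2.memory.max = 512M
 *
 * arrives here as cg->subsystem == "memory.max" and cg->value == "512M" and
 * is written verbatim to the container's memory.max file; only "devices"
 * keys take the bpf path above.
 */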
2962
2963 __cgfsng_ops static bool cgfsng_devices_activate(struct cgroup_ops *ops, struct lxc_handler *handler)
2964 {
2965 #ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
2966 __do_bpf_program_free struct bpf_program *devices = NULL;
2967 int ret;
2968 struct lxc_conf *conf;
2969 struct hierarchy *unified;
2970 struct lxc_list *it;
2971 struct bpf_program *devices_old;
2972
2973 if (!ops)
2974 return ret_set_errno(false, ENOENT);
2975
2976 if (!ops->hierarchies)
2977 return true;
2978
2979 if (!ops->container_cgroup)
2980 return ret_set_errno(false, EEXIST);
2981
2982 if (!handler || !handler->conf)
2983 return ret_set_errno(false, EINVAL);
2984 conf = handler->conf;
2985
2986 unified = ops->unified;
2987 if (!unified || !unified->bpf_device_controller ||
2988 !unified->container_full_path || lxc_list_empty(&conf->devices))
2989 return true;
2990
2991 devices = bpf_program_new(BPF_PROG_TYPE_CGROUP_DEVICE);
2992 if (!devices)
2993 return log_error_errno(false, ENOMEM, "Failed to create new bpf program");
2994
2995 ret = bpf_program_init(devices);
2996 if (ret)
2997 return log_error_errno(false, ENOMEM, "Failed to initialize bpf program");
2998
2999 lxc_list_for_each(it, &conf->devices) {
3000 struct device_item *cur = it->elem;
3001
3002 ret = bpf_program_append_device(devices, cur);
3003 if (ret)
3004 return log_error_errno(false, ENOMEM, "Failed to add new rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
3005 cur->type,
3006 cur->major,
3007 cur->minor,
3008 cur->access,
3009 cur->allow,
3010 cur->global_rule);
3011 TRACE("Added rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
3012 cur->type,
3013 cur->major,
3014 cur->minor,
3015 cur->access,
3016 cur->allow,
3017 cur->global_rule);
3018 }
3019
3020 ret = bpf_program_finalize(devices);
3021 if (ret)
3022 return log_error_errno(false, ENOMEM, "Failed to finalize bpf program");
3023
3024 ret = bpf_program_cgroup_attach(devices, BPF_CGROUP_DEVICE,
3025 unified->container_limit_path,
3026 BPF_F_ALLOW_MULTI);
3027 if (ret)
3028 return log_error_errno(false, ENOMEM, "Failed to attach bpf program");
3029
3030 /* Replace old bpf program. */
3031 devices_old = move_ptr(ops->cgroup2_devices);
3032 ops->cgroup2_devices = move_ptr(devices);
3033 devices = move_ptr(devices_old);
3034 #endif
3035 return true;
3036 }
3037
3038 static bool __cgfsng_delegate_controllers(struct cgroup_ops *ops, const char *cgroup)
3039 {
3040 __do_free char *add_controllers = NULL, *base_path = NULL;
3041 __do_free_string_list char **parts = NULL;
3042 struct hierarchy *unified = ops->unified;
3043 ssize_t parts_len;
3044 char **it;
3045 size_t full_len = 0;
3046
3047 if (!ops->hierarchies || !pure_unified_layout(ops) ||
3048 !unified->controllers[0])
3049 return true;
3050
3051 /* For now we simply enable all controllers that we have detected by
3052 * creating a string like "+memory +pids +cpu +io".
3053 * TODO: In the near future we might want to support "-<controller>"
3054 * etc. but whether supporting semantics like this make sense will need
3055 * some thinking.
3056 */
3057 for (it = unified->controllers; it && *it; it++) {
3058 full_len += strlen(*it) + 2;
3059 add_controllers = must_realloc(add_controllers, full_len + 1);
3060
3061 if (unified->controllers[0] == *it)
3062 add_controllers[0] = '\0';
3063
3064 (void)strlcat(add_controllers, "+", full_len + 1);
3065 (void)strlcat(add_controllers, *it, full_len + 1);
3066
3067 if ((it + 1) && *(it + 1))
3068 (void)strlcat(add_controllers, " ", full_len + 1);
3069 }
3070
3071 parts = lxc_string_split(cgroup, '/');
3072 if (!parts)
3073 return false;
3074
3075 parts_len = lxc_array_len((void **)parts);
3076 if (parts_len > 0)
3077 parts_len--;
3078
3079 base_path = must_make_path(unified->mountpoint, unified->container_base_path, NULL);
3080 for (ssize_t i = -1; i < parts_len; i++) {
3081 int ret;
3082 __do_free char *target = NULL;
3083
3084 if (i >= 0)
3085 base_path = must_append_path(base_path, parts[i], NULL);
3086 target = must_make_path(base_path, "cgroup.subtree_control", NULL);
3087 ret = lxc_writeat(-1, target, add_controllers, full_len);
3088 if (ret < 0)
3089 return log_error_errno(false, errno, "Could not enable \"%s\" controllers in the unified cgroup \"%s\"",
3090 add_controllers, target);
3091 TRACE("Enable \"%s\" controllers in the unified cgroup \"%s\"", add_controllers, target);
3092 }
3093
3094 return true;
3095 }
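
/*
 * Example (illustrative only): with cpu, io, memory, and pids delegated and
 * @cgroup == "lxc.payload.c1/a", add_controllers is the string
 * "+cpu +io +memory +pids" and it is written to cgroup.subtree_control in
 * the base cgroup and in "lxc.payload.c1", i.e. in every ancestor of the new
 * cgroup but not in the leaf itself, since a controller is enabled for a
 * cgroup via its parent's subtree_control file.
 */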
3096
3097 __cgfsng_ops static bool cgfsng_monitor_delegate_controllers(struct cgroup_ops *ops)
3098 {
3099 if (!ops)
3100 return ret_set_errno(false, ENOENT);
3101
3102 return __cgfsng_delegate_controllers(ops, ops->monitor_cgroup);
3103 }
3104
3105 __cgfsng_ops static bool cgfsng_payload_delegate_controllers(struct cgroup_ops *ops)
3106 {
3107 if (!ops)
3108 return ret_set_errno(false, ENOENT);
3109
3110 return __cgfsng_delegate_controllers(ops, ops->container_cgroup);
3111 }
3112
3113 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
3114 char **controllers)
3115 {
3116 if (!ops->cgroup_use)
3117 return true;
3118
3119 for (char **cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
3120 bool found = false;
3121
3122 for (char **cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
3123 if (strcmp(*cur_use, *cur_ctrl) != 0)
3124 continue;
3125
3126 found = true;
3127 break;
3128 }
3129
3130 if (found)
3131 continue;
3132
3133 return false;
3134 }
3135
3136 return true;
3137 }
3138
3139 static void cg_unified_delegate(char ***delegate)
3140 {
3141 __do_free char *buf = NULL;
3142 char *standard[] = {"cgroup.subtree_control", "cgroup.threads", NULL};
3143 char *token;
3144 int idx;
3145
3146 buf = read_file("/sys/kernel/cgroup/delegate");
3147 if (!buf) {
3148 for (char **p = standard; p && *p; p++) {
3149 idx = append_null_to_list((void ***)delegate);
3150 (*delegate)[idx] = must_copy_string(*p);
3151 }
3152 SYSWARN("Failed to read /sys/kernel/cgroup/delegate");
3153 return;
3154 }
3155
3156 lxc_iterate_parts(token, buf, " \t\n") {
3157 /*
3158 * We always need to chown this for both cgroup and
3159 * cgroup2.
3160 */
3161 if (strcmp(token, "cgroup.procs") == 0)
3162 continue;
3163
3164 idx = append_null_to_list((void ***)delegate);
3165 (*delegate)[idx] = must_copy_string(token);
3166 }
3167 }
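
/*
 * On a recent kernel /sys/kernel/cgroup/delegate typically contains
 * something like (the exact set varies with the kernel version):
 *
 *     cgroup.procs
 *     cgroup.threads
 *     cgroup.subtree_control
 *     memory.oom.group
 *
 * cgroup.procs is filtered out above because it is always chowned for both
 * cgroup and cgroup2 hierarchies.
 */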
3168
3169 /* At startup, parse_hierarchies finds all the info we need about cgroup
3170 * mountpoints and current cgroups, and stores it in @d.
3171 */
3172 static int cg_hybrid_init(struct cgroup_ops *ops, bool relative, bool unprivileged)
3173 {
3174 __do_free char *basecginfo = NULL, *line = NULL;
3175 __do_free_string_list char **klist = NULL, **nlist = NULL;
3176 __do_fclose FILE *f = NULL;
3177 int ret;
3178 size_t len = 0;
3179
3180 /* Root spawned containers escape the current cgroup, so use init's
3181 * cgroups as our base in that case.
3182 */
3183 if (!relative && (geteuid() == 0))
3184 basecginfo = read_file("/proc/1/cgroup");
3185 else
3186 basecginfo = read_file("/proc/self/cgroup");
3187 if (!basecginfo)
3188 return ret_set_errno(-1, ENOMEM);
3189
3190 ret = get_existing_subsystems(&klist, &nlist);
3191 if (ret < 0)
3192 return log_error_errno(-1, errno, "Failed to retrieve available legacy cgroup controllers");
3193
3194 f = fopen("/proc/self/mountinfo", "re");
3195 if (!f)
3196 return log_error_errno(-1, errno, "Failed to open \"/proc/self/mountinfo\"");
3197
3198 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
3199
3200 while (getline(&line, &len, f) != -1) {
3201 __do_free char *base_cgroup = NULL, *mountpoint = NULL;
3202 __do_free_string_list char **controller_list = NULL;
3203 int type;
3204 bool writeable;
3205 struct hierarchy *new;
3206
3207 type = get_cgroup_version(line);
3208 if (type == 0)
3209 continue;
3210
3211 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
3212 continue;
3213
3214 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
3215 if (type == CGROUP2_SUPER_MAGIC)
3216 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
3217 else if (type == CGROUP_SUPER_MAGIC)
3218 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
3219 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
3220 if (type == CGROUP_SUPER_MAGIC)
3221 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
3222 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
3223 if (type == CGROUP2_SUPER_MAGIC)
3224 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
3225 }
3226
3227 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
3228 if (!controller_list && type == CGROUP_SUPER_MAGIC)
3229 continue;
3230
3231 if (type == CGROUP_SUPER_MAGIC)
3232 if (controller_list_is_dup(ops->hierarchies, controller_list)) {
3233 TRACE("Skipping duplicating controller");
3234 continue;
3235 }
3236
3237 mountpoint = cg_hybrid_get_mountpoint(line);
3238 if (!mountpoint) {
3239 WARN("Failed parsing mountpoint from \"%s\"", line);
3240 continue;
3241 }
3242
3243 if (type == CGROUP_SUPER_MAGIC)
3244 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
3245 else
3246 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
3247 if (!base_cgroup) {
3248 WARN("Failed to find current cgroup");
3249 continue;
3250 }
3251
3252 trim(base_cgroup);
3253 prune_init_scope(base_cgroup);
3254 if (type == CGROUP2_SUPER_MAGIC)
3255 writeable = test_writeable_v2(mountpoint, base_cgroup);
3256 else
3257 writeable = test_writeable_v1(mountpoint, base_cgroup);
3258 if (!writeable) {
3259 TRACE("The %s group is not writeable", base_cgroup);
3260 continue;
3261 }
3262
3263 if (type == CGROUP2_SUPER_MAGIC) {
3264 char *cgv2_ctrl_path;
3265
3266 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
3267 "cgroup.controllers",
3268 NULL);
3269
3270 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
3271 free(cgv2_ctrl_path);
3272 if (!controller_list) {
3273 controller_list = cg_unified_make_empty_controller();
3274 TRACE("No controllers are enabled for "
3275 "delegation in the unified hierarchy");
3276 }
3277 }
3278
3279 /* Exclude all controllers that lxc.cgroup.use does not want. */
3280 if (!cgroup_use_wants_controllers(ops, controller_list)) {
3281 TRACE("Skipping controller");
3282 continue;
3283 }
3284
3285 new = add_hierarchy(&ops->hierarchies, move_ptr(controller_list), move_ptr(mountpoint), move_ptr(base_cgroup), type);
3286 if (type == CGROUP2_SUPER_MAGIC && !ops->unified) {
3287 if (unprivileged)
3288 cg_unified_delegate(&new->cgroup2_chown);
3289 ops->unified = new;
3290 }
3291 }
3292
3293 TRACE("Writable cgroup hierarchies:");
3294 lxc_cgfsng_print_hierarchies(ops);
3295
3296 /* verify that all controllers in cgroup.use and all crucial
3297 * controllers are accounted for
3298 */
3299 if (!all_controllers_found(ops))
3300 return log_error_errno(-1, ENOENT, "Failed to find all required controllers");
3301
3302 return 0;
3303 }
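
/*
 * For reference, the /proc/self/mountinfo lines parsed above look roughly
 * like (the fields before the "-" separator vary):
 *
 *     33 25 0:28 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,memory
 *     34 25 0:29 / /sys/fs/cgroup/unified rw,nosuid,nodev,noexec,relatime shared:16 - cgroup2 cgroup2 rw
 *
 * The filesystem type after the "-" is what get_cgroup_version() keys on to
 * tell CGROUP_SUPER_MAGIC entries from CGROUP2_SUPER_MAGIC ones.
 */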
3304
3305 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
3306 static char *cg_unified_get_current_cgroup(bool relative)
3307 {
3308 __do_free char *basecginfo = NULL;
3309 char *copy;
3310 char *base_cgroup;
3311
3312 if (!relative && (geteuid() == 0))
3313 basecginfo = read_file("/proc/1/cgroup");
3314 else
3315 basecginfo = read_file("/proc/self/cgroup");
3316 if (!basecginfo)
3317 return NULL;
3318
3319 base_cgroup = strstr(basecginfo, "0::/");
3320 if (!base_cgroup)
3321 return NULL;
3322
3323 base_cgroup = base_cgroup + 3;
3324 copy = copy_to_eol(base_cgroup);
3325 if (!copy)
3326 return NULL;
3327
3328 return trim(copy);
3329 }
3330
3331 static int cg_unified_init(struct cgroup_ops *ops, bool relative,
3332 bool unprivileged)
3333 {
3334 __do_free char *subtree_path = NULL;
3335 int ret;
3336 char *mountpoint;
3337 char **delegatable;
3338 struct hierarchy *new;
3339 char *base_cgroup = NULL;
3340
3341 ret = unified_cgroup_hierarchy();
3342 if (ret == -ENOMEDIUM)
3343 return ret_errno(ENOMEDIUM);
3344
3345 if (ret != CGROUP2_SUPER_MAGIC)
3346 return 0;
3347
3348 base_cgroup = cg_unified_get_current_cgroup(relative);
3349 if (!base_cgroup)
3350 return ret_errno(EINVAL);
3351 if (!relative)
3352 prune_init_scope(base_cgroup);
3353
3354 /*
3355 * We assume that the cgroup we're currently in has been delegated to
3356 * us and we are free to further delegate all of the controllers listed
3357 * in cgroup.controllers further down the hierarchy.
3358 */
3359 mountpoint = must_copy_string(DEFAULT_CGROUP_MOUNTPOINT);
3360 subtree_path = must_make_path(mountpoint, base_cgroup, "cgroup.controllers", NULL);
3361 delegatable = cg_unified_get_controllers(subtree_path);
3362 if (!delegatable)
3363 delegatable = cg_unified_make_empty_controller();
3364 if (!delegatable[0])
3365 TRACE("No controllers are enabled for delegation");
3366
3367 /* TODO: If the user requested specific controllers via lxc.cgroup.use
3368 * we should verify that here. The reason I'm not doing it right now is
3369 * that I'm not convinced that lxc.cgroup.use will be the future since
3370 * it is a global property. I'd much rather have an option that lets
3371 * you request controllers per container.
3372 */
3373
3374 new = add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
3375 if (unprivileged)
3376 cg_unified_delegate(&new->cgroup2_chown);
3377
3378 if (bpf_devices_cgroup_supported())
3379 new->bpf_device_controller = 1;
3380
3381 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
3382 ops->unified = new;
3383
3384 return CGROUP2_SUPER_MAGIC;
3385 }
3386
3387 static int cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
3388 {
3389 int ret;
3390 const char *tmp;
3391 bool relative = conf->cgroup_meta.relative;
3392
3393 tmp = lxc_global_config_value("lxc.cgroup.use");
3394 if (tmp) {
3395 __do_free char *pin = NULL;
3396 char *chop, *cur;
3397
3398 pin = must_copy_string(tmp);
3399 chop = pin;
3400
3401 lxc_iterate_parts(cur, chop, ",")
3402 must_append_string(&ops->cgroup_use, cur);
3403 }
3404
3405 ret = cg_unified_init(ops, relative, !lxc_list_empty(&conf->id_map));
3406 if (ret < 0)
3407 return -1;
3408
3409 if (ret == CGROUP2_SUPER_MAGIC)
3410 return 0;
3411
3412 return cg_hybrid_init(ops, relative, !lxc_list_empty(&conf->id_map));
3413 }
3414
3415 __cgfsng_ops static int cgfsng_data_init(struct cgroup_ops *ops)
3416 {
3417 const char *cgroup_pattern;
3418
3419 if (!ops)
3420 return ret_set_errno(-1, ENOENT);
3421
3422 /* copy system-wide cgroup information */
3423 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
3424 if (cgroup_pattern && strcmp(cgroup_pattern, "") != 0)
3425 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
3426
3427 return 0;
3428 }
3429
3430 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
3431 {
3432 __do_free struct cgroup_ops *cgfsng_ops = NULL;
3433
3434 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
3435 if (!cgfsng_ops)
3436 return ret_set_errno(NULL, ENOMEM);
3437
3438 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
3439 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
3440
3441 if (cg_init(cgfsng_ops, conf))
3442 return NULL;
3443
3444 cgfsng_ops->data_init = cgfsng_data_init;
3445 cgfsng_ops->payload_destroy = cgfsng_payload_destroy;
3446 cgfsng_ops->monitor_destroy = cgfsng_monitor_destroy;
3447 cgfsng_ops->monitor_create = cgfsng_monitor_create;
3448 cgfsng_ops->monitor_enter = cgfsng_monitor_enter;
3449 cgfsng_ops->monitor_delegate_controllers = cgfsng_monitor_delegate_controllers;
3450 cgfsng_ops->payload_delegate_controllers = cgfsng_payload_delegate_controllers;
3451 cgfsng_ops->payload_create = cgfsng_payload_create;
3452 cgfsng_ops->payload_enter = cgfsng_payload_enter;
3453 cgfsng_ops->payload_finalize = cgfsng_payload_finalize;
3454 cgfsng_ops->escape = cgfsng_escape;
3455 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
3456 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
3457 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
3458 cgfsng_ops->get = cgfsng_get;
3459 cgfsng_ops->set = cgfsng_set;
3460 cgfsng_ops->freeze = cgfsng_freeze;
3461 cgfsng_ops->unfreeze = cgfsng_unfreeze;
3462 cgfsng_ops->setup_limits_legacy = cgfsng_setup_limits_legacy;
3463 cgfsng_ops->setup_limits = cgfsng_setup_limits;
3464 cgfsng_ops->driver = "cgfsng";
3465 cgfsng_ops->version = "1.0.0";
3466 cgfsng_ops->attach = cgfsng_attach;
3467 cgfsng_ops->chown = cgfsng_chown;
3468 cgfsng_ops->mount = cgfsng_mount;
3469 cgfsng_ops->devices_activate = cgfsng_devices_activate;
3470 cgfsng_ops->get_limiting_cgroup = cgfsng_get_limiting_cgroup;
3471
3472 return move_ptr(cgfsng_ops);
3473 }