]> git.proxmox.com Git - mirror_lxc.git/blob - src/lxc/cgroups/cgfsng.c
cgroups: remove pointless NULL checks
[mirror_lxc.git] / src / lxc / cgroups / cgfsng.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 /*
4 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
5 * cgroup backend. The original cgfs.c was designed to be as flexible
6 * as possible. It would try to find cgroup filesystems no matter where
7 * or how you had them mounted, and deduce the most usable mount for
8 * each controller.
9 *
10 * This new implementation assumes that cgroup filesystems are mounted
11 * under /sys/fs/cgroup/clist where clist is either the controller, or
12 * a comma-separated list of controllers.
13 */
14
15 #ifndef _GNU_SOURCE
16 #define _GNU_SOURCE 1
17 #endif
18 #include <ctype.h>
19 #include <dirent.h>
20 #include <errno.h>
21 #include <grp.h>
22 #include <linux/kdev_t.h>
23 #include <linux/types.h>
24 #include <poll.h>
25 #include <signal.h>
26 #include <stdint.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <sys/epoll.h>
31 #include <sys/types.h>
32 #include <unistd.h>
33
34 #include "af_unix.h"
35 #include "caps.h"
36 #include "cgroup.h"
37 #include "cgroup2_devices.h"
38 #include "cgroup_utils.h"
39 #include "commands.h"
40 #include "conf.h"
41 #include "config.h"
42 #include "log.h"
43 #include "macro.h"
44 #include "mainloop.h"
45 #include "memory_utils.h"
46 #include "storage/storage.h"
47 #include "utils.h"
48
49 #ifndef HAVE_STRLCPY
50 #include "include/strlcpy.h"
51 #endif
52
53 #ifndef HAVE_STRLCAT
54 #include "include/strlcat.h"
55 #endif
56
57 lxc_log_define(cgfsng, cgroup);
58
/* Grow a NULL-terminated pointer array by one slot. Never fails (the
 * underlying must_realloc() aborts on OOM). The array stays
 * NULL-terminated; the returned index is the slot that is now free for
 * the caller to fill.
 */
static int append_null_to_list(void ***list)
{
	int idx = 0;

	if (*list)
		while ((*list)[idx])
			idx++;

	*list = must_realloc(*list, (idx + 2) * sizeof(void **));
	(*list)[idx + 1] = NULL;
	return idx;
}
76
/* Check whether @entry occurs in the NULL-terminated string array @list.
 * A NULL @list is treated as the empty list.
 */
static bool string_in_list(char **list, const char *entry)
{
	if (!list)
		return false;

	for (char **it = list; *it; it++)
		if (strcmp(*it, entry) == 0)
			return true;

	return false;
}
91
/* Return a newly allocated copy of @entry with "name=" prepended, i.e. turn
 * "systemd" into "name=systemd". Never fails.
 */
static char *cg_legacy_must_prefix_named(char *entry)
{
	size_t entry_len = strlen(entry);
	char *result;

	result = must_realloc(NULL, entry_len + STRLITERALLEN("name=") + 1);
	memcpy(result, "name=", STRLITERALLEN("name="));
	/* entry_len + 1 also copies the terminating NUL byte. */
	memcpy(result + STRLITERALLEN("name="), entry, entry_len + 1);

	return result;
}
109
/* Append controller @entry to the NULL-terminated list @clist (which must be
 * NULL the first time we are called). Never fails.
 *
 * Named (non-kernel) subsystems get a "name=" prefix. An entry that is both
 * a kernel and a named subsystem is ambiguous and is therefore refused.
 * (TODO: We could work around this in some cases by just remounting to be
 * unambiguous, or by comparing mountpoint contents with current cgroup.)
 *
 * The last entry will always be NULL.
 */
static void must_append_controller(char **klist, char **nlist, char ***clist,
				   char *entry)
{
	int slot;

	if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
		ERROR("Refusing to use ambiguous controller \"%s\"", entry);
		ERROR("It is both a named and kernel subsystem");
		return;
	}

	slot = append_null_to_list((void ***)clist);

	/* Entries that are already prefixed or are kernel subsystems are
	 * copied verbatim; everything else is a named subsystem and gets
	 * the "name=" prefix.
	 */
	if (strncmp(entry, "name=", 5) == 0 || string_in_list(klist, entry))
		(*clist)[slot] = must_copy_string(entry);
	else
		(*clist)[slot] = cg_legacy_must_prefix_named(entry);
}
144
/* Given a handler's cgroup data, return the struct hierarchy for the
 * controller @controller, or NULL (with errno set) if there is none.
 * Passing NULL for @controller requests the empty unified hierarchy.
 */
static struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *controller)
{
	if (!ops->hierarchies)
		return log_trace_errno(NULL, errno, "There are no useable cgroup controllers");

	for (int i = 0; ops->hierarchies[i]; i++) {
		if (!controller) {
			/* This is the empty unified hierarchy. */
			if (ops->hierarchies[i]->controllers && !ops->hierarchies[i]->controllers[0])
				return ops->hierarchies[i];

			continue;
		}

		/*
		 * Handle controllers with significant implementation changes
		 * from cgroup to cgroup2.
		 */
		if (pure_unified_layout(ops)) {
			/* "devices" only exists on cgroup2 when bpf device
			 * control is available; "freezer" only when the
			 * cgroup2 freezer is present. Otherwise fall
			 * through to the "no controller" warning below.
			 */
			if (strcmp(controller, "devices") == 0) {
				if (ops->unified->bpf_device_controller)
					return ops->unified;

				break;
			} else if (strcmp(controller, "freezer") == 0) {
				if (ops->unified->freezer_controller)
					return ops->unified;

				break;
			}
		}

		if (string_in_list(ops->hierarchies[i]->controllers, controller))
			return ops->hierarchies[i];
	}

	if (controller)
		WARN("There is no useable %s controller", controller);
	else
		WARN("There is no empty unified cgroup hierarchy");

	return ret_set_errno(NULL, ENOENT);
}
191
/* Taken over modified from the kernel sources. */
#define NBITS 32 /* bits in uint32_t */
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)

/* Set bit @bit in the bit array @bitarr.
 * Use an unsigned one for the shift: "1 << 31" overflows a signed int,
 * which is undefined behavior (C11 6.5.7, CERT INT34-C).
 */
static void set_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] |= (UINT32_C(1) << (bit % NBITS));
}

/* Clear bit @bit in the bit array @bitarr. */
static void clear_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] &= ~(UINT32_C(1) << (bit % NBITS));
}

/* Return whether bit @bit is set in the bit array @bitarr. */
static bool is_set(unsigned bit, uint32_t *bitarr)
{
	return (bitarr[bit / NBITS] & (UINT32_C(1) << (bit % NBITS))) != 0;
}
211
/* Create cpumask from cpulist aka turn:
 *
 *	0,2-3
 *
 * into bit array
 *
 *	1 0 1 1
 *
 * @buf is parsed destructively (lxc_iterate_parts() modifies it). @nbits is
 * the number of valid bit positions; any cpu number >= @nbits is rejected
 * with EINVAL. Returns a heap-allocated array the caller must free, or NULL
 * with errno set on failure.
 */
static uint32_t *lxc_cpumask(char *buf, size_t nbits)
{
	__do_free uint32_t *bitarr = NULL;
	char *token;
	size_t arrlen;

	/* Round up to whole uint32_t words. */
	arrlen = BITS_TO_LONGS(nbits);
	bitarr = calloc(arrlen, sizeof(uint32_t));
	if (!bitarr)
		return ret_set_errno(NULL, ENOMEM);

	/* Each comma-separated token is a single cpu "N" or a range "N-M". */
	lxc_iterate_parts(token, buf, ",") {
		errno = 0;
		unsigned end, start;
		char *range;

		start = strtoul(token, NULL, 0);
		end = start;
		range = strchr(token, '-');
		if (range)
			end = strtoul(range + 1, NULL, 0);

		/* Reject inverted ranges ... */
		if (!(start <= end))
			return ret_set_errno(NULL, EINVAL);

		/* ... and cpu numbers beyond the mask. */
		if (end >= nbits)
			return ret_set_errno(NULL, EINVAL);

		while (start <= end)
			set_bit(start++, bitarr);
	}

	/* Success: hand ownership to the caller. */
	return move_ptr(bitarr);
}
254
255 /* Turn cpumask into simple, comma-separated cpulist. */
256 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
257 {
258 __do_free_string_list char **cpulist = NULL;
259 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
260 int ret;
261
262 for (size_t i = 0; i <= nbits; i++) {
263 if (!is_set(i, bitarr))
264 continue;
265
266 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
267 if (ret < 0 || (size_t)ret >= sizeof(numstr))
268 return NULL;
269
270 ret = lxc_append_string(&cpulist, numstr);
271 if (ret < 0)
272 return ret_set_errno(NULL, ENOMEM);
273 }
274
275 if (!cpulist)
276 return ret_set_errno(NULL, ENOMEM);
277
278 return lxc_string_join(",", (const char **)cpulist, false);
279 }
280
/* Return the highest cpu number referenced in @cpulist (e.g. "0,2-3" -> 3).
 * The list is assumed to be ordered, so only the number after the last ','
 * or '-' separator matters; a list without separators is parsed whole.
 * Returns -1 if that number cannot be parsed.
 */
static ssize_t get_max_cpus(char *cpulist)
{
	char *c1, *c2, *last_sep = NULL, *maxcpus;
	size_t cpus = 0;

	c1 = strrchr(cpulist, ',');
	c2 = strrchr(cpulist, '-');

	/* Use whichever separator occurs last. Both pointers point into
	 * @cpulist when non-NULL, so the relational comparison below is
	 * well-defined. (The previous if/else chain compared NULL pointers
	 * relationally — undefined behavior — and contained an unreachable
	 * "else if (!c1 && c2)" branch.)
	 */
	if (c1 && (!c2 || c1 > c2))
		last_sep = c1;
	else if (c2)
		last_sep = c2;

	maxcpus = last_sep ? last_sep + 1 : cpulist;

	errno = 0;
	cpus = strtoul(maxcpus, NULL, 0);
	if (errno != 0)
		return -1;

	return cpus;
}
311
312 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
313 #define __OFFLINE_CPUS "/sys/devices/system/cpu/offline"
314 static bool cg_legacy_filter_and_set_cpus(const char *parent_cgroup,
315 char *child_cgroup, bool am_initialized)
316 {
317 __do_free char *cpulist = NULL, *fpath = NULL, *isolcpus = NULL,
318 *offlinecpus = NULL, *posscpus = NULL;
319 __do_free uint32_t *isolmask = NULL, *offlinemask = NULL,
320 *possmask = NULL;
321 int ret;
322 ssize_t i;
323 ssize_t maxisol = 0, maxoffline = 0, maxposs = 0;
324 bool flipped_bit = false;
325
326 fpath = must_make_path(parent_cgroup, "cpuset.cpus", NULL);
327 posscpus = read_file_at(-EBADF, fpath);
328 if (!posscpus)
329 return log_error_errno(false, errno, "Failed to read file \"%s\"", fpath);
330
331 /* Get maximum number of cpus found in possible cpuset. */
332 maxposs = get_max_cpus(posscpus);
333 if (maxposs < 0 || maxposs >= INT_MAX - 1)
334 return false;
335
336 if (file_exists(__ISOL_CPUS)) {
337 isolcpus = read_file_at(-EBADF, __ISOL_CPUS);
338 if (!isolcpus)
339 return log_error_errno(false, errno, "Failed to read file \"%s\"", __ISOL_CPUS);
340
341 if (isdigit(isolcpus[0])) {
342 /* Get maximum number of cpus found in isolated cpuset. */
343 maxisol = get_max_cpus(isolcpus);
344 if (maxisol < 0 || maxisol >= INT_MAX - 1)
345 return false;
346 }
347
348 if (maxposs < maxisol)
349 maxposs = maxisol;
350 maxposs++;
351 } else {
352 TRACE("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
353 }
354
355 if (file_exists(__OFFLINE_CPUS)) {
356 offlinecpus = read_file_at(-EBADF, __OFFLINE_CPUS);
357 if (!offlinecpus)
358 return log_error_errno(false, errno, "Failed to read file \"%s\"", __OFFLINE_CPUS);
359
360 if (isdigit(offlinecpus[0])) {
361 /* Get maximum number of cpus found in offline cpuset. */
362 maxoffline = get_max_cpus(offlinecpus);
363 if (maxoffline < 0 || maxoffline >= INT_MAX - 1)
364 return false;
365 }
366
367 if (maxposs < maxoffline)
368 maxposs = maxoffline;
369 maxposs++;
370 } else {
371 TRACE("The path \""__OFFLINE_CPUS"\" to read offline cpus from does not exist");
372 }
373
374 if ((maxisol == 0) && (maxoffline == 0)) {
375 cpulist = move_ptr(posscpus);
376 goto copy_parent;
377 }
378
379 possmask = lxc_cpumask(posscpus, maxposs);
380 if (!possmask)
381 return log_error_errno(false, errno, "Failed to create cpumask for possible cpus");
382
383 if (maxisol > 0) {
384 isolmask = lxc_cpumask(isolcpus, maxposs);
385 if (!isolmask)
386 return log_error_errno(false, errno, "Failed to create cpumask for isolated cpus");
387 }
388
389 if (maxoffline > 0) {
390 offlinemask = lxc_cpumask(offlinecpus, maxposs);
391 if (!offlinemask)
392 return log_error_errno(false, errno, "Failed to create cpumask for offline cpus");
393 }
394
395 for (i = 0; i <= maxposs; i++) {
396 if ((isolmask && !is_set(i, isolmask)) ||
397 (offlinemask && !is_set(i, offlinemask)) ||
398 !is_set(i, possmask))
399 continue;
400
401 flipped_bit = true;
402 clear_bit(i, possmask);
403 }
404
405 if (!flipped_bit) {
406 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
407 TRACE("No isolated or offline cpus present in cpuset");
408 } else {
409 cpulist = move_ptr(posscpus);
410 TRACE("Removed isolated or offline cpus from cpuset");
411 }
412 if (!cpulist)
413 return log_error_errno(false, errno, "Failed to create cpu list");
414
415 copy_parent:
416 if (!am_initialized) {
417 ret = lxc_write_openat(child_cgroup, "cpuset.cpus", cpulist, strlen(cpulist));
418 if (ret < 0)
419 return log_error_errno(false,
420 errno, "Failed to write cpu list to \"%s/cpuset.cpus\"",
421 child_cgroup);
422
423 TRACE("Copied cpu settings of parent cgroup");
424 }
425
426 return true;
427 }
428
429 /* Copy contents of parent(@path)/@file to @path/@file */
430 static bool copy_parent_file(const char *parent_cgroup,
431 const char *child_cgroup, const char *file)
432 {
433 __do_free char *parent_file = NULL, *value = NULL;
434 int len = 0;
435 int ret;
436
437 parent_file = must_make_path(parent_cgroup, file, NULL);
438 len = lxc_read_from_file(parent_file, NULL, 0);
439 if (len <= 0)
440 return log_error_errno(false, errno, "Failed to determine buffer size");
441
442 value = must_realloc(NULL, len + 1);
443 value[len] = '\0';
444 ret = lxc_read_from_file(parent_file, value, len);
445 if (ret != len)
446 return log_error_errno(false, errno, "Failed to read from parent file \"%s\"", parent_file);
447
448 ret = lxc_write_openat(child_cgroup, file, value, len);
449 if (ret < 0 && errno != EACCES)
450 return log_error_errno(false, errno, "Failed to write \"%s\" to file \"%s/%s\"",
451 value, child_cgroup, file);
452 return true;
453 }
454
/* A hierarchy is "unified" iff it was mounted as cgroup2. */
static inline bool is_unified_hierarchy(const struct hierarchy *h)
{
	return h->version == CGROUP2_SUPER_MAGIC;
}
459
/*
 * Initialize the cpuset hierarchy in first directory of @cgroup_leaf and set
 * cgroup.clone_children so that children inherit settings. Since the
 * h->base_path is populated by init or ourselves, we know it is already
 * initialized.
 *
 * returns -1 on error, 0 when we didn't created a cgroup, 1 if we created a
 * cgroup.
 */
static int cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h,
					     const char *cgroup_leaf)
{
	__do_free char *parent_cgroup = NULL, *child_cgroup = NULL, *dup = NULL;
	__do_close int cgroup_fd = -EBADF;
	int fret = -1;
	int ret;
	char v;
	char *leaf, *slash;

	/* Only the legacy cpuset controller needs this treatment. */
	if (is_unified_hierarchy(h))
		return 0;

	if (!string_in_list(h->controllers, "cpuset"))
		return 0;

	if (!cgroup_leaf)
		return ret_set_errno(-1, EINVAL);

	/* Work on a copy: the first path component is temporarily
	 * NUL-terminated below.
	 */
	dup = strdup(cgroup_leaf);
	if (!dup)
		return ret_set_errno(-1, ENOMEM);

	parent_cgroup = must_make_path(h->mountpoint, h->container_base_path, NULL);

	/* Isolate the first component of @cgroup_leaf ... */
	leaf = dup;
	leaf += strspn(leaf, "/");
	slash = strchr(leaf, '/');
	if (slash)
		*slash = '\0';
	child_cgroup = must_make_path(parent_cgroup, leaf, NULL);
	/* ... and restore the separator afterwards. */
	if (slash)
		*slash = '/';

	fret = 1;
	ret = mkdir(child_cgroup, 0755);
	if (ret < 0) {
		if (errno != EEXIST)
			return log_error_errno(-1, errno, "Failed to create directory \"%s\"", child_cgroup);

		/* Directory already existed: report that we created nothing. */
		fret = 0;
	}

	cgroup_fd = lxc_open_dirfd(child_cgroup);
	if (cgroup_fd < 0)
		return -1;

	ret = lxc_readat(cgroup_fd, "cgroup.clone_children", &v, 1);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to read file \"%s/cgroup.clone_children\"", child_cgroup);

	/* Make sure any isolated cpus are removed from cpuset.cpus. */
	if (!cg_legacy_filter_and_set_cpus(parent_cgroup, child_cgroup, v == '1'))
		return log_error_errno(-1, errno, "Failed to remove isolated cpus");

	/* Already set for us by someone else. */
	if (v == '1')
		TRACE("\"cgroup.clone_children\" was already set to \"1\"");

	/* copy parent's settings */
	if (!copy_parent_file(parent_cgroup, child_cgroup, "cpuset.mems"))
		return log_error_errno(-1, errno, "Failed to copy \"cpuset.mems\" settings");

	/* Set clone_children so children inherit our settings */
	ret = lxc_writeat(cgroup_fd, "cgroup.clone_children", "1", 1);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to write 1 to \"%s/cgroup.clone_children\"", child_cgroup);

	return fret;
}
539
/* Given two null-terminated lists of strings, return true if any string is in
 * both.
 */
static bool controller_lists_intersect(char **l1, char **l2)
{
	if (!l1 || !l2)
		return false;

	for (char **cur = l1; *cur; cur++)
		if (string_in_list(l2, *cur))
			return true;

	return false;
}
554
555 /* For a null-terminated list of controllers @clist, return true if any of those
556 * controllers is already listed the null-terminated list of hierarchies @hlist.
557 * Realistically, if one is present, all must be present.
558 */
559 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
560 {
561 if (!hlist)
562 return false;
563
564 for (int i = 0; hlist[i]; i++)
565 if (controller_lists_intersect(hlist[i]->controllers, clist))
566 return true;
567
568 return false;
569 }
570
571 /* Return true if the controller @entry is found in the null-terminated list of
572 * hierarchies @hlist.
573 */
574 static bool controller_found(struct hierarchy **hlist, char *entry)
575 {
576 if (!hlist)
577 return false;
578
579 for (int i = 0; hlist[i]; i++)
580 if (string_in_list(hlist[i]->controllers, entry))
581 return true;
582
583 return false;
584 }
585
586 /* Return true if all of the controllers which we require have been found. The
587 * required list is freezer and anything in lxc.cgroup.use.
588 */
589 static bool all_controllers_found(struct cgroup_ops *ops)
590 {
591 struct hierarchy **hlist;
592
593 if (!ops->cgroup_use)
594 return true;
595
596 hlist = ops->hierarchies;
597 for (char **cur = ops->cgroup_use; cur && *cur; cur++)
598 if (!controller_found(hlist, *cur))
599 return log_error(false, "No %s controller mountpoint found", *cur);
600
601 return true;
602 }
603
604 /* Get the controllers from a mountinfo line There are other ways we could get
605 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
606 * could parse the mount options. But we simply assume that the mountpoint must
607 * be /sys/fs/cgroup/controller-list
608 */
609 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
610 int type)
611 {
612 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
613 * for legacy hierarchies.
614 */
615 __do_free_string_list char **aret = NULL;
616 int i;
617 char *p2, *tok;
618 char *p = line, *sep = ",";
619
620 for (i = 0; i < 4; i++) {
621 p = strchr(p, ' ');
622 if (!p)
623 return NULL;
624 p++;
625 }
626
627 /* Note, if we change how mountinfo works, then our caller will need to
628 * verify /sys/fs/cgroup/ in this field.
629 */
630 if (strncmp(p, DEFAULT_CGROUP_MOUNTPOINT "/", 15) != 0)
631 return log_warn(NULL, "Found hierarchy not under " DEFAULT_CGROUP_MOUNTPOINT ": \"%s\"", p);
632
633 p += 15;
634 p2 = strchr(p, ' ');
635 if (!p2)
636 return log_error(NULL, "Corrupt mountinfo");
637 *p2 = '\0';
638
639 if (type == CGROUP_SUPER_MAGIC) {
640 __do_free char *dup = NULL;
641
642 /* strdup() here for v1 hierarchies. Otherwise
643 * lxc_iterate_parts() will destroy mountpoints such as
644 * "/sys/fs/cgroup/cpu,cpuacct".
645 */
646 dup = must_copy_string(p);
647 if (!dup)
648 return NULL;
649
650 lxc_iterate_parts(tok, dup, sep)
651 must_append_controller(klist, nlist, &aret, tok);
652 }
653 *p2 = ' ';
654
655 return move_ptr(aret);
656 }
657
658 static char **cg_unified_make_empty_controller(void)
659 {
660 __do_free_string_list char **aret = NULL;
661 int newentry;
662
663 newentry = append_null_to_list((void ***)&aret);
664 aret[newentry] = NULL;
665 return move_ptr(aret);
666 }
667
668 static char **cg_unified_get_controllers(int dfd, const char *file)
669 {
670 __do_free char *buf = NULL;
671 __do_free_string_list char **aret = NULL;
672 char *sep = " \t\n";
673 char *tok;
674
675 buf = read_file_at(dfd, file);
676 if (!buf)
677 return NULL;
678
679 lxc_iterate_parts(tok, buf, sep) {
680 int newentry;
681 char *copy;
682
683 newentry = append_null_to_list((void ***)&aret);
684 copy = must_copy_string(tok);
685 aret[newentry] = copy;
686 }
687
688 return move_ptr(aret);
689 }
690
691 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
692 char *container_base_path, int type)
693 {
694 struct hierarchy *new;
695 int newentry;
696
697 new = zalloc(sizeof(*new));
698 if (!new)
699 return ret_set_errno(NULL, ENOMEM);
700 new->controllers = clist;
701 new->mountpoint = mountpoint;
702 new->container_base_path = container_base_path;
703 new->version = type;
704 new->cgfd_con = -EBADF;
705 new->cgfd_limit = -EBADF;
706 new->cgfd_mon = -EBADF;
707
708 newentry = append_null_to_list((void ***)h);
709 (*h)[newentry] = new;
710 return new;
711 }
712
713 /* Get a copy of the mountpoint from @line, which is a line from
714 * /proc/self/mountinfo.
715 */
716 static char *cg_hybrid_get_mountpoint(char *line)
717 {
718 char *p = line, *sret = NULL;
719 size_t len;
720 char *p2;
721
722 for (int i = 0; i < 4; i++) {
723 p = strchr(p, ' ');
724 if (!p)
725 return NULL;
726 p++;
727 }
728
729 if (strncmp(p, DEFAULT_CGROUP_MOUNTPOINT "/", 15) != 0)
730 return NULL;
731
732 p2 = strchr(p + 15, ' ');
733 if (!p2)
734 return NULL;
735 *p2 = '\0';
736
737 len = strlen(p);
738 sret = must_realloc(NULL, len + 1);
739 memcpy(sret, p, len);
740 sret[len] = '\0';
741
742 return sret;
743 }
744
/* Given a multi-line string, return a NUL-terminated copy of the current
 * line, or NULL if no newline terminates it.
 */
static char *copy_to_eol(char *p)
{
	char *newline, *copy;
	size_t line_len;

	newline = strchr(p, '\n');
	if (!newline)
		return NULL;

	line_len = newline - p;
	copy = must_realloc(NULL, line_len + 1);
	memcpy(copy, p, line_len);
	copy[line_len] = '\0';

	return copy;
}
762
763 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
764 * /proc/self/cgroup file. Check whether controller c is present.
765 */
766 static bool controller_in_clist(char *cgline, char *c)
767 {
768 __do_free char *tmp = NULL;
769 char *tok, *eol;
770 size_t len;
771
772 eol = strchr(cgline, ':');
773 if (!eol)
774 return false;
775
776 len = eol - cgline;
777 tmp = must_realloc(NULL, len + 1);
778 memcpy(tmp, cgline, len);
779 tmp[len] = '\0';
780
781 lxc_iterate_parts(tok, tmp, ",")
782 if (strcmp(tok, c) == 0)
783 return true;
784
785 return false;
786 }
787
/* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup path
 * (the third field of the matching line) for @controller, or NULL if no
 * line matches. For @type == CGROUP2_SUPER_MAGIC the "0::/some/path" entry
 * is matched instead of a controller name.
 */
static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
					  int type)
{
	char *p = basecginfo;

	for (;;) {
		bool is_cgv2_base_cgroup = false;

		/* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
		if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
			is_cgv2_base_cgroup = true;

		/* Skip past the hierarchy-id field. */
		p = strchr(p, ':');
		if (!p)
			return NULL;
		p++;

		if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
			/* Skip past the controller-list field; @p then
			 * points at the cgroup path.
			 */
			p = strchr(p, ':');
			if (!p)
				return NULL;
			p++;
			return copy_to_eol(p);
		}

		/* No match; advance to the next line. */
		p = strchr(p, '\n');
		if (!p)
			return NULL;
		p++;
	}
}
822
/* Append a copy of @entry to the NULL-terminated string list @list.
 * Never fails.
 */
static void must_append_string(char ***list, char *entry)
{
	int slot = append_null_to_list((void ***)list);

	(*list)[slot] = must_copy_string(entry);
}
832
833 static int get_existing_subsystems(char ***klist, char ***nlist)
834 {
835 __do_free char *line = NULL;
836 __do_fclose FILE *f = NULL;
837 size_t len = 0;
838
839 f = fopen("/proc/self/cgroup", "re");
840 if (!f)
841 return -1;
842
843 while (getline(&line, &len, f) != -1) {
844 char *p, *p2, *tok;
845 p = strchr(line, ':');
846 if (!p)
847 continue;
848 p++;
849 p2 = strchr(p, ':');
850 if (!p2)
851 continue;
852 *p2 = '\0';
853
854 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
855 * contains an entry of the form:
856 *
857 * 0::/some/path
858 *
859 * In this case we use "cgroup2" as controller name.
860 */
861 if ((p2 - p) == 0) {
862 must_append_string(klist, "cgroup2");
863 continue;
864 }
865
866 lxc_iterate_parts(tok, p, ",") {
867 if (strncmp(tok, "name=", 5) == 0)
868 must_append_string(nlist, tok);
869 else
870 must_append_string(klist, tok);
871 }
872 }
873
874 return 0;
875 }
876
/* Strip trailing newline characters from @s in place and return it. A
 * string consisting of nothing but "\n" is left untouched (len > 1 guard).
 */
static char *trim(char *s)
{
	size_t len = strlen(s);

	while (len > 1 && s[len - 1] == '\n')
		s[--len] = '\0';

	return s;
}
887
888 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
889 {
890 int i;
891 struct hierarchy **it;
892
893 if (!ops->hierarchies) {
894 TRACE(" No hierarchies found");
895 return;
896 }
897
898 TRACE(" Hierarchies:");
899 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
900 int j;
901 char **cit;
902
903 TRACE(" %d: base_cgroup: %s", i, (*it)->container_base_path ? (*it)->container_base_path : "(null)");
904 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
905 TRACE(" controllers:");
906 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
907 TRACE(" %d: %s", j, *cit);
908 }
909 }
910
/* Dump the contents of /proc/<pid>/cgroup plus the detected kernel and
 * named subsystem lists at trace log level.
 */
static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
					      char **nlist)
{
	TRACE("basecginfo is:");
	TRACE("%s", basecginfo);

	for (int k = 0; klist && klist[k]; k++)
		TRACE("kernel subsystem %d: %s", k, klist[k]);

	for (int k = 0; nlist && nlist[k]; k++)
		TRACE("named subsystem %d: %s", k, nlist[k]);
}
926
/* Recursively remove the per-container cgroup directories in every
 * hierarchy. Best effort: failure to remove a directory only produces a
 * warning. Always returns 0.
 */
static int cgroup_tree_remove(struct hierarchy **hierarchies, const char *container_cgroup)
{
	if (!container_cgroup || !hierarchies)
		return 0;

	for (int i = 0; hierarchies[i]; i++) {
		struct hierarchy *h = hierarchies[i];
		int ret;

		if (!h->container_limit_path)
			continue;

		ret = lxc_rm_rf(h->container_limit_path);
		if (ret < 0)
			WARN("Failed to destroy \"%s\"", h->container_limit_path);

		/* container_limit_path may alias container_full_path (see
		 * cgroup_tree_create()); only free it separately when it is
		 * a distinct allocation to avoid a double free.
		 */
		if (h->container_limit_path != h->container_full_path)
			free_disarm(h->container_limit_path);
		free_disarm(h->container_full_path);
	}

	return 0;
}
950
/* Argument bundle for helpers run via userns_exec_1() that operate on the
 * container's cgroups from inside its user namespace.
 */
struct generic_userns_exec_data {
	struct hierarchy **hierarchies;      /* hierarchies to operate on */
	const char *container_cgroup;        /* container cgroup name */
	struct lxc_conf *conf;               /* container config (id maps, init ids) */
	uid_t origuid; /* target uid in parent namespace */
	char *path;
};
958
/* userns_exec_1() payload: drop supplementary groups, then gids, then uids
 * (in that order), and finally remove the container's cgroup trees.
 */
static int cgroup_tree_remove_wrapper(void *data)
{
	struct generic_userns_exec_data *arg = data;
	/* With a root id mapping, id 0 is used directly; otherwise fall back
	 * to the configured init uid/gid.
	 */
	uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
	gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
	int ret;

	/* EPERM is tolerated: we may lack the privilege to drop groups. */
	if (!lxc_setgroups(0, NULL) && errno != EPERM)
		return log_error_errno(-1, errno, "Failed to setgroups(0, NULL)");

	ret = setresgid(nsgid, nsgid, nsgid);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to setresgid(%d, %d, %d)",
				       (int)nsgid, (int)nsgid, (int)nsgid);

	ret = setresuid(nsuid, nsuid, nsuid);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to setresuid(%d, %d, %d)",
				       (int)nsuid, (int)nsuid, (int)nsuid);

	return cgroup_tree_remove(arg->hierarchies, arg->container_cgroup);
}
981
/* Destroy the container's (payload's) cgroups in all hierarchies, detaching
 * the cgroup2 device bpf program first where supported. Best effort: only
 * warns on failure.
 */
__cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
						struct lxc_handler *handler)
{
	int ret;

	if (!ops) {
		ERROR("Called with uninitialized cgroup operations");
		return;
	}

	/* Nothing to tear down without hierarchies. */
	if (!ops->hierarchies)
		return;

	if (!handler) {
		ERROR("Called with uninitialized handler");
		return;
	}

	if (!handler->conf) {
		ERROR("Called with uninitialized conf");
		return;
	}

#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
	/* Detach the cgroup2 device bpf program before removing cgroups. */
	ret = bpf_program_cgroup_detach(handler->cgroup_ops->cgroup2_devices);
	if (ret < 0)
		WARN("Failed to detach bpf program from cgroup");
#endif

	/* When an id mapping is configured, removal is performed from inside
	 * the user namespace via userns_exec_1() — presumably because the
	 * cgroup directories carry mapped ownership.
	 */
	if (!lxc_list_empty(&handler->conf->id_map)) {
		struct generic_userns_exec_data wrap = {
			.conf = handler->conf,
			.container_cgroup = ops->container_cgroup,
			.hierarchies = ops->hierarchies,
			.origuid = 0,
		};
		ret = userns_exec_1(handler->conf, cgroup_tree_remove_wrapper,
				    &wrap, "cgroup_tree_remove_wrapper");
	} else {
		ret = cgroup_tree_remove(ops->hierarchies, ops->container_cgroup);
	}
	if (ret < 0)
		SYSWARN("Failed to destroy cgroups");
}
1026
/* Destroy the monitor's cgroups in every hierarchy: first move the monitor
 * process into a separate pivot cgroup, then recursively delete the
 * per-monitor directories. Best effort: only warns on failure.
 */
__cgfsng_ops static void cgfsng_monitor_destroy(struct cgroup_ops *ops,
						struct lxc_handler *handler)
{
	int len;
	char pidstr[INTTYPE_TO_STRLEN(pid_t)];
	const struct lxc_conf *conf;

	if (!ops) {
		ERROR("Called with uninitialized cgroup operations");
		return;
	}

	if (!ops->hierarchies)
		return;

	if (!handler) {
		ERROR("Called with uninitialized handler");
		return;
	}

	if (!handler->conf) {
		ERROR("Called with uninitialized conf");
		return;
	}
	conf = handler->conf;

	len = snprintf(pidstr, sizeof(pidstr), "%d", handler->monitor_pid);
	if (len < 0 || (size_t)len >= sizeof(pidstr))
		return;

	for (int i = 0; ops->hierarchies[i]; i++) {
		__do_free char *pivot_path = NULL;
		struct hierarchy *h = ops->hierarchies[i];
		size_t offset;
		int ret;

		if (!h->monitor_full_path)
			continue;

		/* Monitor might have died before we entered the cgroup. */
		if (handler->monitor_pid <= 0) {
			WARN("No valid monitor process found while destroying cgroups");
			goto try_lxc_rm_rf;
		}

		/* Pick the most specific configured directory for the pivot
		 * cgroup: monitor_pivot_dir > monitor_dir > dir > default.
		 */
		if (conf->cgroup_meta.monitor_pivot_dir)
			pivot_path = must_make_path(h->mountpoint, h->container_base_path,
						    conf->cgroup_meta.monitor_pivot_dir, CGROUP_PIVOT, NULL);
		else if (conf->cgroup_meta.monitor_dir)
			pivot_path = must_make_path(h->mountpoint, h->container_base_path,
						    conf->cgroup_meta.monitor_dir, CGROUP_PIVOT, NULL);
		else if (conf->cgroup_meta.dir)
			pivot_path = must_make_path(h->mountpoint, h->container_base_path,
						    conf->cgroup_meta.dir, CGROUP_PIVOT, NULL);
		else
			pivot_path = must_make_path(h->mountpoint, h->container_base_path,
						    CGROUP_PIVOT, NULL);

		/* Path relative to the hierarchy root, for the cpuset setup. */
		offset = strlen(h->mountpoint) + strlen(h->container_base_path);

		/* NOTE(review): cg_legacy_handle_cpuset_hierarchy() returns 1
		 * when it created the cgroup, which also triggers this
		 * warning — confirm whether only ret < 0 was intended.
		 */
		if (cg_legacy_handle_cpuset_hierarchy(h, pivot_path + offset))
			SYSWARN("Failed to initialize cpuset %s/" CGROUP_PIVOT, pivot_path);

		ret = mkdir_p(pivot_path, 0755);
		if (ret < 0 && errno != EEXIST) {
			ERROR("Failed to create %s", pivot_path);
			goto try_lxc_rm_rf;
		}

		/* Move the monitor out of the cgroup we are about to delete. */
		ret = lxc_write_openat(pivot_path, "cgroup.procs", pidstr, len);
		if (ret != 0) {
			SYSWARN("Failed to move monitor %s to \"%s\"", pidstr, pivot_path);
			continue;
		}

try_lxc_rm_rf:
		ret = lxc_rm_rf(h->monitor_full_path);
		if (ret < 0)
			WARN("Failed to destroy \"%s\"", h->monitor_full_path);
	}
}
1108
/* Create directory @dir and all missing parents with mode @mode. EEXIST is
 * ignored for intermediate components but treated as an error for the final
 * one, so the caller learns whether the leaf already existed.
 * Returns 0 on success, -1 with errno set otherwise.
 */
static int mkdir_eexist_on_last(const char *dir, mode_t mode)
{
	const char *tmp = dir;
	const char *orig = dir;
	size_t orig_len;

	orig_len = strlen(dir);
	do {
		__do_free char *makeme = NULL;
		int ret;
		size_t cur_len;

		/* Advance @dir past any separators to the next component and
		 * @tmp to the end of that component.
		 */
		dir = tmp + strspn(tmp, "/");
		tmp = dir + strcspn(dir, "/");

		/* Prefix of the path up to (excluding) the component @dir
		 * now points at; on the final iteration this is the whole
		 * path.
		 */
		cur_len = dir - orig;
		makeme = strndup(orig, cur_len);
		if (!makeme)
			return ret_set_errno(-1, ENOMEM);

		ret = mkdir(makeme, mode);
		/* (orig_len == cur_len) only holds for the last component,
		 * where EEXIST must not be swallowed.
		 */
		if (ret < 0 && ((errno != EEXIST) || (orig_len == cur_len)))
			return log_warn_errno(-1, errno, "Failed to create directory \"%s\"", makeme);
	} while (tmp != dir);

	return 0;
}
1136
/* Create the cgroup directory for hierarchy @h: the monitor cgroup when
 * @payload is false, otherwise the container cgroup (plus, with isolation,
 * a separate limiting cgroup @cgroup_limit_dir). On success the matching
 * dirfd (cgfd_mon / cgfd_con / cgfd_limit) and path fields of @h are
 * populated. Returns true on success, false with errno set on failure.
 */
static bool cgroup_tree_create(struct cgroup_ops *ops, struct lxc_conf *conf,
			       struct hierarchy *h, const char *cgroup_tree,
			       const char *cgroup_leaf, bool payload,
			       const char *cgroup_limit_dir)
{
	__do_free char *path = NULL, *limit_path = NULL;
	int ret, ret_cpuset;

	path = must_make_path(h->mountpoint, h->container_base_path, cgroup_leaf, NULL);
	if (dir_exists(path))
		return log_warn_errno(false, errno, "The %s cgroup already existed", path);

	ret_cpuset = cg_legacy_handle_cpuset_hierarchy(h, cgroup_leaf);
	if (ret_cpuset < 0)
		return log_error_errno(false, errno, "Failed to handle legacy cpuset controller");

	if (payload && cgroup_limit_dir) {
		/* with isolation both parts need to not already exist */
		limit_path = must_make_path(h->mountpoint,
					    h->container_base_path,
					    cgroup_limit_dir, NULL);

		ret = mkdir_eexist_on_last(limit_path, 0755);
		if (ret < 0)
			return log_debug_errno(false,
					       errno, "Failed to create %s limiting cgroup",
					       limit_path);

		h->cgfd_limit = lxc_open_dirfd(limit_path);
		if (h->cgfd_limit < 0)
			return log_error_errno(false, errno,
					       "Failed to open %s", path);
		h->container_limit_path = move_ptr(limit_path);

		/*
		 * With isolation the devices legacy cgroup needs to be
		 * initialized early, as it typically contains an 'a' (all)
		 * line, which is not possible once a subdirectory has been
		 * created.
		 */
		if (string_in_list(h->controllers, "devices") &&
		    !ops->setup_limits_legacy(ops, conf, true))
			return log_error(false, "Failed to setup legacy device limits");
	}

	ret = mkdir_eexist_on_last(path, 0755);
	if (ret < 0) {
		/*
		 * This is the cpuset controller and
		 * cg_legacy_handle_cpuset_hierarchy() has created our target
		 * directory for us to ensure correct initialization.
		 */
		if (ret_cpuset != 1 || cgroup_tree)
			return log_debug_errno(false, errno, "Failed to create %s cgroup", path);
	}

	if (payload) {
		h->cgfd_con = lxc_open_dirfd(path);
		if (h->cgfd_con < 0)
			return log_error_errno(false, errno, "Failed to open %s", path);
		h->container_full_path = move_ptr(path);
		/* Without a separate limiting cgroup the container cgroup
		 * doubles as the limit cgroup: fd and path are aliased
		 * (cgroup_tree_remove() relies on this aliasing).
		 */
		if (h->cgfd_limit < 0)
			h->cgfd_limit = h->cgfd_con;
		if (!h->container_limit_path)
			h->container_limit_path = h->container_full_path;
	} else {
		h->cgfd_mon = lxc_open_dirfd(path);
		if (h->cgfd_mon < 0)
			return log_error_errno(false, errno, "Failed to open %s", path);
		h->monitor_full_path = move_ptr(path);
	}

	return true;
}
1211
/*
 * Tear down the payload or monitor cgroup of one hierarchy: close the
 * cached dirfd, take ownership of the stored path(s) and rmdir() the
 * (expected to be empty) cgroup directories. A separate limiting
 * cgroup is removed as well when it is not an alias of the payload
 * cgroup.
 */
static void cgroup_tree_leaf_remove(struct hierarchy *h, bool payload)
{
	__do_free char *full_path = NULL, *__limit_path = NULL;
	char *limit_path = NULL;

	if (payload) {
		__lxc_unused __do_close int fd = move_fd(h->cgfd_con);
		full_path = move_ptr(h->container_full_path);
		limit_path = move_ptr(h->container_limit_path);
		/* Only free the limit path separately when it is not the
		 * same allocation as the payload path (see
		 * cgroup_tree_create() without isolation).
		 */
		if (limit_path != full_path)
			__limit_path = limit_path;
	} else {
		__lxc_unused __do_close int fd = move_fd(h->cgfd_mon);
		full_path = move_ptr(h->monitor_full_path);
	}

	if (full_path && rmdir(full_path))
		SYSWARN("Failed to rmdir(\"%s\") cgroup", full_path);
	if (limit_path && rmdir(limit_path))
		SYSWARN("Failed to rmdir(\"%s\") cgroup", limit_path);
}
1233
1234 /*
1235 * Check we have no lxc.cgroup.dir, and that lxc.cgroup.dir.limit_prefix is a
1236 * proper prefix directory of lxc.cgroup.dir.payload.
1237 *
1238 * Returns the prefix length if it is set, otherwise zero on success.
1239 */
1240 static bool check_cgroup_dir_config(struct lxc_conf *conf)
1241 {
1242 const char *monitor_dir = conf->cgroup_meta.monitor_dir,
1243 *container_dir = conf->cgroup_meta.container_dir,
1244 *namespace_dir = conf->cgroup_meta.namespace_dir;
1245
1246 /* none of the new options are set, all is fine */
1247 if (!monitor_dir && !container_dir && !namespace_dir)
1248 return true;
1249
1250 /* some are set, make sure lxc.cgroup.dir is not also set*/
1251 if (conf->cgroup_meta.dir)
1252 return log_error_errno(false, EINVAL,
1253 "lxc.cgroup.dir conflicts with lxc.cgroup.dir.payload/monitor");
1254
1255 /* make sure both monitor and payload are set */
1256 if (!monitor_dir || !container_dir)
1257 return log_error_errno(false, EINVAL,
1258 "lxc.cgroup.dir.payload and lxc.cgroup.dir.monitor must both be set");
1259
1260 /* namespace_dir may be empty */
1261 return true;
1262 }
1263
/*
 * Create the monitor cgroup for this container in every hierarchy.
 *
 * The cgroup name is derived from lxc.cgroup.dir.monitor,
 * lxc.cgroup.dir, or the compiled-in pattern. Unless an explicit
 * monitor dir was configured, creation is retried with a "-<idx>"
 * suffix (up to 1000 attempts) when the cgroup already exists in some
 * hierarchy.
 */
__cgfsng_ops static bool cgfsng_monitor_create(struct cgroup_ops *ops, struct lxc_handler *handler)
{
	__do_free char *monitor_cgroup = NULL, *__cgroup_tree = NULL;
	const char *cgroup_tree;
	int idx = 0;
	int i;
	size_t len;
	char *suffix = NULL;
	struct lxc_conf *conf;

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (ops->monitor_cgroup)
		return ret_set_errno(false, EEXIST);

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);

	conf = handler->conf;

	if (!check_cgroup_dir_config(conf))
		return false;

	/* Pick the cgroup name source in order of specificity. */
	if (conf->cgroup_meta.monitor_dir) {
		cgroup_tree = NULL;
		monitor_cgroup = strdup(conf->cgroup_meta.monitor_dir);
	} else if (conf->cgroup_meta.dir) {
		cgroup_tree = conf->cgroup_meta.dir;
		monitor_cgroup = must_concat(&len, conf->cgroup_meta.dir, "/",
					     DEFAULT_MONITOR_CGROUP_PREFIX,
					     handler->name,
					     CGROUP_CREATE_RETRY, NULL);
	} else if (ops->cgroup_pattern) {
		__cgroup_tree = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
		if (!__cgroup_tree)
			return ret_set_errno(false, ENOMEM);

		cgroup_tree = __cgroup_tree;
		monitor_cgroup = must_concat(&len, cgroup_tree, "/",
					     DEFAULT_MONITOR_CGROUP,
					     CGROUP_CREATE_RETRY, NULL);
	} else {
		cgroup_tree = NULL;
		monitor_cgroup = must_concat(&len, DEFAULT_MONITOR_CGROUP_PREFIX,
					     handler->name,
					     CGROUP_CREATE_RETRY, NULL);
	}
	if (!monitor_cgroup)
		return ret_set_errno(false, ENOMEM);

	/* The generated name ends in the CGROUP_CREATE_RETRY placeholder;
	 * cut it off and reuse the space for the "-<idx>" retry suffix.
	 */
	if (!conf->cgroup_meta.monitor_dir) {
		suffix = monitor_cgroup + len - CGROUP_CREATE_RETRY_LEN;
		*suffix = '\0';
	}
	do {
		if (idx && suffix)
			sprintf(suffix, "-%d", idx);

		for (i = 0; ops->hierarchies[i]; i++) {
			if (cgroup_tree_create(ops, handler->conf,
					       ops->hierarchies[i], cgroup_tree,
					       monitor_cgroup, false, NULL))
				continue;

			/* Creation failed in this hierarchy: undo the
			 * cgroups created so far and retry with the next
			 * suffix.
			 */
			DEBUG("Failed to create cgroup \"%s\"", ops->hierarchies[i]->monitor_full_path ?: "(null)");
			for (int j = 0; j < i; j++)
				cgroup_tree_leaf_remove(ops->hierarchies[j], false);

			idx++;
			break;
		}
	} while (ops->hierarchies[i] && idx > 0 && idx < 1000 && suffix);

	/* Without a retry suffix (explicit monitor dir) a single failure
	 * is final.
	 */
	if (idx == 1000 || (!suffix && idx != 0))
		return log_error_errno(false, ERANGE, "Failed to create monitor cgroup");

	ops->monitor_cgroup = move_ptr(monitor_cgroup);
	return log_info(true, "The monitor process uses \"%s\" as cgroup", ops->monitor_cgroup);
}
1347
/*
 * Try to create the same cgroup in all hierarchies. Start with
 * cgroup_pattern; next cgroup_pattern-1, -2, ..., -999.
 *
 * With lxc.cgroup.dir.payload (+ optional lxc.cgroup.dir.namespace) a
 * separate limiting cgroup is created and the container cgroup becomes
 * a subdirectory of it (cgroup isolation); no retry suffix is used in
 * that case.
 */
__cgfsng_ops static bool cgfsng_payload_create(struct cgroup_ops *ops, struct lxc_handler *handler)
{
	__do_free char *container_cgroup = NULL,
		       *__cgroup_tree = NULL,
		       *limiting_cgroup = NULL;
	const char *cgroup_tree;
	int idx = 0;
	int i;
	size_t len;
	char *suffix = NULL;
	struct lxc_conf *conf;

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (ops->container_cgroup)
		return ret_set_errno(false, EEXIST);

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);

	conf = handler->conf;

	if (!check_cgroup_dir_config(conf))
		return false;

	/* Pick the cgroup name source in order of specificity. */
	if (conf->cgroup_meta.container_dir) {
		cgroup_tree = NULL;

		limiting_cgroup = strdup(conf->cgroup_meta.container_dir);
		if (!limiting_cgroup)
			return ret_set_errno(false, ENOMEM);

		if (conf->cgroup_meta.namespace_dir) {
			container_cgroup = must_make_path(limiting_cgroup,
							  conf->cgroup_meta.namespace_dir,
							  NULL);
		} else {
			/* explicit paths but without isolation */
			container_cgroup = move_ptr(limiting_cgroup);
		}
	} else if (conf->cgroup_meta.dir) {
		cgroup_tree = conf->cgroup_meta.dir;
		container_cgroup = must_concat(&len, cgroup_tree, "/",
					       DEFAULT_PAYLOAD_CGROUP_PREFIX,
					       handler->name,
					       CGROUP_CREATE_RETRY, NULL);
	} else if (ops->cgroup_pattern) {
		__cgroup_tree = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
		if (!__cgroup_tree)
			return ret_set_errno(false, ENOMEM);

		cgroup_tree = __cgroup_tree;
		container_cgroup = must_concat(&len, cgroup_tree, "/",
					       DEFAULT_PAYLOAD_CGROUP,
					       CGROUP_CREATE_RETRY, NULL);
	} else {
		cgroup_tree = NULL;
		container_cgroup = must_concat(&len, DEFAULT_PAYLOAD_CGROUP_PREFIX,
					       handler->name,
					       CGROUP_CREATE_RETRY, NULL);
	}
	if (!container_cgroup)
		return ret_set_errno(false, ENOMEM);

	/* The generated name ends in the CGROUP_CREATE_RETRY placeholder;
	 * cut it off and reuse the space for the "-<idx>" retry suffix.
	 */
	if (!conf->cgroup_meta.container_dir) {
		suffix = container_cgroup + len - CGROUP_CREATE_RETRY_LEN;
		*suffix = '\0';
	}
	do {
		if (idx && suffix)
			sprintf(suffix, "-%d", idx);

		for (i = 0; ops->hierarchies[i]; i++) {
			if (cgroup_tree_create(ops, handler->conf,
					       ops->hierarchies[i], cgroup_tree,
					       container_cgroup, true,
					       limiting_cgroup))
				continue;

			/* Creation failed in this hierarchy: undo the
			 * cgroups created so far and retry with the next
			 * suffix.
			 */
			DEBUG("Failed to create cgroup \"%s\"", ops->hierarchies[i]->container_full_path ?: "(null)");
			for (int j = 0; j < i; j++)
				cgroup_tree_leaf_remove(ops->hierarchies[j], true);

			idx++;
			break;
		}
	} while (ops->hierarchies[i] && idx > 0 && idx < 1000 && suffix);

	/* Without a retry suffix (explicit container dir) a single
	 * failure is final.
	 */
	if (idx == 1000 || (!suffix && idx != 0))
		return log_error_errno(false, ERANGE, "Failed to create container cgroup");

	ops->container_cgroup = move_ptr(container_cgroup);
	INFO("The container process uses \"%s\" as cgroup", ops->container_cgroup);
	return true;
}
1451
1452 __cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops,
1453 struct lxc_handler *handler)
1454 {
1455 int monitor_len, transient_len = 0;
1456 char monitor[INTTYPE_TO_STRLEN(pid_t)],
1457 transient[INTTYPE_TO_STRLEN(pid_t)];
1458
1459 if (!ops)
1460 return ret_set_errno(false, ENOENT);
1461
1462 if (!ops->hierarchies)
1463 return true;
1464
1465 if (!ops->monitor_cgroup)
1466 return ret_set_errno(false, ENOENT);
1467
1468 if (!handler || !handler->conf)
1469 return ret_set_errno(false, EINVAL);
1470
1471 monitor_len = snprintf(monitor, sizeof(monitor), "%d", handler->monitor_pid);
1472 if (handler->transient_pid > 0)
1473 transient_len = snprintf(transient, sizeof(transient), "%d", handler->transient_pid);
1474
1475 for (int i = 0; ops->hierarchies[i]; i++) {
1476 struct hierarchy *h = ops->hierarchies[i];
1477 int ret;
1478
1479 ret = lxc_writeat(h->cgfd_mon, "cgroup.procs", monitor, monitor_len);
1480 if (ret)
1481 return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->monitor_full_path);
1482
1483 if (handler->transient_pid <= 0)
1484 return true;
1485
1486 ret = lxc_writeat(h->cgfd_mon, "cgroup.procs", transient, transient_len);
1487 if (ret)
1488 return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->monitor_full_path);
1489
1490 /*
1491 * we don't keep the fds for non-unified hierarchies around
1492 * mainly because we don't make use of them anymore after the
1493 * core cgroup setup is done but also because there are quite a
1494 * lot of them.
1495 */
1496 if (!is_unified_hierarchy(h))
1497 close_prot_errno_disarm(h->cgfd_mon);
1498 }
1499 handler->transient_pid = -1;
1500
1501 return true;
1502 }
1503
/*
 * Move the container's init process (handler->pid) into its payload
 * cgroup in every hierarchy. Returns true on success.
 */
__cgfsng_ops static bool cgfsng_payload_enter(struct cgroup_ops *ops,
					      struct lxc_handler *handler)
{
	int len;
	char pidstr[INTTYPE_TO_STRLEN(pid_t)];

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (!ops->container_cgroup)
		return ret_set_errno(false, ENOENT);

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);

	len = snprintf(pidstr, sizeof(pidstr), "%d", handler->pid);

	for (int i = 0; ops->hierarchies[i]; i++) {
		struct hierarchy *h = ops->hierarchies[i];
		int ret;

		/* On cgroup2 the process may already have been placed in
		 * its cgroup at clone time via CLONE_INTO_CGROUP.
		 */
		if (is_unified_hierarchy(h) && handler->clone_flags & CLONE_INTO_CGROUP)
			continue;

		ret = lxc_writeat(h->cgfd_con, "cgroup.procs", pidstr, len);
		if (ret != 0)
			return log_error_errno(false, errno, "Failed to enter cgroup \"%s\"", h->container_full_path);
	}

	return true;
}
1538
1539 static int fchowmodat(int dirfd, const char *path, uid_t chown_uid,
1540 gid_t chown_gid, mode_t chmod_mode)
1541 {
1542 int ret;
1543
1544 ret = fchownat(dirfd, path, chown_uid, chown_gid,
1545 AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW);
1546 if (ret < 0)
1547 return log_warn_errno(-1,
1548 errno, "Failed to fchownat(%d, %s, %d, %d, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )",
1549 dirfd, path, (int)chown_uid,
1550 (int)chown_gid);
1551
1552 ret = fchmodat(dirfd, (*path != '\0') ? path : ".", chmod_mode, 0);
1553 if (ret < 0)
1554 return log_warn_errno(-1, errno, "Failed to fchmodat(%d, %s, %d, AT_SYMLINK_NOFOLLOW)",
1555 dirfd, path, (int)chmod_mode);
1556
1557 return 0;
1558 }
1559
/* chgrp the container cgroups to container group. We leave
 * the container owner as cgroup owner. So we must make the
 * directories 775 so that the container can create sub-cgroups.
 *
 * Also chown the tasks and cgroup.procs files. Those may not
 * exist depending on kernel version.
 *
 * Runs inside the container's user namespace via userns_exec_1(); the
 * struct generic_userns_exec_data argument carries the hierarchies,
 * config and original euid.
 */
static int chown_cgroup_wrapper(void *data)
{
	int ret;
	uid_t destuid;
	struct generic_userns_exec_data *arg = data;
	/* With a root idmap, uid/gid 0 inside the namespace is the
	 * container root; otherwise fall back to the configured init ids.
	 */
	uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
	gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;

	if (!lxc_setgroups(0, NULL) && errno != EPERM)
		return log_error_errno(-1, errno, "Failed to setgroups(0, NULL)");

	ret = setresgid(nsgid, nsgid, nsgid);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to setresgid(%d, %d, %d)",
				       (int)nsgid, (int)nsgid, (int)nsgid);

	ret = setresuid(nsuid, nsuid, nsuid);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to setresuid(%d, %d, %d)",
				       (int)nsuid, (int)nsuid, (int)nsuid);

	/* Map the original host euid into the namespace; fall back to
	 * root when unmapped.
	 */
	destuid = get_ns_uid(arg->origuid);
	if (destuid == LXC_INVALID_UID)
		destuid = 0;

	for (int i = 0; arg->hierarchies[i]; i++) {
		int dirfd = arg->hierarchies[i]->cgfd_con;

		/* Empty path: operate on the cgroup directory itself. */
		(void)fchowmodat(dirfd, "", destuid, nsgid, 0775);

		/*
		 * Failures to chown() these are inconvenient but not
		 * detrimental We leave these owned by the container launcher,
		 * so that container root can write to the files to attach. We
		 * chmod() them 664 so that container systemd can write to the
		 * files (which systemd in wily insists on doing).
		 */

		if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC)
			(void)fchowmodat(dirfd, "tasks", destuid, nsgid, 0664);

		(void)fchowmodat(dirfd, "cgroup.procs", destuid, nsgid, 0664);

		if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
			continue;

		/* Additional cgroup2 files that must be delegated. */
		for (char **p = arg->hierarchies[i]->cgroup2_chown; p && *p; p++)
			(void)fchowmodat(dirfd, *p, destuid, nsgid, 0664);
	}

	return 0;
}
1619
1620 __cgfsng_ops static bool cgfsng_chown(struct cgroup_ops *ops,
1621 struct lxc_conf *conf)
1622 {
1623 struct generic_userns_exec_data wrap;
1624
1625 if (!ops)
1626 return ret_set_errno(false, ENOENT);
1627
1628 if (!ops->hierarchies)
1629 return true;
1630
1631 if (!ops->container_cgroup)
1632 return ret_set_errno(false, ENOENT);
1633
1634 if (!conf)
1635 return ret_set_errno(false, EINVAL);
1636
1637 if (lxc_list_empty(&conf->id_map))
1638 return true;
1639
1640 wrap.origuid = geteuid();
1641 wrap.path = NULL;
1642 wrap.hierarchies = ops->hierarchies;
1643 wrap.conf = conf;
1644
1645 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap, "chown_cgroup_wrapper") < 0)
1646 return log_error_errno(false, errno, "Error requesting cgroup chown in new user namespace");
1647
1648 return true;
1649 }
1650
/*
 * Finish payload cgroup setup: drop the cached dirfds of non-unified
 * hierarchies and detect cgroup2 freezer support now that a non-root
 * cgroup exists.
 */
__cgfsng_ops static void cgfsng_payload_finalize(struct cgroup_ops *ops)
{
	if (!ops)
		return;

	if (!ops->hierarchies)
		return;

	for (int i = 0; ops->hierarchies[i]; i++) {
		struct hierarchy *h = ops->hierarchies[i];
		/*
		 * we don't keep the fds for non-unified hierarchies around
		 * mainly because we don't make use of them anymore after the
		 * core cgroup setup is done but also because there are quite a
		 * lot of them.
		 */
		if (!is_unified_hierarchy(h))
			close_prot_errno_disarm(h->cgfd_con);
	}

	/*
	 * The checking for freezer support should obviously be done at cgroup
	 * initialization time but that doesn't work reliable. The freezer
	 * controller has been demoted (rightly so) to a simple file located in
	 * each non-root cgroup. At the time when the container is created we
	 * might still be located in /sys/fs/cgroup and so checking for
	 * cgroup.freeze won't tell us anything because this file doesn't exist
	 * in the root cgroup. We could then iterate through /sys/fs/cgroup and
	 * find an already existing cgroup and then check within that cgroup
	 * for the existence of cgroup.freeze but that will only work on
	 * systemd based hosts. Other init systems might not manage cgroups and
	 * so no cgroup will exist. So we defer until we have created cgroups
	 * for our container which means we check here.
	 */
	if (pure_unified_layout(ops) &&
	    !faccessat(ops->unified->cgfd_con, "cgroup.freeze", F_OK,
		       AT_SYMLINK_NOFOLLOW)) {
		TRACE("Unified hierarchy supports freezer");
		ops->unified->freezer_controller = 1;
	}
}
1692
1693 /* cgroup-full:* is done, no need to create subdirs */
1694 static inline bool cg_mount_needs_subdirs(int type)
1695 {
1696 return !(type >= LXC_AUTO_CGROUP_FULL_RO);
1697 }
1698
/* After $rootfs/sys/fs/container/controller/the/cg/path has been created,
 * remount controller ro if needed and bindmount the cgroupfs onto
 * control/the/cg/path.
 *
 * Returns 0 on success, -1 on error.
 */
static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
				       char *controllerpath, char *cgpath,
				       const char *container_cgroup)
{
	__do_free char *sourcepath = NULL;
	int ret, remount_flags;
	int flags = MS_BIND;

	/* For read-only/mixed automounts, bind-mount the controller dir
	 * onto itself first so it can then be remounted read-only.
	 */
	if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
		ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
		if (ret < 0)
			return log_error_errno(-1, errno, "Failed to bind mount \"%s\" onto \"%s\"",
					       controllerpath, controllerpath);

		remount_flags = add_required_remount_flags(controllerpath,
							   controllerpath,
							   flags | MS_REMOUNT);
		ret = mount(controllerpath, controllerpath, "cgroup",
			    remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
			    NULL);
		if (ret < 0)
			return log_error_errno(-1, errno, "Failed to remount \"%s\" ro", controllerpath);

		INFO("Remounted %s read-only", controllerpath);
	}

	/* Bind-mount the container's own cgroup from the host hierarchy
	 * onto its path inside the container rootfs.
	 */
	sourcepath = must_make_path(h->mountpoint, h->container_base_path,
				    container_cgroup, NULL);
	if (type == LXC_AUTO_CGROUP_RO)
		flags |= MS_RDONLY;

	ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to mount \"%s\" onto \"%s\"",
				       h->controllers[0], cgpath);
	INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);

	/* MS_RDONLY on a bind mount only takes effect on remount. */
	if (flags & MS_RDONLY) {
		remount_flags = add_required_remount_flags(sourcepath, cgpath,
							   flags | MS_REMOUNT);
		ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
		if (ret < 0)
			return log_error_errno(-1, errno, "Failed to remount \"%s\" ro", cgpath);
		INFO("Remounted %s read-only", cgpath);
	}

	INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
	return 0;
}
1752
/* __cg_mount_direct
 *
 * Mount cgroup hierarchies directly without using bind-mounts. The main
 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
 * cgroups for the LXC_AUTO_CGROUP_FULL option.
 *
 * Returns 0 on success, negative value on error.
 */
static int __cg_mount_direct(int type, struct hierarchy *h,
			     const char *controllerpath)
{
	__do_free char *controllers = NULL;
	char *fstype = "cgroup2";
	unsigned long flags = 0;
	int ret;

	flags |= MS_NOSUID;
	flags |= MS_NOEXEC;
	flags |= MS_NODEV;
	flags |= MS_RELATIME;

	if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
		flags |= MS_RDONLY;

	/* Legacy hierarchies need the controller list as mount data;
	 * cgroup2 takes no controller option.
	 */
	if (h->version != CGROUP2_SUPER_MAGIC) {
		controllers = lxc_string_join(",", (const char **)h->controllers, false);
		if (!controllers)
			return -ENOMEM;
		fstype = "cgroup";
	}

	ret = mount("cgroup", controllerpath, fstype, flags, controllers);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to mount \"%s\" with cgroup filesystem type %s",
				       controllerpath, fstype);

	DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
	return 0;
}
1790
/* Mount a hierarchy for a container that has its own cgroup namespace;
 * a direct (non-bind) mount is sufficient there.
 */
static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
					       const char *controllerpath)
{
	return __cg_mount_direct(type, h, controllerpath);
}
1796
1797 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1798 const char *controllerpath)
1799 {
1800 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1801 return 0;
1802
1803 return __cg_mount_direct(type, h, controllerpath);
1804 }
1805
/*
 * Set up the cgroup automounts below @root (the container rootfs) for
 * the lxc.mount.auto cgroup options encoded in @type.
 *
 * With a cgroup namespace available and no forced mount the container
 * can mount cgroups itself and nothing is done here. Otherwise a tmpfs
 * is mounted on /sys/fs/cgroup and each hierarchy is mounted below it,
 * either directly (cgroup namespaces / cgroup-full) or via bind mounts
 * of the container's own cgroup (legacy mixed/ro modes).
 */
__cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
				      struct lxc_handler *handler,
				      const char *root, int type)
{
	__do_free char *cgroup_root = NULL;
	bool has_cgns = false, wants_force_mount = false;
	int ret;

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);

	if ((type & LXC_AUTO_CGROUP_MASK) == 0)
		return true;

	if (type & LXC_AUTO_CGROUP_FORCE) {
		type &= ~LXC_AUTO_CGROUP_FORCE;
		wants_force_mount = true;
	}

	if (!wants_force_mount) {
		/* A container keeping CAP_SYS_ADMIN can mount cgroups on
		 * its own; otherwise we must do it for it.
		 */
		wants_force_mount = !lxc_wants_cap(CAP_SYS_ADMIN, handler->conf);

		/*
		 * Most recent distro versions currently have init system that
		 * do support cgroup2 but do not mount it by default unless
		 * explicitly told so even if the host is cgroup2 only. That
		 * means they often will fail to boot. Fix this by pre-mounting
		 * cgroup2 by default. We will likely need to be doing this a
		 * few years until all distros have switched over to cgroup2 at
		 * which point we can safely assume that their init systems
		 * will mount it themselves.
		 */
		if (pure_unified_layout(ops))
			wants_force_mount = true;
	}

	has_cgns = cgns_supported();
	if (has_cgns && !wants_force_mount)
		return true;

	/* Resolve the "nospec" placeholder types to concrete modes. */
	if (type == LXC_AUTO_CGROUP_NOSPEC)
		type = LXC_AUTO_CGROUP_MIXED;
	else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
		type = LXC_AUTO_CGROUP_FULL_MIXED;

	cgroup_root = must_make_path(root, DEFAULT_CGROUP_MOUNTPOINT, NULL);
	/* A pure cgroup2 host only needs the single unified mount. */
	if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
		if (has_cgns && wants_force_mount) {
			/*
			 * If cgroup namespaces are supported but the container
			 * will not have CAP_SYS_ADMIN after it has started we
			 * need to mount the cgroups manually.
			 */
			return cg_mount_in_cgroup_namespace(type, ops->unified, cgroup_root) == 0;
		}

		return cg_mount_cgroup_full(type, ops->unified, cgroup_root) == 0;
	}

	/*
	 * Mount a tmpfs over DEFAULT_CGROUP_MOUNTPOINT. Note that we're
	 * relying on RESOLVE_BENEATH so we need to skip the leading "/" in the
	 * DEFAULT_CGROUP_MOUNTPOINT define.
	 */
	ret = safe_mount_beneath(root, NULL,
				 DEFAULT_CGROUP_MOUNTPOINT_RELATIVE,
				 "tmpfs",
				 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
				 "size=10240k,mode=755");
	if (ret < 0) {
		/* ENOSYS: kernel lacks openat2(); fall back to the legacy
		 * safe_mount() path.
		 */
		if (errno != ENOSYS)
			return log_error_errno(false, errno,
					       "Failed to mount tmpfs on %s",
					       DEFAULT_CGROUP_MOUNTPOINT);

		ret = safe_mount(NULL, cgroup_root, "tmpfs",
				 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
				 "size=10240k,mode=755", root);
	}
	if (ret < 0)
		return false;

	/* Mount each legacy hierarchy below the tmpfs. */
	for (int i = 0; ops->hierarchies[i]; i++) {
		__do_free char *controllerpath = NULL, *path2 = NULL;
		struct hierarchy *h = ops->hierarchies[i];
		char *controller = strrchr(h->mountpoint, '/');

		if (!controller)
			continue;
		controller++;

		controllerpath = must_make_path(cgroup_root, controller, NULL);
		if (dir_exists(controllerpath))
			continue;

		ret = mkdir(controllerpath, 0755);
		if (ret < 0)
			return log_error_errno(false, errno, "Error creating cgroup path: %s", controllerpath);

		if (has_cgns && wants_force_mount) {
			/* If cgroup namespaces are supported but the container
			 * will not have CAP_SYS_ADMIN after it has started we
			 * need to mount the cgroups manually.
			 */
			ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
			if (ret < 0)
				return false;

			continue;
		}

		ret = cg_mount_cgroup_full(type, h, controllerpath);
		if (ret < 0)
			return false;

		/* cgroup-full mounts are complete at this point. */
		if (!cg_mount_needs_subdirs(type))
			continue;

		/* Create the container's cgroup path below the controller
		 * mount and bind the real cgroup onto it.
		 */
		path2 = must_make_path(controllerpath, h->container_base_path,
				       ops->container_cgroup, NULL);
		ret = mkdir_p(path2, 0755);
		if (ret < 0)
			return false;

		ret = cg_legacy_mount_controllers(type, h, controllerpath,
						  path2, ops->container_cgroup);
		if (ret < 0)
			return false;
	}

	return true;
}
1944
/* Only root needs to escape to the cgroup of its init. */
__cgfsng_ops static bool cgfsng_escape(const struct cgroup_ops *ops,
				       struct lxc_conf *conf)
{
	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (!conf)
		return ret_set_errno(false, EINVAL);

	/* With relative cgroups or as an unprivileged user we stay where
	 * we are.
	 */
	if (conf->cgroup_meta.relative || geteuid())
		return true;

	for (int i = 0; ops->hierarchies[i]; i++) {
		__do_free char *fullpath = NULL;
		int ret;

		fullpath =
			must_make_path(ops->hierarchies[i]->mountpoint,
				       ops->hierarchies[i]->container_base_path,
				       "cgroup.procs", NULL);
		/* Writing pid 0 moves the calling process itself. */
		ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
		if (ret != 0)
			return log_error_errno(false, errno, "Failed to escape to cgroup \"%s\"", fullpath);
	}

	return true;
}
1976
1977 __cgfsng_ops static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1978 {
1979 int i = 0;
1980
1981 if (!ops)
1982 return ret_set_errno(-1, ENOENT);
1983
1984 if (!ops->hierarchies)
1985 return 0;
1986
1987 for (; ops->hierarchies[i]; i++)
1988 ;
1989
1990 return i;
1991 }
1992
1993 __cgfsng_ops static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n,
1994 char ***out)
1995 {
1996 int i;
1997
1998 if (!ops)
1999 return ret_set_errno(false, ENOENT);
2000
2001 if (!ops->hierarchies)
2002 return ret_set_errno(false, ENOENT);
2003
2004 /* sanity check n */
2005 for (i = 0; i < n; i++)
2006 if (!ops->hierarchies[i])
2007 return ret_set_errno(false, ENOENT);
2008
2009 *out = ops->hierarchies[i]->controllers;
2010
2011 return true;
2012 }
2013
2014 static bool cg_legacy_freeze(struct cgroup_ops *ops)
2015 {
2016 struct hierarchy *h;
2017
2018 h = get_hierarchy(ops, "freezer");
2019 if (!h)
2020 return ret_set_errno(-1, ENOENT);
2021
2022 return lxc_write_openat(h->container_full_path, "freezer.state",
2023 "FROZEN", STRLITERALLEN("FROZEN"));
2024 }
2025
2026 static int freezer_cgroup_events_cb(int fd, uint32_t events, void *cbdata,
2027 struct lxc_epoll_descr *descr)
2028 {
2029 __do_close int duped_fd = -EBADF;
2030 __do_free char *line = NULL;
2031 __do_fclose FILE *f = NULL;
2032 int state = PTR_TO_INT(cbdata);
2033 size_t len;
2034 const char *state_string;
2035
2036 duped_fd = dup(fd);
2037 if (duped_fd < 0)
2038 return LXC_MAINLOOP_ERROR;
2039
2040 if (lseek(duped_fd, 0, SEEK_SET) < (off_t)-1)
2041 return LXC_MAINLOOP_ERROR;
2042
2043 f = fdopen(duped_fd, "re");
2044 if (!f)
2045 return LXC_MAINLOOP_ERROR;
2046 move_fd(duped_fd);
2047
2048 if (state == 1)
2049 state_string = "frozen 1";
2050 else
2051 state_string = "frozen 0";
2052
2053 while (getline(&line, &len, f) != -1)
2054 if (strncmp(line, state_string, STRLITERALLEN("frozen") + 2) == 0)
2055 return LXC_MAINLOOP_CLOSE;
2056
2057 return LXC_MAINLOOP_CONTINUE;
2058 }
2059
/*
 * Write @state_string ("0" or "1") to the container's cgroup.freeze
 * file and, when @timeout is non-zero, wait via an epoll mainloop on
 * cgroup.events until the matching "frozen" state (@state_num) is
 * reported or the timeout expires.
 *
 * Returns 0 on success, -1 with errno set on error.
 */
static int cg_unified_freeze_do(struct cgroup_ops *ops, int timeout,
				const char *state_string,
				int state_num,
				const char *epoll_error,
				const char *wait_error)
{
	__do_close int fd = -EBADF;
	call_cleaner(lxc_mainloop_close) struct lxc_epoll_descr *descr_ptr = NULL;
	int ret;
	struct lxc_epoll_descr descr;
	struct hierarchy *h;

	h = ops->unified;
	if (!h)
		return ret_set_errno(-1, ENOENT);

	if (!h->container_full_path)
		return ret_set_errno(-1, EEXIST);

	/* Set up the cgroup.events watcher before flipping the state so
	 * the transition cannot be missed.
	 */
	if (timeout != 0) {
		__do_free char *events_file = NULL;

		events_file = must_make_path(h->container_full_path, "cgroup.events", NULL);
		fd = open(events_file, O_RDONLY | O_CLOEXEC);
		if (fd < 0)
			return log_error_errno(-1, errno, "Failed to open cgroup.events file");

		ret = lxc_mainloop_open(&descr);
		if (ret)
			return log_error_errno(-1, errno, "%s", epoll_error);

		/* automatically cleaned up now */
		descr_ptr = &descr;

		/* cgroup.events signals changes via EPOLLPRI. */
		ret = lxc_mainloop_add_handler_events(&descr, fd, EPOLLPRI, freezer_cgroup_events_cb, INT_TO_PTR(state_num));
		if (ret < 0)
			return log_error_errno(-1, errno, "Failed to add cgroup.events fd handler to mainloop");
	}

	ret = lxc_write_openat(h->container_full_path, "cgroup.freeze", state_string, 1);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to open cgroup.freeze file");

	if (timeout != 0 && lxc_mainloop(&descr, timeout))
		return log_error_errno(-1, errno, "%s", wait_error);

	return 0;
}
2108
/* Freeze the container on the unified hierarchy, waiting up to
 * @timeout for the "frozen 1" event.
 */
static int cg_unified_freeze(struct cgroup_ops *ops, int timeout)
{
	return cg_unified_freeze_do(ops, timeout, "1", 1,
				    "Failed to create epoll instance to wait for container freeze",
				    "Failed to wait for container to be frozen");
}
2115
2116 __cgfsng_ops static int cgfsng_freeze(struct cgroup_ops *ops, int timeout)
2117 {
2118 if (!ops->hierarchies)
2119 return ret_set_errno(-1, ENOENT);
2120
2121 if (ops->cgroup_layout != CGROUP_LAYOUT_UNIFIED)
2122 return cg_legacy_freeze(ops);
2123
2124 return cg_unified_freeze(ops, timeout);
2125 }
2126
/* Thaw the container via the legacy freezer controller.
 *
 * Returns 0 on success, -1 with errno set on error.
 */
static int cg_legacy_unfreeze(struct cgroup_ops *ops)
{
	struct hierarchy *h;

	h = get_hierarchy(ops, "freezer");
	if (!h)
		return ret_set_errno(-1, ENOENT);

	return lxc_write_openat(h->container_full_path, "freezer.state",
				"THAWED", STRLITERALLEN("THAWED"));
}
2138
/* Thaw the container on the unified hierarchy, waiting up to @timeout
 * for the "frozen 0" event.
 */
static int cg_unified_unfreeze(struct cgroup_ops *ops, int timeout)
{
	return cg_unified_freeze_do(ops, timeout, "0", 0,
				    "Failed to create epoll instance to wait for container unfreeze",
				    "Failed to wait for container to be unfrozen");
}
2145
2146 __cgfsng_ops static int cgfsng_unfreeze(struct cgroup_ops *ops, int timeout)
2147 {
2148 if (!ops->hierarchies)
2149 return ret_set_errno(-1, ENOENT);
2150
2151 if (ops->cgroup_layout != CGROUP_LAYOUT_UNIFIED)
2152 return cg_legacy_unfreeze(ops);
2153
2154 return cg_unified_unfreeze(ops, timeout);
2155 }
2156
2157 static const char *cgfsng_get_cgroup_do(struct cgroup_ops *ops,
2158 const char *controller, bool limiting)
2159 {
2160 struct hierarchy *h;
2161
2162 h = get_hierarchy(ops, controller);
2163 if (!h)
2164 return log_warn_errno(NULL, ENOENT, "Failed to find hierarchy for controller \"%s\"",
2165 controller ? controller : "(null)");
2166
2167 if (limiting)
2168 return h->container_limit_path
2169 ? h->container_limit_path + strlen(h->mountpoint)
2170 : NULL;
2171
2172 return h->container_full_path
2173 ? h->container_full_path + strlen(h->mountpoint)
2174 : NULL;
2175 }
2176
/* Return the container's (non-limiting) cgroup path for @controller,
 * relative to the hierarchy's mountpoint; NULL if unknown.
 */
__cgfsng_ops static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
						  const char *controller)
{
	return cgfsng_get_cgroup_do(ops, controller, false);
}
2182
/* Return the container's limiting cgroup path for @controller,
 * relative to the hierarchy's mountpoint; NULL if unknown.
 */
__cgfsng_ops static const char *cgfsng_get_limiting_cgroup(struct cgroup_ops *ops,
							   const char *controller)
{
	return cgfsng_get_cgroup_do(ops, controller, true);
}
2188
/* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path
 * "<mountpoint>/<inpath>/<filename>", which must be freed by the caller.
 */
static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
						       const char *inpath,
						       const char *filename)
{
	/* must_make_path() aborts on allocation failure, so this never returns NULL. */
	return must_make_path(h->mountpoint, inpath, filename, NULL);
}
2198
/*
 * Attach @pid to the unified cgroup referenced by @unified_fd.
 *
 * First try the ".lxc" leaf cgroup (created here if missing), then the
 * cgroup directory itself. A write failing with EBUSY indicates the
 * target is a non-leaf node, so fall back to creating numbered
 * ".lxc-<n>" leaf cgroups until one accepts the pid (bounded at 1000).
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
static int cgroup_attach_leaf(const struct lxc_conf *conf, int unified_fd, pid_t pid)
{
	int idx = 1;
	int ret;
	char pidstr[INTTYPE_TO_STRLEN(int64_t) + 1];
	size_t pidstr_len;

	/* Create leaf cgroup. */
	ret = mkdirat(unified_fd, ".lxc", 0755);
	if (ret < 0 && errno != EEXIST)
		return log_error_errno(-1, errno, "Failed to create leaf cgroup \".lxc\"");

	pidstr_len = sprintf(pidstr, INT64_FMT, (int64_t)pid);
	/* Prefer the ".lxc" leaf; fall back to the cgroup itself. */
	ret = lxc_writeat(unified_fd, ".lxc/cgroup.procs", pidstr, pidstr_len);
	if (ret < 0)
		ret = lxc_writeat(unified_fd, "cgroup.procs", pidstr, pidstr_len);
	if (ret == 0)
		return 0;

	/* this is a non-leaf node */
	if (errno != EBUSY)
		return log_error_errno(-1, errno, "Failed to attach to unified cgroup");

	do {
		bool rm = false;
		char attach_cgroup[STRLITERALLEN(".lxc-/cgroup.procs") + INTTYPE_TO_STRLEN(int) + 1];
		char *slash = attach_cgroup;

		ret = snprintf(attach_cgroup, sizeof(attach_cgroup), ".lxc-%d/cgroup.procs", idx);
		if (ret < 0 || (size_t)ret >= sizeof(attach_cgroup))
			return ret_errno(EIO);

		/*
		 * This shouldn't really happen but the compiler might complain
		 * that a short write would cause a buffer overrun. So be on
		 * the safe side.
		 */
		if (ret < STRLITERALLEN(".lxc-/cgroup.procs"))
			return log_error_errno(-EINVAL, EINVAL, "Unexpected short write would cause buffer-overrun");

		/* Point at the '/' before "cgroup.procs" and temporarily
		 * NUL-terminate so the directory part can be created.
		 */
		slash += (ret - STRLITERALLEN("/cgroup.procs"));
		*slash = '\0';

		ret = mkdirat(unified_fd, attach_cgroup, 0755);
		if (ret < 0 && errno != EEXIST)
			return log_error_errno(-1, errno, "Failed to create cgroup %s", attach_cgroup);
		if (ret == 0)
			rm = true;

		/* Restore the full ".lxc-<n>/cgroup.procs" path. */
		*slash = '/';

		ret = lxc_writeat(unified_fd, attach_cgroup, pidstr, pidstr_len);
		if (ret == 0)
			return 0;

		/* Only remove the directory if we created it in this iteration. */
		if (rm && unlinkat(unified_fd, attach_cgroup, AT_REMOVEDIR))
			SYSERROR("Failed to remove cgroup \"%d(%s)\"", unified_fd, attach_cgroup);

		/* this is a non-leaf node */
		if (errno != EBUSY)
			return log_error_errno(-1, errno, "Failed to attach to unified cgroup");

		idx++;
	} while (idx < 1000);

	return log_error_errno(-1, errno, "Failed to attach to unified cgroup");
}
2266
2267 static int cgroup_attach_create_leaf(const struct lxc_conf *conf,
2268 int unified_fd, int *sk_fd)
2269 {
2270 __do_close int sk = *sk_fd, target_fd0 = -EBADF, target_fd1 = -EBADF;
2271 int target_fds[2];
2272 ssize_t ret;
2273
2274 /* Create leaf cgroup. */
2275 ret = mkdirat(unified_fd, ".lxc", 0755);
2276 if (ret < 0 && errno != EEXIST)
2277 return log_error_errno(-1, errno, "Failed to create leaf cgroup \".lxc\"");
2278
2279 target_fd0 = openat(unified_fd, ".lxc/cgroup.procs", O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
2280 if (target_fd0 < 0)
2281 return log_error_errno(-errno, errno, "Failed to open \".lxc/cgroup.procs\"");
2282 target_fds[0] = target_fd0;
2283
2284 target_fd1 = openat(unified_fd, "cgroup.procs", O_WRONLY | O_CLOEXEC | O_NOFOLLOW);
2285 if (target_fd1 < 0)
2286 return log_error_errno(-errno, errno, "Failed to open \".lxc/cgroup.procs\"");
2287 target_fds[1] = target_fd1;
2288
2289 ret = lxc_abstract_unix_send_fds(sk, target_fds, 2, NULL, 0);
2290 if (ret <= 0)
2291 return log_error_errno(-errno, errno, "Failed to send \".lxc/cgroup.procs\" fds %d and %d",
2292 target_fd0, target_fd1);
2293
2294 return log_debug(0, "Sent target cgroup fds %d and %d", target_fd0, target_fd1);
2295 }
2296
/*
 * Unprivileged half of the userns attach dance: receive the two
 * cgroup.procs fds sent by cgroup_attach_create_leaf() and write @pid
 * into the first one that accepts it (".lxc" leaf first, then the
 * cgroup itself).
 */
static int cgroup_attach_move_into_leaf(const struct lxc_conf *conf,
					int *sk_fd, pid_t pid)
{
	__do_close int sk = *sk_fd, target_fd0 = -EBADF, target_fd1 = -EBADF;
	int target_fds[2];
	char pidstr[INTTYPE_TO_STRLEN(int64_t) + 1];
	size_t pidstr_len;
	ssize_t ret;

	ret = lxc_abstract_unix_recv_fds(sk, target_fds, 2, NULL, 0);
	if (ret <= 0)
		return log_error_errno(-1, errno, "Failed to receive target cgroup fd");
	/* Assign to __do_close locals so both fds are closed on every path. */
	target_fd0 = target_fds[0];
	target_fd1 = target_fds[1];

	pidstr_len = sprintf(pidstr, INT64_FMT, (int64_t)pid);

	/* ret > 0 guards the signed/unsigned comparison below. */
	ret = lxc_write_nointr(target_fd0, pidstr, pidstr_len);
	if (ret > 0 && ret == pidstr_len)
		return log_debug(0, "Moved process into target cgroup via fd %d", target_fd0);

	ret = lxc_write_nointr(target_fd1, pidstr, pidstr_len);
	if (ret > 0 && ret == pidstr_len)
		return log_debug(0, "Moved process into target cgroup via fd %d", target_fd1);

	return log_debug_errno(-1, errno, "Failed to move process into target cgroup via fd %d and %d",
			       target_fd0, target_fd1);
}
2325
/* Arguments shared between the parent and child wrappers passed to
 * userns_exec_minimal() when attaching to the unified cgroup.
 */
struct userns_exec_unified_attach_data {
	const struct lxc_conf *conf;	/* container configuration */
	int unified_fd;			/* fd of the unified cgroup directory */
	int sk_pair[2];			/* socketpair: [0] parent end, [1] child end */
	pid_t pid;			/* pid to attach */
};
2332
2333 static int cgroup_unified_attach_child_wrapper(void *data)
2334 {
2335 struct userns_exec_unified_attach_data *args = data;
2336
2337 if (!args->conf || args->unified_fd < 0 || args->pid <= 0 ||
2338 args->sk_pair[0] < 0 || args->sk_pair[1] < 0)
2339 return ret_errno(EINVAL);
2340
2341 close_prot_errno_disarm(args->sk_pair[0]);
2342 return cgroup_attach_create_leaf(args->conf, args->unified_fd,
2343 &args->sk_pair[1]);
2344 }
2345
2346 static int cgroup_unified_attach_parent_wrapper(void *data)
2347 {
2348 struct userns_exec_unified_attach_data *args = data;
2349
2350 if (!args->conf || args->unified_fd < 0 || args->pid <= 0 ||
2351 args->sk_pair[0] < 0 || args->sk_pair[1] < 0)
2352 return ret_errno(EINVAL);
2353
2354 close_prot_errno_disarm(args->sk_pair[1]);
2355 return cgroup_attach_move_into_leaf(args->conf, &args->sk_pair[0],
2356 args->pid);
2357 }
2358
/*
 * Attach @pid to the unified cgroup of the running container @name at
 * @lxcpath, using the cgroup2 fd provided by the command socket.
 *
 * For containers with an id mapping the attach is performed through
 * userns_exec_minimal() so the write happens with the right credentials;
 * otherwise @pid is attached directly.
 *
 * Returns 0 on success, a negative errno-style value on failure
 * (-EBADF when no cgroup2 fd is available, letting callers fall back).
 */
int cgroup_attach(const struct lxc_conf *conf, const char *name,
		  const char *lxcpath, pid_t pid)
{
	__do_close int unified_fd = -EBADF;
	int ret;

	if (!conf || !name || !lxcpath || pid <= 0)
		return ret_errno(EINVAL);

	unified_fd = lxc_cmd_get_cgroup2_fd(name, lxcpath);
	if (unified_fd < 0)
		return ret_errno(EBADF);

	if (!lxc_list_empty(&conf->id_map)) {
		struct userns_exec_unified_attach_data args = {
			.conf = conf,
			.unified_fd = unified_fd,
			.pid = pid,
		};

		ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, args.sk_pair);
		if (ret < 0)
			return -errno;

		/* The wrappers close their respective socketpair ends. */
		ret = userns_exec_minimal(conf,
					  cgroup_unified_attach_parent_wrapper,
					  &args,
					  cgroup_unified_attach_child_wrapper,
					  &args);
	} else {
		ret = cgroup_attach_leaf(conf, unified_fd, pid);
	}

	return ret;
}
2394
/* Technically, we're always at a delegation boundary here (This is especially
 * true when cgroup namespaces are available.). The reasoning is that in order
 * for us to have been able to start a container in the first place the root
 * cgroup must have been a leaf node. Now, either the container's init system
 * has populated the cgroup and kept it as a leaf node or it has created
 * subtrees. In the former case we will simply attach to the leaf node we
 * created when we started the container in the latter case we create our own
 * cgroup for the attaching process.
 */
static int __cg_unified_attach(const struct hierarchy *h,
			       const struct lxc_conf *conf, const char *name,
			       const char *lxcpath, pid_t pid,
			       const char *controller)
{
	__do_close int unified_fd = -EBADF;
	__do_free char *path = NULL, *cgroup = NULL;
	int ret;

	if (!conf || !name || !lxcpath || pid <= 0)
		return ret_errno(EINVAL);

	/* Fast path: attach via the command-socket cgroup2 fd. */
	ret = cgroup_attach(conf, name, lxcpath, pid);
	if (ret == 0)
		return log_trace(0, "Attached to unified cgroup via command handler");
	/* Only -EBADF (no cgroup2 fd available) allows the fallback below. */
	if (ret != -EBADF)
		return log_error_errno(ret, errno, "Failed to attach to unified cgroup");

	/* Fall back to retrieving the path for the unified cgroup. */
	cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
	/* not running */
	if (!cgroup)
		return 0;

	path = must_make_path(h->mountpoint, cgroup, NULL);

	unified_fd = open(path, O_PATH | O_DIRECTORY | O_CLOEXEC);
	if (unified_fd < 0)
		return ret_errno(EBADF);

	if (!lxc_list_empty(&conf->id_map)) {
		struct userns_exec_unified_attach_data args = {
			.conf = conf,
			.unified_fd = unified_fd,
			.pid = pid,
		};

		ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, args.sk_pair);
		if (ret < 0)
			return -errno;

		/* The wrappers close their respective socketpair ends. */
		ret = userns_exec_minimal(conf,
					  cgroup_unified_attach_parent_wrapper,
					  &args,
					  cgroup_unified_attach_child_wrapper,
					  &args);
	} else {
		ret = cgroup_attach_leaf(conf, unified_fd, pid);
	}

	return ret;
}
2456
/*
 * Attach @pid to the container's cgroup in every hierarchy: unified
 * hierarchies go through __cg_unified_attach(), legacy ones are handled
 * by writing the pid into <mountpoint>/<path>/cgroup.procs directly.
 *
 * Returns true on success, false on failure (including when the
 * container is not running and no cgroup path can be retrieved).
 */
__cgfsng_ops static bool cgfsng_attach(struct cgroup_ops *ops,
				       const struct lxc_conf *conf,
				       const char *name, const char *lxcpath,
				       pid_t pid)
{
	int len, ret;
	char pidstr[INTTYPE_TO_STRLEN(pid_t)];

	if (!ops)
		return ret_set_errno(false, ENOENT);

	/* No hierarchies means there is nothing to attach to. */
	if (!ops->hierarchies)
		return true;

	len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
	if (len < 0 || (size_t)len >= sizeof(pidstr))
		return false;

	for (int i = 0; ops->hierarchies[i]; i++) {
		__do_free char *fullpath = NULL, *path = NULL;
		struct hierarchy *h = ops->hierarchies[i];

		if (h->version == CGROUP2_SUPER_MAGIC) {
			ret = __cg_unified_attach(h, conf, name, lxcpath, pid,
						  h->controllers[0]);
			if (ret < 0)
				return false;

			continue;
		}

		path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
		/* not running */
		if (!path)
			return false;

		fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
		ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
		if (ret < 0)
			return log_error_errno(false, errno, "Failed to attach %d to %s",
					       (int)pid, fullpath);
	}

	return true;
}
2502
/* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
 * don't have a cgroup_data set up, so we ask the running container through the
 * commands API for the cgroup path.
 *
 * @filename is "<controller>.<knob>" (e.g. "memory.max"); the controller
 * name is split off to locate the hierarchy. Reads up to @len bytes of
 * the knob's content into @value. Returns the lxc_read_from_file()
 * result, or -1 if the container is not running or setup fails.
 */
__cgfsng_ops static int cgfsng_get(struct cgroup_ops *ops, const char *filename,
				   char *value, size_t len, const char *name,
				   const char *lxcpath)
{
	__do_free char *path = NULL;
	__do_free char *controller = NULL;
	char *p;
	struct hierarchy *h;
	int ret = -1;

	if (!ops)
		return ret_set_errno(-1, ENOENT);

	/* Truncate "<controller>.<knob>" at the '.' to get the controller. */
	controller = must_copy_string(filename);
	p = strchr(controller, '.');
	if (p)
		*p = '\0';

	path = lxc_cmd_get_limiting_cgroup_path(name, lxcpath, controller);
	/* not running */
	if (!path)
		return -1;

	h = get_hierarchy(ops, controller);
	if (h) {
		__do_free char *fullpath = NULL;

		fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
		ret = lxc_read_from_file(fullpath, value, len);
	}

	return ret;
}
2540
2541 static int device_cgroup_parse_access(struct device_item *device, const char *val)
2542 {
2543 for (int count = 0; count < 3; count++, val++) {
2544 switch (*val) {
2545 case 'r':
2546 device->access[count] = *val;
2547 break;
2548 case 'w':
2549 device->access[count] = *val;
2550 break;
2551 case 'm':
2552 device->access[count] = *val;
2553 break;
2554 case '\n':
2555 case '\0':
2556 count = 3;
2557 break;
2558 default:
2559 return ret_errno(EINVAL);
2560 }
2561 }
2562
2563 return 0;
2564 }
2565
/*
 * Parse a cgroup1 devices-style rule such as "c 1:3 rwm", "b *:* rwm",
 * or the bare global rule "a" into @device. @key selects allow vs deny
 * semantics ("devices.allow" vs "devices.deny").
 *
 * For the global "a" rule the list type is recorded in global_rule:
 * allowing everything means any subsequent specific rules form a
 * denylist, and vice versa.
 *
 * Returns 0 on success, negative on malformed input.
 */
static int device_cgroup_rule_parse(struct device_item *device, const char *key,
				    const char *val)
{
	int count, ret;
	char temp[50];

	if (strcmp("devices.allow", key) == 0)
		device->allow = 1;
	else
		device->allow = 0;

	if (strcmp(val, "a") == 0) {
		/* global rule */
		device->type = 'a';
		device->major = -1;
		device->minor = -1;
		device->global_rule = device->allow
					  ? LXC_BPF_DEVICE_CGROUP_DENYLIST
					  : LXC_BPF_DEVICE_CGROUP_ALLOWLIST;
		/* -1 marks this as a global rule rather than a per-device one. */
		device->allow = -1;
		return 0;
	}

	/* local rule */
	device->global_rule = LXC_BPF_DEVICE_CGROUP_LOCAL_RULE;

	/* Device type: 'a' (all), 'b' (block), or 'c' (char). */
	switch (*val) {
	case 'a':
		__fallthrough;
	case 'b':
		__fallthrough;
	case 'c':
		device->type = *val;
		break;
	default:
		return -1;
	}

	val++;
	if (!isspace(*val))
		return -1;
	val++;
	/* Read major: '*' for any, otherwise a decimal number.
	 * NOTE(review): isdigit()/isspace() receive plain char here; for
	 * non-ASCII input that's implementation-defined — confirm inputs
	 * are ASCII-only.
	 */
	if (*val == '*') {
		device->major = -1;
		val++;
	} else if (isdigit(*val)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *val;
			val++;
			if (!isdigit(*val))
				break;
		}
		ret = lxc_safe_int(temp, &device->major);
		if (ret)
			return -1;
	} else {
		return -1;
	}
	if (*val != ':')
		return -1;
	val++;

	/* read minor */
	if (*val == '*') {
		device->minor = -1;
		val++;
	} else if (isdigit(*val)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *val;
			val++;
			if (!isdigit(*val))
				break;
		}
		ret = lxc_safe_int(temp, &device->minor);
		if (ret)
			return -1;
	} else {
		return -1;
	}
	if (!isspace(*val))
		return -1;

	/* Remaining token is the access string ("rwm" etc.). */
	return device_cgroup_parse_access(device, ++val);
}
2652
/* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
 * don't have a cgroup_data set up, so we ask the running container through the
 * commands API for the cgroup path.
 *
 * @key is "<controller>.<knob>". On a pure cgroup2 system device rules
 * ("devices.*") are translated into bpf device-cgroup rules instead of
 * being written to a file. Returns 0/write result on success, -1 on
 * failure or when the container is not running.
 */
__cgfsng_ops static int cgfsng_set(struct cgroup_ops *ops,
				   const char *key, const char *value,
				   const char *name, const char *lxcpath)
{
	__do_free char *path = NULL;
	__do_free char *controller = NULL;
	char *p;
	struct hierarchy *h;
	int ret = -1;

	if (!ops || !key || !value || !name || !lxcpath)
		return ret_errno(ENOENT);

	/* Truncate "<controller>.<knob>" at the '.' to get the controller. */
	controller = must_copy_string(key);
	p = strchr(controller, '.');
	if (p)
		*p = '\0';

	if (pure_unified_layout(ops) && strcmp(controller, "devices") == 0) {
		struct device_item device = {};

		/* Pass the full key so allow/deny semantics are preserved. */
		ret = device_cgroup_rule_parse(&device, key, value);
		if (ret < 0)
			return log_error_errno(-1, EINVAL, "Failed to parse device string %s=%s",
					       key, value);

		ret = lxc_cmd_add_bpf_device_cgroup(name, lxcpath, &device);
		if (ret < 0)
			return -1;

		return 0;
	}

	path = lxc_cmd_get_limiting_cgroup_path(name, lxcpath, controller);
	/* not running */
	if (!path)
		return -1;

	h = get_hierarchy(ops, controller);
	if (h) {
		__do_free char *fullpath = NULL;

		fullpath = build_full_cgpath_from_monitorpath(h, path, key);
		ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
	}

	return ret;
}
2705
/* Take a devices cgroup line of the form
 *	/dev/foo rwx
 * stat the device node, and fill @device with the equivalent
 *	type major:minor mode
 * rule. Return <0 on error.
 */
static int device_cgroup_rule_parse_devpath(struct device_item *device,
					    const char *devpath)
{
	__do_free char *path = NULL;
	char *mode = NULL;
	int n_parts, ret;
	char *p;
	struct stat sb;

	path = must_copy_string(devpath);

	/*
	 * Read path followed by mode. Ignore any trailing text.
	 * A ' # comment' would be legal. Technically other text is not
	 * legal, we could check for that if we cared to.
	 */
	for (n_parts = 1, p = path; *p; p++) {
		if (*p != ' ')
			continue;
		/* Terminate the path at the first space. */
		*p = '\0';

		if (n_parts != 1)
			break;
		p++;
		n_parts++;

		/* Skip any extra spaces before the mode string. */
		while (*p == ' ')
			p++;

		mode = p;

		/* A trailing space with no mode is invalid. */
		if (*p == '\0')
			return ret_set_errno(-1, EINVAL);
	}

	if (!mode)
		return ret_errno(EINVAL);

	if (device_cgroup_parse_access(device, mode) < 0)
		return -1;

	/* The device node must exist so its type and numbers can be read. */
	ret = stat(path, &sb);
	if (ret < 0)
		return ret_set_errno(-1, errno);

	mode_t m = sb.st_mode & S_IFMT;
	switch (m) {
	case S_IFBLK:
		device->type = 'b';
		break;
	case S_IFCHR:
		device->type = 'c';
		break;
	default:
		return log_error_errno(-1, EINVAL, "Unsupported device type %i for \"%s\"", m, path);
	}

	device->major = MAJOR(sb.st_rdev);
	device->minor = MINOR(sb.st_rdev);
	device->allow = 1;
	device->global_rule = LXC_BPF_DEVICE_CGROUP_LOCAL_RULE;

	return 0;
}
2777
2778 static int convert_devpath(const char *invalue, char *dest)
2779 {
2780 struct device_item device = {};
2781 int ret;
2782
2783 ret = device_cgroup_rule_parse_devpath(&device, invalue);
2784 if (ret < 0)
2785 return -1;
2786
2787 ret = snprintf(dest, 50, "%c %d:%d %s", device.type, device.major,
2788 device.minor, device.access);
2789 if (ret < 0 || ret >= 50)
2790 return log_error_errno(-1, ENAMETOOLONG, "Error on configuration value \"%c %d:%d %s\" (max 50 chars)",
2791 device.type, device.major, device.minor, device.access);
2792
2793 return 0;
2794 }
2795
/* Called from setup_limits - here we have the container's cgroup_data because
 * we created the cgroups.
 *
 * Write @value to @filename ("<controller>.<knob>") in the container's
 * legacy cgroup. "devices.allow" values that start with '/' are device
 * paths and get converted to "type major:minor mode" form first. cpuset
 * knobs are additionally written to the full path before the limit path.
 */
static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
			      const char *value, bool is_cpuset)
{
	__do_free char *controller = NULL;
	char *p;
	/* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
	char converted_value[50];
	struct hierarchy *h;

	/* Truncate "<controller>.<knob>" at the '.' to get the controller. */
	controller = must_copy_string(filename);
	p = strchr(controller, '.');
	if (p)
		*p = '\0';

	if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
		int ret;

		ret = convert_devpath(value, converted_value);
		if (ret < 0)
			return ret;
		value = converted_value;
	}

	h = get_hierarchy(ops, controller);
	if (!h)
		return log_error_errno(-ENOENT, ENOENT, "Failed to setup limits for the \"%s\" controller. The controller seems to be unused by \"cgfsng\" cgroup driver or not enabled on the cgroup hierarchy", controller);

	if (is_cpuset) {
		int ret = lxc_write_openat(h->container_full_path, filename, value, strlen(value));
		if (ret)
			return ret;
	}
	return lxc_write_openat(h->container_limit_path, filename, value, strlen(value));
}
2833
2834 __cgfsng_ops static bool cgfsng_setup_limits_legacy(struct cgroup_ops *ops,
2835 struct lxc_conf *conf,
2836 bool do_devices)
2837 {
2838 __do_free struct lxc_list *sorted_cgroup_settings = NULL;
2839 struct lxc_list *cgroup_settings = &conf->cgroup;
2840 struct lxc_list *iterator, *next;
2841 struct lxc_cgroup *cg;
2842 bool ret = false;
2843
2844 if (!ops)
2845 return ret_set_errno(false, ENOENT);
2846
2847 if (!conf)
2848 return ret_set_errno(false, EINVAL);
2849
2850 cgroup_settings = &conf->cgroup;
2851 if (lxc_list_empty(cgroup_settings))
2852 return true;
2853
2854 if (!ops->hierarchies)
2855 return ret_set_errno(false, EINVAL);
2856
2857 if (pure_unified_layout(ops))
2858 return log_warn_errno(true, EINVAL, "Ignoring legacy cgroup limits on pure cgroup2 system");
2859
2860 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2861 if (!sorted_cgroup_settings)
2862 return false;
2863
2864 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2865 cg = iterator->elem;
2866
2867 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2868 if (cg_legacy_set_data(ops, cg->subsystem, cg->value, strncmp("cpuset", cg->subsystem, 6) == 0)) {
2869 if (do_devices && (errno == EACCES || errno == EPERM)) {
2870 SYSWARN("Failed to set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2871 continue;
2872 }
2873 SYSERROR("Failed to set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2874 goto out;
2875 }
2876 DEBUG("Set controller \"%s\" set to \"%s\"", cg->subsystem, cg->value);
2877 }
2878 }
2879
2880 ret = true;
2881 INFO("Limits for the legacy cgroup hierarchies have been setup");
2882 out:
2883 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2884 lxc_list_del(iterator);
2885 free(iterator);
2886 }
2887
2888 return ret;
2889 }
2890
/*
 * Some of the parsing logic comes from the original cgroup device v1
 * implementation in the kernel.
 *
 * Parse a device rule from @key/@val and queue it on @conf's bpf device
 * list for later program generation. A "devices.allow" value starting
 * with '/' is a device path; anything else is a "type major:minor mode"
 * rule. Compiled out (always 0) without bpf cgroup-device support.
 */
static int bpf_device_cgroup_prepare(struct cgroup_ops *ops,
				     struct lxc_conf *conf, const char *key,
				     const char *val)
{
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
	struct device_item device_item = {};
	int ret;

	if (strcmp("devices.allow", key) == 0 && *val == '/')
		ret = device_cgroup_rule_parse_devpath(&device_item, val);
	else
		ret = device_cgroup_rule_parse(&device_item, key, val);
	if (ret < 0)
		return log_error_errno(-1, EINVAL, "Failed to parse device string %s=%s", key, val);

	ret = bpf_list_add_device(conf, &device_item);
	if (ret < 0)
		return -1;
#endif
	return 0;
}
2916
/*
 * Apply the cgroup2 limits from @conf->cgroup2 to the container's
 * unified cgroup. Device keys are routed through the bpf device-cgroup
 * machinery; everything else is written to the limit path directly.
 *
 * Returns true on success or when there is nothing to do (no
 * hierarchies, no cgroup2 settings, or a legacy-only system).
 */
__cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
					     struct lxc_handler *handler)
{
	struct lxc_list *cgroup_settings, *iterator;
	struct hierarchy *h;
	struct lxc_conf *conf;

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (!ops->container_cgroup)
		return ret_set_errno(false, EINVAL);

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);
	conf = handler->conf;

	cgroup_settings = &conf->cgroup2;
	if (lxc_list_empty(cgroup_settings))
		return true;

	if (!pure_unified_layout(ops))
		return log_warn_errno(true, EINVAL, "Ignoring cgroup2 limits on legacy cgroup system");

	if (!ops->unified)
		return false;
	h = ops->unified;

	lxc_list_for_each (iterator, cgroup_settings) {
		struct lxc_cgroup *cg = iterator->elem;
		int ret;

		if (strncmp("devices", cg->subsystem, 7) == 0)
			ret = bpf_device_cgroup_prepare(ops, conf, cg->subsystem, cg->value);
		else
			ret = lxc_write_openat(h->container_limit_path, cg->subsystem, cg->value, strlen(cg->value));
		if (ret < 0)
			return log_error_errno(false, errno, "Failed to set \"%s\" to \"%s\"", cg->subsystem, cg->value);

		TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
	}

	return log_info(true, "Limits for the unified cgroup hierarchy have been setup");
}
2964
/*
 * Build a bpf device-cgroup program from @handler->conf->devices,
 * attach it to the container's unified cgroup with BPF_F_ALLOW_MULTI,
 * and swap it in as ops->cgroup2_devices (the previous program, if any,
 * is released via the __do_bpf_program_free cleanup on @prog).
 *
 * Returns true on success or when there is nothing to do; compiled to
 * an unconditional true without bpf cgroup-device support.
 */
__cgfsng_ops static bool cgfsng_devices_activate(struct cgroup_ops *ops, struct lxc_handler *handler)
{
#ifdef HAVE_STRUCT_BPF_CGROUP_DEV_CTX
	__do_bpf_program_free struct bpf_program *prog = NULL;
	int ret;
	struct lxc_conf *conf;
	struct hierarchy *unified;
	struct lxc_list *it;
	struct bpf_program *prog_old;

	if (!ops)
		return ret_set_errno(false, ENOENT);

	if (!ops->hierarchies)
		return true;

	if (!ops->container_cgroup)
		return ret_set_errno(false, EEXIST);

	if (!handler || !handler->conf)
		return ret_set_errno(false, EINVAL);
	conf = handler->conf;

	/* Nothing to do without a unified hierarchy supporting the bpf
	 * device controller or without any device rules. */
	unified = ops->unified;
	if (!unified || !unified->bpf_device_controller ||
	    !unified->container_full_path || lxc_list_empty(&conf->devices))
		return true;

	prog = bpf_program_new(BPF_PROG_TYPE_CGROUP_DEVICE);
	if (!prog)
		return log_error_errno(false, ENOMEM, "Failed to create new bpf program");

	ret = bpf_program_init(prog);
	if (ret)
		return log_error_errno(false, ENOMEM, "Failed to initialize bpf program");

	lxc_list_for_each(it, &conf->devices) {
		struct device_item *cur = it->elem;

		ret = bpf_program_append_device(prog, cur);
		if (ret)
			return log_error_errno(false, ENOMEM, "Failed to add new rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
					       cur->type,
					       cur->major,
					       cur->minor,
					       cur->access,
					       cur->allow,
					       cur->global_rule);
		TRACE("Added rule to bpf device program: type %c, major %d, minor %d, access %s, allow %d, global_rule %d",
		      cur->type,
		      cur->major,
		      cur->minor,
		      cur->access,
		      cur->allow,
		      cur->global_rule);
	}

	ret = bpf_program_finalize(prog);
	if (ret)
		return log_error_errno(false, ENOMEM, "Failed to finalize bpf program");

	ret = bpf_program_cgroup_attach(prog, BPF_CGROUP_DEVICE,
					unified->container_limit_path,
					BPF_F_ALLOW_MULTI);
	if (ret)
		return log_error_errno(false, ENOMEM, "Failed to attach bpf program");

	/* Replace old bpf program. */
	prog_old = move_ptr(ops->cgroup2_devices);
	ops->cgroup2_devices = move_ptr(prog);
	prog = move_ptr(prog_old);
#endif
	return true;
}
3039
3040 static bool __cgfsng_delegate_controllers(struct cgroup_ops *ops, const char *cgroup)
3041 {
3042 __do_free char *add_controllers = NULL, *base_path = NULL;
3043 __do_free_string_list char **parts = NULL;
3044 struct hierarchy *unified = ops->unified;
3045 ssize_t parts_len;
3046 char **it;
3047 size_t full_len = 0;
3048
3049 if (!ops->hierarchies || !pure_unified_layout(ops) ||
3050 !unified->controllers[0])
3051 return true;
3052
3053 /* For now we simply enable all controllers that we have detected by
3054 * creating a string like "+memory +pids +cpu +io".
3055 * TODO: In the near future we might want to support "-<controller>"
3056 * etc. but whether supporting semantics like this make sense will need
3057 * some thinking.
3058 */
3059 for (it = unified->controllers; it && *it; it++) {
3060 full_len += strlen(*it) + 2;
3061 add_controllers = must_realloc(add_controllers, full_len + 1);
3062
3063 if (unified->controllers[0] == *it)
3064 add_controllers[0] = '\0';
3065
3066 (void)strlcat(add_controllers, "+", full_len + 1);
3067 (void)strlcat(add_controllers, *it, full_len + 1);
3068
3069 if ((it + 1) && *(it + 1))
3070 (void)strlcat(add_controllers, " ", full_len + 1);
3071 }
3072
3073 parts = lxc_string_split(cgroup, '/');
3074 if (!parts)
3075 return false;
3076
3077 parts_len = lxc_array_len((void **)parts);
3078 if (parts_len > 0)
3079 parts_len--;
3080
3081 base_path = must_make_path(unified->mountpoint, unified->container_base_path, NULL);
3082 for (ssize_t i = -1; i < parts_len; i++) {
3083 int ret;
3084 __do_free char *target = NULL;
3085
3086 if (i >= 0)
3087 base_path = must_append_path(base_path, parts[i], NULL);
3088 target = must_make_path(base_path, "cgroup.subtree_control", NULL);
3089 ret = lxc_writeat(-1, target, add_controllers, full_len);
3090 if (ret < 0)
3091 return log_error_errno(false, errno, "Could not enable \"%s\" controllers in the unified cgroup \"%s\"",
3092 add_controllers, target);
3093 TRACE("Enable \"%s\" controllers in the unified cgroup \"%s\"", add_controllers, target);
3094 }
3095
3096 return true;
3097 }
3098
/* Delegate controllers down to the monitor's cgroup. */
__cgfsng_ops static bool cgfsng_monitor_delegate_controllers(struct cgroup_ops *ops)
{
	if (!ops)
		return ret_set_errno(false, ENOENT);

	return __cgfsng_delegate_controllers(ops, ops->monitor_cgroup);
}
3106
/* Delegate controllers down to the container's payload cgroup. */
__cgfsng_ops static bool cgfsng_payload_delegate_controllers(struct cgroup_ops *ops)
{
	if (!ops)
		return ret_set_errno(false, ENOENT);

	return __cgfsng_delegate_controllers(ops, ops->container_cgroup);
}
3114
3115 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
3116 char **controllers)
3117 {
3118 if (!ops->cgroup_use)
3119 return true;
3120
3121 for (char **cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
3122 bool found = false;
3123
3124 for (char **cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
3125 if (strcmp(*cur_use, *cur_ctrl) != 0)
3126 continue;
3127
3128 found = true;
3129 break;
3130 }
3131
3132 if (found)
3133 continue;
3134
3135 return false;
3136 }
3137
3138 return true;
3139 }
3140
3141 static void cg_unified_delegate(char ***delegate)
3142 {
3143 __do_free char *buf = NULL;
3144 char *standard[] = {"cgroup.subtree_control", "cgroup.threads", NULL};
3145 char *token;
3146 int idx;
3147
3148 buf = read_file_at(-EBADF, "/sys/kernel/cgroup/delegate");
3149 if (!buf) {
3150 for (char **p = standard; p && *p; p++) {
3151 idx = append_null_to_list((void ***)delegate);
3152 (*delegate)[idx] = must_copy_string(*p);
3153 }
3154 SYSWARN("Failed to read /sys/kernel/cgroup/delegate");
3155 return;
3156 }
3157
3158 lxc_iterate_parts(token, buf, " \t\n") {
3159 /*
3160 * We always need to chown this for both cgroup and
3161 * cgroup2.
3162 */
3163 if (strcmp(token, "cgroup.procs") == 0)
3164 continue;
3165
3166 idx = append_null_to_list((void ***)delegate);
3167 (*delegate)[idx] = must_copy_string(token);
3168 }
3169 }
3170
/* At startup, parse the cgroup mount points from /proc/self/mountinfo and the
 * calling process's (or init's) current cgroups, and record every usable
 * hierarchy in @ops->hierarchies. As a side effect the overall cgroup layout
 * (legacy, unified, or hybrid) is detected and stored in @ops->cgroup_layout.
 *
 * @ops          - cgroup driver state to fill in.
 * @relative     - if true, stay relative to our own cgroup instead of using
 *                 init's cgroups as the base.
 * @unprivileged - whether the container has an id map; enables delegation
 *                 handling for a unified (v2) mount found in a hybrid layout.
 *
 * Returns 0 on success, -1 (with errno set) on failure.
 */
static int cg_hybrid_init(struct cgroup_ops *ops, bool relative, bool unprivileged)
{
	__do_free char *basecginfo = NULL, *line = NULL;
	__do_free_string_list char **klist = NULL, **nlist = NULL;
	__do_fclose FILE *f = NULL;
	int ret;
	size_t len = 0;

	/* Root spawned containers escape the current cgroup, so use init's
	 * cgroups as our base in that case.
	 */
	if (!relative && (geteuid() == 0))
		basecginfo = read_file_at(-EBADF, "/proc/1/cgroup");
	else
		basecginfo = read_file_at(-EBADF, "/proc/self/cgroup");
	if (!basecginfo)
		return ret_set_errno(-1, ENOMEM);

	/* Kernel-known (@klist) and named/no-controller (@nlist) legacy
	 * subsystems, used below to parse each mountinfo line.
	 */
	ret = get_existing_subsystems(&klist, &nlist);
	if (ret < 0)
		return log_error_errno(-1, errno, "Failed to retrieve available legacy cgroup controllers");

	f = fopen("/proc/self/mountinfo", "re");
	if (!f)
		return log_error_errno(-1, errno, "Failed to open \"/proc/self/mountinfo\"");

	lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);

	/* One mountinfo line per mount; skip everything that is not a cgroup
	 * filesystem.
	 */
	while (getline(&line, &len, f) != -1) {
		__do_free char *base_cgroup = NULL, *mountpoint = NULL;
		__do_free_string_list char **controller_list = NULL;
		int type;
		bool writeable;
		struct hierarchy *new;

		/* 0 means "not a cgroup mount at all". */
		type = get_cgroup_version(line);
		if (type == 0)
			continue;

		/* Only one unified hierarchy can exist; ignore duplicates. */
		if (type == CGROUP2_SUPER_MAGIC && ops->unified)
			continue;

		/* Classify the overall layout from the combination of v1 and
		 * v2 mounts seen so far: seeing both kinds means hybrid.
		 */
		if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
			if (type == CGROUP2_SUPER_MAGIC)
				ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
			else if (type == CGROUP_SUPER_MAGIC)
				ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
		} else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
			if (type == CGROUP_SUPER_MAGIC)
				ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
		} else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
			if (type == CGROUP2_SUPER_MAGIC)
				ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
		}

		controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
		if (!controller_list && type == CGROUP_SUPER_MAGIC)
			continue;

		/* The same v1 controller set can be mounted in several
		 * places; keep only the first mount we found.
		 */
		if (type == CGROUP_SUPER_MAGIC)
			if (controller_list_is_dup(ops->hierarchies, controller_list)) {
				TRACE("Skipping duplicating controller");
				continue;
			}

		mountpoint = cg_hybrid_get_mountpoint(line);
		if (!mountpoint) {
			WARN("Failed parsing mountpoint from \"%s\"", line);
			continue;
		}

		/* Find our current cgroup in this hierarchy from the
		 * /proc/<pid>/cgroup contents read above.
		 */
		if (type == CGROUP_SUPER_MAGIC)
			base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
		else
			base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
		if (!base_cgroup) {
			WARN("Failed to find current cgroup");
			continue;
		}

		/* A hierarchy we cannot write to is useless to us. */
		trim(base_cgroup);
		prune_init_scope(base_cgroup);
		if (type == CGROUP2_SUPER_MAGIC)
			writeable = test_writeable_v2(mountpoint, base_cgroup);
		else
			writeable = test_writeable_v1(mountpoint, base_cgroup);
		if (!writeable) {
			TRACE("The %s group is not writeable", base_cgroup);
			continue;
		}

		if (type == CGROUP2_SUPER_MAGIC) {
			char *cgv2_ctrl_path;

			/* For v2 the controller set comes from the
			 * cgroup.controllers file, not from mountinfo.
			 */
			cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
							"cgroup.controllers",
							NULL);

			/* NOTE(review): controller_list is expected to be NULL
			 * here for a v2 mount; if cg_hybrid_get_controllers()
			 * ever returned a list for CGROUP2_SUPER_MAGIC this
			 * reassignment would leak it — confirm.
			 */
			controller_list = cg_unified_get_controllers(-EBADF, cgv2_ctrl_path);
			free(cgv2_ctrl_path);
			if (!controller_list) {
				controller_list = cg_unified_make_empty_controller();
				TRACE("No controllers are enabled for "
				      "delegation in the unified hierarchy");
			}
		}

		/* Exclude all controllers that cgroup use does not want. */
		if (!cgroup_use_wants_controllers(ops, controller_list)) {
			TRACE("Skipping controller");
			continue;
		}

		/* Ownership of controller_list, mountpoint, and base_cgroup
		 * transfers to the new hierarchy entry.
		 */
		new = add_hierarchy(&ops->hierarchies, move_ptr(controller_list), move_ptr(mountpoint), move_ptr(base_cgroup), type);
		if (!new)
			return log_error_errno(-1, errno, "Failed to add cgroup hierarchy");
		if (type == CGROUP2_SUPER_MAGIC && !ops->unified) {
			if (unprivileged)
				cg_unified_delegate(&new->cgroup2_chown);
			ops->unified = new;
		}
	}

	TRACE("Writable cgroup hierarchies:");
	lxc_cgfsng_print_hierarchies(ops);

	/* verify that all controllers in cgroup.use and all crucial
	 * controllers are accounted for
	 */
	if (!all_controllers_found(ops))
		return log_error_errno(-1, ENOENT, "Failed to find all required controllers");

	return 0;
}
3308
3309 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
3310 static char *cg_unified_get_current_cgroup(bool relative)
3311 {
3312 __do_free char *basecginfo = NULL;
3313 char *copy;
3314 char *base_cgroup;
3315
3316 if (!relative && (geteuid() == 0))
3317 basecginfo = read_file_at(-EBADF, "/proc/1/cgroup");
3318 else
3319 basecginfo = read_file_at(-EBADF, "/proc/self/cgroup");
3320 if (!basecginfo)
3321 return NULL;
3322
3323 base_cgroup = strstr(basecginfo, "0::/");
3324 if (!base_cgroup)
3325 return NULL;
3326
3327 base_cgroup = base_cgroup + 3;
3328 copy = copy_to_eol(base_cgroup);
3329 if (!copy)
3330 return NULL;
3331
3332 return trim(copy);
3333 }
3334
3335 static int cg_unified_init(struct cgroup_ops *ops, bool relative,
3336 bool unprivileged)
3337 {
3338 __do_close int cgroup_root_fd = -EBADF;
3339 __do_free char *base_cgroup = NULL, *controllers_path = NULL;
3340 __do_free_string_list char **delegatable;
3341 __do_free struct hierarchy *new = NULL;
3342 int ret;
3343
3344 ret = unified_cgroup_hierarchy();
3345 if (ret == -ENOMEDIUM)
3346 return ret_errno(ENOMEDIUM);
3347
3348 if (ret != CGROUP2_SUPER_MAGIC)
3349 return 0;
3350
3351 base_cgroup = cg_unified_get_current_cgroup(relative);
3352 if (!base_cgroup)
3353 return ret_errno(EINVAL);
3354 if (!relative)
3355 prune_init_scope(base_cgroup);
3356
3357 cgroup_root_fd = openat(-EBADF, DEFAULT_CGROUP_MOUNTPOINT,
3358 O_NOCTTY | O_CLOEXEC | O_NOFOLLOW | O_DIRECTORY);
3359 if (cgroup_root_fd < 0)
3360 return -errno;
3361
3362 /*
3363 * We assume that the cgroup we're currently in has been delegated to
3364 * us and we are free to further delege all of the controllers listed
3365 * in cgroup.controllers further down the hierarchy.
3366 */
3367 controllers_path = must_make_path_relative(base_cgroup, "cgroup.controllers", NULL);
3368 delegatable = cg_unified_get_controllers(cgroup_root_fd, controllers_path);
3369 if (!delegatable)
3370 delegatable = cg_unified_make_empty_controller();
3371 if (!delegatable[0])
3372 TRACE("No controllers are enabled for delegation");
3373
3374 /* TODO: If the user requested specific controllers via lxc.cgroup.use
3375 * we should verify here. The reason I'm not doing it right is that I'm
3376 * not convinced that lxc.cgroup.use will be the future since it is a
3377 * global property. I much rather have an option that lets you request
3378 * controllers per container.
3379 */
3380
3381 new = add_hierarchy(&ops->hierarchies,
3382 move_ptr(delegatable),
3383 must_copy_string(DEFAULT_CGROUP_MOUNTPOINT),
3384 move_ptr(base_cgroup),
3385 CGROUP2_SUPER_MAGIC);
3386 if (!new)
3387 return log_error_errno(-1, errno, "Failed to add unified cgroup hierarchy");
3388
3389 if (unprivileged)
3390 cg_unified_delegate(&new->cgroup2_chown);
3391
3392 if (bpf_devices_cgroup_supported())
3393 new->bpf_device_controller = 1;
3394
3395 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
3396 ops->unified = move_ptr(new);
3397
3398 return CGROUP2_SUPER_MAGIC;
3399 }
3400
3401 static int cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
3402 {
3403 int ret;
3404 const char *tmp;
3405 bool relative = conf->cgroup_meta.relative;
3406
3407 tmp = lxc_global_config_value("lxc.cgroup.use");
3408 if (tmp) {
3409 __do_free char *pin = NULL;
3410 char *chop, *cur;
3411
3412 pin = must_copy_string(tmp);
3413 chop = pin;
3414
3415 lxc_iterate_parts(cur, chop, ",")
3416 must_append_string(&ops->cgroup_use, cur);
3417 }
3418
3419 ret = cg_unified_init(ops, relative, !lxc_list_empty(&conf->id_map));
3420 if (ret < 0)
3421 return -1;
3422
3423 if (ret == CGROUP2_SUPER_MAGIC)
3424 return 0;
3425
3426 return cg_hybrid_init(ops, relative, !lxc_list_empty(&conf->id_map));
3427 }
3428
3429 __cgfsng_ops static int cgfsng_data_init(struct cgroup_ops *ops)
3430 {
3431 const char *cgroup_pattern;
3432
3433 if (!ops)
3434 return ret_set_errno(-1, ENOENT);
3435
3436 /* copy system-wide cgroup information */
3437 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
3438 if (cgroup_pattern && strcmp(cgroup_pattern, "") != 0)
3439 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
3440
3441 return 0;
3442 }
3443
3444 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
3445 {
3446 __do_free struct cgroup_ops *cgfsng_ops = NULL;
3447
3448 cgfsng_ops = zalloc(sizeof(struct cgroup_ops));
3449 if (!cgfsng_ops)
3450 return ret_set_errno(NULL, ENOMEM);
3451
3452 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
3453
3454 if (cg_init(cgfsng_ops, conf))
3455 return NULL;
3456
3457 cgfsng_ops->data_init = cgfsng_data_init;
3458 cgfsng_ops->payload_destroy = cgfsng_payload_destroy;
3459 cgfsng_ops->monitor_destroy = cgfsng_monitor_destroy;
3460 cgfsng_ops->monitor_create = cgfsng_monitor_create;
3461 cgfsng_ops->monitor_enter = cgfsng_monitor_enter;
3462 cgfsng_ops->monitor_delegate_controllers = cgfsng_monitor_delegate_controllers;
3463 cgfsng_ops->payload_delegate_controllers = cgfsng_payload_delegate_controllers;
3464 cgfsng_ops->payload_create = cgfsng_payload_create;
3465 cgfsng_ops->payload_enter = cgfsng_payload_enter;
3466 cgfsng_ops->payload_finalize = cgfsng_payload_finalize;
3467 cgfsng_ops->escape = cgfsng_escape;
3468 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
3469 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
3470 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
3471 cgfsng_ops->get = cgfsng_get;
3472 cgfsng_ops->set = cgfsng_set;
3473 cgfsng_ops->freeze = cgfsng_freeze;
3474 cgfsng_ops->unfreeze = cgfsng_unfreeze;
3475 cgfsng_ops->setup_limits_legacy = cgfsng_setup_limits_legacy;
3476 cgfsng_ops->setup_limits = cgfsng_setup_limits;
3477 cgfsng_ops->driver = "cgfsng";
3478 cgfsng_ops->version = "1.0.0";
3479 cgfsng_ops->attach = cgfsng_attach;
3480 cgfsng_ops->chown = cgfsng_chown;
3481 cgfsng_ops->mount = cgfsng_mount;
3482 cgfsng_ops->devices_activate = cgfsng_devices_activate;
3483 cgfsng_ops->get_limiting_cgroup = cgfsng_get_limiting_cgroup;
3484
3485 return move_ptr(cgfsng_ops);
3486 }