1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfsng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
36
37 #include "config.h"
38
39 #include <ctype.h>
40 #include <dirent.h>
41 #include <errno.h>
42 #include <grp.h>
43 #include <stdint.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <linux/kdev_t.h>
49 #include <linux/types.h>
50 #include <sys/types.h>
51
52 #include "caps.h"
53 #include "cgroup.h"
54 #include "cgroup_utils.h"
55 #include "commands.h"
56 #include "conf.h"
57 #include "log.h"
58 #include "macro.h"
59 #include "storage/storage.h"
60 #include "utils.h"
61
62 #ifndef HAVE_STRLCPY
63 #include "include/strlcpy.h"
64 #endif
65
66 #ifndef HAVE_STRLCAT
67 #include "include/strlcat.h"
68 #endif
69
70 lxc_log_define(cgfsng, cgroup);
71
72 static void free_string_list(char **clist)
73 {
74 int i;
75
76 if (!clist)
77 return;
78
79 for (i = 0; clist[i]; i++)
80 free(clist[i]);
81
82 free(clist);
83 }
84
85 /* Allocate memory; do not fail. */
86 static void *must_alloc(size_t sz)
87 {
88 return must_realloc(NULL, sz);
89 }
90
91 /* Given a pointer to a null-terminated array of pointers, realloc to add one
92 * entry, and point the new entry to NULL. Do not fail. Return the index to the
93 * second-to-last entry - that is, the one which is now available for use
94 * (keeping the list null-terminated).
95 */
96 static int append_null_to_list(void ***list)
97 {
98 int newentry = 0;
99
100 if (*list)
101 for (; (*list)[newentry]; newentry++)
102 ;
103
104 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
105 (*list)[newentry + 1] = NULL;
106 return newentry;
107 }
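/* Usage sketch (illustrative only, not compiled): growing a NULL-terminated
 * string list with append_null_to_list(). The controller names below are
 * example values.
 *
 *	char **list = NULL;
 *	int idx;
 *
 *	idx = append_null_to_list((void ***)&list);	// idx == 0
 *	list[idx] = must_copy_string("memory");
 *	idx = append_null_to_list((void ***)&list);	// idx == 1
 *	list[idx] = must_copy_string("pids");
 *	// list is now { "memory", "pids", NULL }
 */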
108
109 /* Given a null-terminated array of strings, check whether @entry is one of the
110 * strings.
111 */
112 static bool string_in_list(char **list, const char *entry)
113 {
114 int i;
115
116 if (!list)
117 return false;
118
119 for (i = 0; list[i]; i++)
120 if (strcmp(list[i], entry) == 0)
121 return true;
122
123 return false;
124 }
125
126 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
127 * "name=systemd". Do not fail.
128 */
129 static char *cg_legacy_must_prefix_named(char *entry)
130 {
131 size_t len;
132 char *prefixed;
133
134 len = strlen(entry);
135 prefixed = must_alloc(len + 6);
136
137 memcpy(prefixed, "name=", sizeof("name=") - 1);
138 memcpy(prefixed + sizeof("name=") - 1, entry, len);
139 prefixed[len + 5] = '\0';
140 return prefixed;
141 }
142
143 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
144 * we are called.
145 *
146 * We also handle named subsystems here. Any controller which is not a kernel
147 * subsystem is prefixed with "name=". Any controller which is both a kernel and
148 * a named subsystem is refused, since we cannot tell which of the two we have here.
149 * (TODO: We could work around this in some cases by just remounting to be
150 * unambiguous, or by comparing mountpoint contents with current cgroup.)
151 *
152 * The last entry will always be NULL.
153 */
154 static void must_append_controller(char **klist, char **nlist, char ***clist,
155 char *entry)
156 {
157 int newentry;
158 char *copy;
159
160 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
161 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
162 ERROR("It is both a named and kernel subsystem");
163 return;
164 }
165
166 newentry = append_null_to_list((void ***)clist);
167
168 if (strncmp(entry, "name=", 5) == 0)
169 copy = must_copy_string(entry);
170 else if (string_in_list(klist, entry))
171 copy = must_copy_string(entry);
172 else
173 copy = cg_legacy_must_prefix_named(entry);
174
175 (*clist)[newentry] = copy;
176 }
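/* Illustrative example (assumed input lists): with
 *	klist = { "cpuset", "memory", NULL }
 *	nlist = { "name=systemd", NULL }
 * the calls
 *	must_append_controller(klist, nlist, &clist, "memory");
 *	must_append_controller(klist, nlist, &clist, "systemd");
 * leave clist == { "memory", "name=systemd", NULL }: kernel subsystems are
 * copied verbatim, anything else gets the "name=" prefix, and an entry present
 * in both klist and nlist would be rejected as ambiguous.
 */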
177
178 /* Given a handler's cgroup data, return the struct hierarchy for the controller
179 * @c, or NULL if there is none.
180 */
181 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *c)
182 {
183 int i;
184
185 if (!ops->hierarchies)
186 return NULL;
187
188 for (i = 0; ops->hierarchies[i]; i++) {
189 if (!c) {
190 /* This is the empty unified hierarchy. */
191 if (ops->hierarchies[i]->controllers &&
192 !ops->hierarchies[i]->controllers[0])
193 return ops->hierarchies[i];
194
195 continue;
196 }
197
198 if (string_in_list(ops->hierarchies[i]->controllers, c))
199 return ops->hierarchies[i];
200 }
201
202 return NULL;
203 }
204
205 #define BATCH_SIZE 50
206 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
207 {
208 int newbatches = (newlen / BATCH_SIZE) + 1;
209 int oldbatches = (oldlen / BATCH_SIZE) + 1;
210
211 if (!*mem || newbatches > oldbatches) {
212 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
213 }
214 }
215
216 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
217 {
218 size_t full = oldlen + newlen;
219
220 batch_realloc(dest, oldlen, full + 1);
221
222 memcpy(*dest + oldlen, new, newlen + 1);
223 }
224
225 /* Slurp in a whole file */
226 static char *read_file(const char *fnam)
227 {
228 FILE *f;
229 char *line = NULL, *buf = NULL;
230 size_t len = 0, fulllen = 0;
231 int linelen;
232
233 f = fopen(fnam, "r");
234 if (!f)
235 return NULL;
236 while ((linelen = getline(&line, &len, f)) != -1) {
237 append_line(&buf, fulllen, line, linelen);
238 fulllen += linelen;
239 }
240 fclose(f);
241 free(line);
242 return buf;
243 }
244
245 /* Taken from the kernel sources and modified. */
246 #define NBITS 32 /* bits in uint32_t */
247 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
248 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
249
250 static void set_bit(unsigned bit, uint32_t *bitarr)
251 {
252 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
253 }
254
255 static void clear_bit(unsigned bit, uint32_t *bitarr)
256 {
257 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
258 }
259
260 static bool is_set(unsigned bit, uint32_t *bitarr)
261 {
262 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
263 }
264
265 /* Create cpumask from cpulist aka turn:
266 *
267 * 0,2-3
268 *
269 * into bit array
270 *
271 * 1 0 1 1
272 */
273 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
274 {
275 char *token;
276 size_t arrlen;
277 uint32_t *bitarr;
278
279 arrlen = BITS_TO_LONGS(nbits);
280 bitarr = calloc(arrlen, sizeof(uint32_t));
281 if (!bitarr)
282 return NULL;
283
284 lxc_iterate_parts(token, buf, ",") {
285 errno = 0;
286 unsigned end, start;
287 char *range;
288
289 start = strtoul(token, NULL, 0);
290 end = start;
291 range = strchr(token, '-');
292 if (range)
293 end = strtoul(range + 1, NULL, 0);
294
295 if (!(start <= end)) {
296 free(bitarr);
297 return NULL;
298 }
299
300 if (end >= nbits) {
301 free(bitarr);
302 return NULL;
303 }
304
305 while (start <= end)
306 set_bit(start++, bitarr);
307 }
308
309 return bitarr;
310 }
311
312 /* Turn cpumask into simple, comma-separated cpulist. */
313 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
314 {
315 int ret;
316 size_t i;
317 char **cpulist = NULL;
318 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
319
320 for (i = 0; i <= nbits; i++) {
321 if (!is_set(i, bitarr))
322 continue;
323
324 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
325 if (ret < 0 || (size_t)ret >= sizeof(numstr)) {
326 lxc_free_array((void **)cpulist, free);
327 return NULL;
328 }
329
330 ret = lxc_append_string(&cpulist, numstr);
331 if (ret < 0) {
332 lxc_free_array((void **)cpulist, free);
333 return NULL;
334 }
335 }
336
337 if (!cpulist)
338 return NULL;
339
340 return lxc_string_join(",", (const char **)cpulist, false);
341 }
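/* Round-trip sketch (illustrative): with nbits = 8, lxc_cpumask("0,2-3", 8)
 * returns a bit array with bits 0, 2 and 3 set, and feeding that array back
 * into lxc_cpumask_to_cpulist() yields the simple list "0,2,3" (ranges are
 * expanded, not re-compressed).
 */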
342
343 static ssize_t get_max_cpus(char *cpulist)
344 {
345 char *c1, *c2;
346 char *maxcpus = cpulist;
347 size_t cpus = 0;
348
349 c1 = strrchr(maxcpus, ',');
350 if (c1)
351 c1++;
352
353 c2 = strrchr(maxcpus, '-');
354 if (c2)
355 c2++;
356
357 if (!c1 && !c2)
358 c1 = maxcpus;
359 else if (c1 > c2)
360 c2 = c1;
361 else if (c1 < c2)
362 c1 = c2;
363 else if (!c1 && c2)
364 c1 = c2;
365
366 errno = 0;
367 cpus = strtoul(c1, NULL, 0);
368 if (errno != 0)
369 return -1;
370
371 return cpus;
372 }
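/* Example (illustrative): get_max_cpus("0-3,7") returns 7 and
 * get_max_cpus("0-7") also returns 7. Only the last element or range end is
 * parsed, which is enough because the kernel emits these lists in ascending
 * order.
 */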
373
374 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
375 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
376 {
377 int ret;
378 ssize_t i;
379 char *lastslash, *fpath, oldv;
380 ssize_t maxisol = 0, maxposs = 0;
381 char *cpulist = NULL, *isolcpus = NULL, *posscpus = NULL;
382 uint32_t *isolmask = NULL, *possmask = NULL;
383 bool bret = false, flipped_bit = false;
384
385 lastslash = strrchr(path, '/');
386 if (!lastslash) {
387 ERROR("Failed to detect \"/\" in \"%s\"", path);
388 return bret;
389 }
390 oldv = *lastslash;
391 *lastslash = '\0';
392 fpath = must_make_path(path, "cpuset.cpus", NULL);
393 posscpus = read_file(fpath);
394 if (!posscpus) {
395 SYSERROR("Failed to read file \"%s\"", fpath);
396 goto on_error;
397 }
398
399 /* Get maximum number of cpus found in possible cpuset. */
400 maxposs = get_max_cpus(posscpus);
401 if (maxposs < 0 || maxposs >= INT_MAX - 1)
402 goto on_error;
403
404 if (!file_exists(__ISOL_CPUS)) {
405 /* This system doesn't expose isolated cpus. */
406 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
407 cpulist = posscpus;
408 /* No isolated cpus but we weren't already initialized by
409 * someone. We should simply copy the parent's cpuset.cpus
410 * values.
411 */
412 if (!am_initialized) {
413 DEBUG("Copying cpu settings of parent cgroup");
414 goto copy_parent;
415 }
416 /* No isolated cpus but we were already initialized by someone.
417 * Nothing more to do for us.
418 */
419 goto on_success;
420 }
421
422 isolcpus = read_file(__ISOL_CPUS);
423 if (!isolcpus) {
424 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
425 goto on_error;
426 }
427 if (!isdigit(isolcpus[0])) {
428 TRACE("No isolated cpus detected");
429 cpulist = posscpus;
430 /* No isolated cpus but we weren't already initialized by
431 * someone. We should simply copy the parent's cpuset.cpus
432 * values.
433 */
434 if (!am_initialized) {
435 DEBUG("Copying cpu settings of parent cgroup");
436 goto copy_parent;
437 }
438 /* No isolated cpus but we were already initialized by someone.
439 * Nothing more to do for us.
440 */
441 goto on_success;
442 }
443
444 /* Get maximum number of cpus found in isolated cpuset. */
445 maxisol = get_max_cpus(isolcpus);
446 if (maxisol < 0 || maxisol >= INT_MAX - 1)
447 goto on_error;
448
449 if (maxposs < maxisol)
450 maxposs = maxisol;
451 maxposs++;
452
453 possmask = lxc_cpumask(posscpus, maxposs);
454 if (!possmask) {
455 ERROR("Failed to create cpumask for possible cpus");
456 goto on_error;
457 }
458
459 isolmask = lxc_cpumask(isolcpus, maxposs);
460 if (!isolmask) {
461 ERROR("Failed to create cpumask for isolated cpus");
462 goto on_error;
463 }
464
465 for (i = 0; i <= maxposs; i++) {
466 if (!is_set(i, isolmask) || !is_set(i, possmask))
467 continue;
468
469 flipped_bit = true;
470 clear_bit(i, possmask);
471 }
472
473 if (!flipped_bit) {
474 DEBUG("No isolated cpus present in cpuset");
475 goto on_success;
476 }
477 DEBUG("Removed isolated cpus from cpuset");
478
479 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
480 if (!cpulist) {
481 ERROR("Failed to create cpu list");
482 goto on_error;
483 }
484
485 copy_parent:
486 *lastslash = oldv;
487 free(fpath);
488 fpath = must_make_path(path, "cpuset.cpus", NULL);
489 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
490 if (ret < 0) {
491 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
492 goto on_error;
493 }
494
495 on_success:
496 bret = true;
497
498 on_error:
499 free(fpath);
500
501 free(isolcpus);
502 free(isolmask);
503
504 if (posscpus != cpulist)
505 free(posscpus);
506 free(possmask);
507
508 free(cpulist);
509 return bret;
510 }
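/* Worked example (assumed values): if the parent's cpuset.cpus reads "0-7"
 * and /sys/devices/system/cpu/isolated reads "3-4", this function writes
 * "0,1,2,5,6,7" to the new cgroup's cpuset.cpus so the container is never
 * scheduled onto the isolated cpus.
 */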
511
512 /* Copy contents of parent(@path)/@file to @path/@file */
513 static bool copy_parent_file(char *path, char *file)
514 {
515 int ret;
516 char *fpath, *lastslash, oldv;
517 int len = 0;
518 char *value = NULL;
519
520 lastslash = strrchr(path, '/');
521 if (!lastslash) {
522 ERROR("Failed to detect \"/\" in \"%s\"", path);
523 return false;
524 }
525 oldv = *lastslash;
526 *lastslash = '\0';
527 fpath = must_make_path(path, file, NULL);
528 len = lxc_read_from_file(fpath, NULL, 0);
529 if (len <= 0)
530 goto on_error;
531
532 value = must_alloc(len + 1);
533 ret = lxc_read_from_file(fpath, value, len);
534 if (ret != len)
535 goto on_error;
536 free(fpath);
537
538 *lastslash = oldv;
539 fpath = must_make_path(path, file, NULL);
540 ret = lxc_write_to_file(fpath, value, len, false, 0666);
541 if (ret < 0)
542 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, fpath);
543 free(fpath);
544 free(value);
545 return ret >= 0;
546
547 on_error:
548 SYSERROR("Failed to read file \"%s\"", fpath);
549 free(fpath);
550 free(value);
551 return false;
552 }
553
554 /* Initialize the cpuset hierarchy in the first directory of @cgname and set
555 * cgroup.clone_children so that children inherit settings. Since the
556 * h->base_path is populated by init or ourselves, we know it is already
557 * initialized.
558 */
559 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
560 {
561 int ret;
562 char v;
563 char *cgpath, *clonechildrenpath, *slash;
564
565 if (!string_in_list(h->controllers, "cpuset"))
566 return true;
567
568 if (*cgname == '/')
569 cgname++;
570 slash = strchr(cgname, '/');
571 if (slash)
572 *slash = '\0';
573
574 cgpath = must_make_path(h->mountpoint, h->base_cgroup, cgname, NULL);
575 if (slash)
576 *slash = '/';
577
578 ret = mkdir(cgpath, 0755);
579 if (ret < 0) {
580 if (errno != EEXIST) {
581 SYSERROR("Failed to create directory \"%s\"", cgpath);
582 free(cgpath);
583 return false;
584 }
585 }
586
587 clonechildrenpath =
588 must_make_path(cgpath, "cgroup.clone_children", NULL);
589 /* unified hierarchy doesn't have clone_children */
590 if (!file_exists(clonechildrenpath)) {
591 free(clonechildrenpath);
592 free(cgpath);
593 return true;
594 }
595
596 ret = lxc_read_from_file(clonechildrenpath, &v, 1);
597 if (ret < 0) {
598 SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
599 free(clonechildrenpath);
600 free(cgpath);
601 return false;
602 }
603
604 /* Make sure any isolated cpus are removed from cpuset.cpus. */
605 if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
606 SYSERROR("Failed to remove isolated cpus");
607 free(clonechildrenpath);
608 free(cgpath);
609 return false;
610 }
611
612 /* Already set for us by someone else. */
613 if (v == '1') {
614 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
615 free(clonechildrenpath);
616 free(cgpath);
617 return true;
618 }
619
620 /* copy parent's settings */
621 if (!copy_parent_file(cgpath, "cpuset.mems")) {
622 SYSERROR("Failed to copy \"cpuset.mems\" settings");
623 free(cgpath);
624 free(clonechildrenpath);
625 return false;
626 }
627 free(cgpath);
628
629 ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
630 if (ret < 0) {
631 /* Set clone_children so children inherit our settings */
632 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
633 free(clonechildrenpath);
634 return false;
635 }
636 free(clonechildrenpath);
637 return true;
638 }
639
640 /* Given two null-terminated lists of strings, return true if any string is in
641 * both.
642 */
643 static bool controller_lists_intersect(char **l1, char **l2)
644 {
645 int i;
646
647 if (!l1 || !l2)
648 return false;
649
650 for (i = 0; l1[i]; i++) {
651 if (string_in_list(l2, l1[i]))
652 return true;
653 }
654
655 return false;
656 }
657
658 /* For a null-terminated list of controllers @clist, return true if any of those
659 * controllers is already listed in the null-terminated list of hierarchies @hlist.
660 * Realistically, if one is present, all must be present.
661 */
662 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
663 {
664 int i;
665
666 if (!hlist)
667 return false;
668
669 for (i = 0; hlist[i]; i++)
670 if (controller_lists_intersect(hlist[i]->controllers, clist))
671 return true;
672
673 return false;
674 }
675
676 /* Return true if the controller @entry is found in the null-terminated list of
677 * hierarchies @hlist.
678 */
679 static bool controller_found(struct hierarchy **hlist, char *entry)
680 {
681 int i;
682
683 if (!hlist)
684 return false;
685
686 for (i = 0; hlist[i]; i++)
687 if (string_in_list(hlist[i]->controllers, entry))
688 return true;
689
690 return false;
691 }
692
693 /* Return true if all of the controllers which we require have been found. The
694 * required list is freezer and anything in lxc.cgroup.use.
695 */
696 static bool all_controllers_found(struct cgroup_ops *ops)
697 {
698 char **cur;
699 struct hierarchy **hlist = ops->hierarchies;
700
701 if (!controller_found(hlist, "freezer")) {
702 ERROR("No freezer controller mountpoint found");
703 return false;
704 }
705
706 if (!ops->cgroup_use)
707 return true;
708
709 for (cur = ops->cgroup_use; cur && *cur; cur++)
710 if (!controller_found(hlist, *cur)) {
711 ERROR("No %s controller mountpoint found", *cur);
712 return false;
713 }
714
715 return true;
716 }
717
718 /* Get the controllers from a mountinfo line. There are other ways we could get
719 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
720 * could parse the mount options. But we simply assume that the mountpoint must
721 * be /sys/fs/cgroup/controller-list
722 */
723 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
724 int type)
725 {
726 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
727 * for legacy hierarchies.
728 */
729 int i;
730 char *dup, *p2, *tok;
731 char *p = line, *sep = ",";
732 char **aret = NULL;
733
734 for (i = 0; i < 4; i++) {
735 p = strchr(p, ' ');
736 if (!p)
737 return NULL;
738 p++;
739 }
740
741 /* Note, if we change how mountinfo works, then our caller will need to
742 * verify /sys/fs/cgroup/ in this field.
743 */
744 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
745 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
746 return NULL;
747 }
748
749 p += 15;
750 p2 = strchr(p, ' ');
751 if (!p2) {
752 ERROR("Corrupt mountinfo");
753 return NULL;
754 }
755 *p2 = '\0';
756
757 if (type == CGROUP_SUPER_MAGIC) {
758 /* strdup() here for v1 hierarchies. Otherwise
759 * lxc_iterate_parts() will destroy mountpoints such as
760 * "/sys/fs/cgroup/cpu,cpuacct".
761 */
762 dup = strdup(p);
763 if (!dup)
764 return NULL;
765
766 lxc_iterate_parts(tok, dup, sep) {
767 must_append_controller(klist, nlist, &aret, tok);
768 }
769
770 free(dup);
771 }
772 *p2 = ' ';
773
774 return aret;
775 }
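/* Example (illustrative) of a legacy mountinfo line this parses:
 *
 *	34 25 0:29 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec - cgroup cgroup rw,cpu,cpuacct
 *
 * The mount point field is checked for the "/sys/fs/cgroup/" prefix and the
 * remainder, "cpu,cpuacct", is split on "," so that (assuming both names are
 * in @klist) the returned list is { "cpu", "cpuacct", NULL }.
 */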
776
777 static char **cg_unified_make_empty_controller(void)
778 {
779 int newentry;
780 char **aret = NULL;
781
782 newentry = append_null_to_list((void ***)&aret);
783 aret[newentry] = NULL;
784 return aret;
785 }
786
787 static char **cg_unified_get_controllers(const char *file)
788 {
789 char *buf, *tok;
790 char *sep = " \t\n";
791 char **aret = NULL;
792
793 buf = read_file(file);
794 if (!buf)
795 return NULL;
796
797 lxc_iterate_parts(tok, buf, sep) {
798 int newentry;
799 char *copy;
800
801 newentry = append_null_to_list((void ***)&aret);
802 copy = must_copy_string(tok);
803 aret[newentry] = copy;
804 }
805
806 free(buf);
807 return aret;
808 }
809
810 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
811 char *base_cgroup, int type)
812 {
813 struct hierarchy *new;
814 int newentry;
815
816 new = must_alloc(sizeof(*new));
817 new->controllers = clist;
818 new->mountpoint = mountpoint;
819 new->base_cgroup = base_cgroup;
820 new->fullcgpath = NULL;
821 new->version = type;
822
823 newentry = append_null_to_list((void ***)h);
824 (*h)[newentry] = new;
825 return new;
826 }
827
828 /* Get a copy of the mountpoint from @line, which is a line from
829 * /proc/self/mountinfo.
830 */
831 static char *cg_hybrid_get_mountpoint(char *line)
832 {
833 int i;
834 size_t len;
835 char *p2;
836 char *p = line, *sret = NULL;
837
838 for (i = 0; i < 4; i++) {
839 p = strchr(p, ' ');
840 if (!p)
841 return NULL;
842 p++;
843 }
844
845 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
846 return NULL;
847
848 p2 = strchr(p + 15, ' ');
849 if (!p2)
850 return NULL;
851 *p2 = '\0';
852
853 len = strlen(p);
854 sret = must_alloc(len + 1);
855 memcpy(sret, p, len);
856 sret[len] = '\0';
857 return sret;
858 }
859
860 /* Given a multi-line string, return a null-terminated copy of the current line. */
861 static char *copy_to_eol(char *p)
862 {
863 char *p2 = strchr(p, '\n'), *sret;
864 size_t len;
865
866 if (!p2)
867 return NULL;
868
869 len = p2 - p;
870 sret = must_alloc(len + 1);
871 memcpy(sret, p, len);
872 sret[len] = '\0';
873 return sret;
874 }
875
876 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
877 * /proc/self/cgroup file. Check whether controller c is present.
878 */
879 static bool controller_in_clist(char *cgline, char *c)
880 {
881 char *tok, *eol, *tmp;
882 size_t len;
883
884 eol = strchr(cgline, ':');
885 if (!eol)
886 return false;
887
888 len = eol - cgline;
889 tmp = alloca(len + 1);
890 memcpy(tmp, cgline, len);
891 tmp[len] = '\0';
892
893 lxc_iterate_parts(tok, tmp, ",") {
894 if (strcmp(tok, c) == 0)
895 return true;
896 }
897
898 return false;
899 }
900
901 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
902 * @controller.
903 */
904 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
905 int type)
906 {
907 char *p = basecginfo;
908
909 for (;;) {
910 bool is_cgv2_base_cgroup = false;
911
912 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
913 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
914 is_cgv2_base_cgroup = true;
915
916 p = strchr(p, ':');
917 if (!p)
918 return NULL;
919 p++;
920
921 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
922 p = strchr(p, ':');
923 if (!p)
924 return NULL;
925 p++;
926 return copy_to_eol(p);
927 }
928
929 p = strchr(p, '\n');
930 if (!p)
931 return NULL;
932 p++;
933 }
934 }
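/* Example (illustrative): for a /proc/self/cgroup such as
 *
 *	12:memory:/lxc/c1
 *	7:cpu,cpuacct:/lxc/c1
 *	0::/lxc/c1
 *
 * cg_hybrid_get_current_cgroup(buf, "memory", CGROUP_SUPER_MAGIC) returns
 * "/lxc/c1" from the first line, and cg_hybrid_get_current_cgroup(buf, NULL,
 * CGROUP2_SUPER_MAGIC) matches the "0::" entry and returns its path.
 */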
935
936 static void must_append_string(char ***list, char *entry)
937 {
938 int newentry;
939 char *copy;
940
941 newentry = append_null_to_list((void ***)list);
942 copy = must_copy_string(entry);
943 (*list)[newentry] = copy;
944 }
945
946 static int get_existing_subsystems(char ***klist, char ***nlist)
947 {
948 FILE *f;
949 char *line = NULL;
950 size_t len = 0;
951
952 f = fopen("/proc/self/cgroup", "r");
953 if (!f)
954 return -1;
955
956 while (getline(&line, &len, f) != -1) {
957 char *p, *p2, *tok;
958 p = strchr(line, ':');
959 if (!p)
960 continue;
961 p++;
962 p2 = strchr(p, ':');
963 if (!p2)
964 continue;
965 *p2 = '\0';
966
967 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
968 * contains an entry of the form:
969 *
970 * 0::/some/path
971 *
972 * In this case we use "cgroup2" as controller name.
973 */
974 if ((p2 - p) == 0) {
975 must_append_string(klist, "cgroup2");
976 continue;
977 }
978
979 lxc_iterate_parts(tok, p, ",") {
980 if (strncmp(tok, "name=", 5) == 0)
981 must_append_string(nlist, tok);
982 else
983 must_append_string(klist, tok);
984 }
985 }
986
987 free(line);
988 fclose(f);
989 return 0;
990 }
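/* Example (illustrative): for a /proc/self/cgroup containing
 *
 *	12:memory:/lxc/c1
 *	1:name=systemd:/lxc/c1
 *	0::/lxc/c1
 *
 * get_existing_subsystems() fills klist with { "memory", "cgroup2", NULL }
 * and nlist with { "name=systemd", NULL }.
 */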
991
992 static void trim(char *s)
993 {
994 size_t len;
995
996 len = strlen(s);
997 while ((len > 1) && (s[len - 1] == '\n'))
998 s[--len] = '\0';
999 }
1000
1001 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
1002 {
1003 int i;
1004 struct hierarchy **it;
1005
1006 if (!ops->hierarchies) {
1007 TRACE(" No hierarchies found");
1008 return;
1009 }
1010
1011 TRACE(" Hierarchies:");
1012 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
1013 int j;
1014 char **cit;
1015
1016 TRACE(" %d: base_cgroup: %s", i, (*it)->base_cgroup ? (*it)->base_cgroup : "(null)");
1017 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
1018 TRACE(" controllers:");
1019 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
1020 TRACE(" %d: %s", j, *cit);
1021 }
1022 }
1023
1024 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
1025 char **nlist)
1026 {
1027 int k;
1028 char **it;
1029
1030 TRACE("basecginfo is:");
1031 TRACE("%s", basecginfo);
1032
1033 for (k = 0, it = klist; it && *it; it++, k++)
1034 TRACE("kernel subsystem %d: %s", k, *it);
1035
1036 for (k = 0, it = nlist; it && *it; it++, k++)
1037 TRACE("named subsystem %d: %s", k, *it);
1038 }
1039
1040 static int cgroup_rmdir(struct hierarchy **hierarchies,
1041 const char *container_cgroup)
1042 {
1043 int i;
1044
1045 if (!container_cgroup || !hierarchies)
1046 return 0;
1047
1048 for (i = 0; hierarchies[i]; i++) {
1049 int ret;
1050 struct hierarchy *h = hierarchies[i];
1051
1052 if (!h->fullcgpath)
1053 continue;
1054
1055 ret = recursive_destroy(h->fullcgpath);
1056 if (ret < 0)
1057 WARN("Failed to destroy \"%s\"", h->fullcgpath);
1058
1059 free(h->fullcgpath);
1060 h->fullcgpath = NULL;
1061 }
1062
1063 return 0;
1064 }
1065
1066 struct generic_userns_exec_data {
1067 struct hierarchy **hierarchies;
1068 const char *container_cgroup;
1069 struct lxc_conf *conf;
1070 uid_t origuid; /* target uid in parent namespace */
1071 char *path;
1072 };
1073
1074 static int cgroup_rmdir_wrapper(void *data)
1075 {
1076 int ret;
1077 struct generic_userns_exec_data *arg = data;
1078 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1079 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1080
1081 ret = setresgid(nsgid, nsgid, nsgid);
1082 if (ret < 0) {
1083 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
1084 (int)nsgid, (int)nsgid);
1085 return -1;
1086 }
1087
1088 ret = setresuid(nsuid, nsuid, nsuid);
1089 if (ret < 0) {
1090 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
1091 (int)nsuid, (int)nsuid);
1092 return -1;
1093 }
1094
1095 ret = setgroups(0, NULL);
1096 if (ret < 0 && errno != EPERM) {
1097 SYSERROR("Failed to setgroups(0, NULL)");
1098 return -1;
1099 }
1100
1101 return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
1102 }
1103
1104 static void cgfsng_destroy(struct cgroup_ops *ops, struct lxc_handler *handler)
1105 {
1106 int ret;
1107 struct generic_userns_exec_data wrap;
1108
1109 wrap.origuid = 0;
1110 wrap.container_cgroup = ops->container_cgroup;
1111 wrap.hierarchies = ops->hierarchies;
1112 wrap.conf = handler->conf;
1113
1114 if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
1115 ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
1116 "cgroup_rmdir_wrapper");
1117 else
1118 ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
1119 if (ret < 0) {
1120 WARN("Failed to destroy cgroups");
1121 return;
1122 }
1123 }
1124
1125 static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
1126 {
1127 size_t i, parts_len;
1128 char **it;
1129 size_t full_len = 0;
1130 char *add_controllers = NULL, *cgroup = NULL;
1131 char **parts = NULL;
1132 bool bret = false;
1133
1134 if (h->version != CGROUP2_SUPER_MAGIC)
1135 return true;
1136
1137 if (!h->controllers)
1138 return true;
1139
1140 /* For now we simply enable all controllers that we have detected by
1141 * creating a string like "+memory +pids +cpu +io".
1142 * TODO: In the near future we might want to support "-<controller>"
1143 * etc. but whether supporting semantics like this make sense will need
1144 * some thinking.
1145 */
1146 for (it = h->controllers; it && *it; it++) {
1147 full_len += strlen(*it) + 2;
1148 add_controllers = must_realloc(add_controllers, full_len + 1);
1149
1150 if (h->controllers[0] == *it)
1151 add_controllers[0] = '\0';
1152
1153 (void)strlcat(add_controllers, "+", full_len + 1);
1154 (void)strlcat(add_controllers, *it, full_len + 1);
1155
1156 if ((it + 1) && *(it + 1))
1157 (void)strlcat(add_controllers, " ", full_len + 1);
1158 }
1159
1160 parts = lxc_string_split(cgname, '/');
1161 if (!parts)
1162 goto on_error;
1163
1164 parts_len = lxc_array_len((void **)parts);
1165 if (parts_len > 0)
1166 parts_len--;
1167
1168 cgroup = must_make_path(h->mountpoint, h->base_cgroup, NULL);
1169 for (i = 0; i < parts_len; i++) {
1170 int ret;
1171 char *target;
1172
1173 cgroup = must_append_path(cgroup, parts[i], NULL);
1174 target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
1175 ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
1176 free(target);
1177 if (ret < 0) {
1178 SYSERROR("Could not enable \"%s\" controllers in the "
1179 "unified cgroup \"%s\"", add_controllers, cgroup);
1180 goto on_error;
1181 }
1182 }
1183
1184 bret = true;
1185
1186 on_error:
1187 lxc_free_array((void **)parts, free);
1188 free(add_controllers);
1189 free(cgroup);
1190 return bret;
1191 }
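/* Sketch (assumed controller set): if cgroup.controllers on the unified
 * hierarchy lists "memory pids cpu", the string built above is
 * "+memory +pids +cpu" and it is written to cgroup.subtree_control of every
 * intermediate directory of @cgname, delegating those controllers downwards.
 */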
1192
1193 static bool create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1194 {
1195 int ret;
1196
1197 h->fullcgpath = must_make_path(h->mountpoint, h->base_cgroup, cgname, NULL);
1198 if (dir_exists(h->fullcgpath)) {
1199 ERROR("The cgroup \"%s\" already existed", h->fullcgpath);
1200 return false;
1201 }
1202
1203 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1204 ERROR("Failed to handle legacy cpuset controller");
1205 return false;
1206 }
1207
1208 ret = mkdir_p(h->fullcgpath, 0755);
1209 if (ret < 0) {
1210 ERROR("Failed to create cgroup \"%s\"", h->fullcgpath);
1211 return false;
1212 }
1213
1214 return cg_unified_create_cgroup(h, cgname);
1215 }
1216
1217 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname)
1218 {
1219 int ret;
1220
1221 ret = rmdir(h->fullcgpath);
1222 if (ret < 0)
1223 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", h->fullcgpath);
1224
1225 free(h->fullcgpath);
1226 h->fullcgpath = NULL;
1227 }
1228
1229 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1230 * next cgroup_pattern-1, -2, ..., -999.
1231 */
1232 static inline bool cgfsng_create(struct cgroup_ops *ops,
1233 struct lxc_handler *handler)
1234 {
1235 int i;
1236 size_t len;
1237 char *container_cgroup, *offset, *tmp;
1238 int idx = 0;
1239 struct lxc_conf *conf = handler->conf;
1240
1241 if (ops->container_cgroup) {
1242 WARN("cgfsng_create called a second time: %s", ops->container_cgroup);
1243 return false;
1244 }
1245
1246 if (!conf)
1247 return false;
1248
1249 if (conf->cgroup_meta.dir)
1250 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1251 else
1252 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1253 if (!tmp) {
1254 ERROR("Failed expanding cgroup name pattern");
1255 return false;
1256 }
1257
1258 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1259 container_cgroup = must_alloc(len);
1260 (void)strlcpy(container_cgroup, tmp, len);
1261 free(tmp);
1262 offset = container_cgroup + len - 5;
1263
1264 again:
1265 if (idx == 1000) {
1266 ERROR("Too many conflicting cgroup names");
1267 goto out_free;
1268 }
1269
1270 if (idx) {
1271 int ret;
1272
1273 ret = snprintf(offset, 5, "-%d", idx);
1274 if (ret < 0 || (size_t)ret >= 5) {
1275 FILE *f = fopen("/dev/null", "w");
1276 if (f) {
1277 fprintf(f, "Workaround for GCC7 bug: "
1278 "https://gcc.gnu.org/bugzilla/"
1279 "show_bug.cgi?id=78969");
1280 fclose(f);
1281 }
1282 }
1283 }
1284
1285 for (i = 0; ops->hierarchies[i]; i++) {
1286 if (!create_path_for_hierarchy(ops->hierarchies[i], container_cgroup)) {
1287 int j;
1288 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->fullcgpath);
1289 free(ops->hierarchies[i]->fullcgpath);
1290 ops->hierarchies[i]->fullcgpath = NULL;
1291 for (j = 0; j < i; j++)
1292 remove_path_for_hierarchy(ops->hierarchies[j], container_cgroup);
1293 idx++;
1294 goto again;
1295 }
1296 }
1297
1298 ops->container_cgroup = container_cgroup;
1299
1300 return true;
1301
1302 out_free:
1303 free(container_cgroup);
1304
1305 return false;
1306 }
1307
1308 static bool cgfsng_enter(struct cgroup_ops *ops, pid_t pid)
1309 {
1310 int i, len;
1311 char pidstr[25];
1312
1313 len = snprintf(pidstr, 25, "%d", pid);
1314 if (len < 0 || len >= 25)
1315 return false;
1316
1317 for (i = 0; ops->hierarchies[i]; i++) {
1318 int ret;
1319 char *fullpath;
1320
1321 fullpath = must_make_path(ops->hierarchies[i]->fullcgpath,
1322 "cgroup.procs", NULL);
1323 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
1324 if (ret != 0) {
1325 SYSERROR("Failed to enter cgroup \"%s\"", fullpath);
1326 free(fullpath);
1327 return false;
1328 }
1329 free(fullpath);
1330 }
1331
1332 return true;
1333 }
1334
1335 static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
1336 mode_t chmod_mode)
1337 {
1338 int ret;
1339
1340 ret = chown(path, chown_uid, chown_gid);
1341 if (ret < 0) {
1342 SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
1343 return -1;
1344 }
1345
1346 ret = chmod(path, chmod_mode);
1347 if (ret < 0) {
1348 SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
1349 return -1;
1350 }
1351
1352 return 0;
1353 }
1354
1355 /* chgrp the container cgroups to container group. We leave
1356 * the container owner as cgroup owner. So we must make the
1357 * directories 775 so that the container can create sub-cgroups.
1358 *
1359 * Also chown the tasks and cgroup.procs files. Those may not
1360 * exist depending on kernel version.
1361 */
1362 static int chown_cgroup_wrapper(void *data)
1363 {
1364 int i, ret;
1365 uid_t destuid;
1366 struct generic_userns_exec_data *arg = data;
1367 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1368 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1369
1370 ret = setresgid(nsgid, nsgid, nsgid);
1371 if (ret < 0) {
1372 SYSERROR("Failed to setresgid(%d, %d, %d)",
1373 (int)nsgid, (int)nsgid, (int)nsgid);
1374 return -1;
1375 }
1376
1377 ret = setresuid(nsuid, nsuid, nsuid);
1378 if (ret < 0) {
1379 SYSERROR("Failed to setresuid(%d, %d, %d)",
1380 (int)nsuid, (int)nsuid, (int)nsuid);
1381 return -1;
1382 }
1383
1384 ret = setgroups(0, NULL);
1385 if (ret < 0 && errno != EPERM) {
1386 SYSERROR("Failed to setgroups(0, NULL)");
1387 return -1;
1388 }
1389
1390 destuid = get_ns_uid(arg->origuid);
1391
1392 for (i = 0; arg->hierarchies[i]; i++) {
1393 char *fullpath;
1394 char *path = arg->hierarchies[i]->fullcgpath;
1395
1396 ret = chowmod(path, destuid, nsgid, 0775);
1397 if (ret < 0)
1398 return -1;
1399
1400 /* Failures to chown() these are inconvenient but not
1401 * detrimental. We leave these owned by the container launcher,
1402 * so that container root can write to the files to attach. We
1403 * chmod() them 664 so that container systemd can write to the
1404 * files (which systemd in wily insists on doing).
1405 */
1406
1407 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1408 fullpath = must_make_path(path, "tasks", NULL);
1409 (void)chowmod(fullpath, destuid, nsgid, 0664);
1410 free(fullpath);
1411 }
1412
1413 fullpath = must_make_path(path, "cgroup.procs", NULL);
1414 (void)chowmod(fullpath, destuid, nsgid, 0664);
1415 free(fullpath);
1416
1417 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1418 continue;
1419
1420 fullpath = must_make_path(path, "cgroup.subtree_control", NULL);
1421 (void)chowmod(fullpath, destuid, nsgid, 0664);
1422 free(fullpath);
1423
1424 fullpath = must_make_path(path, "cgroup.threads", NULL);
1425 (void)chowmod(fullpath, destuid, nsgid, 0664);
1426 free(fullpath);
1427 }
1428
1429 return 0;
1430 }
1431
1432 static bool cgfsng_chown(struct cgroup_ops *ops, struct lxc_conf *conf)
1433 {
1434 struct generic_userns_exec_data wrap;
1435
1436 if (lxc_list_empty(&conf->id_map))
1437 return true;
1438
1439 wrap.origuid = geteuid();
1440 wrap.path = NULL;
1441 wrap.hierarchies = ops->hierarchies;
1442 wrap.conf = conf;
1443
1444 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1445 "chown_cgroup_wrapper") < 0) {
1446 ERROR("Error requesting cgroup chown in new user namespace");
1447 return false;
1448 }
1449
1450 return true;
1451 }
1452
1453 /* cgroup-full:* is done, no need to create subdirs */
1454 static bool cg_mount_needs_subdirs(int type)
1455 {
1456 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1457 return false;
1458
1459 return true;
1460 }
1461
1462 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1463 * remount controller ro if needed and bind-mount the cgroupfs onto
1464 * controller/the/cg/path.
1465 */
1466 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1467 char *controllerpath, char *cgpath,
1468 const char *container_cgroup)
1469 {
1470 int ret, remount_flags;
1471 char *sourcepath;
1472 int flags = MS_BIND;
1473
1474 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1475 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1476 if (ret < 0) {
1477 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1478 controllerpath, controllerpath);
1479 return -1;
1480 }
1481
1482 remount_flags = add_required_remount_flags(controllerpath,
1483 controllerpath,
1484 flags | MS_REMOUNT);
1485 ret = mount(controllerpath, controllerpath, "cgroup",
1486 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1487 NULL);
1488 if (ret < 0) {
1489 SYSERROR("Failed to remount \"%s\" ro", controllerpath);
1490 return -1;
1491 }
1492
1493 INFO("Remounted %s read-only", controllerpath);
1494 }
1495
1496 sourcepath = must_make_path(h->mountpoint, h->base_cgroup,
1497 container_cgroup, NULL);
1498 if (type == LXC_AUTO_CGROUP_RO)
1499 flags |= MS_RDONLY;
1500
1501 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1502 if (ret < 0) {
1503 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1504 free(sourcepath);
1505 return -1;
1506 }
1507 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1508
1509 if (flags & MS_RDONLY) {
1510 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1511 flags | MS_REMOUNT);
1512 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1513 if (ret < 0) {
1514 SYSERROR("Failed to remount \"%s\" ro", cgpath);
1515 free(sourcepath);
1516 return -1;
1517 }
1518 INFO("Remounted %s read-only", cgpath);
1519 }
1520
1521 free(sourcepath);
1522 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1523 return 0;
1524 }
1525
1526 /* __cg_mount_direct
1527 *
1528 * Mount cgroup hierarchies directly without using bind-mounts. The main
1529 * use cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1530 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1531 */
1532 static int __cg_mount_direct(int type, struct hierarchy *h,
1533 const char *controllerpath)
1534 {
1535 int ret;
1536 char *controllers = NULL;
1537 char *fstype = "cgroup2";
1538 unsigned long flags = 0;
1539
1540 flags |= MS_NOSUID;
1541 flags |= MS_NOEXEC;
1542 flags |= MS_NODEV;
1543 flags |= MS_RELATIME;
1544
1545 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1546 flags |= MS_RDONLY;
1547
1548 if (h->version != CGROUP2_SUPER_MAGIC) {
1549 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1550 if (!controllers)
1551 return -ENOMEM;
1552 fstype = "cgroup";
1553 }
1554
1555 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1556 free(controllers);
1557 if (ret < 0) {
1558 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1559 return -1;
1560 }
1561
1562 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1563 return 0;
1564 }
1565
1566 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1567 const char *controllerpath)
1568 {
1569 return __cg_mount_direct(type, h, controllerpath);
1570 }
1571
1572 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1573 const char *controllerpath)
1574 {
1575 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1576 return 0;
1577
1578 return __cg_mount_direct(type, h, controllerpath);
1579 }
1580
1581 static bool cgfsng_mount(struct cgroup_ops *ops, struct lxc_handler *handler,
1582 const char *root, int type)
1583 {
1584 int i, ret;
1585 char *tmpfspath = NULL;
1586 bool has_cgns = false, retval = false, wants_force_mount = false;
1587
1588 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1589 return true;
1590
1591 if (type & LXC_AUTO_CGROUP_FORCE) {
1592 type &= ~LXC_AUTO_CGROUP_FORCE;
1593 wants_force_mount = true;
1594 }
1595
1596 if (!wants_force_mount){
1597 if (!lxc_list_empty(&handler->conf->keepcaps))
1598 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1599 else
1600 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1601 }
1602
1603 has_cgns = cgns_supported();
1604 if (has_cgns && !wants_force_mount)
1605 return true;
1606
1607 if (type == LXC_AUTO_CGROUP_NOSPEC)
1608 type = LXC_AUTO_CGROUP_MIXED;
1609 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1610 type = LXC_AUTO_CGROUP_FULL_MIXED;
1611
1612 /* Mount tmpfs */
1613 tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
1614 ret = safe_mount(NULL, tmpfspath, "tmpfs",
1615 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1616 "size=10240k,mode=755", root);
1617 if (ret < 0)
1618 goto on_error;
1619
1620 for (i = 0; ops->hierarchies[i]; i++) {
1621 char *controllerpath, *path2;
1622 struct hierarchy *h = ops->hierarchies[i];
1623 char *controller = strrchr(h->mountpoint, '/');
1624
1625 if (!controller)
1626 continue;
1627 controller++;
1628
1629 controllerpath = must_make_path(tmpfspath, controller, NULL);
1630 if (dir_exists(controllerpath)) {
1631 free(controllerpath);
1632 continue;
1633 }
1634
1635 ret = mkdir(controllerpath, 0755);
1636 if (ret < 0) {
1637 SYSERROR("Error creating cgroup path: %s", controllerpath);
1638 free(controllerpath);
1639 goto on_error;
1640 }
1641
1642 if (has_cgns && wants_force_mount) {
1643 /* If cgroup namespaces are supported but the container
1644 * will not have CAP_SYS_ADMIN after it has started we
1645 * need to mount the cgroups manually.
1646 */
1647 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1648 free(controllerpath);
1649 if (ret < 0)
1650 goto on_error;
1651
1652 continue;
1653 }
1654
1655 ret = cg_mount_cgroup_full(type, h, controllerpath);
1656 if (ret < 0) {
1657 free(controllerpath);
1658 goto on_error;
1659 }
1660
1661 if (!cg_mount_needs_subdirs(type)) {
1662 free(controllerpath);
1663 continue;
1664 }
1665
1666 path2 = must_make_path(controllerpath, h->base_cgroup,
1667 ops->container_cgroup, NULL);
1668 ret = mkdir_p(path2, 0755);
1669 if (ret < 0) {
1670 free(controllerpath);
1671 free(path2);
1672 goto on_error;
1673 }
1674
1675 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1676 path2, ops->container_cgroup);
1677 free(controllerpath);
1678 free(path2);
1679 if (ret < 0)
1680 goto on_error;
1681 }
1682 retval = true;
1683
1684 on_error:
1685 free(tmpfspath);
1686 return retval;
1687 }
1688
1689 static int recursive_count_nrtasks(char *dirname)
1690 {
1691 struct dirent *direntp;
1692 DIR *dir;
1693 int count = 0, ret;
1694 char *path;
1695
1696 dir = opendir(dirname);
1697 if (!dir)
1698 return 0;
1699
1700 while ((direntp = readdir(dir))) {
1701 struct stat mystat;
1702
1703 if (!strcmp(direntp->d_name, ".") ||
1704 !strcmp(direntp->d_name, ".."))
1705 continue;
1706
1707 path = must_make_path(dirname, direntp->d_name, NULL);
1708
1709 if (lstat(path, &mystat))
1710 goto next;
1711
1712 if (!S_ISDIR(mystat.st_mode))
1713 goto next;
1714
1715 count += recursive_count_nrtasks(path);
1716 next:
1717 free(path);
1718 }
1719
1720 path = must_make_path(dirname, "cgroup.procs", NULL);
1721 ret = lxc_count_file_lines(path);
1722 if (ret != -1)
1723 count += ret;
1724 free(path);
1725
1726 (void)closedir(dir);
1727
1728 return count;
1729 }
1730
1731 static int cgfsng_nrtasks(struct cgroup_ops *ops)
1732 {
1733 int count;
1734 char *path;
1735
1736 if (!ops->container_cgroup || !ops->hierarchies)
1737 return -1;
1738
1739 path = must_make_path(ops->hierarchies[0]->fullcgpath, NULL);
1740 count = recursive_count_nrtasks(path);
1741 free(path);
1742 return count;
1743 }
1744
1745 /* Only root needs to escape to the cgroup of its init. */
1746 static bool cgfsng_escape(const struct cgroup_ops *ops, struct lxc_conf *conf)
1747 {
1748 int i;
1749
1750 if (conf->cgroup_meta.keep || geteuid())
1751 return true;
1752
1753 for (i = 0; ops->hierarchies[i]; i++) {
1754 int ret;
1755 char *fullpath;
1756
1757 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1758 ops->hierarchies[i]->base_cgroup,
1759 "cgroup.procs", NULL);
1760 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1761 if (ret != 0) {
1762 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1763 free(fullpath);
1764 return false;
1765 }
1766 free(fullpath);
1767 }
1768
1769 return true;
1770 }
1771
1772 static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1773 {
1774 int i;
1775
1776 for (i = 0; ops->hierarchies[i]; i++)
1777 ;
1778
1779 return i;
1780 }
1781
1782 static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
1783 {
1784 int i;
1785
1786 /* sanity check n */
1787 for (i = 0; i < n; i++)
1788 if (!ops->hierarchies[i])
1789 return false;
1790
1791 *out = ops->hierarchies[i]->controllers;
1792
1793 return true;
1794 }
1795
1796 #define THAWED "THAWED"
1797 #define THAWED_LEN (strlen(THAWED))
1798
1799 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
1800 * to be adapted.
1801 */
1802 static bool cgfsng_unfreeze(struct cgroup_ops *ops)
1803 {
1804 int ret;
1805 char *fullpath;
1806 struct hierarchy *h;
1807
1808 h = get_hierarchy(ops, "freezer");
1809 if (!h)
1810 return false;
1811
1812 fullpath = must_make_path(h->fullcgpath, "freezer.state", NULL);
1813 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
1814 free(fullpath);
1815 if (ret < 0)
1816 return false;
1817
1818 return true;
1819 }
1820
1821 static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
1822 const char *controller)
1823 {
1824 struct hierarchy *h;
1825
1826 h = get_hierarchy(ops, controller);
1827 if (!h) {
1828 WARN("Failed to find hierarchy for controller \"%s\"",
1829 controller ? controller : "(null)");
1830 return NULL;
1831 }
1832
1833 return h->fullcgpath ? h->fullcgpath + strlen(h->mountpoint) : NULL;
1834 }
1835
1836 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
1837 * which must be freed by the caller.
1838 */
1839 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
1840 const char *inpath,
1841 const char *filename)
1842 {
1843 return must_make_path(h->mountpoint, inpath, filename, NULL);
1844 }
1845
1846 /* Technically, we're always at a delegation boundary here (this is especially
1847 * true when cgroup namespaces are available). The reasoning is that in order
1848 * for us to have been able to start a container in the first place, the root
1849 * cgroup must have been a leaf node. Now, either the container's init system
1850 * has populated the cgroup and kept it as a leaf node, or it has created
1851 * subtrees. In the former case we simply attach to the leaf node we created
1852 * when we started the container; in the latter case we create our own
1853 * cgroup for the attaching process.
1854 */
1855 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
1856 const char *lxcpath, const char *pidstr,
1857 size_t pidstr_len, const char *controller)
1858 {
1859 int ret;
1860 size_t len;
1861 int fret = -1, idx = 0;
1862 char *base_path = NULL, *container_cgroup = NULL, *full_path = NULL;
1863
1864 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
1865 /* not running */
1866 if (!container_cgroup)
1867 return 0;
1868
1869 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
1870 full_path = must_make_path(base_path, "cgroup.procs", NULL);
1871 /* cgroup is populated */
1872 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
1873 if (ret < 0 && errno != EBUSY)
1874 goto on_error;
1875
1876 if (ret == 0)
1877 goto on_success;
1878
1879 free(full_path);
1880
1881 len = strlen(base_path) + sizeof("/lxc-1000") - 1 +
1882 sizeof("/cgroup-procs") - 1;
1883 full_path = must_alloc(len + 1);
1884 do {
1885 if (idx)
1886 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
1887 base_path, idx);
1888 else
1889 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
1890 if (ret < 0 || (size_t)ret >= len + 1)
1891 goto on_error;
1892
1893 ret = mkdir_p(full_path, 0755);
1894 if (ret < 0 && errno != EEXIST)
1895 goto on_error;
1896
1897 (void)strlcat(full_path, "/cgroup.procs", len + 1);
1898 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
1899 if (ret == 0)
1900 goto on_success;
1901
1902 /* this is a non-leaf node */
1903 if (errno != EBUSY)
1904 goto on_error;
1905
1906 } while (++idx > 0 && idx < 1000);
1907
1908 on_success:
1909 if (idx < 1000)
1910 fret = 0;
1911
1912 on_error:
1913 free(base_path);
1914 free(container_cgroup);
1915 free(full_path);
1916
1917 return fret;
1918 }
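/* Sketch of the fallback above (paths are relative to the container's unified
 * cgroup): writing the pid to cgroup.procs fails with EBUSY when the cgroup is
 * not a leaf, so the code retries with lxc/cgroup.procs, then
 * lxc-1/cgroup.procs, lxc-2/cgroup.procs, ..., giving up after 1000 attempts.
 */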
1919
1920 static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
1921 const char *lxcpath, pid_t pid)
1922 {
1923 int i, len, ret;
1924 char pidstr[25];
1925
1926 len = snprintf(pidstr, 25, "%d", pid);
1927 if (len < 0 || len >= 25)
1928 return false;
1929
1930 for (i = 0; ops->hierarchies[i]; i++) {
1931 char *path;
1932 char *fullpath = NULL;
1933 struct hierarchy *h = ops->hierarchies[i];
1934
1935 if (h->version == CGROUP2_SUPER_MAGIC) {
1936 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
1937 h->controllers[0]);
1938 if (ret < 0)
1939 return false;
1940
1941 continue;
1942 }
1943
1944 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
1945 /* not running */
1946 if (!path)
1947 continue;
1948
1949 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
1950 free(path);
1951 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
1952 if (ret < 0) {
1953 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
1954 free(fullpath);
1955 return false;
1956 }
1957 free(fullpath);
1958 }
1959
1960 return true;
1961 }
1962
1963 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
1964 * don't have a cgroup_data set up, so we ask the running container through the
1965 * commands API for the cgroup path.
1966 */
1967 static int cgfsng_get(struct cgroup_ops *ops, const char *filename, char *value,
1968 size_t len, const char *name, const char *lxcpath)
1969 {
1970 int ret = -1;
1971 size_t controller_len;
1972 char *controller, *p, *path;
1973 struct hierarchy *h;
1974
1975 controller_len = strlen(filename);
1976 controller = alloca(controller_len + 1);
1977 (void)strlcpy(controller, filename, controller_len + 1);
1978
1979 p = strchr(controller, '.');
1980 if (p)
1981 *p = '\0';
1982
1983 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
1984 /* not running */
1985 if (!path)
1986 return -1;
1987
1988 h = get_hierarchy(ops, controller);
1989 if (h) {
1990 char *fullpath;
1991
1992 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
1993 ret = lxc_read_from_file(fullpath, value, len);
1994 free(fullpath);
1995 }
1996 free(path);
1997
1998 return ret;
1999 }
2000
2001 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2002 * don't have a cgroup_data set up, so we ask the running container through the
2003 * commands API for the cgroup path.
2004 */
2005 static int cgfsng_set(struct cgroup_ops *ops, const char *filename,
2006 const char *value, const char *name, const char *lxcpath)
2007 {
2008 int ret = -1;
2009 size_t controller_len;
2010 char *controller, *p, *path;
2011 struct hierarchy *h;
2012
2013 controller_len = strlen(filename);
2014 controller = alloca(controller_len + 1);
2015 (void)strlcpy(controller, filename, controller_len + 1);
2016
2017 p = strchr(controller, '.');
2018 if (p)
2019 *p = '\0';
2020
2021 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2022 /* not running */
2023 if (!path)
2024 return -1;
2025
2026 h = get_hierarchy(ops, controller);
2027 if (h) {
2028 char *fullpath;
2029
2030 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2031 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2032 free(fullpath);
2033 }
2034 free(path);
2035
2036 return ret;
2037 }
2038
2039 /* take devices cgroup line
2040 * /dev/foo rwx
2041 * and convert it to a valid
2042 * type major:minor mode
2043 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2044 * the output.
2045 */
2046 static int convert_devpath(const char *invalue, char *dest)
2047 {
2048 int n_parts;
2049 char *p, *path, type;
2050 unsigned long minor, major;
2051 struct stat sb;
2052 int ret = -EINVAL;
2053 char *mode = NULL;
2054
2055 path = must_copy_string(invalue);
2056
2057 /* Read path followed by mode. Ignore any trailing text.
2058 * A ' # comment' would be legal. Technically other text is not
2059 * legal; we could check for that if we cared to.
2060 */
2061 for (n_parts = 1, p = path; *p && n_parts < 3; p++) {
2062 if (*p != ' ')
2063 continue;
2064 *p = '\0';
2065
2066 if (n_parts != 1)
2067 break;
2068 p++;
2069 n_parts++;
2070
2071 while (*p == ' ')
2072 p++;
2073
2074 mode = p;
2075
2076 if (*p == '\0')
2077 goto out;
2078 }
2079
2080 if (n_parts == 1)
2081 goto out;
2082
2083 ret = stat(path, &sb);
2084 if (ret < 0)
2085 goto out;
2086
2087 mode_t m = sb.st_mode & S_IFMT;
2088 switch (m) {
2089 case S_IFBLK:
2090 type = 'b';
2091 break;
2092 case S_IFCHR:
2093 type = 'c';
2094 break;
2095 default:
2096 ERROR("Unsupported device type %i for \"%s\"", m, path);
2097 ret = -EINVAL;
2098 goto out;
2099 }
2100
2101 major = MAJOR(sb.st_rdev);
2102 minor = MINOR(sb.st_rdev);
2103 ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
2104 if (ret < 0 || ret >= 50) {
2105 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2106 "chars)", type, major, minor, mode);
2107 ret = -ENAMETOOLONG;
2108 goto out;
2109 }
2110 ret = 0;
2111
2112 out:
2113 free(path);
2114 return ret;
2115 }
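/* Example (illustrative, assuming /dev/fuse is the usual 10:229 character
 * device): convert_devpath("/dev/fuse rwm", dest) fills dest with
 * "c 10:229 rwm", i.e. the "type major:minor mode" form expected by the
 * legacy devices controller.
 */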
2116
2117 /* Called from setup_limits - here we have the container's cgroup_data because
2118 * we created the cgroups.
2119 */
2120 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2121 const char *value)
2122 {
2123 size_t len;
2124 char *fullpath, *p;
2125 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2126 char converted_value[50];
2127 struct hierarchy *h;
2128 int ret = 0;
2129 char *controller = NULL;
2130
2131 len = strlen(filename);
2132 controller = alloca(len + 1);
2133 (void)strlcpy(controller, filename, len + 1);
2134
2135 p = strchr(controller, '.');
2136 if (p)
2137 *p = '\0';
2138
2139 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2140 ret = convert_devpath(value, converted_value);
2141 if (ret < 0)
2142 return ret;
2143 value = converted_value;
2144 }
2145
2146 h = get_hierarchy(ops, controller);
2147 if (!h) {
2148 ERROR("Failed to setup limits for the \"%s\" controller. "
2149 "The controller seems to be unused by \"cgfsng\" cgroup "
2150 "driver or not enabled on the cgroup hierarchy",
2151 controller);
2152 errno = ENOENT;
2153 return -ENOENT;
2154 }
2155
2156 fullpath = must_make_path(h->fullcgpath, filename, NULL);
2157 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2158 free(fullpath);
2159 return ret;
2160 }
2161
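/* Apply the legacy (cgroup v1) settings from @cgroup_settings. The list is
 * sorted first, and entries are applied in two passes selected by
 * @do_devices: one pass for "devices.*" entries and one for everything else.
 * For device entries, EACCES/EPERM only produce a warning, since an
 * unprivileged container may legitimately lack that permission.
 */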
2162 static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
2163 struct lxc_list *cgroup_settings,
2164 bool do_devices)
2165 {
2166 struct lxc_list *iterator, *next, *sorted_cgroup_settings;
2167 struct lxc_cgroup *cg;
2168 bool ret = false;
2169
2170 if (lxc_list_empty(cgroup_settings))
2171 return true;
2172
2173 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2174 if (!sorted_cgroup_settings)
2175 return false;
2176
2177 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2178 cg = iterator->elem;
2179
2180 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2181 if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
2182 if (do_devices && (errno == EACCES || errno == EPERM)) {
2183 WARN("Failed to set \"%s\" to \"%s\"",
2184 cg->subsystem, cg->value);
2185 continue;
2186 }
2187 WARN("Failed to set \"%s\" to \"%s\"",
2188 cg->subsystem, cg->value);
2189 goto out;
2190 }
2191 DEBUG("Set controller \"%s\" to \"%s\"",
2192 cg->subsystem, cg->value);
2193 }
2194 }
2195
2196 ret = true;
2197 INFO("Limits for the legacy cgroup hierarchies have been set up");
2198 out:
2199 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2200 lxc_list_del(iterator);
2201 free(iterator);
2202 }
2203 free(sorted_cgroup_settings);
2204 return ret;
2205 }
2206
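/* Apply the unified (cgroup v2) settings from @cgroup_settings by writing
 * each key directly into the container's cgroup directory, e.g.
 * (illustrative values) writing "536870912" to <unified cgroup>/memory.max.
 */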
2207 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2208 struct lxc_list *cgroup_settings)
2209 {
2210 struct lxc_list *iterator;
2211 struct hierarchy *h = ops->unified;
2212
2213 if (lxc_list_empty(cgroup_settings))
2214 return true;
2215
2216 if (!h)
2217 return false;
2218
2219 lxc_list_for_each(iterator, cgroup_settings) {
2220 int ret;
2221 char *fullpath;
2222 struct lxc_cgroup *cg = iterator->elem;
2223
2224 fullpath = must_make_path(h->fullcgpath, cg->subsystem, NULL);
2225 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2226 free(fullpath);
2227 if (ret < 0) {
2228 SYSERROR("Failed to set \"%s\" to \"%s\"",
2229 cg->subsystem, cg->value);
2230 return false;
2231 }
2232 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2233 }
2234
2235 INFO("Limits for the unified cgroup hierarchy have been set up");
2236 return true;
2237 }
2238
2239 static bool cgfsng_setup_limits(struct cgroup_ops *ops, struct lxc_conf *conf,
2240 bool do_devices)
2241 {
2242 bool bret;
2243
2244 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2245 if (!bret)
2246 return false;
2247
2248 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2249 }
2250
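/* Return true if every controller in @controllers is listed in
 * lxc.cgroup.use, or if lxc.cgroup.use is unset. Illustrative example: with
 * lxc.cgroup.use = "memory,pids", a hierarchy offering only "cpuset" is
 * rejected, while one offering just "memory" is kept.
 */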
2251 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
2252 char **controllers)
2253 {
2254 char **cur_ctrl, **cur_use;
2255
2256 if (!ops->cgroup_use)
2257 return true;
2258
2259 for (cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
2260 bool found = false;
2261
2262 for (cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
2263 if (strcmp(*cur_use, *cur_ctrl) != 0)
2264 continue;
2265
2266 found = true;
2267 break;
2268 }
2269
2270 if (found)
2271 continue;
2272
2273 return false;
2274 }
2275
2276 return true;
2277 }
2278
2279 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
2280 * mountpoints and current cgroups, and stores it in @ops.
2281 */
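/* Illustrative input (assumed, typical /proc/self/mountinfo lines):
 *
 *	35 24 0:31 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,memory
 *	40 24 0:36 / /sys/fs/cgroup/unified rw,nosuid,nodev,noexec,relatime shared:21 - cgroup2 cgroup2 rw
 *
 * Seeing both a legacy "cgroup" mount and a "cgroup2" mount in the same scan
 * leaves cgroup_layout set to CGROUP_LAYOUT_HYBRID below.
 */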
2282 static bool cg_hybrid_init(struct cgroup_ops *ops, bool keep)
2283 {
2284 int ret;
2285 char *basecginfo;
2286 FILE *f;
2287 size_t len = 0;
2288 char *line = NULL;
2289 char **klist = NULL, **nlist = NULL;
2290
2291 /* Root spawned containers escape the current cgroup, so use init's
2292 * cgroups as our base in that case.
2293 */
2294 if (!keep && (geteuid() == 0))
2295 basecginfo = read_file("/proc/1/cgroup");
2296 else
2297 basecginfo = read_file("/proc/self/cgroup");
2298 if (!basecginfo)
2299 return false;
2300
2301 ret = get_existing_subsystems(&klist, &nlist);
2302 if (ret < 0) {
2303 ERROR("Failed to retrieve available legacy cgroup controllers");
2304 free(basecginfo);
2305 return false;
2306 }
2307
2308 f = fopen("/proc/self/mountinfo", "r");
2309 if (!f) {
2310 ERROR("Failed to open \"/proc/self/mountinfo\"");
2311 free(basecginfo);
2312 return false;
2313 }
2314
2315 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
2316
2317 while (getline(&line, &len, f) != -1) {
2318 int type;
2319 bool writeable;
2320 struct hierarchy *new;
2321 char *base_cgroup = NULL, *mountpoint = NULL;
2322 char **controller_list = NULL;
2323
2324 type = get_cgroup_version(line);
2325 if (type == 0)
2326 continue;
2327
2328 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
2329 continue;
2330
2331 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
2332 if (type == CGROUP2_SUPER_MAGIC)
2333 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2334 else if (type == CGROUP_SUPER_MAGIC)
2335 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
2336 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
2337 if (type == CGROUP_SUPER_MAGIC)
2338 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2339 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
2340 if (type == CGROUP2_SUPER_MAGIC)
2341 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2342 }
2343
2344 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
2345 if (!controller_list && type == CGROUP_SUPER_MAGIC)
2346 continue;
2347
2348 if (type == CGROUP_SUPER_MAGIC)
2349 if (controller_list_is_dup(ops->hierarchies, controller_list))
2350 goto next;
2351
2352 mountpoint = cg_hybrid_get_mountpoint(line);
2353 if (!mountpoint) {
2354 ERROR("Failed parsing mountpoint from \"%s\"", line);
2355 goto next;
2356 }
2357
2358 if (type == CGROUP_SUPER_MAGIC)
2359 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
2360 else
2361 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
2362 if (!base_cgroup) {
2363 ERROR("Failed to find current cgroup");
2364 goto next;
2365 }
2366
2367 trim(base_cgroup);
2368 prune_init_scope(base_cgroup);
2369 if (type == CGROUP2_SUPER_MAGIC)
2370 writeable = test_writeable_v2(mountpoint, base_cgroup);
2371 else
2372 writeable = test_writeable_v1(mountpoint, base_cgroup);
2373 if (!writeable)
2374 goto next;
2375
2376 if (type == CGROUP2_SUPER_MAGIC) {
2377 char *cgv2_ctrl_path;
2378
2379 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
2380 "cgroup.controllers",
2381 NULL);
2382
2383 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
2384 free(cgv2_ctrl_path);
2385 if (!controller_list) {
2386 controller_list = cg_unified_make_empty_controller();
2387 TRACE("No controllers are enabled for "
2388 "delegation in the unified hierarchy");
2389 }
2390 }
2391
2392 /* Exclude all controllers that cgroup use does not want. */
2393 if (!cgroup_use_wants_controllers(ops, controller_list))
2394 goto next;
2395
2396 new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
2397 if (type == CGROUP2_SUPER_MAGIC && !ops->unified)
2398 ops->unified = new;
2399
2400 continue;
2401
2402 next:
2403 free_string_list(controller_list);
2404 free(mountpoint);
2405 free(base_cgroup);
2406 }
2407
2408 free_string_list(klist);
2409 free_string_list(nlist);
2410
2411 free(basecginfo);
2412
2413 fclose(f);
2414 free(line);
2415
2416 TRACE("Writable cgroup hierarchies:");
2417 lxc_cgfsng_print_hierarchies(ops);
2418
2419 /* Verify that all controllers in lxc.cgroup.use and all crucial
2420 * controllers are accounted for.
2421 */
2422 if (!all_controllers_found(ops))
2423 return false;
2424
2425 return true;
2426 }
2427
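/* Check whether /sys/fs/cgroup itself is a cgroup2 mount. Returns
 * CGROUP2_SUPER_MAGIC (0x63677270, per linux/magic.h) for a pure unified
 * layout, 0 if something else (e.g. a tmpfs holding per-controller v1
 * mounts) is mounted there, and -ENOMEDIUM if statfs() fails.
 */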
2428 static int cg_is_pure_unified(void)
2429 {
2430
2431 int ret;
2432 struct statfs fs;
2433
2434 ret = statfs("/sys/fs/cgroup", &fs);
2435 if (ret < 0)
2436 return -ENOMEDIUM;
2437
2438 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2439 return CGROUP2_SUPER_MAGIC;
2440
2441 return 0;
2442 }
2443
2444 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
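/* Illustrative /proc/self/cgroup content on a pure unified layout (the path
 * is an example only):
 *
 *	0::/user.slice/user-1000.slice/session-2.scope
 *
 * The leading "0::" is skipped so that "/user.slice/..." is returned.
 */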
2445 static char *cg_unified_get_current_cgroup(bool keep)
2446 {
2447 char *basecginfo, *base_cgroup;
2448 char *copy = NULL;
2449
2450 if (!keep && (geteuid() == 0))
2451 basecginfo = read_file("/proc/1/cgroup");
2452 else
2453 basecginfo = read_file("/proc/self/cgroup");
2454 if (!basecginfo)
2455 return NULL;
2456
2457 base_cgroup = strstr(basecginfo, "0::/");
2458 if (!base_cgroup)
2459 goto cleanup_on_err;
2460
2461 base_cgroup = base_cgroup + 3;
2462 copy = copy_to_eol(base_cgroup);
2463 if (!copy)
2464 goto cleanup_on_err;
2465
2466 cleanup_on_err:
2467 free(basecginfo);
2468 if (copy)
2469 trim(copy);
2470
2471 return copy;
2472 }
2473
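/* Initialize the unified hierarchy if this is a pure cgroup2 system. Returns
 * CGROUP2_SUPER_MAGIC on success, 0 if the layout is not purely unified (the
 * caller then falls back to cg_hybrid_init()), and a negative errno on error.
 * cgroup.subtree_control holds a space-separated controller list, e.g.
 * (illustrative) "cpu io memory pids".
 */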
2474 static int cg_unified_init(struct cgroup_ops *ops, bool keep)
2475 {
2476 int ret;
2477 char *mountpoint, *subtree_path;
2478 char **delegatable;
2479 char *base_cgroup = NULL;
2480
2481 ret = cg_is_pure_unified();
2482 if (ret == -ENOMEDIUM)
2483 return -ENOMEDIUM;
2484
2485 if (ret != CGROUP2_SUPER_MAGIC)
2486 return 0;
2487
2488 base_cgroup = cg_unified_get_current_cgroup(keep);
2489 if (!base_cgroup)
2490 return -EINVAL;
2491 prune_init_scope(base_cgroup);
2492
2493 /* We assume that we have already been given controllers to delegate
2494 * further down the hierarchy. If not, it is up to the user to delegate
2495 * them to us.
2496 */
2497 mountpoint = must_copy_string("/sys/fs/cgroup");
2498 subtree_path = must_make_path(mountpoint, base_cgroup,
2499 "cgroup.subtree_control", NULL);
2500 delegatable = cg_unified_get_controllers(subtree_path);
2501 free(subtree_path);
2502 if (!delegatable)
2503 delegatable = cg_unified_make_empty_controller();
2504 if (!delegatable[0])
2505 TRACE("No controllers are enabled for delegation");
2506
2507 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2508 * we should verify that here. The reason I'm not doing it right now is
2509 * that I'm not convinced that lxc.cgroup.use will be the future, since
2510 * it is a global property. I'd much rather have an option that lets you
2511 * request controllers per container.
2512 */
2513
2514 add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2515
2516 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2517 return CGROUP2_SUPER_MAGIC;
2518 }
2519
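/* Parse the global lxc.cgroup.use value, a comma-separated controller list
 * such as (illustrative) "memory,pids", then try the pure unified setup
 * first and fall back to the hybrid/legacy scan.
 */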
2520 static bool cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
2521 {
2522 int ret;
2523 const char *tmp;
2524 bool keep = conf->cgroup_meta.keep;
2525
2526 tmp = lxc_global_config_value("lxc.cgroup.use");
2527 if (tmp) {
2528 char *chop, *cur, *pin;
2529
2530 pin = must_copy_string(tmp);
2531 chop = pin;
2532
2533 lxc_iterate_parts(cur, chop, ",") {
2534 must_append_string(&ops->cgroup_use, cur);
2535 }
2536
2537 free(pin);
2538 }
2539
2540 ret = cg_unified_init(ops, keep);
2541 if (ret < 0)
2542 return false;
2543
2544 if (ret == CGROUP2_SUPER_MAGIC)
2545 return true;
2546
2547 return cg_hybrid_init(ops, keep);
2548 }
2549
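/* Copy the global lxc.cgroup.pattern into @ops. The default is typically
 * something like "/lxc/%n", with %n replaced by the container name, but the
 * exact value depends on the build-time configuration, so treat this as an
 * illustrative assumption rather than a guarantee.
 */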
2550 static bool cgfsng_data_init(struct cgroup_ops *ops)
2551 {
2552 const char *cgroup_pattern;
2553
2554 /* Copy system-wide cgroup information. */
2555 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2556 if (!cgroup_pattern) {
2557 /* lxc.cgroup.pattern is only NULL on error. */
2558 ERROR("Failed to retrieve cgroup pattern");
2559 return false;
2560 }
2561 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2562
2563 return true;
2564 }
2565
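/* Illustrative usage sketch (assumed caller, e.g. the generic cgroup layer;
 * not a verbatim call site):
 *
 *	struct cgroup_ops *ops = cgfsng_ops_init(conf);
 *	if (ops && !ops->data_init(ops)) {
 *		cgroup_exit(ops);	// hypothetical cleanup helper
 *		ops = NULL;
 *	}
 *
 * The returned struct is heap-allocated and owned by the caller.
 */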
2566 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
2567 {
2568 struct cgroup_ops *cgfsng_ops;
2569
2570 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2571 if (!cgfsng_ops)
2572 return NULL;
2573
2574 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2575 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2576
2577 if (!cg_init(cgfsng_ops, conf)) {
2578 free(cgfsng_ops);
2579 return NULL;
2580 }
2581
2582 cgfsng_ops->data_init = cgfsng_data_init;
2583 cgfsng_ops->destroy = cgfsng_destroy;
2584 cgfsng_ops->create = cgfsng_create;
2585 cgfsng_ops->enter = cgfsng_enter;
2586 cgfsng_ops->escape = cgfsng_escape;
2587 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2588 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2589 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2590 cgfsng_ops->get = cgfsng_get;
2591 cgfsng_ops->set = cgfsng_set;
2592 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2593 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2594 cgfsng_ops->driver = "cgfsng";
2595 cgfsng_ops->version = "1.0.0";
2596 cgfsng_ops->attach = cgfsng_attach;
2597 cgfsng_ops->chown = cgfsng_chown;
2598 cgfsng_ops->mount = cgfsng_mount;
2599 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2600
2601 return cgfsng_ops;
2602 }