1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
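/* For example, on a typical hybrid host this corresponds to a layout such as:
 *
 *   /sys/fs/cgroup/cpu,cpuacct   (legacy hierarchy, clist "cpu,cpuacct")
 *   /sys/fs/cgroup/memory        (legacy hierarchy, clist "memory")
 *   /sys/fs/cgroup/systemd       (named hierarchy, handled as "name=systemd")
 *   /sys/fs/cgroup/unified       (the cgroup2 unified hierarchy)
 *
 * The mountpoint names above are illustrative; the exact set depends on the
 * host's init system and kernel configuration.
 */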
36
37 #include "config.h"
38
39 #include <ctype.h>
40 #include <dirent.h>
41 #include <errno.h>
42 #include <grp.h>
43 #include <stdint.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <linux/kdev_t.h>
49 #include <linux/types.h>
50 #include <sys/types.h>
51
52 #include "caps.h"
53 #include "cgroup.h"
54 #include "cgroup_utils.h"
55 #include "commands.h"
56 #include "conf.h"
57 #include "log.h"
58 #include "macro.h"
59 #include "storage/storage.h"
60 #include "utils.h"
61
62 #ifndef HAVE_STRLCPY
63 #include "include/strlcpy.h"
64 #endif
65
66 #ifndef HAVE_STRLCAT
67 #include "include/strlcat.h"
68 #endif
69
70 lxc_log_define(cgfsng, cgroup);
71
72 static void free_string_list(char **clist)
73 {
74 int i;
75
76 if (!clist)
77 return;
78
79 for (i = 0; clist[i]; i++)
80 free(clist[i]);
81
82 free(clist);
83 }
84
85 /* Allocate a pointer, do not fail. */
86 static void *must_alloc(size_t sz)
87 {
88 return must_realloc(NULL, sz);
89 }
90
91 /* Given a pointer to a null-terminated array of pointers, realloc to add one
92 * entry, and point the new entry to NULL. Do not fail. Return the index to the
93 * second-to-last entry - that is, the one which is now available for use
94 * (keeping the list null-terminated).
95 */
96 static int append_null_to_list(void ***list)
97 {
98 int newentry = 0;
99
100 if (*list)
101 for (; (*list)[newentry]; newentry++)
102 ;
103
104 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
105 (*list)[newentry + 1] = NULL;
106 return newentry;
107 }
108
109 /* Given a null-terminated array of strings, check whether @entry is one of the
110 * strings.
111 */
112 static bool string_in_list(char **list, const char *entry)
113 {
114 int i;
115
116 if (!list)
117 return false;
118
119 for (i = 0; list[i]; i++)
120 if (strcmp(list[i], entry) == 0)
121 return true;
122
123 return false;
124 }
125
126 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
127 * "name=systemd". Do not fail.
128 */
129 static char *cg_legacy_must_prefix_named(char *entry)
130 {
131 size_t len;
132 char *prefixed;
133
134 len = strlen(entry);
135 prefixed = must_alloc(len + 6);
136
137 memcpy(prefixed, "name=", sizeof("name=") - 1);
138 memcpy(prefixed + sizeof("name=") - 1, entry, len);
139 prefixed[len + 5] = '\0';
140 return prefixed;
141 }
142
143 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
144 * we are called.
145 *
146 * We also handle named subsystems here. Any controller which is not a kernel
147 * subsystem is prefixed with "name=". Any controller which is both a kernel and
148 * a named subsystem is refused, because we cannot be sure which one we have here.
149 * (TODO: We could work around this in some cases by just remounting to be
150 * unambiguous, or by comparing mountpoint contents with current cgroup.)
151 *
152 * The last entry will always be NULL.
153 */
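/* For example, given a mountpoint such as "/sys/fs/cgroup/systemd" (illustrative),
 * the entry "systemd" is not a kernel controller and is therefore stored as
 * "name=systemd", while a kernel controller such as "cpu" is stored unchanged.
 */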
154 static void must_append_controller(char **klist, char **nlist, char ***clist,
155 char *entry)
156 {
157 int newentry;
158 char *copy;
159
160 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
161 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
162 ERROR("It is both a named and kernel subsystem");
163 return;
164 }
165
166 newentry = append_null_to_list((void ***)clist);
167
168 if (strncmp(entry, "name=", 5) == 0)
169 copy = must_copy_string(entry);
170 else if (string_in_list(klist, entry))
171 copy = must_copy_string(entry);
172 else
173 copy = cg_legacy_must_prefix_named(entry);
174
175 (*clist)[newentry] = copy;
176 }
177
178 /* Given a handler's cgroup data, return the struct hierarchy for the controller
179 * @c, or NULL if there is none.
180 */
181 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *c)
182 {
183 int i;
184
185 if (!ops->hierarchies)
186 return NULL;
187
188 for (i = 0; ops->hierarchies[i]; i++) {
189 if (!c) {
190 /* This is the empty unified hierarchy. */
191 if (ops->hierarchies[i]->controllers &&
192 !ops->hierarchies[i]->controllers[0])
193 return ops->hierarchies[i];
194
195 continue;
196 }
197
198 if (string_in_list(ops->hierarchies[i]->controllers, c))
199 return ops->hierarchies[i];
200 }
201
202 return NULL;
203 }
204
205 #define BATCH_SIZE 50
206 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
207 {
208 int newbatches = (newlen / BATCH_SIZE) + 1;
209 int oldbatches = (oldlen / BATCH_SIZE) + 1;
210
211 if (!*mem || newbatches > oldbatches) {
212 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
213 }
214 }
215
216 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
217 {
218 size_t full = oldlen + newlen;
219
220 batch_realloc(dest, oldlen, full + 1);
221
222 memcpy(*dest + oldlen, new, newlen + 1);
223 }
224
225 /* Slurp in a whole file */
226 static char *read_file(const char *fnam)
227 {
228 FILE *f;
229 char *line = NULL, *buf = NULL;
230 size_t len = 0, fulllen = 0;
231 int linelen;
232
233 f = fopen(fnam, "r");
234 if (!f)
235 return NULL;
236 while ((linelen = getline(&line, &len, f)) != -1) {
237 append_line(&buf, fulllen, line, linelen);
238 fulllen += linelen;
239 }
240 fclose(f);
241 free(line);
242 return buf;
243 }
244
245 /* Taken over and modified from the kernel sources. */
246 #define NBITS 32 /* bits in uint32_t */
247 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
248 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
249
250 static void set_bit(unsigned bit, uint32_t *bitarr)
251 {
252 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
253 }
254
255 static void clear_bit(unsigned bit, uint32_t *bitarr)
256 {
257 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
258 }
259
260 static bool is_set(unsigned bit, uint32_t *bitarr)
261 {
262 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
263 }
264
265 /* Create cpumask from cpulist aka turn:
266 *
267 * 0,2-3
268 *
269 * into bit array
270 *
271 * 1 0 1 1
272 */
273 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
274 {
275 char *token;
276 size_t arrlen;
277 uint32_t *bitarr;
278
279 arrlen = BITS_TO_LONGS(nbits);
280 bitarr = calloc(arrlen, sizeof(uint32_t));
281 if (!bitarr)
282 return NULL;
283
284 lxc_iterate_parts(token, buf, ",") {
285 errno = 0;
286 unsigned end, start;
287 char *range;
288
289 start = strtoul(token, NULL, 0);
290 end = start;
291 range = strchr(token, '-');
292 if (range)
293 end = strtoul(range + 1, NULL, 0);
294
295 if (!(start <= end)) {
296 free(bitarr);
297 return NULL;
298 }
299
300 if (end >= nbits) {
301 free(bitarr);
302 return NULL;
303 }
304
305 while (start <= end)
306 set_bit(start++, bitarr);
307 }
308
309 return bitarr;
310 }
311
312 /* Turn cpumask into simple, comma-separated cpulist. */
313 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
314 {
315 int ret;
316 size_t i;
317 char **cpulist = NULL;
318 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
319
320 for (i = 0; i <= nbits; i++) {
321 if (!is_set(i, bitarr))
322 continue;
323
324 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
325 if (ret < 0 || (size_t)ret >= sizeof(numstr)) {
326 lxc_free_array((void **)cpulist, free);
327 return NULL;
328 }
329
330 ret = lxc_append_string(&cpulist, numstr);
331 if (ret < 0) {
332 lxc_free_array((void **)cpulist, free);
333 return NULL;
334 }
335 }
336
337 if (!cpulist)
338 return NULL;
339
340 return lxc_string_join(",", (const char **)cpulist, false);
341 }
342
343 static ssize_t get_max_cpus(char *cpulist)
344 {
345 char *c1, *c2;
346 char *maxcpus = cpulist;
347 size_t cpus = 0;
348
349 c1 = strrchr(maxcpus, ',');
350 if (c1)
351 c1++;
352
353 c2 = strrchr(maxcpus, '-');
354 if (c2)
355 c2++;
356
357 if (!c1 && !c2)
358 c1 = maxcpus;
359 else if (c1 > c2)
360 c2 = c1;
361 else if (c1 < c2)
362 c1 = c2;
363 else if (!c1 && c2)
364 c1 = c2;
365
366 errno = 0;
367 cpus = strtoul(c1, NULL, 0);
368 if (errno != 0)
369 return -1;
370
371 return cpus;
372 }
373
374 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
375 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
376 {
377 int ret;
378 ssize_t i;
379 char *lastslash, *fpath, oldv;
380 ssize_t maxisol = 0, maxposs = 0;
381 char *cpulist = NULL, *isolcpus = NULL, *posscpus = NULL;
382 uint32_t *isolmask = NULL, *possmask = NULL;
383 bool bret = false, flipped_bit = false;
384
385 lastslash = strrchr(path, '/');
386 if (!lastslash) {
387 ERROR("Failed to detect \"/\" in \"%s\"", path);
388 return bret;
389 }
390 oldv = *lastslash;
391 *lastslash = '\0';
392 fpath = must_make_path(path, "cpuset.cpus", NULL);
393 posscpus = read_file(fpath);
394 if (!posscpus) {
395 SYSERROR("Failed to read file \"%s\"", fpath);
396 goto on_error;
397 }
398
399 /* Get maximum number of cpus found in possible cpuset. */
400 maxposs = get_max_cpus(posscpus);
401 if (maxposs < 0 || maxposs >= INT_MAX - 1)
402 goto on_error;
403
404 if (!file_exists(__ISOL_CPUS)) {
405 /* This system doesn't expose isolated cpus. */
406 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
407 cpulist = posscpus;
408 /* No isolated cpus but we weren't already initialized by
409 * someone. We should simply copy the parent's cpuset.cpus
410 * values.
411 */
412 if (!am_initialized) {
413 DEBUG("Copying cpu settings of parent cgroup");
414 goto copy_parent;
415 }
416 /* No isolated cpus but we were already initialized by someone.
417 * Nothing more to do for us.
418 */
419 goto on_success;
420 }
421
422 isolcpus = read_file(__ISOL_CPUS);
423 if (!isolcpus) {
424 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
425 goto on_error;
426 }
427 if (!isdigit(isolcpus[0])) {
428 TRACE("No isolated cpus detected");
429 cpulist = posscpus;
430 /* No isolated cpus but we weren't already initialized by
431 * someone. We should simply copy the parent's cpuset.cpus
432 * values.
433 */
434 if (!am_initialized) {
435 DEBUG("Copying cpu settings of parent cgroup");
436 goto copy_parent;
437 }
438 /* No isolated cpus but we were already initialized by someone.
439 * Nothing more to do for us.
440 */
441 goto on_success;
442 }
443
444 /* Get maximum number of cpus found in isolated cpuset. */
445 maxisol = get_max_cpus(isolcpus);
446 if (maxisol < 0 || maxisol >= INT_MAX - 1)
447 goto on_error;
448
449 if (maxposs < maxisol)
450 maxposs = maxisol;
451 maxposs++;
452
453 possmask = lxc_cpumask(posscpus, maxposs);
454 if (!possmask) {
455 ERROR("Failed to create cpumask for possible cpus");
456 goto on_error;
457 }
458
459 isolmask = lxc_cpumask(isolcpus, maxposs);
460 if (!isolmask) {
461 ERROR("Failed to create cpumask for isolated cpus");
462 goto on_error;
463 }
464
465 for (i = 0; i <= maxposs; i++) {
466 if (!is_set(i, isolmask) || !is_set(i, possmask))
467 continue;
468
469 flipped_bit = true;
470 clear_bit(i, possmask);
471 }
472
473 if (!flipped_bit) {
474 DEBUG("No isolated cpus present in cpuset");
475 goto on_success;
476 }
477 DEBUG("Removed isolated cpus from cpuset");
478
479 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
480 if (!cpulist) {
481 ERROR("Failed to create cpu list");
482 goto on_error;
483 }
484
485 copy_parent:
486 *lastslash = oldv;
487 free(fpath);
488 fpath = must_make_path(path, "cpuset.cpus", NULL);
489 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
490 if (ret < 0) {
491 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
492 goto on_error;
493 }
494
495 on_success:
496 bret = true;
497
498 on_error:
499 free(fpath);
500
501 free(isolcpus);
502 free(isolmask);
503
504 if (posscpus != cpulist)
505 free(posscpus);
506 free(possmask);
507
508 free(cpulist);
509 return bret;
510 }
511
512 /* Copy contents of parent(@path)/@file to @path/@file */
513 static bool copy_parent_file(char *path, char *file)
514 {
515 int ret;
516 char *fpath, *lastslash, oldv;
517 int len = 0;
518 char *value = NULL;
519
520 lastslash = strrchr(path, '/');
521 if (!lastslash) {
522 ERROR("Failed to detect \"/\" in \"%s\"", path);
523 return false;
524 }
525 oldv = *lastslash;
526 *lastslash = '\0';
527 fpath = must_make_path(path, file, NULL);
528 len = lxc_read_from_file(fpath, NULL, 0);
529 if (len <= 0)
530 goto on_error;
531
532 value = must_alloc(len + 1);
533 ret = lxc_read_from_file(fpath, value, len);
534 if (ret != len)
535 goto on_error;
536 free(fpath);
537
538 *lastslash = oldv;
539 fpath = must_make_path(path, file, NULL);
540 ret = lxc_write_to_file(fpath, value, len, false, 0666);
541 if (ret < 0)
542 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, fpath);
543 free(fpath);
544 free(value);
545 return ret >= 0;
546
547 on_error:
548 SYSERROR("Failed to read file \"%s\"", fpath);
549 free(fpath);
550 free(value);
551 return false;
552 }
553
554 /* Initialize the cpuset hierarchy in the first directory of @cgname and set
555 * cgroup.clone_children so that children inherit settings. Since the
556 * h->base_path is populated by init or ourselves, we know it is already
557 * initialized.
558 */
559 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
560 {
561 int ret;
562 char v;
563 char *cgpath, *clonechildrenpath, *slash;
564
565 if (!string_in_list(h->controllers, "cpuset"))
566 return true;
567
568 if (*cgname == '/')
569 cgname++;
570 slash = strchr(cgname, '/');
571 if (slash)
572 *slash = '\0';
573
574 cgpath = must_make_path(h->mountpoint, h->base_cgroup, cgname, NULL);
575 if (slash)
576 *slash = '/';
577
578 ret = mkdir(cgpath, 0755);
579 if (ret < 0) {
580 if (errno != EEXIST) {
581 SYSERROR("Failed to create directory \"%s\"", cgpath);
582 free(cgpath);
583 return false;
584 }
585 }
586
587 clonechildrenpath =
588 must_make_path(cgpath, "cgroup.clone_children", NULL);
589 /* unified hierarchy doesn't have clone_children */
590 if (!file_exists(clonechildrenpath)) {
591 free(clonechildrenpath);
592 free(cgpath);
593 return true;
594 }
595
596 ret = lxc_read_from_file(clonechildrenpath, &v, 1);
597 if (ret < 0) {
598 SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
599 free(clonechildrenpath);
600 free(cgpath);
601 return false;
602 }
603
604 /* Make sure any isolated cpus are removed from cpuset.cpus. */
605 if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
606 SYSERROR("Failed to remove isolated cpus");
607 free(clonechildrenpath);
608 free(cgpath);
609 return false;
610 }
611
612 /* Already set for us by someone else. */
613 if (v == '1') {
614 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
615 free(clonechildrenpath);
616 free(cgpath);
617 return true;
618 }
619
620 /* copy parent's settings */
621 if (!copy_parent_file(cgpath, "cpuset.mems")) {
622 SYSERROR("Failed to copy \"cpuset.mems\" settings");
623 free(cgpath);
624 free(clonechildrenpath);
625 return false;
626 }
627 free(cgpath);
628
629 ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
630 if (ret < 0) {
631 /* Set clone_children so children inherit our settings */
632 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
633 free(clonechildrenpath);
634 return false;
635 }
636 free(clonechildrenpath);
637 return true;
638 }
639
640 /* Given two null-terminated lists of strings, return true if any string is in
641 * both.
642 */
643 static bool controller_lists_intersect(char **l1, char **l2)
644 {
645 int i;
646
647 if (!l1 || !l2)
648 return false;
649
650 for (i = 0; l1[i]; i++) {
651 if (string_in_list(l2, l1[i]))
652 return true;
653 }
654
655 return false;
656 }
657
658 /* For a null-terminated list of controllers @clist, return true if any of those
659 * controllers is already listed in the null-terminated list of hierarchies @hlist.
660 * Realistically, if one is present, all must be present.
661 */
662 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
663 {
664 int i;
665
666 if (!hlist)
667 return false;
668
669 for (i = 0; hlist[i]; i++)
670 if (controller_lists_intersect(hlist[i]->controllers, clist))
671 return true;
672
673 return false;
674 }
675
676 /* Return true if the controller @entry is found in the null-terminated list of
677 * hierarchies @hlist.
678 */
679 static bool controller_found(struct hierarchy **hlist, char *entry)
680 {
681 int i;
682
683 if (!hlist)
684 return false;
685
686 for (i = 0; hlist[i]; i++)
687 if (string_in_list(hlist[i]->controllers, entry))
688 return true;
689
690 return false;
691 }
692
693 /* Return true if all of the controllers which we require have been found. The
694 * required list is freezer and anything in lxc.cgroup.use.
695 */
696 static bool all_controllers_found(struct cgroup_ops *ops)
697 {
698 char **cur;
699 struct hierarchy **hlist = ops->hierarchies;
700
701 if (!controller_found(hlist, "freezer")) {
702 ERROR("No freezer controller mountpoint found");
703 return false;
704 }
705
706 if (!ops->cgroup_use)
707 return true;
708
709 for (cur = ops->cgroup_use; cur && *cur; cur++)
710 if (!controller_found(hlist, *cur)) {
711 ERROR("No %s controller mountpoint found", *cur);
712 return false;
713 }
714
715 return true;
716 }
717
718 /* Get the controllers from a mountinfo line. There are other ways we could get
719 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
720 * could parse the mount options. But we simply assume that the mountpoint must
721 * be /sys/fs/cgroup/controller-list
722 */
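/* An illustrative mountinfo line for a legacy v1 hierarchy looks like:
 *
 *   34 25 0:29 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,cpu,cpuacct
 *
 * (all numbers are made up). The mountpoint field yields the controller list
 * "cpu,cpuacct", which is split into { "cpu", "cpuacct" } below.
 */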
723 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
724 int type)
725 {
726 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
727 * for legacy hierarchies.
728 */
729 int i;
730 char *dup, *p2, *tok;
731 char *p = line, *sep = ",";
732 char **aret = NULL;
733
734 for (i = 0; i < 4; i++) {
735 p = strchr(p, ' ');
736 if (!p)
737 return NULL;
738 p++;
739 }
740
741 /* Note, if we change how mountinfo works, then our caller will need to
742 * verify /sys/fs/cgroup/ in this field.
743 */
744 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
745 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
746 return NULL;
747 }
748
749 p += 15;
750 p2 = strchr(p, ' ');
751 if (!p2) {
752 ERROR("Corrupt mountinfo");
753 return NULL;
754 }
755 *p2 = '\0';
756
757 if (type == CGROUP_SUPER_MAGIC) {
758 /* strdup() here for v1 hierarchies. Otherwise
759 * lxc_iterate_parts() will destroy mountpoints such as
760 * "/sys/fs/cgroup/cpu,cpuacct".
761 */
762 dup = strdup(p);
763 if (!dup)
764 return NULL;
765
766 lxc_iterate_parts(tok, dup, sep) {
767 must_append_controller(klist, nlist, &aret, tok);
768 }
769
770 free(dup);
771 }
772 *p2 = ' ';
773
774 return aret;
775 }
776
777 static char **cg_unified_make_empty_controller(void)
778 {
779 int newentry;
780 char **aret = NULL;
781
782 newentry = append_null_to_list((void ***)&aret);
783 aret[newentry] = NULL;
784 return aret;
785 }
786
787 static char **cg_unified_get_controllers(const char *file)
788 {
789 char *buf, *tok;
790 char *sep = " \t\n";
791 char **aret = NULL;
792
793 buf = read_file(file);
794 if (!buf)
795 return NULL;
796
797 lxc_iterate_parts(tok, buf, sep) {
798 int newentry;
799 char *copy;
800
801 newentry = append_null_to_list((void ***)&aret);
802 copy = must_copy_string(tok);
803 aret[newentry] = copy;
804 }
805
806 free(buf);
807 return aret;
808 }
809
810 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
811 char *base_cgroup, int type)
812 {
813 struct hierarchy *new;
814 int newentry;
815
816 new = must_alloc(sizeof(*new));
817 new->controllers = clist;
818 new->mountpoint = mountpoint;
819 new->base_cgroup = base_cgroup;
820 new->fullcgpath = NULL;
821 new->version = type;
822
823 newentry = append_null_to_list((void ***)h);
824 (*h)[newentry] = new;
825 return new;
826 }
827
828 /* Get a copy of the mountpoint from @line, which is a line from
829 * /proc/self/mountinfo.
830 */
831 static char *cg_hybrid_get_mountpoint(char *line)
832 {
833 int i;
834 size_t len;
835 char *p2;
836 char *p = line, *sret = NULL;
837
838 for (i = 0; i < 4; i++) {
839 p = strchr(p, ' ');
840 if (!p)
841 return NULL;
842 p++;
843 }
844
845 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
846 return NULL;
847
848 p2 = strchr(p + 15, ' ');
849 if (!p2)
850 return NULL;
851 *p2 = '\0';
852
853 len = strlen(p);
854 sret = must_alloc(len + 1);
855 memcpy(sret, p, len);
856 sret[len] = '\0';
857 return sret;
858 }
859
860 /* Given a multi-line string, return a null-terminated copy of the current line. */
861 static char *copy_to_eol(char *p)
862 {
863 char *p2 = strchr(p, '\n'), *sret;
864 size_t len;
865
866 if (!p2)
867 return NULL;
868
869 len = p2 - p;
870 sret = must_alloc(len + 1);
871 memcpy(sret, p, len);
872 sret[len] = '\0';
873 return sret;
874 }
875
876 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
877 * /proc/self/cgroup file. Check whether controller c is present.
878 */
879 static bool controller_in_clist(char *cgline, char *c)
880 {
881 char *tok, *eol, *tmp;
882 size_t len;
883
884 eol = strchr(cgline, ':');
885 if (!eol)
886 return false;
887
888 len = eol - cgline;
889 tmp = alloca(len + 1);
890 memcpy(tmp, cgline, len);
891 tmp[len] = '\0';
892
893 lxc_iterate_parts(tok, tmp, ",") {
894 if (strcmp(tok, c) == 0)
895 return true;
896 }
897
898 return false;
899 }
900
901 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
902 * @controller.
903 */
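/* Illustrative /proc/self/cgroup content on a hybrid system:
 *
 *   11:memory:/lxc/c1
 *   10:cpu,cpuacct:/lxc/c1
 *   1:name=systemd:/lxc/c1
 *   0::/lxc/c1
 *
 * Asking for the "memory" controller returns "/lxc/c1"; with
 * type == CGROUP2_SUPER_MAGIC the "0::" entry is matched instead.
 */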
904 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
905 int type)
906 {
907 char *p = basecginfo;
908
909 for (;;) {
910 bool is_cgv2_base_cgroup = false;
911
912 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
913 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
914 is_cgv2_base_cgroup = true;
915
916 p = strchr(p, ':');
917 if (!p)
918 return NULL;
919 p++;
920
921 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
922 p = strchr(p, ':');
923 if (!p)
924 return NULL;
925 p++;
926 return copy_to_eol(p);
927 }
928
929 p = strchr(p, '\n');
930 if (!p)
931 return NULL;
932 p++;
933 }
934 }
935
936 static void must_append_string(char ***list, char *entry)
937 {
938 int newentry;
939 char *copy;
940
941 newentry = append_null_to_list((void ***)list);
942 copy = must_copy_string(entry);
943 (*list)[newentry] = copy;
944 }
945
946 static int get_existing_subsystems(char ***klist, char ***nlist)
947 {
948 FILE *f;
949 char *line = NULL;
950 size_t len = 0;
951
952 f = fopen("/proc/self/cgroup", "r");
953 if (!f)
954 return -1;
955
956 while (getline(&line, &len, f) != -1) {
957 char *p, *p2, *tok;
958 p = strchr(line, ':');
959 if (!p)
960 continue;
961 p++;
962 p2 = strchr(p, ':');
963 if (!p2)
964 continue;
965 *p2 = '\0';
966
967 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
968 * contains an entry of the form:
969 *
970 * 0::/some/path
971 *
972 * In this case we use "cgroup2" as controller name.
973 */
974 if ((p2 - p) == 0) {
975 must_append_string(klist, "cgroup2");
976 continue;
977 }
978
979 lxc_iterate_parts(tok, p, ",") {
980 if (strncmp(tok, "name=", 5) == 0)
981 must_append_string(nlist, tok);
982 else
983 must_append_string(klist, tok);
984 }
985 }
986
987 free(line);
988 fclose(f);
989 return 0;
990 }
991
992 static void trim(char *s)
993 {
994 size_t len;
995
996 len = strlen(s);
997 while ((len > 1) && (s[len - 1] == '\n'))
998 s[--len] = '\0';
999 }
1000
1001 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
1002 {
1003 int i;
1004 struct hierarchy **it;
1005
1006 if (!ops->hierarchies) {
1007 TRACE(" No hierarchies found");
1008 return;
1009 }
1010
1011 TRACE(" Hierarchies:");
1012 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
1013 int j;
1014 char **cit;
1015
1016 TRACE(" %d: base_cgroup: %s", i, (*it)->base_cgroup ? (*it)->base_cgroup : "(null)");
1017 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
1018 TRACE(" controllers:");
1019 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
1020 TRACE(" %d: %s", j, *cit);
1021 }
1022 }
1023
1024 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
1025 char **nlist)
1026 {
1027 int k;
1028 char **it;
1029
1030 TRACE("basecginfo is:");
1031 TRACE("%s", basecginfo);
1032
1033 for (k = 0, it = klist; it && *it; it++, k++)
1034 TRACE("kernel subsystem %d: %s", k, *it);
1035
1036 for (k = 0, it = nlist; it && *it; it++, k++)
1037 TRACE("named subsystem %d: %s", k, *it);
1038 }
1039
1040 static int cgroup_rmdir(struct hierarchy **hierarchies,
1041 const char *container_cgroup)
1042 {
1043 int i;
1044
1045 if (!container_cgroup || !hierarchies)
1046 return 0;
1047
1048 for (i = 0; hierarchies[i]; i++) {
1049 int ret;
1050 struct hierarchy *h = hierarchies[i];
1051
1052 if (!h->fullcgpath)
1053 continue;
1054
1055 ret = recursive_destroy(h->fullcgpath);
1056 if (ret < 0)
1057 WARN("Failed to destroy \"%s\"", h->fullcgpath);
1058
1059 free(h->fullcgpath);
1060 h->fullcgpath = NULL;
1061 }
1062
1063 return 0;
1064 }
1065
1066 struct generic_userns_exec_data {
1067 struct hierarchy **hierarchies;
1068 const char *container_cgroup;
1069 struct lxc_conf *conf;
1070 uid_t origuid; /* target uid in parent namespace */
1071 char *path;
1072 };
1073
1074 static int cgroup_rmdir_wrapper(void *data)
1075 {
1076 int ret;
1077 struct generic_userns_exec_data *arg = data;
1078 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1079 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1080
1081 ret = setresgid(nsgid, nsgid, nsgid);
1082 if (ret < 0) {
1083 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
1084 (int)nsgid, (int)nsgid);
1085 return -1;
1086 }
1087
1088 ret = setresuid(nsuid, nsuid, nsuid);
1089 if (ret < 0) {
1090 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
1091 (int)nsuid, (int)nsuid);
1092 return -1;
1093 }
1094
1095 ret = setgroups(0, NULL);
1096 if (ret < 0 && errno != EPERM) {
1097 SYSERROR("Failed to setgroups(0, NULL)");
1098 return -1;
1099 }
1100
1101 return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
1102 }
1103
1104 static void cgfsng_destroy(struct cgroup_ops *ops, struct lxc_handler *handler)
1105 {
1106 int ret;
1107 struct generic_userns_exec_data wrap;
1108
1109 wrap.origuid = 0;
1110 wrap.container_cgroup = ops->container_cgroup;
1111 wrap.hierarchies = ops->hierarchies;
1112 wrap.conf = handler->conf;
1113
1114 if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
1115 ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
1116 "cgroup_rmdir_wrapper");
1117 else
1118 ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
1119 if (ret < 0) {
1120 WARN("Failed to destroy cgroups");
1121 return;
1122 }
1123 }
1124
1125 static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
1126 {
1127 size_t i, parts_len;
1128 char **it;
1129 size_t full_len = 0;
1130 char *add_controllers = NULL, *cgroup = NULL;
1131 char **parts = NULL;
1132 bool bret = false;
1133
1134 if (h->version != CGROUP2_SUPER_MAGIC)
1135 return true;
1136
1137 if (!h->controllers)
1138 return true;
1139
1140 /* For now we simply enable all controllers that we have detected by
1141 * creating a string like "+memory +pids +cpu +io".
1142 * TODO: In the near future we might want to support "-<controller>"
1143 * etc. but whether supporting semantics like this makes sense will need
1144 * some thinking.
1145 */
1146 for (it = h->controllers; it && *it; it++) {
1147 full_len += strlen(*it) + 2;
1148 add_controllers = must_realloc(add_controllers, full_len + 1);
1149
1150 if (h->controllers[0] == *it)
1151 add_controllers[0] = '\0';
1152
1153 (void)strlcat(add_controllers, "+", full_len + 1);
1154 (void)strlcat(add_controllers, *it, full_len + 1);
1155
1156 if ((it + 1) && *(it + 1))
1157 (void)strlcat(add_controllers, " ", full_len + 1);
1158 }
1159
1160 parts = lxc_string_split(cgname, '/');
1161 if (!parts)
1162 goto on_error;
1163
1164 parts_len = lxc_array_len((void **)parts);
1165 if (parts_len > 0)
1166 parts_len--;
1167
1168 cgroup = must_make_path(h->mountpoint, h->base_cgroup, NULL);
1169 for (i = 0; i < parts_len; i++) {
1170 int ret;
1171 char *target;
1172
1173 cgroup = must_append_path(cgroup, parts[i], NULL);
1174 target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
1175 ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
1176 free(target);
1177 if (ret < 0) {
1178 SYSERROR("Could not enable \"%s\" controllers in the "
1179 "unified cgroup \"%s\"", add_controllers, cgroup);
1180 goto on_error;
1181 }
1182 }
1183
1184 bret = true;
1185
1186 on_error:
1187 lxc_free_array((void **)parts, free);
1188 free(add_controllers);
1189 free(cgroup);
1190 return bret;
1191 }
1192
1193 static bool create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1194 {
1195 int ret;
1196
1197 h->fullcgpath = must_make_path(h->mountpoint, h->base_cgroup, cgname, NULL);
1198 if (dir_exists(h->fullcgpath)) {
1199 ERROR("The cgroup \"%s\" already existed", h->fullcgpath);
1200 return false;
1201 }
1202
1203 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1204 ERROR("Failed to handle legacy cpuset controller");
1205 return false;
1206 }
1207
1208 ret = mkdir_p(h->fullcgpath, 0755);
1209 if (ret < 0) {
1210 ERROR("Failed to create cgroup \"%s\"", h->fullcgpath);
1211 return false;
1212 }
1213
1214 return cg_unified_create_cgroup(h, cgname);
1215 }
1216
1217 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname)
1218 {
1219 int ret;
1220
1221 ret = rmdir(h->fullcgpath);
1222 if (ret < 0)
1223 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", h->fullcgpath);
1224
1225 free(h->fullcgpath);
1226 h->fullcgpath = NULL;
1227 }
1228
1229 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1230 * next cgroup_pattern-1, -2, ..., -999.
1231 */
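/* For example, if the expanded pattern is "lxc/c1" (illustrative) and that
 * cgroup already exists in one of the hierarchies, the subsequent attempts are
 * "lxc/c1-1", "lxc/c1-2", ... up to "lxc/c1-999" before giving up.
 */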
1232 static inline bool cgfsng_create(struct cgroup_ops *ops,
1233 struct lxc_handler *handler)
1234 {
1235 int i;
1236 size_t len;
1237 char *container_cgroup, *offset, *tmp;
1238 int idx = 0;
1239 struct lxc_conf *conf = handler->conf;
1240
1241 if (ops->container_cgroup) {
1242 WARN("cgfsng_create called a second time: %s", ops->container_cgroup);
1243 return false;
1244 }
1245
1246 if (!conf)
1247 return false;
1248
1249 if (conf->cgroup_meta.dir)
1250 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1251 else
1252 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1253 if (!tmp) {
1254 ERROR("Failed expanding cgroup name pattern");
1255 return false;
1256 }
1257
1258 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1259 container_cgroup = must_alloc(len);
1260 (void)strlcpy(container_cgroup, tmp, len);
1261 free(tmp);
1262 offset = container_cgroup + len - 5;
1263
1264 again:
1265 if (idx == 1000) {
1266 ERROR("Too many conflicting cgroup names");
1267 goto out_free;
1268 }
1269
1270 if (idx) {
1271 int ret;
1272
1273 ret = snprintf(offset, 5, "-%d", idx);
1274 if (ret < 0 || (size_t)ret >= 5) {
1275 FILE *f = fopen("/dev/null", "w");
1276 if (f) {
1277 fprintf(f, "Workaround for GCC7 bug: "
1278 "https://gcc.gnu.org/bugzilla/"
1279 "show_bug.cgi?id=78969");
1280 fclose(f);
1281 }
1282 }
1283 }
1284
1285 for (i = 0; ops->hierarchies[i]; i++) {
1286 if (!create_path_for_hierarchy(ops->hierarchies[i], container_cgroup)) {
1287 int j;
1288 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->fullcgpath);
1289 free(ops->hierarchies[i]->fullcgpath);
1290 ops->hierarchies[i]->fullcgpath = NULL;
1291 for (j = 0; j < i; j++)
1292 remove_path_for_hierarchy(ops->hierarchies[j], container_cgroup);
1293 idx++;
1294 goto again;
1295 }
1296 }
1297
1298 ops->container_cgroup = container_cgroup;
1299
1300 return true;
1301
1302 out_free:
1303 free(container_cgroup);
1304
1305 return false;
1306 }
1307
1308 static bool cgfsng_enter(struct cgroup_ops *ops, pid_t pid)
1309 {
1310 int i, len;
1311 char pidstr[25];
1312
1313 len = snprintf(pidstr, 25, "%d", pid);
1314 if (len < 0 || len >= 25)
1315 return false;
1316
1317 for (i = 0; ops->hierarchies[i]; i++) {
1318 int ret;
1319 char *fullpath;
1320
1321 fullpath = must_make_path(ops->hierarchies[i]->fullcgpath,
1322 "cgroup.procs", NULL);
1323 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
1324 if (ret != 0) {
1325 SYSERROR("Failed to enter cgroup \"%s\"", fullpath);
1326 free(fullpath);
1327 return false;
1328 }
1329 free(fullpath);
1330 }
1331
1332 return true;
1333 }
1334
1335 static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
1336 mode_t chmod_mode)
1337 {
1338 int ret;
1339
1340 ret = chown(path, chown_uid, chown_gid);
1341 if (ret < 0) {
1342 SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
1343 return -1;
1344 }
1345
1346 ret = chmod(path, chmod_mode);
1347 if (ret < 0) {
1348 SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
1349 return -1;
1350 }
1351
1352 return 0;
1353 }
1354
1355 /* chgrp the container cgroups to container group. We leave
1356 * the container owner as cgroup owner. So we must make the
1357 * directories 775 so that the container can create sub-cgroups.
1358 *
1359 * Also chown the tasks and cgroup.procs files. Those may not
1360 * exist depending on kernel version.
1361 */
1362 static int chown_cgroup_wrapper(void *data)
1363 {
1364 int i, ret;
1365 uid_t destuid;
1366 struct generic_userns_exec_data *arg = data;
1367 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1368 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1369
1370 ret = setresgid(nsgid, nsgid, nsgid);
1371 if (ret < 0) {
1372 SYSERROR("Failed to setresgid(%d, %d, %d)",
1373 (int)nsgid, (int)nsgid, (int)nsgid);
1374 return -1;
1375 }
1376
1377 ret = setresuid(nsuid, nsuid, nsuid);
1378 if (ret < 0) {
1379 SYSERROR("Failed to setresuid(%d, %d, %d)",
1380 (int)nsuid, (int)nsuid, (int)nsuid);
1381 return -1;
1382 }
1383
1384 ret = setgroups(0, NULL);
1385 if (ret < 0 && errno != EPERM) {
1386 SYSERROR("Failed to setgroups(0, NULL)");
1387 return -1;
1388 }
1389
1390 destuid = get_ns_uid(arg->origuid);
1391 if (destuid == LXC_INVALID_UID)
1392 destuid = 0;
1393
1394 for (i = 0; arg->hierarchies[i]; i++) {
1395 char *fullpath;
1396 char *path = arg->hierarchies[i]->fullcgpath;
1397
1398 ret = chowmod(path, destuid, nsgid, 0775);
1399 if (ret < 0)
1400 return -1;
1401
1402 /* Failures to chown() these are inconvenient but not
1403 * detrimental. We leave these owned by the container launcher,
1404 * so that container root can write to the files to attach. We
1405 * chmod() them 664 so that container systemd can write to the
1406 * files (which systemd in wily insists on doing).
1407 */
1408
1409 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1410 fullpath = must_make_path(path, "tasks", NULL);
1411 (void)chowmod(fullpath, destuid, nsgid, 0664);
1412 free(fullpath);
1413 }
1414
1415 fullpath = must_make_path(path, "cgroup.procs", NULL);
1416 (void)chowmod(fullpath, destuid, nsgid, 0664);
1417 free(fullpath);
1418
1419 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1420 continue;
1421
1422 fullpath = must_make_path(path, "cgroup.subtree_control", NULL);
1423 (void)chowmod(fullpath, destuid, nsgid, 0664);
1424 free(fullpath);
1425
1426 fullpath = must_make_path(path, "cgroup.threads", NULL);
1427 (void)chowmod(fullpath, destuid, nsgid, 0664);
1428 free(fullpath);
1429 }
1430
1431 return 0;
1432 }
1433
1434 static bool cgfsng_chown(struct cgroup_ops *ops, struct lxc_conf *conf)
1435 {
1436 struct generic_userns_exec_data wrap;
1437
1438 if (lxc_list_empty(&conf->id_map))
1439 return true;
1440
1441 wrap.origuid = geteuid();
1442 wrap.path = NULL;
1443 wrap.hierarchies = ops->hierarchies;
1444 wrap.conf = conf;
1445
1446 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1447 "chown_cgroup_wrapper") < 0) {
1448 ERROR("Error requesting cgroup chown in new user namespace");
1449 return false;
1450 }
1451
1452 return true;
1453 }
1454
1455 /* cgroup-full:* is done, no need to create subdirs */
1456 static bool cg_mount_needs_subdirs(int type)
1457 {
1458 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1459 return false;
1460
1461 return true;
1462 }
1463
1464 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1465 * remount the controller read-only if needed and bind-mount the cgroupfs onto
1466 * controller/the/cg/path.
1467 */
1468 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1469 char *controllerpath, char *cgpath,
1470 const char *container_cgroup)
1471 {
1472 int ret, remount_flags;
1473 char *sourcepath;
1474 int flags = MS_BIND;
1475
1476 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1477 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1478 if (ret < 0) {
1479 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1480 controllerpath, controllerpath);
1481 return -1;
1482 }
1483
1484 remount_flags = add_required_remount_flags(controllerpath,
1485 controllerpath,
1486 flags | MS_REMOUNT);
1487 ret = mount(controllerpath, controllerpath, "cgroup",
1488 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1489 NULL);
1490 if (ret < 0) {
1491 SYSERROR("Failed to remount \"%s\" ro", controllerpath);
1492 return -1;
1493 }
1494
1495 INFO("Remounted %s read-only", controllerpath);
1496 }
1497
1498 sourcepath = must_make_path(h->mountpoint, h->base_cgroup,
1499 container_cgroup, NULL);
1500 if (type == LXC_AUTO_CGROUP_RO)
1501 flags |= MS_RDONLY;
1502
1503 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1504 if (ret < 0) {
1505 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1506 free(sourcepath);
1507 return -1;
1508 }
1509 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1510
1511 if (flags & MS_RDONLY) {
1512 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1513 flags | MS_REMOUNT);
1514 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1515 if (ret < 0) {
1516 SYSERROR("Failed to remount \"%s\" ro", cgpath);
1517 free(sourcepath);
1518 return -1;
1519 }
1520 INFO("Remounted %s read-only", cgpath);
1521 }
1522
1523 free(sourcepath);
1524 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1525 return 0;
1526 }
1527
1528 /* __cg_mount_direct
1529 *
1530 * Mount cgroup hierarchies directly without using bind-mounts. The main
1531 * use cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1532 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1533 */
1534 static int __cg_mount_direct(int type, struct hierarchy *h,
1535 const char *controllerpath)
1536 {
1537 int ret;
1538 char *controllers = NULL;
1539 char *fstype = "cgroup2";
1540 unsigned long flags = 0;
1541
1542 flags |= MS_NOSUID;
1543 flags |= MS_NOEXEC;
1544 flags |= MS_NODEV;
1545 flags |= MS_RELATIME;
1546
1547 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1548 flags |= MS_RDONLY;
1549
1550 if (h->version != CGROUP2_SUPER_MAGIC) {
1551 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1552 if (!controllers)
1553 return -ENOMEM;
1554 fstype = "cgroup";
1555 }
1556
1557 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1558 free(controllers);
1559 if (ret < 0) {
1560 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1561 return -1;
1562 }
1563
1564 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1565 return 0;
1566 }
1567
1568 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1569 const char *controllerpath)
1570 {
1571 return __cg_mount_direct(type, h, controllerpath);
1572 }
1573
1574 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1575 const char *controllerpath)
1576 {
1577 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1578 return 0;
1579
1580 return __cg_mount_direct(type, h, controllerpath);
1581 }
1582
1583 static bool cgfsng_mount(struct cgroup_ops *ops, struct lxc_handler *handler,
1584 const char *root, int type)
1585 {
1586 int i, ret;
1587 char *tmpfspath = NULL;
1588 bool has_cgns = false, retval = false, wants_force_mount = false;
1589
1590 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1591 return true;
1592
1593 if (type & LXC_AUTO_CGROUP_FORCE) {
1594 type &= ~LXC_AUTO_CGROUP_FORCE;
1595 wants_force_mount = true;
1596 }
1597
1598 if (!wants_force_mount){
1599 if (!lxc_list_empty(&handler->conf->keepcaps))
1600 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1601 else
1602 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1603 }
1604
1605 has_cgns = cgns_supported();
1606 if (has_cgns && !wants_force_mount)
1607 return true;
1608
1609 if (type == LXC_AUTO_CGROUP_NOSPEC)
1610 type = LXC_AUTO_CGROUP_MIXED;
1611 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1612 type = LXC_AUTO_CGROUP_FULL_MIXED;
1613
1614 /* Mount tmpfs */
1615 tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
1616 ret = safe_mount(NULL, tmpfspath, "tmpfs",
1617 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1618 "size=10240k,mode=755", root);
1619 if (ret < 0)
1620 goto on_error;
1621
1622 for (i = 0; ops->hierarchies[i]; i++) {
1623 char *controllerpath, *path2;
1624 struct hierarchy *h = ops->hierarchies[i];
1625 char *controller = strrchr(h->mountpoint, '/');
1626
1627 if (!controller)
1628 continue;
1629 controller++;
1630
1631 controllerpath = must_make_path(tmpfspath, controller, NULL);
1632 if (dir_exists(controllerpath)) {
1633 free(controllerpath);
1634 continue;
1635 }
1636
1637 ret = mkdir(controllerpath, 0755);
1638 if (ret < 0) {
1639 SYSERROR("Error creating cgroup path: %s", controllerpath);
1640 free(controllerpath);
1641 goto on_error;
1642 }
1643
1644 if (has_cgns && wants_force_mount) {
1645 /* If cgroup namespaces are supported but the container
1646 * will not have CAP_SYS_ADMIN after it has started we
1647 * need to mount the cgroups manually.
1648 */
1649 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1650 free(controllerpath);
1651 if (ret < 0)
1652 goto on_error;
1653
1654 continue;
1655 }
1656
1657 ret = cg_mount_cgroup_full(type, h, controllerpath);
1658 if (ret < 0) {
1659 free(controllerpath);
1660 goto on_error;
1661 }
1662
1663 if (!cg_mount_needs_subdirs(type)) {
1664 free(controllerpath);
1665 continue;
1666 }
1667
1668 path2 = must_make_path(controllerpath, h->base_cgroup,
1669 ops->container_cgroup, NULL);
1670 ret = mkdir_p(path2, 0755);
1671 if (ret < 0) {
1672 free(controllerpath);
1673 free(path2);
1674 goto on_error;
1675 }
1676
1677 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1678 path2, ops->container_cgroup);
1679 free(controllerpath);
1680 free(path2);
1681 if (ret < 0)
1682 goto on_error;
1683 }
1684 retval = true;
1685
1686 on_error:
1687 free(tmpfspath);
1688 return retval;
1689 }
1690
1691 static int recursive_count_nrtasks(char *dirname)
1692 {
1693 struct dirent *direntp;
1694 DIR *dir;
1695 int count = 0, ret;
1696 char *path;
1697
1698 dir = opendir(dirname);
1699 if (!dir)
1700 return 0;
1701
1702 while ((direntp = readdir(dir))) {
1703 struct stat mystat;
1704
1705 if (!strcmp(direntp->d_name, ".") ||
1706 !strcmp(direntp->d_name, ".."))
1707 continue;
1708
1709 path = must_make_path(dirname, direntp->d_name, NULL);
1710
1711 if (lstat(path, &mystat))
1712 goto next;
1713
1714 if (!S_ISDIR(mystat.st_mode))
1715 goto next;
1716
1717 count += recursive_count_nrtasks(path);
1718 next:
1719 free(path);
1720 }
1721
1722 path = must_make_path(dirname, "cgroup.procs", NULL);
1723 ret = lxc_count_file_lines(path);
1724 if (ret != -1)
1725 count += ret;
1726 free(path);
1727
1728 (void)closedir(dir);
1729
1730 return count;
1731 }
1732
1733 static int cgfsng_nrtasks(struct cgroup_ops *ops)
1734 {
1735 int count;
1736 char *path;
1737
1738 if (!ops->container_cgroup || !ops->hierarchies)
1739 return -1;
1740
1741 path = must_make_path(ops->hierarchies[0]->fullcgpath, NULL);
1742 count = recursive_count_nrtasks(path);
1743 free(path);
1744 return count;
1745 }
1746
1747 /* Only root needs to escape to the cgroup of its init. */
1748 static bool cgfsng_escape(const struct cgroup_ops *ops, struct lxc_conf *conf)
1749 {
1750 int i;
1751
1752 if (conf->cgroup_meta.relative || geteuid())
1753 return true;
1754
1755 for (i = 0; ops->hierarchies[i]; i++) {
1756 int ret;
1757 char *fullpath;
1758
1759 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1760 ops->hierarchies[i]->base_cgroup,
1761 "cgroup.procs", NULL);
1762 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1763 if (ret != 0) {
1764 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1765 free(fullpath);
1766 return false;
1767 }
1768 free(fullpath);
1769 }
1770
1771 return true;
1772 }
1773
1774 static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1775 {
1776 int i;
1777
1778 for (i = 0; ops->hierarchies[i]; i++)
1779 ;
1780
1781 return i;
1782 }
1783
1784 static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
1785 {
1786 int i;
1787
1788 /* sanity check n */
1789 for (i = 0; i < n; i++)
1790 if (!ops->hierarchies[i])
1791 return false;
1792
1793 *out = ops->hierarchies[i]->controllers;
1794
1795 return true;
1796 }
1797
1798 #define THAWED "THAWED"
1799 #define THAWED_LEN (strlen(THAWED))
1800
1801 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
1802 * to be adapted.
1803 */
1804 static bool cgfsng_unfreeze(struct cgroup_ops *ops)
1805 {
1806 int ret;
1807 char *fullpath;
1808 struct hierarchy *h;
1809
1810 h = get_hierarchy(ops, "freezer");
1811 if (!h)
1812 return false;
1813
1814 fullpath = must_make_path(h->fullcgpath, "freezer.state", NULL);
1815 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
1816 free(fullpath);
1817 if (ret < 0)
1818 return false;
1819
1820 return true;
1821 }
1822
1823 static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
1824 const char *controller)
1825 {
1826 struct hierarchy *h;
1827
1828 h = get_hierarchy(ops, controller);
1829 if (!h) {
1830 WARN("Failed to find hierarchy for controller \"%s\"",
1831 controller ? controller : "(null)");
1832 return NULL;
1833 }
1834
1835 return h->fullcgpath ? h->fullcgpath + strlen(h->mountpoint) : NULL;
1836 }
1837
1838 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
1839 * which must be freed by the caller.
1840 */
1841 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
1842 const char *inpath,
1843 const char *filename)
1844 {
1845 return must_make_path(h->mountpoint, inpath, filename, NULL);
1846 }
1847
1848 /* Technically, we're always at a delegation boundary here (This is especially
1849 * true when cgroup namespaces are available.). The reasoning is that in order
1850 * for us to have been able to start a container in the first place the root
1851 * cgroup must have been a leaf node. Now, either the container's init system
1852 * has populated the cgroup and kept it as a leaf node or it has created
1853 * subtrees. In the former case we will simply attach to the leaf node we
1854 * created when we started the container; in the latter case we create our own
1855 * cgroup for the attaching process.
1856 */
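/* Concretely: the pid is first written to the container cgroup's own
 * "cgroup.procs"; if that fails with EBUSY (the cgroup is no longer a leaf),
 * an "lxc" sub-cgroup (or "lxc-1", "lxc-2", ... on repeated EBUSY) is created
 * below it and the pid is written there instead.
 */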
1857 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
1858 const char *lxcpath, const char *pidstr,
1859 size_t pidstr_len, const char *controller)
1860 {
1861 int ret;
1862 size_t len;
1863 int fret = -1, idx = 0;
1864 char *base_path = NULL, *container_cgroup = NULL, *full_path = NULL;
1865
1866 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
1867 /* not running */
1868 if (!container_cgroup)
1869 return 0;
1870
1871 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
1872 full_path = must_make_path(base_path, "cgroup.procs", NULL);
1873 /* cgroup is populated */
1874 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
1875 if (ret < 0 && errno != EBUSY)
1876 goto on_error;
1877
1878 if (ret == 0)
1879 goto on_success;
1880
1881 free(full_path);
1882
1883 len = strlen(base_path) + sizeof("/lxc-1000") - 1 +
1884 sizeof("/cgroup-procs") - 1;
1885 full_path = must_alloc(len + 1);
1886 do {
1887 if (idx)
1888 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
1889 base_path, idx);
1890 else
1891 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
1892 if (ret < 0 || (size_t)ret >= len + 1)
1893 goto on_error;
1894
1895 ret = mkdir_p(full_path, 0755);
1896 if (ret < 0 && errno != EEXIST)
1897 goto on_error;
1898
1899 (void)strlcat(full_path, "/cgroup.procs", len + 1);
1900 ret = lxc_write_to_file(full_path, pidstr, len, false, 0666);
1901 if (ret == 0)
1902 goto on_success;
1903
1904 /* this is a non-leaf node */
1905 if (errno != EBUSY)
1906 goto on_error;
1907
1908 } while (++idx > 0 && idx < 1000);
1909
1910 on_success:
1911 if (idx < 1000)
1912 fret = 0;
1913
1914 on_error:
1915 free(base_path);
1916 free(container_cgroup);
1917 free(full_path);
1918
1919 return fret;
1920 }
1921
1922 static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
1923 const char *lxcpath, pid_t pid)
1924 {
1925 int i, len, ret;
1926 char pidstr[25];
1927
1928 len = snprintf(pidstr, 25, "%d", pid);
1929 if (len < 0 || len >= 25)
1930 return false;
1931
1932 for (i = 0; ops->hierarchies[i]; i++) {
1933 char *path;
1934 char *fullpath = NULL;
1935 struct hierarchy *h = ops->hierarchies[i];
1936
1937 if (h->version == CGROUP2_SUPER_MAGIC) {
1938 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
1939 h->controllers[0]);
1940 if (ret < 0)
1941 return false;
1942
1943 continue;
1944 }
1945
1946 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
1947 /* not running */
1948 if (!path)
1949 continue;
1950
1951 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
1952 free(path);
1953 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
1954 if (ret < 0) {
1955 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
1956 free(fullpath);
1957 return false;
1958 }
1959 free(fullpath);
1960 }
1961
1962 return true;
1963 }
1964
1965 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
1966 * don't have a cgroup_data set up, so we ask the running container through the
1967 * commands API for the cgroup path.
1968 */
1969 static int cgfsng_get(struct cgroup_ops *ops, const char *filename, char *value,
1970 size_t len, const char *name, const char *lxcpath)
1971 {
1972 int ret = -1;
1973 size_t controller_len;
1974 char *controller, *p, *path;
1975 struct hierarchy *h;
1976
1977 controller_len = strlen(filename);
1978 controller = alloca(controller_len + 1);
1979 (void)strlcpy(controller, filename, controller_len + 1);
1980
1981 p = strchr(controller, '.');
1982 if (p)
1983 *p = '\0';
1984
1985 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
1986 /* not running */
1987 if (!path)
1988 return -1;
1989
1990 h = get_hierarchy(ops, controller);
1991 if (h) {
1992 char *fullpath;
1993
1994 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
1995 ret = lxc_read_from_file(fullpath, value, len);
1996 free(fullpath);
1997 }
1998 free(path);
1999
2000 return ret;
2001 }
2002
2003 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2004 * don't have a cgroup_data set up, so we ask the running container through the
2005 * commands API for the cgroup path.
2006 */
2007 static int cgfsng_set(struct cgroup_ops *ops, const char *filename,
2008 const char *value, const char *name, const char *lxcpath)
2009 {
2010 int ret = -1;
2011 size_t controller_len;
2012 char *controller, *p, *path;
2013 struct hierarchy *h;
2014
2015 controller_len = strlen(filename);
2016 controller = alloca(controller_len + 1);
2017 (void)strlcpy(controller, filename, controller_len + 1);
2018
2019 p = strchr(controller, '.');
2020 if (p)
2021 *p = '\0';
2022
2023 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2024 /* not running */
2025 if (!path)
2026 return -1;
2027
2028 h = get_hierarchy(ops, controller);
2029 if (h) {
2030 char *fullpath;
2031
2032 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2033 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2034 free(fullpath);
2035 }
2036 free(path);
2037
2038 return ret;
2039 }
2040
2041 /* Take a devices cgroup line
2042 * /dev/foo rwx
2043 * and convert it to a valid
2044 * type major:minor mode
2045 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2046 * the output.
2047 */
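/* For example (device numbers are illustrative), the line
 *
 *   /dev/fuse rwm
 *
 * is converted into
 *
 *   c 10:229 rwm
 *
 * i.e. a character device with the major:minor numbers reported by stat().
 */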
2048 static int convert_devpath(const char *invalue, char *dest)
2049 {
2050 int n_parts;
2051 char *p, *path, type;
2052 unsigned long minor, major;
2053 struct stat sb;
2054 int ret = -EINVAL;
2055 char *mode = NULL;
2056
2057 path = must_copy_string(invalue);
2058
2059 /* Read path followed by mode. Ignore any trailing text.
2060 * A ' # comment' would be legal. Technically other text is not
2061 * legal, we could check for that if we cared to.
2062 */
2063 for (n_parts = 1, p = path; *p && n_parts < 3; p++) {
2064 if (*p != ' ')
2065 continue;
2066 *p = '\0';
2067
2068 if (n_parts != 1)
2069 break;
2070 p++;
2071 n_parts++;
2072
2073 while (*p == ' ')
2074 p++;
2075
2076 mode = p;
2077
2078 if (*p == '\0')
2079 goto out;
2080 }
2081
2082 if (n_parts == 1)
2083 goto out;
2084
2085 ret = stat(path, &sb);
2086 if (ret < 0)
2087 goto out;
2088
2089 mode_t m = sb.st_mode & S_IFMT;
2090 switch (m) {
2091 case S_IFBLK:
2092 type = 'b';
2093 break;
2094 case S_IFCHR:
2095 type = 'c';
2096 break;
2097 default:
2098 ERROR("Unsupported device type %i for \"%s\"", m, path);
2099 ret = -EINVAL;
2100 goto out;
2101 }
2102
2103 major = MAJOR(sb.st_rdev);
2104 minor = MINOR(sb.st_rdev);
2105 ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
2106 if (ret < 0 || ret >= 50) {
2107 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2108 "chars)", type, major, minor, mode);
2109 ret = -ENAMETOOLONG;
2110 goto out;
2111 }
2112 ret = 0;
2113
2114 out:
2115 free(path);
2116 return ret;
2117 }
2118
2119 /* Called from setup_limits - here we have the container's cgroup_data because
2120 * we created the cgroups.
2121 */
2122 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2123 const char *value)
2124 {
2125 size_t len;
2126 char *fullpath, *p;
2127 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2128 char converted_value[50];
2129 struct hierarchy *h;
2130 int ret = 0;
2131 char *controller = NULL;
2132
2133 len = strlen(filename);
2134 controller = alloca(len + 1);
2135 (void)strlcpy(controller, filename, len + 1);
2136
2137 p = strchr(controller, '.');
2138 if (p)
2139 *p = '\0';
2140
2141 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2142 ret = convert_devpath(value, converted_value);
2143 if (ret < 0)
2144 return ret;
2145 value = converted_value;
2146 }
2147
2148 h = get_hierarchy(ops, controller);
2149 if (!h) {
2150 ERROR("Failed to set up limits for the \"%s\" controller. "
2151 "The controller seems to be unused by the \"cgfsng\" cgroup "
2152 "driver or not enabled on the cgroup hierarchy",
2153 controller);
2154 errno = ENOENT;
2155 return -ENOENT;
2156 }
2157
2158 fullpath = must_make_path(h->fullcgpath, filename, NULL);
2159 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2160 free(fullpath);
2161 return ret;
2162 }
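/* Illustrative sketch (not wired into this file's call graph): the filename
 * convention cg_legacy_set_data() relies on. Everything before the first '.'
 * names the legacy controller, and the whole string is the file written
 * inside that hierarchy's cgroup, e.g. a hypothetical
 * /sys/fs/cgroup/memory/lxc/c1/memory.limit_in_bytes.
 */
__attribute__((unused)) static void example_legacy_setting_split(void)
{
	const char *filename = "memory.limit_in_bytes";
	char controller[64];
	char *p;

	(void)strlcpy(controller, filename, sizeof(controller));
	p = strchr(controller, '.');
	if (p)
		*p = '\0';

	printf("controller \"%s\", file \"%s\"\n", controller, filename);
}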
2163
2164 static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
2165 struct lxc_list *cgroup_settings,
2166 bool do_devices)
2167 {
2168 struct lxc_list *iterator, *next, *sorted_cgroup_settings;
2169 struct lxc_cgroup *cg;
2170 bool ret = false;
2171
2172 if (lxc_list_empty(cgroup_settings))
2173 return true;
2174
2175 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2176 if (!sorted_cgroup_settings)
2177 return false;
2178
2179 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2180 cg = iterator->elem;
2181
2182 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2183 if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
2184 if (do_devices && (errno == EACCES || errno == EPERM)) {
2185 WARN("Failed to set \"%s\" to \"%s\"",
2186 cg->subsystem, cg->value);
2187 continue;
2188 }
2189 SYSERROR("Failed to set \"%s\" to \"%s\"",
2190 cg->subsystem, cg->value);
2191 goto out;
2192 }
2193 DEBUG("Set controller \"%s\" to \"%s\"",
2194 cg->subsystem, cg->value);
2195 }
2196 }
2197
2198 ret = true;
2199 INFO("Limits for the legacy cgroup hierarchies have been set up");
2200 out:
2201 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2202 lxc_list_del(iterator);
2203 free(iterator);
2204 }
2205 free(sorted_cgroup_settings);
2206 return ret;
2207 }
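/* Illustrative sketch (not wired into this file's call graph): the
 * do_devices filter used above. With do_devices == false only non-devices
 * keys match, with do_devices == true only devices.* keys match, so the
 * same sorted settings list can be applied in two separate passes.
 */
__attribute__((unused)) static void example_devices_filter(void)
{
	const char *keys[] = { "memory.limit_in_bytes", "devices.deny", NULL };
	bool do_devices = true;
	int i;

	for (i = 0; keys[i]; i++)
		if (do_devices == !strncmp("devices", keys[i], 7))
			printf("devices pass handles \"%s\"\n", keys[i]);
}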
2208
2209 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2210 struct lxc_list *cgroup_settings)
2211 {
2212 struct lxc_list *iterator;
2213 struct hierarchy *h = ops->unified;
2214
2215 if (lxc_list_empty(cgroup_settings))
2216 return true;
2217
2218 if (!h)
2219 return false;
2220
2221 lxc_list_for_each(iterator, cgroup_settings) {
2222 int ret;
2223 char *fullpath;
2224 struct lxc_cgroup *cg = iterator->elem;
2225
2226 fullpath = must_make_path(h->fullcgpath, cg->subsystem, NULL);
2227 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2228 free(fullpath);
2229 if (ret < 0) {
2230 SYSERROR("Failed to set \"%s\" to \"%s\"",
2231 cg->subsystem, cg->value);
2232 return false;
2233 }
2234 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2235 }
2236
2237 INFO("Limits for the unified cgroup hierarchy have been set up");
2238 return true;
2239 }
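/* Illustrative sketch (not wired into this file's call graph): for a
 * unified-hierarchy setting the key is used verbatim as the file name, so a
 * "memory.max" = "512M" entry becomes a write of "512M" to
 * <container cgroup>/memory.max. The cgroup path below is hypothetical.
 */
__attribute__((unused)) static void example_unified_setting_path(void)
{
	char *fullpath;

	fullpath = must_make_path("/sys/fs/cgroup/lxc/c1", "memory.max", NULL);
	printf("would write \"512M\" to %s\n", fullpath);
	free(fullpath);
}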
2240
2241 static bool cgfsng_setup_limits(struct cgroup_ops *ops, struct lxc_conf *conf,
2242 bool do_devices)
2243 {
2244 bool bret;
2245
2246 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2247 if (!bret)
2248 return false;
2249
2250 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2251 }
2252
2253 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
2254 char **controllers)
2255 {
2256 char **cur_ctrl, **cur_use;
2257
2258 if (!ops->cgroup_use)
2259 return true;
2260
2261 for (cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
2262 bool found = false;
2263
2264 for (cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
2265 if (strcmp(*cur_use, *cur_ctrl) != 0)
2266 continue;
2267
2268 found = true;
2269 break;
2270 }
2271
2272 if (found)
2273 continue;
2274
2275 return false;
2276 }
2277
2278 return true;
2279 }
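/* Illustrative sketch (not wired into this file's call graph): with
 * lxc.cgroup.use = "memory,pids", a hierarchy offering only "memory" is
 * kept, while one offering "cpu" is skipped because "cpu" is not in the
 * use list. The arrays below are hypothetical test data.
 */
__attribute__((unused)) static void example_cgroup_use_filter(void)
{
	char *use[] = { "memory", "pids", NULL };
	char *memory_only[] = { "memory", NULL };
	char *cpu_only[] = { "cpu", NULL };
	struct cgroup_ops ops = { .cgroup_use = use };

	printf("memory hierarchy kept: %d\n",
	       cgroup_use_wants_controllers(&ops, memory_only));
	printf("cpu hierarchy kept: %d\n",
	       cgroup_use_wants_controllers(&ops, cpu_only));
}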
2280
2281 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
2282 * mountpoints and current cgroups, and stores it in @ops.
2283 */
2284 static bool cg_hybrid_init(struct cgroup_ops *ops, bool relative)
2285 {
2286 int ret;
2287 char *basecginfo;
2288 FILE *f;
2289 size_t len = 0;
2290 char *line = NULL;
2291 char **klist = NULL, **nlist = NULL;
2292
2293 /* Root spawned containers escape the current cgroup, so use init's
2294 * cgroups as our base in that case.
2295 */
2296 if (!relative && (geteuid() == 0))
2297 basecginfo = read_file("/proc/1/cgroup");
2298 else
2299 basecginfo = read_file("/proc/self/cgroup");
2300 if (!basecginfo)
2301 return false;
2302
2303 ret = get_existing_subsystems(&klist, &nlist);
2304 if (ret < 0) {
2305 ERROR("Failed to retrieve available legacy cgroup controllers");
2306 free(basecginfo);
2307 return false;
2308 }
2309
2310 f = fopen("/proc/self/mountinfo", "r");
2311 if (!f) {
2312 ERROR("Failed to open \"/proc/self/mountinfo\"");
2313 free(basecginfo);
2314 return false;
2315 }
2316
2317 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
2318
2319 while (getline(&line, &len, f) != -1) {
2320 int type;
2321 bool writeable;
2322 struct hierarchy *new;
2323 char *base_cgroup = NULL, *mountpoint = NULL;
2324 char **controller_list = NULL;
2325
2326 type = get_cgroup_version(line);
2327 if (type == 0)
2328 continue;
2329
2330 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
2331 continue;
2332
2333 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
2334 if (type == CGROUP2_SUPER_MAGIC)
2335 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2336 else if (type == CGROUP_SUPER_MAGIC)
2337 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
2338 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
2339 if (type == CGROUP_SUPER_MAGIC)
2340 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2341 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
2342 if (type == CGROUP2_SUPER_MAGIC)
2343 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2344 }
2345
2346 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
2347 if (!controller_list && type == CGROUP_SUPER_MAGIC)
2348 continue;
2349
2350 if (type == CGROUP_SUPER_MAGIC)
2351 if (controller_list_is_dup(ops->hierarchies, controller_list))
2352 goto next;
2353
2354 mountpoint = cg_hybrid_get_mountpoint(line);
2355 if (!mountpoint) {
2356 ERROR("Failed parsing mountpoint from \"%s\"", line);
2357 goto next;
2358 }
2359
2360 if (type == CGROUP_SUPER_MAGIC)
2361 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
2362 else
2363 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
2364 if (!base_cgroup) {
2365 ERROR("Failed to find current cgroup");
2366 goto next;
2367 }
2368
2369 trim(base_cgroup);
2370 prune_init_scope(base_cgroup);
2371 if (type == CGROUP2_SUPER_MAGIC)
2372 writeable = test_writeable_v2(mountpoint, base_cgroup);
2373 else
2374 writeable = test_writeable_v1(mountpoint, base_cgroup);
2375 if (!writeable)
2376 goto next;
2377
2378 if (type == CGROUP2_SUPER_MAGIC) {
2379 char *cgv2_ctrl_path;
2380
2381 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
2382 "cgroup.controllers",
2383 NULL);
2384
2385 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
2386 free(cgv2_ctrl_path);
2387 if (!controller_list) {
2388 controller_list = cg_unified_make_empty_controller();
2389 TRACE("No controllers are enabled for "
2390 "delegation in the unified hierarchy");
2391 }
2392 }
2393
2394 /* Exclude all controllers that lxc.cgroup.use does not want. */
2395 if (!cgroup_use_wants_controllers(ops, controller_list))
2396 goto next;
2397
2398 new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
2399 if (type == CGROUP2_SUPER_MAGIC && !ops->unified)
2400 ops->unified = new;
2401
2402 continue;
2403
2404 next:
2405 free_string_list(controller_list);
2406 free(mountpoint);
2407 free(base_cgroup);
2408 }
2409
2410 free_string_list(klist);
2411 free_string_list(nlist);
2412
2413 free(basecginfo);
2414
2415 fclose(f);
2416 free(line);
2417
2418 TRACE("Writable cgroup hierarchies:");
2419 lxc_cgfsng_print_hierarchies(ops);
2420
2421 /* Verify that all controllers in lxc.cgroup.use and all crucial
2422 * controllers are accounted for.
2423 */
2424 if (!all_controllers_found(ops))
2425 return false;
2426
2427 return true;
2428 }
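/* Illustrative sketch (not wired into this file's call graph): the layout
 * detection above starts from CGROUP_LAYOUT_UNKNOWN and is promoted to
 * LEGACY, UNIFIED, or HYBRID depending on which cgroup filesystem types were
 * seen while walking /proc/self/mountinfo. A hybrid host typically has both
 * "cgroup" (v1) and "cgroup2" mount entries under /sys/fs/cgroup.
 */
__attribute__((unused)) static int example_detect_layout(bool saw_v1, bool saw_v2)
{
	if (saw_v1 && saw_v2)
		return CGROUP_LAYOUT_HYBRID;
	if (saw_v2)
		return CGROUP_LAYOUT_UNIFIED;
	if (saw_v1)
		return CGROUP_LAYOUT_LEGACY;
	return CGROUP_LAYOUT_UNKNOWN;
}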
2429
2430 static int cg_is_pure_unified(void)
2431 {
2432
2433 int ret;
2434 struct statfs fs;
2435
2436 ret = statfs("/sys/fs/cgroup", &fs);
2437 if (ret < 0)
2438 return -ENOMEDIUM;
2439
2440 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2441 return CGROUP2_SUPER_MAGIC;
2442
2443 return 0;
2444 }
2445
2446 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
2447 static char *cg_unified_get_current_cgroup(bool relative)
2448 {
2449 char *basecginfo, *base_cgroup;
2450 char *copy = NULL;
2451
2452 if (!relative && (geteuid() == 0))
2453 basecginfo = read_file("/proc/1/cgroup");
2454 else
2455 basecginfo = read_file("/proc/self/cgroup");
2456 if (!basecginfo)
2457 return NULL;
2458
2459 base_cgroup = strstr(basecginfo, "0::/");
2460 if (!base_cgroup)
2461 goto cleanup_on_err;
2462
2463 base_cgroup = base_cgroup + 3;
2464 copy = copy_to_eol(base_cgroup);
2465 if (!copy)
2466 goto cleanup_on_err;
2467
2468 cleanup_on_err:
2469 free(basecginfo);
2470 if (copy)
2471 trim(copy);
2472
2473 return copy;
2474 }
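/* Illustrative sketch (not wired into this file's call graph): on a pure
 * cgroup2 host /proc/self/cgroup contains a single line such as the
 * hypothetical sample below, and the current cgroup is everything after the
 * "0::" prefix.
 */
__attribute__((unused)) static void example_parse_unified_cgroup_line(void)
{
	const char *sample = "0::/user.slice/user-1000.slice/session-2.scope\n";
	const char *base = strstr(sample, "0::/");

	if (base)
		/* Prints "/user.slice/user-1000.slice/session-2.scope". */
		printf("%.*s\n", (int)strcspn(base + 3, "\n"), base + 3);
}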
2475
2476 static int cg_unified_init(struct cgroup_ops *ops, bool relative)
2477 {
2478 int ret;
2479 char *mountpoint, *subtree_path;
2480 char **delegatable;
2481 char *base_cgroup = NULL;
2482
2483 ret = cg_is_pure_unified();
2484 if (ret == -ENOMEDIUM)
2485 return -ENOMEDIUM;
2486
2487 if (ret != CGROUP2_SUPER_MAGIC)
2488 return 0;
2489
2490 base_cgroup = cg_unified_get_current_cgroup(relative);
2491 if (!base_cgroup)
2492 return -EINVAL;
2493 prune_init_scope(base_cgroup);
2494
2495 /* We assume that we have already been given controllers to delegate
2496 * further down the hierarchy. If not, it is up to the user to delegate
2497 * them to us.
2498 */
2499 mountpoint = must_copy_string("/sys/fs/cgroup");
2500 subtree_path = must_make_path(mountpoint, base_cgroup,
2501 "cgroup.subtree_control", NULL);
2502 delegatable = cg_unified_get_controllers(subtree_path);
2503 free(subtree_path);
2504 if (!delegatable)
2505 delegatable = cg_unified_make_empty_controller();
2506 if (!delegatable[0])
2507 TRACE("No controllers are enabled for delegation");
2508
2509 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2510 * we should verify that here. The reason I'm not doing it right now is
2511 * that I'm not convinced that lxc.cgroup.use will be the future since
2512 * it is a global property. I'd much rather have an option that lets you
2513 * request controllers per container.
2514 */
2515
2516 add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2517
2518 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2519 return CGROUP2_SUPER_MAGIC;
2520 }
2521
2522 static bool cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
2523 {
2524 int ret;
2525 const char *tmp;
2526 bool relative = conf->cgroup_meta.relative;
2527
2528 tmp = lxc_global_config_value("lxc.cgroup.use");
2529 if (tmp) {
2530 char *chop, *cur, *pin;
2531
2532 pin = must_copy_string(tmp);
2533 chop = pin;
2534
2535 lxc_iterate_parts(cur, chop, ",") {
2536 must_append_string(&ops->cgroup_use, cur);
2537 }
2538
2539 free(pin);
2540 }
2541
2542 ret = cg_unified_init(ops, relative);
2543 if (ret < 0)
2544 return false;
2545
2546 if (ret == CGROUP2_SUPER_MAGIC)
2547 return true;
2548
2549 return cg_hybrid_init(ops, relative);
2550 }
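/* Illustrative sketch (not wired into this file's call graph): how a global
 * lxc.cgroup.use value is split above. lxc_iterate_parts() tokenizes an
 * in-place copy, so a hypothetical "memory,pids,net_cls" yields the entries
 * "memory", "pids" and "net_cls".
 */
__attribute__((unused)) static void example_split_cgroup_use(void)
{
	char *cur, *chop, *copy;

	copy = must_copy_string("memory,pids,net_cls");
	chop = copy;

	lxc_iterate_parts(cur, chop, ",") {
		printf("use controller: %s\n", cur);
	}

	free(copy);
}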
2551
2552 static bool cgfsng_data_init(struct cgroup_ops *ops)
2553 {
2554 const char *cgroup_pattern;
2555
2556 /* copy system-wide cgroup information */
2557 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2558 if (!cgroup_pattern) {
2559 /* lxc.cgroup.pattern is only NULL on error. */
2560 ERROR("Failed to retrieve cgroup pattern");
2561 return false;
2562 }
2563 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2564
2565 return true;
2566 }
2567
2568 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
2569 {
2570 struct cgroup_ops *cgfsng_ops;
2571
2572 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2573 if (!cgfsng_ops)
2574 return NULL;
2575
2576 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2577 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2578
2579 if (!cg_init(cgfsng_ops, conf)) {
2580 free(cgfsng_ops);
2581 return NULL;
2582 }
2583
2584 cgfsng_ops->data_init = cgfsng_data_init;
2585 cgfsng_ops->destroy = cgfsng_destroy;
2586 cgfsng_ops->create = cgfsng_create;
2587 cgfsng_ops->enter = cgfsng_enter;
2588 cgfsng_ops->escape = cgfsng_escape;
2589 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2590 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2591 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2592 cgfsng_ops->get = cgfsng_get;
2593 cgfsng_ops->set = cgfsng_set;
2594 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2595 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2596 cgfsng_ops->driver = "cgfsng";
2597 cgfsng_ops->version = "1.0.0";
2598 cgfsng_ops->attach = cgfsng_attach;
2599 cgfsng_ops->chown = cgfsng_chown;
2600 cgfsng_ops->mount = cgfsng_mount;
2601 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2602
2603 return cgfsng_ops;
2604 }
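/* Illustrative sketch (not wired into this file's call graph; the real
 * callers live elsewhere in LXC): how a caller might drive the ops table
 * returned above. Error handling is elided.
 */
__attribute__((unused)) static void example_use_cgfsng_ops(struct lxc_conf *conf)
{
	struct cgroup_ops *ops;

	ops = cgfsng_ops_init(conf);
	if (!ops)
		return;

	/* Pull in the system-wide cgroup pattern before creating cgroups. */
	if (ops->data_init(ops))
		printf("cgroup driver %s (version %s) initialized\n",
		       ops->driver, ops->version);
}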