1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
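/* Purely illustrative (not part of the original code): on a typical hybrid
 * host the layout this backend expects might look roughly like
 *
 *   /sys/fs/cgroup/cpu,cpuacct   - legacy hierarchy, clist "cpu,cpuacct"
 *   /sys/fs/cgroup/memory        - legacy hierarchy, clist "memory"
 *   /sys/fs/cgroup/systemd       - named hierarchy, handled as "name=systemd"
 *   /sys/fs/cgroup/unified       - cgroup2 hierarchy with an empty clist
 *
 * The exact directory names and the presence of a unified mount vary between
 * distributions and init systems.
 */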
36
37 #include "config.h"
38
39 #include <ctype.h>
40 #include <dirent.h>
41 #include <errno.h>
42 #include <grp.h>
43 #include <stdint.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <linux/kdev_t.h>
49 #include <linux/types.h>
50 #include <sys/types.h>
51
52 #include "caps.h"
53 #include "cgroup.h"
54 #include "cgroup_utils.h"
55 #include "commands.h"
56 #include "conf.h"
57 #include "log.h"
58 #include "storage/storage.h"
59 #include "utils.h"
60
61 lxc_log_define(lxc_cgfsng, lxc);
62
63 static void free_string_list(char **clist)
64 {
65 int i;
66
67 if (!clist)
68 return;
69
70 for (i = 0; clist[i]; i++)
71 free(clist[i]);
72
73 free(clist);
74 }
75
76 /* Allocate a pointer, do not fail. */
77 static void *must_alloc(size_t sz)
78 {
79 return must_realloc(NULL, sz);
80 }
81
82 /* Given a pointer to a null-terminated array of pointers, realloc to add one
83 * entry, and point the new entry to NULL. Do not fail. Return the index to the
84 * second-to-last entry - that is, the one which is now available for use
85 * (keeping the list null-terminated).
86 */
87 static int append_null_to_list(void ***list)
88 {
89 int newentry = 0;
90
91 if (*list)
92 for (; (*list)[newentry]; newentry++)
93 ;
94
95 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
96 (*list)[newentry + 1] = NULL;
97 return newentry;
98 }
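/* Illustrative usage sketch (not part of the original code), showing how the
 * helper above keeps a list NULL-terminated while growing it:
 *
 *   char **list = NULL;
 *   int idx = append_null_to_list((void ***)&list);
 *   list[idx] = must_copy_string("cpu");     // list is {"cpu", NULL}
 *   idx = append_null_to_list((void ***)&list);
 *   list[idx] = must_copy_string("memory");  // list is {"cpu", "memory", NULL}
 */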
99
100 /* Given a null-terminated array of strings, check whether @entry is one of the
101 * strings.
102 */
103 static bool string_in_list(char **list, const char *entry)
104 {
105 int i;
106
107 if (!list)
108 return false;
109
110 for (i = 0; list[i]; i++)
111 if (strcmp(list[i], entry) == 0)
112 return true;
113
114 return false;
115 }
116
117 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
118 * "name=systemd". Do not fail.
119 */
120 static char *cg_legacy_must_prefix_named(char *entry)
121 {
122 size_t len;
123 char *prefixed;
124
125 len = strlen(entry);
126 prefixed = must_alloc(len + 6);
127
128 memcpy(prefixed, "name=", sizeof("name=") - 1);
129 memcpy(prefixed + sizeof("name=") - 1, entry, len);
130 prefixed[len + 5] = '\0';
131 return prefixed;
132 }
133
134 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
135 * we are called.
136 *
137 * We also handle named subsystems here. Any controller which is not a kernel
138 * subsystem gets prefixed with "name=". Any controller which is both a kernel
139 * and a named subsystem is refused, because we cannot tell which one we have here.
140 * (TODO: We could work around this in some cases by just remounting to be
141 * unambiguous, or by comparing mountpoint contents with current cgroup.)
142 *
143 * The last entry will always be NULL.
144 */
145 static void must_append_controller(char **klist, char **nlist, char ***clist,
146 char *entry)
147 {
148 int newentry;
149 char *copy;
150
151 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
152 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
153 ERROR("It is both a named and kernel subsystem");
154 return;
155 }
156
157 newentry = append_null_to_list((void ***)clist);
158
159 if (strncmp(entry, "name=", 5) == 0)
160 copy = must_copy_string(entry);
161 else if (string_in_list(klist, entry))
162 copy = must_copy_string(entry);
163 else
164 copy = cg_legacy_must_prefix_named(entry);
165
166 (*clist)[newentry] = copy;
167 }
168
169 /* Given a handler's cgroup data, return the struct hierarchy for the controller
170 * @c, or NULL if there is none.
171 */
172 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *c)
173 {
174 int i;
175
176 if (!ops->hierarchies)
177 return NULL;
178
179 for (i = 0; ops->hierarchies[i]; i++) {
180 if (!c) {
181 /* This is the empty unified hierarchy. */
182 if (ops->hierarchies[i]->controllers &&
183 !ops->hierarchies[i]->controllers[0])
184 return ops->hierarchies[i];
185
186 continue;
187 }
188
189 if (string_in_list(ops->hierarchies[i]->controllers, c))
190 return ops->hierarchies[i];
191 }
192
193 return NULL;
194 }
195
196 #define BATCH_SIZE 50
197 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
198 {
199 int newbatches = (newlen / BATCH_SIZE) + 1;
200 int oldbatches = (oldlen / BATCH_SIZE) + 1;
201
202 if (!*mem || newbatches > oldbatches) {
203 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
204 }
205 }
206
207 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
208 {
209 size_t full = oldlen + newlen;
210
211 batch_realloc(dest, oldlen, full + 1);
212
213 memcpy(*dest + oldlen, new, newlen + 1);
214 }
215
216 /* Slurp in a whole file */
217 static char *read_file(const char *fnam)
218 {
219 FILE *f;
220 char *line = NULL, *buf = NULL;
221 size_t len = 0, fulllen = 0;
222 int linelen;
223
224 f = fopen(fnam, "r");
225 if (!f)
226 return NULL;
227 while ((linelen = getline(&line, &len, f)) != -1) {
228 append_line(&buf, fulllen, line, linelen);
229 fulllen += linelen;
230 }
231 fclose(f);
232 free(line);
233 return buf;
234 }
235
236 /* Taken over, with modifications, from the kernel sources. */
237 #define NBITS 32 /* bits in uint32_t */
238 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
239 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
240
241 static void set_bit(unsigned bit, uint32_t *bitarr)
242 {
243 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
244 }
245
246 static void clear_bit(unsigned bit, uint32_t *bitarr)
247 {
248 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
249 }
250
251 static bool is_set(unsigned bit, uint32_t *bitarr)
252 {
253 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
254 }
255
256 /* Create cpumask from cpulist aka turn:
257 *
258 * 0,2-3
259 *
260 * into bit array
261 *
262 * 1 0 1 1
263 */
264 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
265 {
266 char *token;
267 size_t arrlen;
268 uint32_t *bitarr;
269 char *saveptr = NULL;
270
271 arrlen = BITS_TO_LONGS(nbits);
272 bitarr = calloc(arrlen, sizeof(uint32_t));
273 if (!bitarr)
274 return NULL;
275
276 for (; (token = strtok_r(buf, ",", &saveptr)); buf = NULL) {
277 errno = 0;
278 unsigned end, start;
279 char *range;
280
281 start = strtoul(token, NULL, 0);
282 end = start;
283 range = strchr(token, '-');
284 if (range)
285 end = strtoul(range + 1, NULL, 0);
286
287 if (!(start <= end)) {
288 free(bitarr);
289 return NULL;
290 }
291
292 if (end >= nbits) {
293 free(bitarr);
294 return NULL;
295 }
296
297 while (start <= end)
298 set_bit(start++, bitarr);
299 }
300
301 return bitarr;
302 }
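/* Example (illustrative, not part of the original code): lxc_cpumask("0,2-3", 4)
 * returns a bit array with bits 0, 2 and 3 set, i.e. bitarr[0] == 0xd. Note
 * that strtok_r() modifies @buf, so callers must pass in a writable copy.
 */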
303
304 /* Turn cpumask into simple, comma-separated cpulist. */
305 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
306 {
307 int ret;
308 size_t i;
309 char **cpulist = NULL;
310 char numstr[LXC_NUMSTRLEN64] = {0};
311
312         for (i = 0; i < nbits; i++) {
313 if (!is_set(i, bitarr))
314 continue;
315
316 ret = snprintf(numstr, LXC_NUMSTRLEN64, "%zu", i);
317 if (ret < 0 || (size_t)ret >= LXC_NUMSTRLEN64) {
318 lxc_free_array((void **)cpulist, free);
319 return NULL;
320 }
321
322 ret = lxc_append_string(&cpulist, numstr);
323 if (ret < 0) {
324 lxc_free_array((void **)cpulist, free);
325 return NULL;
326 }
327 }
328
329 if (!cpulist)
330 return NULL;
331
332 return lxc_string_join(",", (const char **)cpulist, false);
333 }
334
335 static ssize_t get_max_cpus(char *cpulist)
336 {
337 char *c1, *c2;
338 char *maxcpus = cpulist;
339 size_t cpus = 0;
340
341 c1 = strrchr(maxcpus, ',');
342 if (c1)
343 c1++;
344
345 c2 = strrchr(maxcpus, '-');
346 if (c2)
347 c2++;
348
349 if (!c1 && !c2)
350 c1 = maxcpus;
351 else if (c1 > c2)
352 c2 = c1;
353 else if (c1 < c2)
354 c1 = c2;
355 else if (!c1 && c2)
356 c1 = c2;
357
358 errno = 0;
359 cpus = strtoul(c1, NULL, 0);
360 if (errno != 0)
361 return -1;
362
363 return cpus;
364 }
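/* Example (illustrative, not part of the original code): for the cpulist
 * "0-3,7" the last ','/'-'-delimited token is "7" and get_max_cpus() returns
 * 7; for "0,2-3" it returns 3. Assuming the kernel-provided list is sorted,
 * this is the highest cpu number, not a count of cpus.
 */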
365
366 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
367 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
368 {
369 int ret;
370 ssize_t i;
371 char *lastslash, *fpath, oldv;
372 ssize_t maxisol = 0, maxposs = 0;
373 char *cpulist = NULL, *isolcpus = NULL, *posscpus = NULL;
374 uint32_t *isolmask = NULL, *possmask = NULL;
375 bool bret = false, flipped_bit = false;
376
377 lastslash = strrchr(path, '/');
378 if (!lastslash) {
379 ERROR("Failed to detect \"/\" in \"%s\"", path);
380 return bret;
381 }
382 oldv = *lastslash;
383 *lastslash = '\0';
384 fpath = must_make_path(path, "cpuset.cpus", NULL);
385 posscpus = read_file(fpath);
386 if (!posscpus) {
387 SYSERROR("Failed to read file \"%s\"", fpath);
388 goto on_error;
389 }
390
391 /* Get maximum number of cpus found in possible cpuset. */
392 maxposs = get_max_cpus(posscpus);
393 if (maxposs < 0)
394 goto on_error;
395
396 if (!file_exists(__ISOL_CPUS)) {
397 /* This system doesn't expose isolated cpus. */
398 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
399 cpulist = posscpus;
400 /* No isolated cpus but we weren't already initialized by
401 * someone. We should simply copy the parent's cpuset.cpus
402 * values.
403 */
404 if (!am_initialized) {
405 DEBUG("Copying cpu settings of parent cgroup");
406 goto copy_parent;
407 }
408 /* No isolated cpus but we were already initialized by someone.
409 * Nothing more to do for us.
410 */
411 goto on_success;
412 }
413
414 isolcpus = read_file(__ISOL_CPUS);
415 if (!isolcpus) {
416 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
417 goto on_error;
418 }
419 if (!isdigit(isolcpus[0])) {
420 TRACE("No isolated cpus detected");
421 cpulist = posscpus;
422 /* No isolated cpus but we weren't already initialized by
423 * someone. We should simply copy the parent's cpuset.cpus
424 * values.
425 */
426 if (!am_initialized) {
427 DEBUG("Copying cpu settings of parent cgroup");
428 goto copy_parent;
429 }
430 /* No isolated cpus but we were already initialized by someone.
431 * Nothing more to do for us.
432 */
433 goto on_success;
434 }
435
436 /* Get maximum number of cpus found in isolated cpuset. */
437 maxisol = get_max_cpus(isolcpus);
438 if (maxisol < 0)
439 goto on_error;
440
441 if (maxposs < maxisol)
442 maxposs = maxisol;
443 maxposs++;
444
445 possmask = lxc_cpumask(posscpus, maxposs);
446 if (!possmask) {
447 ERROR("Failed to create cpumask for possible cpus");
448 goto on_error;
449 }
450
451 isolmask = lxc_cpumask(isolcpus, maxposs);
452 if (!isolmask) {
453 ERROR("Failed to create cpumask for isolated cpus");
454 goto on_error;
455 }
456
457         for (i = 0; i < maxposs; i++) {
458 if (!is_set(i, isolmask) || !is_set(i, possmask))
459 continue;
460
461 flipped_bit = true;
462 clear_bit(i, possmask);
463 }
464
465 if (!flipped_bit) {
466 DEBUG("No isolated cpus present in cpuset");
467 goto on_success;
468 }
469 DEBUG("Removed isolated cpus from cpuset");
470
471 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
472 if (!cpulist) {
473 ERROR("Failed to create cpu list");
474 goto on_error;
475 }
476
477 copy_parent:
478 *lastslash = oldv;
479 free(fpath);
480 fpath = must_make_path(path, "cpuset.cpus", NULL);
481 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
482 if (ret < 0) {
483 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
484 goto on_error;
485 }
486
487 on_success:
488 bret = true;
489
490 on_error:
491 free(fpath);
492
493 free(isolcpus);
494 free(isolmask);
495
496 if (posscpus != cpulist)
497 free(posscpus);
498 free(possmask);
499
500 free(cpulist);
501 return bret;
502 }
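/* Worked example (illustrative, not part of the original code): if the
 * parent's cpuset.cpus contains "0-7" and /sys/devices/system/cpu/isolated
 * contains "3-4", the two masks intersect on cpus 3 and 4, those bits are
 * cleared from the possible mask, and "0,1,2,5,6,7" is written to this
 * cgroup's cpuset.cpus. Without any isolated cpus the parent's value is
 * either copied verbatim (when the cgroup was not yet initialized) or left
 * untouched.
 */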
503
504 /* Copy contents of parent(@path)/@file to @path/@file */
505 static bool copy_parent_file(char *path, char *file)
506 {
507 int ret;
508 char *fpath, *lastslash, oldv;
509 int len = 0;
510 char *value = NULL;
511
512 lastslash = strrchr(path, '/');
513 if (!lastslash) {
514 ERROR("Failed to detect \"/\" in \"%s\"", path);
515 return false;
516 }
517 oldv = *lastslash;
518 *lastslash = '\0';
519 fpath = must_make_path(path, file, NULL);
520 len = lxc_read_from_file(fpath, NULL, 0);
521 if (len <= 0)
522 goto on_error;
523
524 value = must_alloc(len + 1);
525 ret = lxc_read_from_file(fpath, value, len);
526 if (ret != len)
527 goto on_error;
528 free(fpath);
529
530 *lastslash = oldv;
531 fpath = must_make_path(path, file, NULL);
532 ret = lxc_write_to_file(fpath, value, len, false, 0666);
533 if (ret < 0)
534 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, fpath);
535 free(fpath);
536 free(value);
537 return ret >= 0;
538
539 on_error:
540 SYSERROR("Failed to read file \"%s\"", fpath);
541 free(fpath);
542 free(value);
543 return false;
544 }
545
546 /* Initialize the cpuset hierarchy in the first directory of @cgname and set
547 * cgroup.clone_children so that children inherit settings. Since the
548 * h->base_cgroup is populated by init or ourselves, we know it is already
549 * initialized.
550 */
551 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
552 {
553 int ret;
554 char v;
555 char *cgpath, *clonechildrenpath, *slash;
556
557 if (!string_in_list(h->controllers, "cpuset"))
558 return true;
559
560 if (*cgname == '/')
561 cgname++;
562 slash = strchr(cgname, '/');
563 if (slash)
564 *slash = '\0';
565
566 cgpath = must_make_path(h->mountpoint, h->base_cgroup, cgname, NULL);
567 if (slash)
568 *slash = '/';
569
570 ret = mkdir(cgpath, 0755);
571 if (ret < 0) {
572 if (errno != EEXIST) {
573 SYSERROR("Failed to create directory \"%s\"", cgpath);
574 free(cgpath);
575 return false;
576 }
577 }
578
579 clonechildrenpath =
580 must_make_path(cgpath, "cgroup.clone_children", NULL);
581 /* unified hierarchy doesn't have clone_children */
582 if (!file_exists(clonechildrenpath)) {
583 free(clonechildrenpath);
584 free(cgpath);
585 return true;
586 }
587
588 ret = lxc_read_from_file(clonechildrenpath, &v, 1);
589 if (ret < 0) {
590 SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
591 free(clonechildrenpath);
592 free(cgpath);
593 return false;
594 }
595
596 /* Make sure any isolated cpus are removed from cpuset.cpus. */
597 if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
598 SYSERROR("Failed to remove isolated cpus");
599 free(clonechildrenpath);
600 free(cgpath);
601 return false;
602 }
603
604 /* Already set for us by someone else. */
605 if (v == '1') {
606 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
607 free(clonechildrenpath);
608 free(cgpath);
609 return true;
610 }
611
612 /* copy parent's settings */
613 if (!copy_parent_file(cgpath, "cpuset.mems")) {
614 SYSERROR("Failed to copy \"cpuset.mems\" settings");
615 free(cgpath);
616 free(clonechildrenpath);
617 return false;
618 }
619 free(cgpath);
620
621 ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
622 if (ret < 0) {
623 /* Set clone_children so children inherit our settings */
624 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
625 free(clonechildrenpath);
626 return false;
627 }
628 free(clonechildrenpath);
629 return true;
630 }
631
632 /* Given two null-terminated lists of strings, return true if any string is in
633 * both.
634 */
635 static bool controller_lists_intersect(char **l1, char **l2)
636 {
637 int i;
638
639 if (!l1 || !l2)
640 return false;
641
642 for (i = 0; l1[i]; i++) {
643 if (string_in_list(l2, l1[i]))
644 return true;
645 }
646
647 return false;
648 }
649
650 /* For a null-terminated list of controllers @clist, return true if any of those
651 * controllers is already listed in the null-terminated list of hierarchies @hlist.
652 * Realistically, if one is present, all must be present.
653 */
654 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
655 {
656 int i;
657
658 if (!hlist)
659 return false;
660
661 for (i = 0; hlist[i]; i++)
662 if (controller_lists_intersect(hlist[i]->controllers, clist))
663 return true;
664
665 return false;
666 }
667
668 /* Return true if the controller @entry is found in the null-terminated list of
669 * hierarchies @hlist.
670 */
671 static bool controller_found(struct hierarchy **hlist, char *entry)
672 {
673 int i;
674
675 if (!hlist)
676 return false;
677
678 for (i = 0; hlist[i]; i++)
679 if (string_in_list(hlist[i]->controllers, entry))
680 return true;
681
682 return false;
683 }
684
685 /* Return true if all of the controllers which we require have been found. The
686 * required list is freezer and anything in lxc.cgroup.use.
687 */
688 static bool all_controllers_found(struct cgroup_ops *ops)
689 {
690 char *p;
691 char *saveptr = NULL;
692 struct hierarchy **hlist = ops->hierarchies;
693
694 if (!controller_found(hlist, "freezer")) {
695 ERROR("No freezer controller mountpoint found");
696 return false;
697 }
698
699 if (!ops->cgroup_use)
700 return true;
701
702 for (; (p = strtok_r(ops->cgroup_use, ",", &saveptr)); ops->cgroup_use = NULL)
703 if (!controller_found(hlist, p)) {
704 ERROR("No %s controller mountpoint found", p);
705 return false;
706 }
707
708 return true;
709 }
710
711 /* Get the controllers from a mountinfo line. There are other ways we could get
712 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
713 * could parse the mount options. But we simply assume that the mountpoint must
714 * be /sys/fs/cgroup/controller-list
715 */
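/* For reference (illustrative, not part of the original code), a matching
 * /proc/self/mountinfo line looks roughly like:
 *
 *   34 25 0:29 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,cpu,cpuacct
 *
 * The parser below skips the first four space-separated fields and expects
 * the next one (the mount point) to start with "/sys/fs/cgroup/".
 */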
716 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
717 int type)
718 {
719 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
720 * for legacy hierarchies.
721 */
722 int i;
723 char *dup, *p2, *tok;
724 char *p = line, *saveptr = NULL, *sep = ",";
725 char **aret = NULL;
726
727 for (i = 0; i < 4; i++) {
728 p = strchr(p, ' ');
729 if (!p)
730 return NULL;
731 p++;
732 }
733
734 /* Note, if we change how mountinfo works, then our caller will need to
735 * verify /sys/fs/cgroup/ in this field.
736 */
737 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
738 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
739 return NULL;
740 }
741
742 p += 15;
743 p2 = strchr(p, ' ');
744 if (!p2) {
745 ERROR("Corrupt mountinfo");
746 return NULL;
747 }
748 *p2 = '\0';
749
750 if (type == CGROUP_SUPER_MAGIC) {
751 /* strdup() here for v1 hierarchies. Otherwise strtok_r() will
752 * destroy mountpoints such as "/sys/fs/cgroup/cpu,cpuacct".
753 */
754 dup = strdup(p);
755 if (!dup)
756 return NULL;
757
758 for (tok = strtok_r(dup, sep, &saveptr); tok;
759 tok = strtok_r(NULL, sep, &saveptr))
760 must_append_controller(klist, nlist, &aret, tok);
761
762 free(dup);
763 }
764 *p2 = ' ';
765
766 return aret;
767 }
768
769 static char **cg_unified_make_empty_controller(void)
770 {
771 int newentry;
772 char **aret = NULL;
773
774 newentry = append_null_to_list((void ***)&aret);
775 aret[newentry] = NULL;
776 return aret;
777 }
778
779 static char **cg_unified_get_controllers(const char *file)
780 {
781 char *buf, *tok;
782 char *saveptr = NULL, *sep = " \t\n";
783 char **aret = NULL;
784
785 buf = read_file(file);
786 if (!buf)
787 return NULL;
788
789 for (tok = strtok_r(buf, sep, &saveptr); tok;
790 tok = strtok_r(NULL, sep, &saveptr)) {
791 int newentry;
792 char *copy;
793
794 newentry = append_null_to_list((void ***)&aret);
795 copy = must_copy_string(tok);
796 aret[newentry] = copy;
797 }
798
799 free(buf);
800 return aret;
801 }
802
803 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
804 char *base_cgroup, int type)
805 {
806 struct hierarchy *new;
807 int newentry;
808
809 new = must_alloc(sizeof(*new));
810 new->controllers = clist;
811 new->mountpoint = mountpoint;
812 new->base_cgroup = base_cgroup;
813 new->fullcgpath = NULL;
814 new->version = type;
815
816 newentry = append_null_to_list((void ***)h);
817 (*h)[newentry] = new;
818 return new;
819 }
820
821 /* Get a copy of the mountpoint from @line, which is a line from
822 * /proc/self/mountinfo.
823 */
824 static char *cg_hybrid_get_mountpoint(char *line)
825 {
826 int i;
827 size_t len;
828 char *p2;
829 char *p = line, *sret = NULL;
830
831 for (i = 0; i < 4; i++) {
832 p = strchr(p, ' ');
833 if (!p)
834 return NULL;
835 p++;
836 }
837
838 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
839 return NULL;
840
841 p2 = strchr(p + 15, ' ');
842 if (!p2)
843 return NULL;
844 *p2 = '\0';
845
846 len = strlen(p);
847 sret = must_alloc(len + 1);
848 memcpy(sret, p, len);
849 sret[len] = '\0';
850 return sret;
851 }
852
853 /* Given a multi-line string, return a null-terminated copy of the current line. */
854 static char *copy_to_eol(char *p)
855 {
856 char *p2 = strchr(p, '\n'), *sret;
857 size_t len;
858
859 if (!p2)
860 return NULL;
861
862 len = p2 - p;
863 sret = must_alloc(len + 1);
864 memcpy(sret, p, len);
865 sret[len] = '\0';
866 return sret;
867 }
868
869 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
870 * /proc/self/cgroup file. Check whether controller c is present.
871 */
872 static bool controller_in_clist(char *cgline, char *c)
873 {
874 char *tok, *saveptr = NULL, *eol, *tmp;
875 size_t len;
876
877 eol = strchr(cgline, ':');
878 if (!eol)
879 return false;
880
881 len = eol - cgline;
882 tmp = alloca(len + 1);
883 memcpy(tmp, cgline, len);
884 tmp[len] = '\0';
885
886 for (tok = strtok_r(tmp, ",", &saveptr); tok;
887 tok = strtok_r(NULL, ",", &saveptr)) {
888 if (strcmp(tok, c) == 0)
889 return true;
890 }
891
892 return false;
893 }
894
895 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
896 * @controller.
897 */
898 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
899 int type)
900 {
901 char *p = basecginfo;
902
903 for (;;) {
904 bool is_cgv2_base_cgroup = false;
905
906 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
907 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
908 is_cgv2_base_cgroup = true;
909
910 p = strchr(p, ':');
911 if (!p)
912 return NULL;
913 p++;
914
915 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
916 p = strchr(p, ':');
917 if (!p)
918 return NULL;
919 p++;
920 return copy_to_eol(p);
921 }
922
923 p = strchr(p, '\n');
924 if (!p)
925 return NULL;
926 p++;
927 }
928 }
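/* Example (illustrative, not part of the original code) of the @basecginfo
 * text parsed above, i.e. the contents of /proc/<pid>/cgroup on a hybrid
 * system:
 *
 *   12:cpu,cpuacct:/user.slice
 *   3:memory:/user.slice
 *   1:name=systemd:/user.slice/user-1000.slice/session-2.scope
 *   0::/user.slice/user-1000.slice/session-2.scope
 *
 * Asking for controller "memory" would return "/user.slice", while a lookup
 * with type == CGROUP2_SUPER_MAGIC matches the "0::" line.
 */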
929
930 static void must_append_string(char ***list, char *entry)
931 {
932 int newentry;
933 char *copy;
934
935 newentry = append_null_to_list((void ***)list);
936 copy = must_copy_string(entry);
937 (*list)[newentry] = copy;
938 }
939
940 static int get_existing_subsystems(char ***klist, char ***nlist)
941 {
942 FILE *f;
943 char *line = NULL;
944 size_t len = 0;
945
946 f = fopen("/proc/self/cgroup", "r");
947 if (!f)
948 return -1;
949
950 while (getline(&line, &len, f) != -1) {
951 char *p, *p2, *tok, *saveptr = NULL;
952 p = strchr(line, ':');
953 if (!p)
954 continue;
955 p++;
956 p2 = strchr(p, ':');
957 if (!p2)
958 continue;
959 *p2 = '\0';
960
961 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
962 * contains an entry of the form:
963 *
964 * 0::/some/path
965 *
966 * In this case we use "cgroup2" as controller name.
967 */
968 if ((p2 - p) == 0) {
969 must_append_string(klist, "cgroup2");
970 continue;
971 }
972
973 for (tok = strtok_r(p, ",", &saveptr); tok;
974 tok = strtok_r(NULL, ",", &saveptr)) {
975 if (strncmp(tok, "name=", 5) == 0)
976 must_append_string(nlist, tok);
977 else
978 must_append_string(klist, tok);
979 }
980 }
981
982 free(line);
983 fclose(f);
984 return 0;
985 }
986
987 static void trim(char *s)
988 {
989 size_t len;
990
991 len = strlen(s);
992 while ((len > 1) && (s[len - 1] == '\n'))
993 s[--len] = '\0';
994 }
995
996 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
997 {
998 int i;
999 struct hierarchy **it;
1000
1001 if (!ops->hierarchies) {
1002 TRACE(" No hierarchies found");
1003 return;
1004 }
1005
1006 TRACE(" Hierarchies:");
1007 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
1008 int j;
1009 char **cit;
1010
1011 TRACE(" %d: base_cgroup: %s", i, (*it)->base_cgroup ? (*it)->base_cgroup : "(null)");
1012 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
1013 TRACE(" controllers:");
1014 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
1015 TRACE(" %d: %s", j, *cit);
1016 }
1017 }
1018
1019 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
1020 char **nlist)
1021 {
1022 int k;
1023 char **it;
1024
1025 TRACE("basecginfo is:");
1026 TRACE("%s", basecginfo);
1027
1028 for (k = 0, it = klist; it && *it; it++, k++)
1029 TRACE("kernel subsystem %d: %s", k, *it);
1030
1031 for (k = 0, it = nlist; it && *it; it++, k++)
1032 TRACE("named subsystem %d: %s", k, *it);
1033 }
1034
1035 static int recursive_destroy(char *dirname)
1036 {
1037 int ret;
1038 struct dirent *direntp;
1039 DIR *dir;
1040 int r = 0;
1041
1042 dir = opendir(dirname);
1043 if (!dir)
1044 return -1;
1045
1046 while ((direntp = readdir(dir))) {
1047 char *pathname;
1048 struct stat mystat;
1049
1050 if (!strcmp(direntp->d_name, ".") ||
1051 !strcmp(direntp->d_name, ".."))
1052 continue;
1053
1054 pathname = must_make_path(dirname, direntp->d_name, NULL);
1055
1056 ret = lstat(pathname, &mystat);
1057 if (ret < 0) {
1058 if (!r)
1059 WARN("Failed to stat \"%s\"", pathname);
1060 r = -1;
1061 goto next;
1062 }
1063
1064 if (!S_ISDIR(mystat.st_mode))
1065 goto next;
1066
1067 ret = recursive_destroy(pathname);
1068 if (ret < 0)
1069 r = -1;
1070 next:
1071 free(pathname);
1072 }
1073
1074 ret = rmdir(dirname);
1075 if (ret < 0) {
1076 if (!r)
1077 WARN("%s - Failed to delete \"%s\"", strerror(errno), dirname);
1078 r = -1;
1079 }
1080
1081 ret = closedir(dir);
1082 if (ret < 0) {
1083 if (!r)
1084 WARN("%s - Failed to delete \"%s\"", strerror(errno), dirname);
1085 r = -1;
1086 }
1087
1088 return r;
1089 }
1090
1091 static int cgroup_rmdir(struct hierarchy **hierarchies,
1092 const char *container_cgroup)
1093 {
1094 int i;
1095
1096 if (!container_cgroup || !hierarchies)
1097 return 0;
1098
1099 for (i = 0; hierarchies[i]; i++) {
1100 int ret;
1101 struct hierarchy *h = hierarchies[i];
1102
1103 if (!h->fullcgpath)
1104 continue;
1105
1106 ret = recursive_destroy(h->fullcgpath);
1107 if (ret < 0)
1108 WARN("Failed to destroy \"%s\"", h->fullcgpath);
1109
1110 free(h->fullcgpath);
1111 h->fullcgpath = NULL;
1112 }
1113
1114 return 0;
1115 }
1116
1117 struct generic_userns_exec_data {
1118 struct hierarchy **hierarchies;
1119 const char *container_cgroup;
1120 struct lxc_conf *conf;
1121 uid_t origuid; /* target uid in parent namespace */
1122 char *path;
1123 };
1124
1125 static int cgroup_rmdir_wrapper(void *data)
1126 {
1127 int ret;
1128 struct generic_userns_exec_data *arg = data;
1129 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1130 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1131
1132 ret = setresgid(nsgid, nsgid, nsgid);
1133 if (ret < 0) {
1134 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
1135 (int)nsgid, (int)nsgid);
1136 return -1;
1137 }
1138
1139 ret = setresuid(nsuid, nsuid, nsuid);
1140 if (ret < 0) {
1141 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
1142 (int)nsuid, (int)nsuid);
1143 return -1;
1144 }
1145
1146 ret = setgroups(0, NULL);
1147 if (ret < 0 && errno != EPERM) {
1148 SYSERROR("Failed to setgroups(0, NULL)");
1149 return -1;
1150 }
1151
1152 return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
1153 }
1154
1155 static void cgfsng_destroy(struct cgroup_ops *ops, struct lxc_handler *handler)
1156 {
1157 int ret;
1158 struct generic_userns_exec_data wrap;
1159
1160 wrap.origuid = 0;
1161 wrap.container_cgroup = ops->container_cgroup;
1162 wrap.hierarchies = ops->hierarchies;
1163 wrap.conf = handler->conf;
1164
1165 if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
1166 ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
1167 "cgroup_rmdir_wrapper");
1168 else
1169 ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
1170 if (ret < 0) {
1171 WARN("Failed to destroy cgroups");
1172 return;
1173 }
1174 }
1175
1176 static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
1177 {
1178 size_t i, parts_len;
1179 char **it;
1180 size_t full_len = 0;
1181 char *add_controllers = NULL, *cgroup = NULL;
1182 char **parts = NULL;
1183 bool bret = false;
1184
1185 if (h->version != CGROUP2_SUPER_MAGIC)
1186 return true;
1187
1188 if (!h->controllers)
1189 return true;
1190
1191 /* For now we simply enable all controllers that we have detected by
1192 * creating a string like "+memory +pids +cpu +io".
1193 * TODO: In the near future we might want to support "-<controller>"
1194 * etc. but whether supporting semantics like this makes sense will need
1195 * some thinking.
1196 */
1197 for (it = h->controllers; it && *it; it++) {
1198 full_len += strlen(*it) + 2;
1199 add_controllers = must_realloc(add_controllers, full_len + 1);
1200 if (h->controllers[0] == *it)
1201 add_controllers[0] = '\0';
1202 strcat(add_controllers, "+");
1203 strcat(add_controllers, *it);
1204 if ((it + 1) && *(it + 1))
1205 strcat(add_controllers, " ");
1206 }
1207
1208 parts = lxc_string_split(cgname, '/');
1209 if (!parts)
1210 goto on_error;
1211 parts_len = lxc_array_len((void **)parts);
1212 if (parts_len > 0)
1213 parts_len--;
1214
1215 cgroup = must_make_path(h->mountpoint, h->base_cgroup, NULL);
1216 for (i = 0; i < parts_len; i++) {
1217 int ret;
1218 char *target;
1219
1220 cgroup = must_append_path(cgroup, parts[i], NULL);
1221 target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
1222 ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
1223 free(target);
1224 if (ret < 0) {
1225 SYSERROR("Could not enable \"%s\" controllers in the "
1226 "unified cgroup \"%s\"", add_controllers, cgroup);
1227 goto on_error;
1228 }
1229 }
1230
1231 bret = true;
1232
1233 on_error:
1234 lxc_free_array((void **)parts, free);
1235 free(add_controllers);
1236 free(cgroup);
1237 return bret;
1238 }
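/* Example (illustrative, not part of the original code): for cgname "lxc/c1"
 * with detected controllers "cpu", "memory" and "pids", the loop above writes
 * "+cpu +memory +pids" to <mountpoint>/<base_cgroup>/lxc/cgroup.subtree_control
 * so that the leaf "c1" may use those controllers. The last path component is
 * deliberately skipped (parts_len is decremented) because enabling controllers
 * in the cgroup that will hold the container's processes would conflict with
 * cgroup2's "no internal processes" rule.
 */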
1239
1240 static bool create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1241 {
1242 int ret;
1243
1244 h->fullcgpath = must_make_path(h->mountpoint, h->base_cgroup, cgname, NULL);
1245 if (dir_exists(h->fullcgpath)) {
1246 ERROR("The cgroup \"%s\" already existed", h->fullcgpath);
1247 return false;
1248 }
1249
1250 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1251 ERROR("Failed to handle legacy cpuset controller");
1252 return false;
1253 }
1254
1255 ret = mkdir_p(h->fullcgpath, 0755);
1256 if (ret < 0) {
1257 ERROR("Failed to create cgroup \"%s\"", h->fullcgpath);
1258 return false;
1259 }
1260
1261 return cg_unified_create_cgroup(h, cgname);
1262 }
1263
1264 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname)
1265 {
1266 int ret;
1267
1268 ret = rmdir(h->fullcgpath);
1269 if (ret < 0)
1270 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", h->fullcgpath);
1271
1272 free(h->fullcgpath);
1273 h->fullcgpath = NULL;
1274 }
1275
1276 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1277 * next cgroup_pattern-1, -2, ..., -999.
1278 */
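/* Illustrative example (not part of the original code): with a pattern such
 * as "lxc/%n" and a container named "c1" the first attempt is "lxc/c1"; if
 * that cgroup already exists in some hierarchy, the suffixed names "lxc/c1-1",
 * "lxc/c1-2", ... "lxc/c1-999" are tried before giving up. The concrete
 * pattern comes from lxc.cgroup.pattern or lxc.cgroup.dir and may differ.
 */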
1279 static inline bool cgfsng_create(struct cgroup_ops *ops,
1280 struct lxc_handler *handler)
1281 {
1282 int i;
1283 size_t len;
1284 char *container_cgroup, *offset, *tmp;
1285 int idx = 0;
1286 struct lxc_conf *conf = handler->conf;
1287
1288 if (ops->container_cgroup) {
1289 WARN("cgfsng_create called a second time: %s", ops->container_cgroup);
1290 return false;
1291 }
1292
1293 if (!conf)
1294 return false;
1295
1296 if (conf->cgroup_meta.dir)
1297 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1298 else
1299 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1300 if (!tmp) {
1301 ERROR("Failed expanding cgroup name pattern");
1302 return false;
1303 }
1304 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1305 container_cgroup = must_alloc(len);
1306 strcpy(container_cgroup, tmp);
1307 free(tmp);
1308 offset = container_cgroup + len - 5;
1309
1310 again:
1311 if (idx == 1000) {
1312 ERROR("Too many conflicting cgroup names");
1313 goto out_free;
1314 }
1315
1316 if (idx) {
1317 int ret;
1318
1319 ret = snprintf(offset, 5, "-%d", idx);
1320 if (ret < 0 || (size_t)ret >= 5) {
1321 FILE *f = fopen("/dev/null", "w");
1322 if (f) {
1323 fprintf(f, "Workaround for GCC7 bug: "
1324 "https://gcc.gnu.org/bugzilla/"
1325 "show_bug.cgi?id=78969");
1326 fclose(f);
1327 }
1328 }
1329 }
1330
1331 for (i = 0; ops->hierarchies[i]; i++) {
1332 if (!create_path_for_hierarchy(ops->hierarchies[i], container_cgroup)) {
1333 int j;
1334 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->fullcgpath);
1335 free(ops->hierarchies[i]->fullcgpath);
1336 ops->hierarchies[i]->fullcgpath = NULL;
1337 for (j = 0; j < i; j++)
1338 remove_path_for_hierarchy(ops->hierarchies[j], container_cgroup);
1339 idx++;
1340 goto again;
1341 }
1342 }
1343
1344 ops->container_cgroup = container_cgroup;
1345
1346 return true;
1347
1348 out_free:
1349 free(container_cgroup);
1350
1351 return false;
1352 }
1353
1354 static bool cgfsng_enter(struct cgroup_ops *ops, pid_t pid)
1355 {
1356 int i, len;
1357 char pidstr[25];
1358
1359 len = snprintf(pidstr, 25, "%d", pid);
1360 if (len < 0 || len >= 25)
1361 return false;
1362
1363 for (i = 0; ops->hierarchies[i]; i++) {
1364 int ret;
1365 char *fullpath;
1366
1367 fullpath = must_make_path(ops->hierarchies[i]->fullcgpath,
1368 "cgroup.procs", NULL);
1369 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
1370 if (ret != 0) {
1371 SYSERROR("Failed to enter cgroup \"%s\"", fullpath);
1372 free(fullpath);
1373 return false;
1374 }
1375 free(fullpath);
1376 }
1377
1378 return true;
1379 }
1380
1381 static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
1382 mode_t chmod_mode)
1383 {
1384 int ret;
1385
1386 ret = chown(path, chown_uid, chown_gid);
1387 if (ret < 0) {
1388 WARN("%s - Failed to chown(%s, %d, %d)", strerror(errno), path,
1389 (int)chown_uid, (int)chown_gid);
1390 return -1;
1391 }
1392
1393 ret = chmod(path, chmod_mode);
1394 if (ret < 0) {
1395 WARN("%s - Failed to chmod(%s, %d)", strerror(errno), path,
1396 (int)chmod_mode);
1397 return -1;
1398 }
1399
1400 return 0;
1401 }
1402
1403 /* chgrp the container cgroups to container group. We leave
1404 * the container owner as cgroup owner. So we must make the
1405 * directories 775 so that the container can create sub-cgroups.
1406 *
1407 * Also chown the tasks and cgroup.procs files. Those may not
1408 * exist depending on kernel version.
1409 */
1410 static int chown_cgroup_wrapper(void *data)
1411 {
1412 int i, ret;
1413 uid_t destuid;
1414 struct generic_userns_exec_data *arg = data;
1415 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1416 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1417
1418 ret = setresgid(nsgid, nsgid, nsgid);
1419 if (ret < 0) {
1420 SYSERROR("Failed to setresgid(%d, %d, %d)",
1421 (int)nsgid, (int)nsgid, (int)nsgid);
1422 return -1;
1423 }
1424
1425 ret = setresuid(nsuid, nsuid, nsuid);
1426 if (ret < 0) {
1427 SYSERROR("Failed to setresuid(%d, %d, %d)",
1428 (int)nsuid, (int)nsuid, (int)nsuid);
1429 return -1;
1430 }
1431
1432 ret = setgroups(0, NULL);
1433 if (ret < 0 && errno != EPERM) {
1434 SYSERROR("Failed to setgroups(0, NULL)");
1435 return -1;
1436 }
1437
1438 destuid = get_ns_uid(arg->origuid);
1439
1440 for (i = 0; arg->hierarchies[i]; i++) {
1441 char *fullpath;
1442 char *path = arg->hierarchies[i]->fullcgpath;
1443
1444 ret = chowmod(path, destuid, nsgid, 0775);
1445 if (ret < 0)
1446 return -1;
1447
1448 /* Failures to chown() these are inconvenient but not
1449 * detrimental. We leave these owned by the container launcher,
1450 * so that container root can write to the files to attach. We
1451 * chmod() them 664 so that container systemd can write to the
1452 * files (which systemd in wily insists on doing).
1453 */
1454
1455 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1456 fullpath = must_make_path(path, "tasks", NULL);
1457 (void)chowmod(fullpath, destuid, nsgid, 0664);
1458 free(fullpath);
1459 }
1460
1461 fullpath = must_make_path(path, "cgroup.procs", NULL);
1462 (void)chowmod(fullpath, destuid, nsgid, 0664);
1463 free(fullpath);
1464
1465 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1466 continue;
1467
1468 fullpath = must_make_path(path, "cgroup.subtree_control", NULL);
1469 (void)chowmod(fullpath, destuid, nsgid, 0664);
1470 free(fullpath);
1471
1472 fullpath = must_make_path(path, "cgroup.threads", NULL);
1473 (void)chowmod(fullpath, destuid, nsgid, 0664);
1474 free(fullpath);
1475 }
1476
1477 return 0;
1478 }
1479
1480 static bool cgfsng_chown(struct cgroup_ops *ops, struct lxc_conf *conf)
1481 {
1482 struct generic_userns_exec_data wrap;
1483
1484 if (lxc_list_empty(&conf->id_map))
1485 return true;
1486
1487 wrap.origuid = geteuid();
1488 wrap.path = NULL;
1489 wrap.hierarchies = ops->hierarchies;
1490 wrap.conf = conf;
1491
1492 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1493 "chown_cgroup_wrapper") < 0) {
1494 ERROR("Error requesting cgroup chown in new user namespace");
1495 return false;
1496 }
1497
1498 return true;
1499 }
1500
1501 /* cgroup-full:* is done, no need to create subdirs */
1502 static bool cg_mount_needs_subdirs(int type)
1503 {
1504 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1505 return false;
1506
1507 return true;
1508 }
1509
1510 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1511 * remount the controller read-only if needed and bind-mount the cgroupfs onto
1512 * controller/the/cg/path.
1513 */
1514 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1515 char *controllerpath, char *cgpath,
1516 const char *container_cgroup)
1517 {
1518 int ret, remount_flags;
1519 char *sourcepath;
1520 int flags = MS_BIND;
1521
1522 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1523 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1524 if (ret < 0) {
1525 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1526 controllerpath, controllerpath);
1527 return -1;
1528 }
1529
1530 remount_flags = add_required_remount_flags(controllerpath,
1531 controllerpath,
1532 flags | MS_REMOUNT);
1533 ret = mount(controllerpath, controllerpath, "cgroup",
1534 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1535 NULL);
1536 if (ret < 0) {
1537 SYSERROR("Failed to remount \"%s\" ro", controllerpath);
1538 return -1;
1539 }
1540
1541 INFO("Remounted %s read-only", controllerpath);
1542 }
1543
1544 sourcepath = must_make_path(h->mountpoint, h->base_cgroup,
1545 container_cgroup, NULL);
1546 if (type == LXC_AUTO_CGROUP_RO)
1547 flags |= MS_RDONLY;
1548
1549 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1550 if (ret < 0) {
1551 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1552 free(sourcepath);
1553 return -1;
1554 }
1555 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1556
1557 if (flags & MS_RDONLY) {
1558 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1559 flags | MS_REMOUNT);
1560 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1561 if (ret < 0) {
1562 SYSERROR("Failed to remount \"%s\" ro", cgpath);
1563 free(sourcepath);
1564 return -1;
1565 }
1566 INFO("Remounted %s read-only", cgpath);
1567 }
1568
1569 free(sourcepath);
1570 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1571 return 0;
1572 }
1573
1574 /* __cg_mount_direct
1575 *
1576 * Mount cgroup hierarchies directly without using bind-mounts. The main
1577 * use-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1578 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1579 */
1580 static int __cg_mount_direct(int type, struct hierarchy *h,
1581 const char *controllerpath)
1582 {
1583 int ret;
1584 char *controllers = NULL;
1585 char *fstype = "cgroup2";
1586 unsigned long flags = 0;
1587
1588 flags |= MS_NOSUID;
1589 flags |= MS_NOEXEC;
1590 flags |= MS_NODEV;
1591 flags |= MS_RELATIME;
1592
1593 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1594 flags |= MS_RDONLY;
1595
1596 if (h->version != CGROUP2_SUPER_MAGIC) {
1597 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1598 if (!controllers)
1599 return -ENOMEM;
1600 fstype = "cgroup";
1601 }
1602
1603 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1604 free(controllers);
1605 if (ret < 0) {
1606 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1607 return -1;
1608 }
1609
1610 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1611 return 0;
1612 }
1613
1614 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1615 const char *controllerpath)
1616 {
1617 return __cg_mount_direct(type, h, controllerpath);
1618 }
1619
1620 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1621 const char *controllerpath)
1622 {
1623 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1624 return 0;
1625
1626 return __cg_mount_direct(type, h, controllerpath);
1627 }
1628
1629 static bool cgfsng_mount(struct cgroup_ops *ops, struct lxc_handler *handler,
1630 const char *root, int type)
1631 {
1632 int i, ret;
1633 char *tmpfspath = NULL;
1634 bool has_cgns = false, retval = false, wants_force_mount = false;
1635
1636 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1637 return true;
1638
1639 if (type & LXC_AUTO_CGROUP_FORCE) {
1640 type &= ~LXC_AUTO_CGROUP_FORCE;
1641 wants_force_mount = true;
1642 }
1643
1644 if (!wants_force_mount){
1645 if (!lxc_list_empty(&handler->conf->keepcaps))
1646 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1647 else
1648 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1649 }
1650
1651 has_cgns = cgns_supported();
1652 if (has_cgns && !wants_force_mount)
1653 return true;
1654
1655 if (type == LXC_AUTO_CGROUP_NOSPEC)
1656 type = LXC_AUTO_CGROUP_MIXED;
1657 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1658 type = LXC_AUTO_CGROUP_FULL_MIXED;
1659
1660 /* Mount tmpfs */
1661 tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
1662 ret = safe_mount(NULL, tmpfspath, "tmpfs",
1663 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1664 "size=10240k,mode=755", root);
1665 if (ret < 0)
1666 goto on_error;
1667
1668 for (i = 0; ops->hierarchies[i]; i++) {
1669 char *controllerpath, *path2;
1670 struct hierarchy *h = ops->hierarchies[i];
1671 char *controller = strrchr(h->mountpoint, '/');
1672
1673 if (!controller)
1674 continue;
1675 controller++;
1676
1677 controllerpath = must_make_path(tmpfspath, controller, NULL);
1678 if (dir_exists(controllerpath)) {
1679 free(controllerpath);
1680 continue;
1681 }
1682
1683 ret = mkdir(controllerpath, 0755);
1684 if (ret < 0) {
1685 SYSERROR("Error creating cgroup path: %s", controllerpath);
1686 free(controllerpath);
1687 goto on_error;
1688 }
1689
1690 if (has_cgns && wants_force_mount) {
1691 /* If cgroup namespaces are supported but the container
1692 * will not have CAP_SYS_ADMIN after it has started we
1693 * need to mount the cgroups manually.
1694 */
1695 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1696 free(controllerpath);
1697 if (ret < 0)
1698 goto on_error;
1699
1700 continue;
1701 }
1702
1703 ret = cg_mount_cgroup_full(type, h, controllerpath);
1704 if (ret < 0) {
1705 free(controllerpath);
1706 goto on_error;
1707 }
1708
1709 if (!cg_mount_needs_subdirs(type)) {
1710 free(controllerpath);
1711 continue;
1712 }
1713
1714 path2 = must_make_path(controllerpath, h->base_cgroup,
1715 ops->container_cgroup, NULL);
1716 ret = mkdir_p(path2, 0755);
1717 if (ret < 0) {
1718 free(controllerpath);
1719 free(path2);
1720 goto on_error;
1721 }
1722
1723 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1724 path2, ops->container_cgroup);
1725 free(controllerpath);
1726 free(path2);
1727 if (ret < 0)
1728 goto on_error;
1729 }
1730 retval = true;
1731
1732 on_error:
1733 free(tmpfspath);
1734 return retval;
1735 }
1736
1737 static int recursive_count_nrtasks(char *dirname)
1738 {
1739 struct dirent *direntp;
1740 DIR *dir;
1741 int count = 0, ret;
1742 char *path;
1743
1744 dir = opendir(dirname);
1745 if (!dir)
1746 return 0;
1747
1748 while ((direntp = readdir(dir))) {
1749 struct stat mystat;
1750
1751 if (!direntp)
1752 break;
1753
1754 if (!strcmp(direntp->d_name, ".") ||
1755 !strcmp(direntp->d_name, ".."))
1756 continue;
1757
1758 path = must_make_path(dirname, direntp->d_name, NULL);
1759
1760 if (lstat(path, &mystat))
1761 goto next;
1762
1763 if (!S_ISDIR(mystat.st_mode))
1764 goto next;
1765
1766 count += recursive_count_nrtasks(path);
1767 next:
1768 free(path);
1769 }
1770
1771 path = must_make_path(dirname, "cgroup.procs", NULL);
1772 ret = lxc_count_file_lines(path);
1773 if (ret != -1)
1774 count += ret;
1775 free(path);
1776
1777 (void)closedir(dir);
1778
1779 return count;
1780 }
1781
1782 static int cgfsng_nrtasks(struct cgroup_ops *ops)
1783 {
1784 int count;
1785 char *path;
1786
1787 if (!ops->container_cgroup || !ops->hierarchies)
1788 return -1;
1789
1790 path = must_make_path(ops->hierarchies[0]->fullcgpath, NULL);
1791 count = recursive_count_nrtasks(path);
1792 free(path);
1793 return count;
1794 }
1795
1796 /* Only root needs to escape to the cgroup of its init. */
1797 static bool cgfsng_escape(const struct cgroup_ops *ops)
1798 {
1799 int i;
1800
1801 if (geteuid())
1802 return true;
1803
1804 for (i = 0; ops->hierarchies[i]; i++) {
1805 int ret;
1806 char *fullpath;
1807
1808 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1809 ops->hierarchies[i]->base_cgroup,
1810 "cgroup.procs", NULL);
1811 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1812 if (ret != 0) {
1813 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1814 free(fullpath);
1815 return false;
1816 }
1817 free(fullpath);
1818 }
1819
1820 return true;
1821 }
1822
1823 static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1824 {
1825 int i;
1826
1827 for (i = 0; ops->hierarchies[i]; i++)
1828 ;
1829
1830 return i;
1831 }
1832
1833 static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
1834 {
1835 int i;
1836
1837 /* sanity check n */
1838 for (i = 0; i < n; i++)
1839 if (!ops->hierarchies[i])
1840 return false;
1841
1842 *out = ops->hierarchies[i]->controllers;
1843
1844 return true;
1845 }
1846
1847 #define THAWED "THAWED"
1848 #define THAWED_LEN (strlen(THAWED))
1849
1850 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
1851 * to be adapted.
1852 */
1853 static bool cgfsng_unfreeze(struct cgroup_ops *ops)
1854 {
1855 int ret;
1856 char *fullpath;
1857 struct hierarchy *h;
1858
1859 h = get_hierarchy(ops, "freezer");
1860 if (!h)
1861 return false;
1862
1863 fullpath = must_make_path(h->fullcgpath, "freezer.state", NULL);
1864 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
1865 free(fullpath);
1866 if (ret < 0)
1867 return false;
1868
1869 return true;
1870 }
1871
1872 static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
1873 const char *controller)
1874 {
1875 struct hierarchy *h;
1876
1877 h = get_hierarchy(ops, controller);
1878 if (!h) {
1879 WARN("Failed to find hierarchy for controller \"%s\"",
1880 controller ? controller : "(null)");
1881 return NULL;
1882 }
1883
1884 return h->fullcgpath ? h->fullcgpath + strlen(h->mountpoint) : NULL;
1885 }
1886
1887 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
1888 * which must be freed by the caller.
1889 */
1890 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
1891 const char *inpath,
1892 const char *filename)
1893 {
1894 return must_make_path(h->mountpoint, inpath, filename, NULL);
1895 }
1896
1897 /* Technically, we're always at a delegation boundary here (this is especially
1898 * true when cgroup namespaces are available). The reasoning is that in order
1899 * for us to have been able to start a container in the first place, the root
1900 * cgroup must have been a leaf node. Now, either the container's init system
1901 * has populated the cgroup and kept it as a leaf node, or it has created
1902 * subtrees. In the former case we simply attach to the leaf node we created
1903 * when we started the container; in the latter case we create our own cgroup
1904 * for the attaching process.
1905 */
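/* Illustrative example (not part of the original code): if writing the pid to
 * <mountpoint>/<container cgroup>/cgroup.procs fails with EBUSY because the
 * cgroup has become a non-leaf node, the code below falls back to creating
 * <container cgroup>/lxc, then lxc-1, lxc-2, ... and attaches to the first
 * subdirectory whose cgroup.procs accepts the pid.
 */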
1906 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
1907 const char *lxcpath, const char *pidstr,
1908 size_t pidstr_len, const char *controller)
1909 {
1910 int ret;
1911 size_t len;
1912 int fret = -1, idx = 0;
1913 char *base_path = NULL, *container_cgroup = NULL, *full_path = NULL;
1914
1915 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
1916 /* not running */
1917 if (!container_cgroup)
1918 return 0;
1919
1920 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
1921 full_path = must_make_path(base_path, "cgroup.procs", NULL);
1922 /* cgroup is populated */
1923 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
1924 if (ret < 0 && errno != EBUSY)
1925 goto on_error;
1926
1927 if (ret == 0)
1928 goto on_success;
1929
1930 free(full_path);
1931
1932 len = strlen(base_path) + sizeof("/lxc-1000") - 1 +
1933 sizeof("/cgroup-procs") - 1;
1934 full_path = must_alloc(len + 1);
1935 do {
1936 if (idx)
1937 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
1938 base_path, idx);
1939 else
1940 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
1941 if (ret < 0 || (size_t)ret >= len + 1)
1942 goto on_error;
1943
1944 ret = mkdir_p(full_path, 0755);
1945 if (ret < 0 && errno != EEXIST)
1946 goto on_error;
1947
1948 strcat(full_path, "/cgroup.procs");
1949                 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
1950 if (ret == 0)
1951 goto on_success;
1952
1953 /* this is a non-leaf node */
1954 if (errno != EBUSY)
1955 goto on_error;
1956
1957 } while (++idx > 0 && idx < 1000);
1958
1959 on_success:
1960 if (idx < 1000)
1961 fret = 0;
1962
1963 on_error:
1964 free(base_path);
1965 free(container_cgroup);
1966 free(full_path);
1967
1968 return fret;
1969 }
1970
1971 static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
1972 const char *lxcpath, pid_t pid)
1973 {
1974 int i, len, ret;
1975 char pidstr[25];
1976
1977 len = snprintf(pidstr, 25, "%d", pid);
1978 if (len < 0 || len >= 25)
1979 return false;
1980
1981 for (i = 0; ops->hierarchies[i]; i++) {
1982 char *path;
1983 char *fullpath = NULL;
1984 struct hierarchy *h = ops->hierarchies[i];
1985
1986 if (h->version == CGROUP2_SUPER_MAGIC) {
1987 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
1988 h->controllers[0]);
1989 if (ret < 0)
1990 return false;
1991
1992 continue;
1993 }
1994
1995 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
1996 /* not running */
1997 if (!path)
1998 continue;
1999
2000 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2001 free(path);
2002 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2003 if (ret < 0) {
2004 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
2005 free(fullpath);
2006 return false;
2007 }
2008 free(fullpath);
2009 }
2010
2011 return true;
2012 }
2013
2014 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2015 * don't have a cgroup_data set up, so we ask the running container through the
2016 * commands API for the cgroup path.
2017 */
2018 static int cgfsng_get(struct cgroup_ops *ops, const char *filename, char *value,
2019 size_t len, const char *name, const char *lxcpath)
2020 {
2021 int ret = -1;
2022 size_t controller_len;
2023 char *controller, *p, *path;
2024 struct hierarchy *h;
2025
2026 controller_len = strlen(filename);
2027 controller = alloca(controller_len + 1);
2028 strcpy(controller, filename);
2029 p = strchr(controller, '.');
2030 if (p)
2031 *p = '\0';
2032
2033 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2034 /* not running */
2035 if (!path)
2036 return -1;
2037
2038 h = get_hierarchy(ops, controller);
2039 if (h) {
2040 char *fullpath;
2041
2042 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2043 ret = lxc_read_from_file(fullpath, value, len);
2044 free(fullpath);
2045 }
2046 free(path);
2047
2048 return ret;
2049 }
2050
2051 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2052 * don't have a cgroup_data set up, so we ask the running container through the
2053 * commands API for the cgroup path.
2054 */
2055 static int cgfsng_set(struct cgroup_ops *ops, const char *filename,
2056 const char *value, const char *name, const char *lxcpath)
2057 {
2058 int ret = -1;
2059 size_t controller_len;
2060 char *controller, *p, *path;
2061 struct hierarchy *h;
2062
2063 controller_len = strlen(filename);
2064 controller = alloca(controller_len + 1);
2065 strcpy(controller, filename);
2066 p = strchr(controller, '.');
2067 if (p)
2068 *p = '\0';
2069
2070 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2071 /* not running */
2072 if (!path)
2073 return -1;
2074
2075 h = get_hierarchy(ops, controller);
2076 if (h) {
2077 char *fullpath;
2078
2079 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2080 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2081 free(fullpath);
2082 }
2083 free(path);
2084
2085 return ret;
2086 }
2087
2088 /* take devices cgroup line
2089 * /dev/foo rwx
2090 * and convert it to a valid
2091 * type major:minor mode
2092 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2093 * the output.
2094 */
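/* Example (illustrative, not part of the original code): the input
 * "/dev/null rwm" stats /dev/null - a character device with major 1, minor 3
 * on typical Linux systems - and produces "c 1:3 rwm", the format expected by
 * devices.allow and devices.deny.
 */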
2095 static int convert_devpath(const char *invalue, char *dest)
2096 {
2097 int n_parts;
2098 char *p, *path, type;
2099 unsigned long minor, major;
2100 struct stat sb;
2101 int ret = -EINVAL;
2102 char *mode = NULL;
2103
2104 path = must_copy_string(invalue);
2105
2106 /* Read path followed by mode. Ignore any trailing text.
2107 * A ' # comment' would be legal. Technically other text is not
2108 * legal; we could check for that if we cared to.
2109 */
2110 for (n_parts = 1, p = path; *p && n_parts < 3; p++) {
2111 if (*p != ' ')
2112 continue;
2113 *p = '\0';
2114
2115 if (n_parts != 1)
2116 break;
2117 p++;
2118 n_parts++;
2119
2120 while (*p == ' ')
2121 p++;
2122
2123 mode = p;
2124
2125 if (*p == '\0')
2126 goto out;
2127 }
2128
2129 if (n_parts == 1)
2130 goto out;
2131
2132 ret = stat(path, &sb);
2133 if (ret < 0)
2134 goto out;
2135
2136 mode_t m = sb.st_mode & S_IFMT;
2137 switch (m) {
2138 case S_IFBLK:
2139 type = 'b';
2140 break;
2141 case S_IFCHR:
2142 type = 'c';
2143 break;
2144 default:
2145 ERROR("Unsupported device type %i for \"%s\"", m, path);
2146 ret = -EINVAL;
2147 goto out;
2148 }
2149
2150 major = MAJOR(sb.st_rdev);
2151 minor = MINOR(sb.st_rdev);
2152 ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
2153 if (ret < 0 || ret >= 50) {
2154 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2155 "chars)", type, major, minor, mode);
2156 ret = -ENAMETOOLONG;
2157 goto out;
2158 }
2159 ret = 0;
2160
2161 out:
2162 free(path);
2163 return ret;
2164 }
2165
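/* Illustrative sketch of what convert_devpath() produces. The device numbers
 * assume the conventional /dev/null character device 1:3; TRACE() is used
 * here merely as a convenient sink for the result.
 */
__attribute__((unused)) static void example_convert_devpath(void)
{
        /* 50 bytes matches the converted_value buffer used by the caller. */
        char dest[50];

        /* "/dev/null rwm" is rewritten to "c 1:3 rwm", i.e. the
         * "type major:minor mode" syntax that devices.allow expects.
         */
        if (convert_devpath("/dev/null rwm", dest) == 0)
                TRACE("Converted device line: \"%s\"", dest);
}
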
2166 /* Called from setup_limits - here we have the container's cgroup_data because
2167 * we created the cgroups.
2168 */
2169 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2170 const char *value)
2171 {
2172 size_t len;
2173 char *fullpath, *p;
2174 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2175 char converted_value[50];
2176 struct hierarchy *h;
2177 int ret = 0;
2178 char *controller = NULL;
2179
2180 len = strlen(filename);
2181 controller = alloca(len + 1);
2182 strcpy(controller, filename);
2183 p = strchr(controller, '.');
2184 if (p)
2185 *p = '\0';
2186
2187 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2188 ret = convert_devpath(value, converted_value);
2189 if (ret < 0)
2190 return ret;
2191 value = converted_value;
2192 }
2193
2194 h = get_hierarchy(ops, controller);
2195 if (!h) {
2196 ERROR("Failed to setup limits for the \"%s\" controller. "
2197 "The controller seems to be unused by \"cgfsng\" cgroup "
2198 "driver or not enabled on the cgroup hierarchy",
2199 controller);
2200 errno = ENOENT;
2201 return -ENOENT;
2202 }
2203
2204 fullpath = must_make_path(h->fullcgpath, filename, NULL);
2205 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2206 free(fullpath);
2207 return ret;
2208 }
2209
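/* Illustrative sketch: with a typical split-controller layout the call below
 * would write "536870912" to something like
 * /sys/fs/cgroup/memory/lxc/c1/memory.limit_in_bytes; the exact path depends
 * on h->fullcgpath, i.e. on the mountpoint and the cgroup we created. The
 * value and layout are assumptions.
 */
__attribute__((unused)) static int example_set_memory_limit(struct cgroup_ops *ops)
{
        return cg_legacy_set_data(ops, "memory.limit_in_bytes", "536870912");
}
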
2210 static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
2211 struct lxc_list *cgroup_settings,
2212 bool do_devices)
2213 {
2214 struct lxc_list *iterator, *next, *sorted_cgroup_settings;
2215 struct lxc_cgroup *cg;
2216 bool ret = false;
2217
2218 if (lxc_list_empty(cgroup_settings))
2219 return true;
2220
2221 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2222 if (!sorted_cgroup_settings)
2223 return false;
2224
2225 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2226 cg = iterator->elem;
2227
2228 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2229 if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
2230 if (do_devices && (errno == EACCES || errno == EPERM)) {
2231 WARN("Failed to set \"%s\" to \"%s\"",
2232 cg->subsystem, cg->value);
2233 continue;
2234 }
2235 WARN("Failed to set \"%s\" to \"%s\"",
2236 cg->subsystem, cg->value);
2237 goto out;
2238 }
2239 DEBUG("Set controller \"%s\" set to \"%s\"",
2240 cg->subsystem, cg->value);
2241 }
2242 }
2243
2244 ret = true;
2245 INFO("Limits for the legacy cgroup hierarchies have been setup");
2246 out:
2247 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2248 lxc_list_del(iterator);
2249 free(iterator);
2250 }
2251 free(sorted_cgroup_settings);
2252 return ret;
2253 }
2254
2255 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2256 struct lxc_list *cgroup_settings)
2257 {
2258 struct lxc_list *iterator;
2259 struct hierarchy *h = ops->unified;
2260
2261 if (lxc_list_empty(cgroup_settings))
2262 return true;
2263
2264 if (!h)
2265 return false;
2266
2267 lxc_list_for_each(iterator, cgroup_settings) {
2268 int ret;
2269 char *fullpath;
2270 struct lxc_cgroup *cg = iterator->elem;
2271
2272 fullpath = must_make_path(h->fullcgpath, cg->subsystem, NULL);
2273 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2274 free(fullpath);
2275 if (ret < 0) {
2276 SYSERROR("Failed to set \"%s\" to \"%s\"",
2277 cg->subsystem, cg->value);
2278 return false;
2279 }
2280 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2281 }
2282
2283 INFO("Limits for the unified cgroup hierarchy have been setup");
2284 return true;
2285 }
2286
2287 static bool cgfsng_setup_limits(struct cgroup_ops *ops, struct lxc_conf *conf,
2288 bool do_devices)
2289 {
2290 bool bret;
2291
2292 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2293 if (!bret)
2294 return false;
2295
2296 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2297 }
2298
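/* Illustrative configuration sketch: container config entries such as the
 * ones below end up in conf->cgroup (legacy keys) and conf->cgroup2 (unified
 * keys) and are then applied by the helpers above. The concrete values are
 * assumptions.
 *
 *      lxc.cgroup.memory.limit_in_bytes = 536870912
 *      lxc.cgroup.devices.deny = a
 *      lxc.cgroup.devices.allow = c 1:3 rwm
 *      lxc.cgroup2.memory.max = 536870912
 *
 * Note that devices.* entries are only applied on the do_devices pass, all
 * other limits on the other pass.
 */
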
2299 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
2300 * mountpoints and current cgroups, and stores it in @ops.
2301 */
2302 static bool cg_hybrid_init(struct cgroup_ops *ops)
2303 {
2304 int ret;
2305 char *basecginfo;
2306 bool will_escape;
2307 FILE *f;
2308 size_t len = 0;
2309 char *line = NULL;
2310 char **klist = NULL, **nlist = NULL;
2311
2312 /* Root spawned containers escape the current cgroup, so use init's
2313 * cgroups as our base in that case.
2314 */
2315 will_escape = (geteuid() == 0);
2316 if (will_escape)
2317 basecginfo = read_file("/proc/1/cgroup");
2318 else
2319 basecginfo = read_file("/proc/self/cgroup");
2320 if (!basecginfo)
2321 return false;
2322
2323 ret = get_existing_subsystems(&klist, &nlist);
2324 if (ret < 0) {
2325 ERROR("Failed to retrieve available legacy cgroup controllers");
2326 free(basecginfo);
2327 return false;
2328 }
2329
2330 f = fopen("/proc/self/mountinfo", "r");
2331 if (!f) {
2332 ERROR("Failed to open \"/proc/self/mountinfo\"");
2333 free(basecginfo);
2334 return false;
2335 }
2336
2337 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
2338
2339 while (getline(&line, &len, f) != -1) {
2340 int type;
2341 bool writeable;
2342 struct hierarchy *new;
2343 char *base_cgroup = NULL, *mountpoint = NULL;
2344 char **controller_list = NULL;
2345
2346 type = get_cgroup_version(line);
2347 if (type == 0)
2348 continue;
2349
2350 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
2351 continue;
2352
2353 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
2354 if (type == CGROUP2_SUPER_MAGIC)
2355 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2356 else if (type == CGROUP_SUPER_MAGIC)
2357 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
2358 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
2359 if (type == CGROUP_SUPER_MAGIC)
2360 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2361 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
2362 if (type == CGROUP2_SUPER_MAGIC)
2363 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2364 }
2365
2366 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
2367 if (!controller_list && type == CGROUP_SUPER_MAGIC)
2368 continue;
2369
2370 if (type == CGROUP_SUPER_MAGIC)
2371 if (controller_list_is_dup(ops->hierarchies, controller_list))
2372 goto next;
2373
2374 mountpoint = cg_hybrid_get_mountpoint(line);
2375 if (!mountpoint) {
2376 ERROR("Failed parsing mountpoint from \"%s\"", line);
2377 goto next;
2378 }
2379
2380 if (type == CGROUP_SUPER_MAGIC)
2381 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
2382 else
2383 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
2384 if (!base_cgroup) {
2385 ERROR("Failed to find current cgroup");
2386 goto next;
2387 }
2388
2389 trim(base_cgroup);
2390 prune_init_scope(base_cgroup);
2391 if (type == CGROUP2_SUPER_MAGIC)
2392 writeable = test_writeable_v2(mountpoint, base_cgroup);
2393 else
2394 writeable = test_writeable_v1(mountpoint, base_cgroup);
2395 if (!writeable)
2396 goto next;
2397
2398 if (type == CGROUP2_SUPER_MAGIC) {
2399 char *cgv2_ctrl_path;
2400
2401 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
2402 "cgroup.controllers",
2403 NULL);
2404
2405 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
2406 free(cgv2_ctrl_path);
2407 if (!controller_list) {
2408 controller_list = cg_unified_make_empty_controller();
2409 TRACE("No controllers are enabled for "
2410 "delegation in the unified hierarchy");
2411 }
2412 }
2413
2414 new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
2415 if (type == CGROUP2_SUPER_MAGIC && !ops->unified)
2416 ops->unified = new;
2417
2418 continue;
2419
2420 next:
2421 free_string_list(controller_list);
2422 free(mountpoint);
2423 free(base_cgroup);
2424 }
2425
2426 free_string_list(klist);
2427 free_string_list(nlist);
2428
2429 free(basecginfo);
2430
2431 fclose(f);
2432 free(line);
2433
2434 TRACE("Writable cgroup hierarchies:");
2435 lxc_cgfsng_print_hierarchies(ops);
2436
2437 /* Verify that all controllers in lxc.cgroup.use and all crucial
2438 * controllers are accounted for.
2439 */
2440 if (!all_controllers_found(ops))
2441 return false;
2442
2443 return true;
2444 }
2445
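/* Illustrative input sketch: the loop above consumes /proc/self/mountinfo
 * lines and /proc/1/cgroup (or /proc/self/cgroup) entries roughly of the
 * following shape on a hybrid host; the ids and paths are assumptions, only
 * the field layout matters.
 *
 *      mountinfo:
 *      34 25 0:29 / /sys/fs/cgroup/memory rw,nosuid shared:15 - cgroup cgroup rw,memory
 *      40 25 0:35 / /sys/fs/cgroup/unified rw,nosuid shared:21 - cgroup2 cgroup2 rw
 *
 *      /proc/1/cgroup:
 *      4:memory:/
 *      0::/init.scope
 *
 * The controller list and mountpoint are taken from the mountinfo line, the
 * matching base cgroup from the cgroup file.
 */
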
2446 static int cg_is_pure_unified(void)
2447 {
2448
2449 int ret;
2450 struct statfs fs;
2451
2452 ret = statfs("/sys/fs/cgroup", &fs);
2453 if (ret < 0)
2454 return -ENOMEDIUM;
2455
2456 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2457 return CGROUP2_SUPER_MAGIC;
2458
2459 return 0;
2460 }
2461
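/* For reference: CGROUP2_SUPER_MAGIC is 0x63677270 ("cgrp"), the f_type that
 * statfs() reports for a cgroup2 filesystem, so the check above boils down to
 * "is /sys/fs/cgroup itself a cgroup2 mount". A purely unified host would,
 * for example, show (illustrative shell sketch):
 *
 *      $ stat -f -c %t /sys/fs/cgroup
 *      63677270
 */
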
2462 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
2463 static char *cg_unified_get_current_cgroup(void)
2464 {
2465 char *basecginfo, *base_cgroup;
2466 bool will_escape;
2467 char *copy = NULL;
2468
2469 will_escape = (geteuid() == 0);
2470 if (will_escape)
2471 basecginfo = read_file("/proc/1/cgroup");
2472 else
2473 basecginfo = read_file("/proc/self/cgroup");
2474 if (!basecginfo)
2475 return NULL;
2476
2477 base_cgroup = strstr(basecginfo, "0::/");
2478 if (!base_cgroup)
2479 goto cleanup_on_err;
2480
2481 base_cgroup = base_cgroup + 3;
2482 copy = copy_to_eol(base_cgroup);
2483 if (!copy)
2484 goto cleanup_on_err;
2485
2486 cleanup_on_err:
2487 free(basecginfo);
2488 if (copy)
2489 trim(copy);
2490
2491 return copy;
2492 }
2493
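/* Illustrative sketch of the entry parsed above: on a host using the unified
 * hierarchy, /proc/self/cgroup contains a line such as
 *
 *      0::/user.slice/user-1000.slice/session-2.scope
 *
 * strstr() locates "0::/", the "+ 3" skips the "0::" prefix so the copy
 * starts at the leading "/", and copy_to_eol() plus trim() yield the clean
 * cgroup path. The concrete slice and scope names are assumptions.
 */
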
2494 static int cg_unified_init(struct cgroup_ops *ops)
2495 {
2496 int ret;
2497 char *mountpoint, *subtree_path;
2498 char **delegatable;
2499 char *base_cgroup = NULL;
2500
2501 ret = cg_is_pure_unified();
2502 if (ret == -ENOMEDIUM)
2503 return -ENOMEDIUM;
2504
2505 if (ret != CGROUP2_SUPER_MAGIC)
2506 return 0;
2507
2508 base_cgroup = cg_unified_get_current_cgroup();
2509 if (!base_cgroup)
2510 return -EINVAL;
2511 prune_init_scope(base_cgroup);
2512
2513 /* We assume that we have already been given controllers to delegate
2514 * further down the hierarchy. If not, it is up to the user to delegate
2515 * them to us.
2516 */
2517 mountpoint = must_copy_string("/sys/fs/cgroup");
2518 subtree_path = must_make_path(mountpoint, base_cgroup,
2519 "cgroup.subtree_control", NULL);
2520 delegatable = cg_unified_get_controllers(subtree_path);
2521 free(subtree_path);
2522 if (!delegatable)
2523 delegatable = cg_unified_make_empty_controller();
2524 if (!delegatable[0])
2525 TRACE("No controllers are enabled for delegation");
2526
2527 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2528 * we should verify that here. The reason I'm not doing it right now is
2529 * that I'm not convinced that lxc.cgroup.use will be the future, since
2530 * it is a global property. I would much rather have an option that lets
2531 * you request controllers per container.
2532 */
2533
2534 add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2535
2536 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2537 return CGROUP2_SUPER_MAGIC;
2538 }
2539
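/* Illustrative sketch of the delegation this relies on: cgroup.subtree_control
 * of our base cgroup only lists controllers that someone higher up (systemd,
 * the host admin, ...) has enabled, e.g. for the root cgroup:
 *
 *      echo "+memory +pids" > /sys/fs/cgroup/cgroup.subtree_control
 *
 * Only controllers listed there can be delegated further down to containers.
 * The controller names used here are assumptions.
 */
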
2540 static bool cg_init(struct cgroup_ops *ops)
2541 {
2542 int ret;
2543 const char *tmp;
2544
2545 tmp = lxc_global_config_value("lxc.cgroup.use");
2546 if (tmp)
2547 ops->cgroup_use = must_copy_string(tmp);
2548
2549 ret = cg_unified_init(ops);
2550 if (ret < 0)
2551 return false;
2552
2553 if (ret == CGROUP2_SUPER_MAGIC)
2554 return true;
2555
2556 return cg_hybrid_init(ops);
2557 }
2558
2559 static bool cgfsng_data_init(struct cgroup_ops *ops)
2560 {
2561 const char *cgroup_pattern;
2562
2563 /* copy system-wide cgroup information */
2564 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2565 if (!cgroup_pattern) {
2566 /* lxc.cgroup.pattern is only NULL on error. */
2567 ERROR("Failed to retrieve cgroup pattern");
2568 return false;
2569 }
2570 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2571
2572 return true;
2573 }
2574
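/* Illustrative sketch of the two global configuration values consulted by
 * cg_init() and cgfsng_data_init(). In the system-wide lxc configuration one
 * might set (the values are assumptions):
 *
 *      lxc.cgroup.use = freezer,memory,name=systemd
 *      lxc.cgroup.pattern = lxc/%n
 *
 * lxc.cgroup.use lists the controllers cgfsng is expected to use, and
 * lxc.cgroup.pattern is the template for the per-container cgroup, with %n
 * replaced by the container name.
 */
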
2575 struct cgroup_ops *cgfsng_ops_init(void)
2576 {
2577 struct cgroup_ops *cgfsng_ops;
2578
2579 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2580 if (!cgfsng_ops)
2581 return NULL;
2582
2583 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2584 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2585
2586 if (!cg_init(cgfsng_ops)) {
2587 free(cgfsng_ops);
2588 return NULL;
2589 }
2590
2591 cgfsng_ops->data_init = cgfsng_data_init;
2592 cgfsng_ops->destroy = cgfsng_destroy;
2593 cgfsng_ops->create = cgfsng_create;
2594 cgfsng_ops->enter = cgfsng_enter;
2595 cgfsng_ops->escape = cgfsng_escape;
2596 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2597 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2598 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2599 cgfsng_ops->get = cgfsng_get;
2600 cgfsng_ops->set = cgfsng_set;
2601 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2602 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2603 cgfsng_ops->driver = "cgfsng";
2604 cgfsng_ops->version = "1.0.0";
2605 cgfsng_ops->attach = cgfsng_attach;
2606 cgfsng_ops->chown = cgfsng_chown;
2607 cgfsng_ops->mount = cgfsng_mount;
2608 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2609
2610 return cgfsng_ops;
2611 }
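
/* Hypothetical caller sketch, relying only on the handlers wired up above:
 * the generic cgroup layer would obtain the driver roughly like this and call
 * data_init() before using it. Error handling is deliberately minimal; a real
 * caller would also tear the driver down again.
 */
__attribute__((unused)) static struct cgroup_ops *example_take_cgroup_driver(void)
{
        struct cgroup_ops *ops;

        ops = cgfsng_ops_init();
        if (!ops)
                return NULL;

        /* Pull in the system-wide settings (e.g. lxc.cgroup.pattern). */
        if (!ops->data_init(ops))
                return NULL;

        return ops;
}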