src/lxc/cgroups/cgfsng.c
1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfsng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
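
/* Under that assumption, a typical hybrid layout looks roughly like this
 * (illustrative only; the exact set of directories depends on the system):
 *
 *   /sys/fs/cgroup/cpu,cpuacct
 *   /sys/fs/cgroup/memory
 *   /sys/fs/cgroup/systemd        (named hierarchy, i.e. "name=systemd")
 *   /sys/fs/cgroup/unified        (the cgroup2 hierarchy on hybrid hosts)
 */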
36
37 #include "config.h"
38
39 #include <ctype.h>
40 #include <dirent.h>
41 #include <errno.h>
42 #include <grp.h>
43 #include <stdint.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <linux/kdev_t.h>
49 #include <linux/types.h>
50 #include <sys/types.h>
51
52 #include "caps.h"
53 #include "cgroup.h"
54 #include "cgroup_utils.h"
55 #include "commands.h"
56 #include "conf.h"
57 #include "log.h"
58 #include "storage/storage.h"
59 #include "utils.h"
60
61 #ifndef HAVE_STRLCPY
62 #include "include/strlcpy.h"
63 #endif
64
65 #ifndef HAVE_STRLCAT
66 #include "include/strlcat.h"
67 #endif
68
69 lxc_log_define(lxc_cgfsng, lxc);
70
71 static void free_string_list(char **clist)
72 {
73 int i;
74
75 if (!clist)
76 return;
77
78 for (i = 0; clist[i]; i++)
79 free(clist[i]);
80
81 free(clist);
82 }
83
84 /* Allocate a pointer, do not fail. */
85 static void *must_alloc(size_t sz)
86 {
87 return must_realloc(NULL, sz);
88 }
89
90 /* Given a pointer to a null-terminated array of pointers, realloc to add one
91 * entry, and point the new entry to NULL. Do not fail. Return the index to the
92 * second-to-last entry - that is, the one which is now available for use
93 * (keeping the list null-terminated).
94 */
95 static int append_null_to_list(void ***list)
96 {
97 int newentry = 0;
98
99 if (*list)
100 for (; (*list)[newentry]; newentry++)
101 ;
102
103 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
104 (*list)[newentry + 1] = NULL;
105 return newentry;
106 }
107
108 /* Given a null-terminated array of strings, check whether @entry is one of the
109 * strings.
110 */
111 static bool string_in_list(char **list, const char *entry)
112 {
113 int i;
114
115 if (!list)
116 return false;
117
118 for (i = 0; list[i]; i++)
119 if (strcmp(list[i], entry) == 0)
120 return true;
121
122 return false;
123 }
124
125 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
126 * "name=systemd". Do not fail.
127 */
128 static char *cg_legacy_must_prefix_named(char *entry)
129 {
130 size_t len;
131 char *prefixed;
132
133 len = strlen(entry);
134 prefixed = must_alloc(len + 6);
135
136 memcpy(prefixed, "name=", sizeof("name=") - 1);
137 memcpy(prefixed + sizeof("name=") - 1, entry, len);
138 prefixed[len + 5] = '\0';
139 return prefixed;
140 }
141
142 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
143 * we are called.
144 *
145 * We also handle named subsystems here. Any controller which is not a kernel
146 * subsystem is prefixed with "name=". Any controller which is both a kernel and
147 * a named subsystem is refused, because we cannot tell which of the two we have.
148 * (TODO: We could work around this in some cases by just remounting to be
149 * unambiguous, or by comparing mountpoint contents with current cgroup.)
150 *
151 * The last entry will always be NULL.
152 */
153 static void must_append_controller(char **klist, char **nlist, char ***clist,
154 char *entry)
155 {
156 int newentry;
157 char *copy;
158
159 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
160 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
161 ERROR("It is both a named and kernel subsystem");
162 return;
163 }
164
165 newentry = append_null_to_list((void ***)clist);
166
167 if (strncmp(entry, "name=", 5) == 0)
168 copy = must_copy_string(entry);
169 else if (string_in_list(klist, entry))
170 copy = must_copy_string(entry);
171 else
172 copy = cg_legacy_must_prefix_named(entry);
173
174 (*clist)[newentry] = copy;
175 }
176
177 /* Given a handler's cgroup data, return the struct hierarchy for the controller
178 * @c, or NULL if there is none.
179 */
180 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *c)
181 {
182 int i;
183
184 if (!ops->hierarchies)
185 return NULL;
186
187 for (i = 0; ops->hierarchies[i]; i++) {
188 if (!c) {
189 /* This is the empty unified hierarchy. */
190 if (ops->hierarchies[i]->controllers &&
191 !ops->hierarchies[i]->controllers[0])
192 return ops->hierarchies[i];
193
194 continue;
195 }
196
197 if (string_in_list(ops->hierarchies[i]->controllers, c))
198 return ops->hierarchies[i];
199 }
200
201 return NULL;
202 }
203
204 #define BATCH_SIZE 50
205 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
206 {
207 int newbatches = (newlen / BATCH_SIZE) + 1;
208 int oldbatches = (oldlen / BATCH_SIZE) + 1;
209
210 if (!*mem || newbatches > oldbatches) {
211 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
212 }
213 }
214
215 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
216 {
217 size_t full = oldlen + newlen;
218
219 batch_realloc(dest, oldlen, full + 1);
220
221 memcpy(*dest + oldlen, new, newlen + 1);
222 }
223
224 /* Slurp in a whole file */
225 static char *read_file(const char *fnam)
226 {
227 FILE *f;
228 char *line = NULL, *buf = NULL;
229 size_t len = 0, fulllen = 0;
230 int linelen;
231
232 f = fopen(fnam, "r");
233 if (!f)
234 return NULL;
235 while ((linelen = getline(&line, &len, f)) != -1) {
236 append_line(&buf, fulllen, line, linelen);
237 fulllen += linelen;
238 }
239 fclose(f);
240 free(line);
241 return buf;
242 }
243
244 /* Taken from the kernel sources and modified. */
245 #define NBITS 32 /* bits in uint32_t */
246 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
247 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
248
249 static void set_bit(unsigned bit, uint32_t *bitarr)
250 {
251 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
252 }
253
254 static void clear_bit(unsigned bit, uint32_t *bitarr)
255 {
256 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
257 }
258
259 static bool is_set(unsigned bit, uint32_t *bitarr)
260 {
261 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
262 }
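
/* Example (NBITS == 32): cpu 35 lands in bitarr[1], bit 3, because
 * 35 / NBITS == 1 and 35 % NBITS == 3.
 */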
263
264 /* Create cpumask from cpulist aka turn:
265 *
266 * 0,2-3
267 *
268 * into bit array
269 *
270 * 1 0 1 1
271 */
272 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
273 {
274 char *token;
275 size_t arrlen;
276 uint32_t *bitarr;
277 char *saveptr = NULL;
278
279 arrlen = BITS_TO_LONGS(nbits);
280 bitarr = calloc(arrlen, sizeof(uint32_t));
281 if (!bitarr)
282 return NULL;
283
284 for (; (token = strtok_r(buf, ",", &saveptr)); buf = NULL) {
285 errno = 0;
286 unsigned end, start;
287 char *range;
288
289 start = strtoul(token, NULL, 0);
290 end = start;
291 range = strchr(token, '-');
292 if (range)
293 end = strtoul(range + 1, NULL, 0);
294
295 if (!(start <= end)) {
296 free(bitarr);
297 return NULL;
298 }
299
300 if (end >= nbits) {
301 free(bitarr);
302 return NULL;
303 }
304
305 while (start <= end)
306 set_bit(start++, bitarr);
307 }
308
309 return bitarr;
310 }
311
312 /* Turn cpumask into simple, comma-separated cpulist. */
313 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
314 {
315 int ret;
316 size_t i;
317 char **cpulist = NULL;
318 char numstr[LXC_NUMSTRLEN64] = {0};
319
320 for (i = 0; i < nbits; i++) {
321 if (!is_set(i, bitarr))
322 continue;
323
324 ret = snprintf(numstr, LXC_NUMSTRLEN64, "%zu", i);
325 if (ret < 0 || (size_t)ret >= LXC_NUMSTRLEN64) {
326 lxc_free_array((void **)cpulist, free);
327 return NULL;
328 }
329
330 ret = lxc_append_string(&cpulist, numstr);
331 if (ret < 0) {
332 lxc_free_array((void **)cpulist, free);
333 return NULL;
334 }
335 }
336
337 if (!cpulist)
338 return NULL;
339
340 return lxc_string_join(",", (const char **)cpulist, false);
341 }
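
/* Minimal usage sketch for the two helpers above (illustrative values, error
 * handling omitted):
 *
 *   char buf[] = "0,2-3";
 *   uint32_t *mask = lxc_cpumask(buf, 4);          // bits 0, 2 and 3 set
 *   char *list = lxc_cpumask_to_cpulist(mask, 4);  // yields "0,2,3"
 */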
342
343 static ssize_t get_max_cpus(char *cpulist)
344 {
345 char *c1, *c2;
346 char *maxcpus = cpulist;
347 size_t cpus = 0;
348
349 c1 = strrchr(maxcpus, ',');
350 if (c1)
351 c1++;
352
353 c2 = strrchr(maxcpus, '-');
354 if (c2)
355 c2++;
356
357 if (!c1 && !c2)
358 c1 = maxcpus;
359 else if (c1 > c2)
360 c2 = c1;
361 else if (c1 < c2)
362 c1 = c2;
363 else if (!c1 && c2)
364 c1 = c2;
365
366 errno = 0;
367 cpus = strtoul(c1, NULL, 0);
368 if (errno != 0)
369 return -1;
370
371 return cpus;
372 }
373
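/* Worked example for the function below (illustrative values): if the parent's
 * cpuset.cpus reads "0-7" and /sys/devices/system/cpu/isolated reads "3-4",
 * the new cgroup's cpuset.cpus is written as "0,1,2,5,6,7", i.e. the possible
 * cpus minus the isolated ones.
 */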
374 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
375 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
376 {
377 int ret;
378 ssize_t i;
379 char *lastslash, *fpath, oldv;
380 ssize_t maxisol = 0, maxposs = 0;
381 char *cpulist = NULL, *isolcpus = NULL, *posscpus = NULL;
382 uint32_t *isolmask = NULL, *possmask = NULL;
383 bool bret = false, flipped_bit = false;
384
385 lastslash = strrchr(path, '/');
386 if (!lastslash) {
387 ERROR("Failed to detect \"/\" in \"%s\"", path);
388 return bret;
389 }
390 oldv = *lastslash;
391 *lastslash = '\0';
392 fpath = must_make_path(path, "cpuset.cpus", NULL);
393 posscpus = read_file(fpath);
394 if (!posscpus) {
395 SYSERROR("Failed to read file \"%s\"", fpath);
396 goto on_error;
397 }
398
399 /* Get maximum number of cpus found in possible cpuset. */
400 maxposs = get_max_cpus(posscpus);
401 if (maxposs < 0)
402 goto on_error;
403
404 if (!file_exists(__ISOL_CPUS)) {
405 /* This system doesn't expose isolated cpus. */
406 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
407 cpulist = posscpus;
408 /* No isolated cpus but we weren't already initialized by
409 * someone. We should simply copy the parent's cpuset.cpus
410 * values.
411 */
412 if (!am_initialized) {
413 DEBUG("Copying cpu settings of parent cgroup");
414 goto copy_parent;
415 }
416 /* No isolated cpus but we were already initialized by someone.
417 * Nothing more to do for us.
418 */
419 goto on_success;
420 }
421
422 isolcpus = read_file(__ISOL_CPUS);
423 if (!isolcpus) {
424 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
425 goto on_error;
426 }
427 if (!isdigit(isolcpus[0])) {
428 TRACE("No isolated cpus detected");
429 cpulist = posscpus;
430 /* No isolated cpus but we weren't already initialized by
432 * someone. We should simply copy the parent's cpuset.cpus
432 * values.
433 */
434 if (!am_initialized) {
435 DEBUG("Copying cpu settings of parent cgroup");
436 goto copy_parent;
437 }
438 /* No isolated cpus but we were already initialized by someone.
439 * Nothing more to do for us.
440 */
441 goto on_success;
442 }
443
444 /* Get maximum number of cpus found in isolated cpuset. */
445 maxisol = get_max_cpus(isolcpus);
446 if (maxisol < 0)
447 goto on_error;
448
449 if (maxposs < maxisol)
450 maxposs = maxisol;
451 maxposs++;
452
453 possmask = lxc_cpumask(posscpus, maxposs);
454 if (!possmask) {
455 ERROR("Failed to create cpumask for possible cpus");
456 goto on_error;
457 }
458
459 isolmask = lxc_cpumask(isolcpus, maxposs);
460 if (!isolmask) {
461 ERROR("Failed to create cpumask for isolated cpus");
462 goto on_error;
463 }
464
465 for (i = 0; i < maxposs; i++) {
466 if (!is_set(i, isolmask) || !is_set(i, possmask))
467 continue;
468
469 flipped_bit = true;
470 clear_bit(i, possmask);
471 }
472
473 if (!flipped_bit) {
474 DEBUG("No isolated cpus present in cpuset");
475 goto on_success;
476 }
477 DEBUG("Removed isolated cpus from cpuset");
478
479 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
480 if (!cpulist) {
481 ERROR("Failed to create cpu list");
482 goto on_error;
483 }
484
485 copy_parent:
486 *lastslash = oldv;
487 free(fpath);
488 fpath = must_make_path(path, "cpuset.cpus", NULL);
489 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
490 if (ret < 0) {
491 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
492 goto on_error;
493 }
494
495 on_success:
496 bret = true;
497
498 on_error:
499 free(fpath);
500
501 free(isolcpus);
502 free(isolmask);
503
504 if (posscpus != cpulist)
505 free(posscpus);
506 free(possmask);
507
508 free(cpulist);
509 return bret;
510 }
511
512 /* Copy contents of parent(@path)/@file to @path/@file */
513 static bool copy_parent_file(char *path, char *file)
514 {
515 int ret;
516 char *fpath, *lastslash, oldv;
517 int len = 0;
518 char *value = NULL;
519
520 lastslash = strrchr(path, '/');
521 if (!lastslash) {
522 ERROR("Failed to detect \"/\" in \"%s\"", path);
523 return false;
524 }
525 oldv = *lastslash;
526 *lastslash = '\0';
527 fpath = must_make_path(path, file, NULL);
528 len = lxc_read_from_file(fpath, NULL, 0);
529 if (len <= 0)
530 goto on_error;
531
532 value = must_alloc(len + 1);
533 ret = lxc_read_from_file(fpath, value, len);
534 if (ret != len)
535 goto on_error;
536 free(fpath);
537
538 *lastslash = oldv;
539 fpath = must_make_path(path, file, NULL);
540 ret = lxc_write_to_file(fpath, value, len, false, 0666);
541 if (ret < 0)
542 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, fpath);
543 free(fpath);
544 free(value);
545 return ret >= 0;
546
547 on_error:
548 SYSERROR("Failed to read file \"%s\"", fpath);
549 free(fpath);
550 free(value);
551 return false;
552 }
553
554 /* Initialize the cpuset hierarchy in the first directory of @cgname and set
555 * cgroup.clone_children so that children inherit settings. Since the
556 * h->base_cgroup is populated by init or ourselves, we know it is already
557 * initialized.
558 */
559 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
560 {
561 int ret;
562 char v;
563 char *cgpath, *clonechildrenpath, *slash;
564
565 if (!string_in_list(h->controllers, "cpuset"))
566 return true;
567
568 if (*cgname == '/')
569 cgname++;
570 slash = strchr(cgname, '/');
571 if (slash)
572 *slash = '\0';
573
574 cgpath = must_make_path(h->mountpoint, h->base_cgroup, cgname, NULL);
575 if (slash)
576 *slash = '/';
577
578 ret = mkdir(cgpath, 0755);
579 if (ret < 0) {
580 if (errno != EEXIST) {
581 SYSERROR("Failed to create directory \"%s\"", cgpath);
582 free(cgpath);
583 return false;
584 }
585 }
586
587 clonechildrenpath =
588 must_make_path(cgpath, "cgroup.clone_children", NULL);
589 /* unified hierarchy doesn't have clone_children */
590 if (!file_exists(clonechildrenpath)) {
591 free(clonechildrenpath);
592 free(cgpath);
593 return true;
594 }
595
596 ret = lxc_read_from_file(clonechildrenpath, &v, 1);
597 if (ret < 0) {
598 SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
599 free(clonechildrenpath);
600 free(cgpath);
601 return false;
602 }
603
604 /* Make sure any isolated cpus are removed from cpuset.cpus. */
605 if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
606 SYSERROR("Failed to remove isolated cpus");
607 free(clonechildrenpath);
608 free(cgpath);
609 return false;
610 }
611
612 /* Already set for us by someone else. */
613 if (v == '1') {
614 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
615 free(clonechildrenpath);
616 free(cgpath);
617 return true;
618 }
619
620 /* copy parent's settings */
621 if (!copy_parent_file(cgpath, "cpuset.mems")) {
622 SYSERROR("Failed to copy \"cpuset.mems\" settings");
623 free(cgpath);
624 free(clonechildrenpath);
625 return false;
626 }
627 free(cgpath);
628
629 ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
630 if (ret < 0) {
631 /* Set clone_children so children inherit our settings */
632 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
633 free(clonechildrenpath);
634 return false;
635 }
636 free(clonechildrenpath);
637 return true;
638 }
639
640 /* Given two null-terminated lists of strings, return true if any string is in
641 * both.
642 */
643 static bool controller_lists_intersect(char **l1, char **l2)
644 {
645 int i;
646
647 if (!l1 || !l2)
648 return false;
649
650 for (i = 0; l1[i]; i++) {
651 if (string_in_list(l2, l1[i]))
652 return true;
653 }
654
655 return false;
656 }
657
658 /* For a null-terminated list of controllers @clist, return true if any of those
659 * controllers is already listed in the null-terminated list of hierarchies @hlist.
660 * Realistically, if one is present, all must be present.
661 */
662 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
663 {
664 int i;
665
666 if (!hlist)
667 return false;
668
669 for (i = 0; hlist[i]; i++)
670 if (controller_lists_intersect(hlist[i]->controllers, clist))
671 return true;
672
673 return false;
674 }
675
676 /* Return true if the controller @entry is found in the null-terminated list of
677 * hierarchies @hlist.
678 */
679 static bool controller_found(struct hierarchy **hlist, char *entry)
680 {
681 int i;
682
683 if (!hlist)
684 return false;
685
686 for (i = 0; hlist[i]; i++)
687 if (string_in_list(hlist[i]->controllers, entry))
688 return true;
689
690 return false;
691 }
692
693 /* Return true if all of the controllers which we require have been found. The
694 * required list is freezer and anything in lxc.cgroup.use.
695 */
696 static bool all_controllers_found(struct cgroup_ops *ops)
697 {
698 char *p;
699 char *saveptr = NULL;
700 struct hierarchy **hlist = ops->hierarchies;
701
702 if (!controller_found(hlist, "freezer")) {
703 ERROR("No freezer controller mountpoint found");
704 return false;
705 }
706
707 if (!ops->cgroup_use)
708 return true;
709
710 for (; (p = strtok_r(ops->cgroup_use, ",", &saveptr)); ops->cgroup_use = NULL)
711 if (!controller_found(hlist, p)) {
712 ERROR("No %s controller mountpoint found", p);
713 return false;
714 }
715
716 return true;
717 }
718
719 /* Get the controllers from a mountinfo line. There are other ways we could get
720 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
721 * could parse the mount options. But we simply assume that the mountpoint must
722 * be /sys/fs/cgroup/controller-list
723 */
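/* For illustration (exact fields vary with kernel and mount options), a legacy
 * hierarchy line in /proc/self/mountinfo typically looks like:
 *
 *   34 25 0:29 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,cpu,cpuacct
 *
 * We skip four space-separated fields to reach the mountpoint and read the
 * controller list from the path component after "/sys/fs/cgroup/".
 */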
724 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
725 int type)
726 {
727 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
728 * for legacy hierarchies.
729 */
730 int i;
731 char *dup, *p2, *tok;
732 char *p = line, *saveptr = NULL, *sep = ",";
733 char **aret = NULL;
734
735 for (i = 0; i < 4; i++) {
736 p = strchr(p, ' ');
737 if (!p)
738 return NULL;
739 p++;
740 }
741
742 /* Note, if we change how mountinfo works, then our caller will need to
743 * verify /sys/fs/cgroup/ in this field.
744 */
745 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
746 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
747 return NULL;
748 }
749
750 p += 15;
751 p2 = strchr(p, ' ');
752 if (!p2) {
753 ERROR("Corrupt mountinfo");
754 return NULL;
755 }
756 *p2 = '\0';
757
758 if (type == CGROUP_SUPER_MAGIC) {
759 /* strdup() here for v1 hierarchies. Otherwise strtok_r() will
760 * destroy mountpoints such as "/sys/fs/cgroup/cpu,cpuacct".
761 */
762 dup = strdup(p);
763 if (!dup)
764 return NULL;
765
766 for (tok = strtok_r(dup, sep, &saveptr); tok;
767 tok = strtok_r(NULL, sep, &saveptr))
768 must_append_controller(klist, nlist, &aret, tok);
769
770 free(dup);
771 }
772 *p2 = ' ';
773
774 return aret;
775 }
776
777 static char **cg_unified_make_empty_controller(void)
778 {
779 int newentry;
780 char **aret = NULL;
781
782 newentry = append_null_to_list((void ***)&aret);
783 aret[newentry] = NULL;
784 return aret;
785 }
786
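/* The @file argument below is expected to be a cgroup v2 "cgroup.controllers"
 * file, whose content is a single space-separated line such as (controller
 * names vary by kernel configuration):
 *
 *   cpuset cpu io memory pids
 */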
787 static char **cg_unified_get_controllers(const char *file)
788 {
789 char *buf, *tok;
790 char *saveptr = NULL, *sep = " \t\n";
791 char **aret = NULL;
792
793 buf = read_file(file);
794 if (!buf)
795 return NULL;
796
797 for (tok = strtok_r(buf, sep, &saveptr); tok;
798 tok = strtok_r(NULL, sep, &saveptr)) {
799 int newentry;
800 char *copy;
801
802 newentry = append_null_to_list((void ***)&aret);
803 copy = must_copy_string(tok);
804 aret[newentry] = copy;
805 }
806
807 free(buf);
808 return aret;
809 }
810
811 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
812 char *base_cgroup, int type)
813 {
814 struct hierarchy *new;
815 int newentry;
816
817 new = must_alloc(sizeof(*new));
818 new->controllers = clist;
819 new->mountpoint = mountpoint;
820 new->base_cgroup = base_cgroup;
821 new->fullcgpath = NULL;
822 new->version = type;
823
824 newentry = append_null_to_list((void ***)h);
825 (*h)[newentry] = new;
826 return new;
827 }
828
829 /* Get a copy of the mountpoint from @line, which is a line from
830 * /proc/self/mountinfo.
831 */
832 static char *cg_hybrid_get_mountpoint(char *line)
833 {
834 int i;
835 size_t len;
836 char *p2;
837 char *p = line, *sret = NULL;
838
839 for (i = 0; i < 4; i++) {
840 p = strchr(p, ' ');
841 if (!p)
842 return NULL;
843 p++;
844 }
845
846 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
847 return NULL;
848
849 p2 = strchr(p + 15, ' ');
850 if (!p2)
851 return NULL;
852 *p2 = '\0';
853
854 len = strlen(p);
855 sret = must_alloc(len + 1);
856 memcpy(sret, p, len);
857 sret[len] = '\0';
858 return sret;
859 }
860
861 /* Given a multi-line string, return a null-terminated copy of the current line. */
862 static char *copy_to_eol(char *p)
863 {
864 char *p2 = strchr(p, '\n'), *sret;
865 size_t len;
866
867 if (!p2)
868 return NULL;
869
870 len = p2 - p;
871 sret = must_alloc(len + 1);
872 memcpy(sret, p, len);
873 sret[len] = '\0';
874 return sret;
875 }
876
877 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
878 * /proc/self/cgroup file. Check whether controller c is present.
879 */
880 static bool controller_in_clist(char *cgline, char *c)
881 {
882 char *tok, *saveptr = NULL, *eol, *tmp;
883 size_t len;
884
885 eol = strchr(cgline, ':');
886 if (!eol)
887 return false;
888
889 len = eol - cgline;
890 tmp = alloca(len + 1);
891 memcpy(tmp, cgline, len);
892 tmp[len] = '\0';
893
894 for (tok = strtok_r(tmp, ",", &saveptr); tok;
895 tok = strtok_r(NULL, ",", &saveptr)) {
896 if (strcmp(tok, c) == 0)
897 return true;
898 }
899
900 return false;
901 }
902
903 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
904 * @controller.
905 */
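/* For example (illustrative values), a legacy entry has the form
 * "<hierarchy-id>:<controller-list>:<cgroup-path>":
 *
 *   4:cpu,cpuacct:/lxc/c1
 *
 * while the unified hierarchy uses the "0::/some/path" form handled below.
 */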
906 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
907 int type)
908 {
909 char *p = basecginfo;
910
911 for (;;) {
912 bool is_cgv2_base_cgroup = false;
913
914 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
915 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
916 is_cgv2_base_cgroup = true;
917
918 p = strchr(p, ':');
919 if (!p)
920 return NULL;
921 p++;
922
923 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
924 p = strchr(p, ':');
925 if (!p)
926 return NULL;
927 p++;
928 return copy_to_eol(p);
929 }
930
931 p = strchr(p, '\n');
932 if (!p)
933 return NULL;
934 p++;
935 }
936 }
937
938 static void must_append_string(char ***list, char *entry)
939 {
940 int newentry;
941 char *copy;
942
943 newentry = append_null_to_list((void ***)list);
944 copy = must_copy_string(entry);
945 (*list)[newentry] = copy;
946 }
947
948 static int get_existing_subsystems(char ***klist, char ***nlist)
949 {
950 FILE *f;
951 char *line = NULL;
952 size_t len = 0;
953
954 f = fopen("/proc/self/cgroup", "r");
955 if (!f)
956 return -1;
957
958 while (getline(&line, &len, f) != -1) {
959 char *p, *p2, *tok, *saveptr = NULL;
960 p = strchr(line, ':');
961 if (!p)
962 continue;
963 p++;
964 p2 = strchr(p, ':');
965 if (!p2)
966 continue;
967 *p2 = '\0';
968
969 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
970 * contains an entry of the form:
971 *
972 * 0::/some/path
973 *
974 * In this case we use "cgroup2" as controller name.
975 */
976 if ((p2 - p) == 0) {
977 must_append_string(klist, "cgroup2");
978 continue;
979 }
980
981 for (tok = strtok_r(p, ",", &saveptr); tok;
982 tok = strtok_r(NULL, ",", &saveptr)) {
983 if (strncmp(tok, "name=", 5) == 0)
984 must_append_string(nlist, tok);
985 else
986 must_append_string(klist, tok);
987 }
988 }
989
990 free(line);
991 fclose(f);
992 return 0;
993 }
994
995 static void trim(char *s)
996 {
997 size_t len;
998
999 len = strlen(s);
1000 while ((len > 1) && (s[len - 1] == '\n'))
1001 s[--len] = '\0';
1002 }
1003
1004 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
1005 {
1006 int i;
1007 struct hierarchy **it;
1008
1009 if (!ops->hierarchies) {
1010 TRACE(" No hierarchies found");
1011 return;
1012 }
1013
1014 TRACE(" Hierarchies:");
1015 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
1016 int j;
1017 char **cit;
1018
1019 TRACE(" %d: base_cgroup: %s", i, (*it)->base_cgroup ? (*it)->base_cgroup : "(null)");
1020 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
1021 TRACE(" controllers:");
1022 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
1023 TRACE(" %d: %s", j, *cit);
1024 }
1025 }
1026
1027 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
1028 char **nlist)
1029 {
1030 int k;
1031 char **it;
1032
1033 TRACE("basecginfo is:");
1034 TRACE("%s", basecginfo);
1035
1036 for (k = 0, it = klist; it && *it; it++, k++)
1037 TRACE("kernel subsystem %d: %s", k, *it);
1038
1039 for (k = 0, it = nlist; it && *it; it++, k++)
1040 TRACE("named subsystem %d: %s", k, *it);
1041 }
1042
1043 static int recursive_destroy(char *dirname)
1044 {
1045 int ret;
1046 struct dirent *direntp;
1047 DIR *dir;
1048 int r = 0;
1049
1050 dir = opendir(dirname);
1051 if (!dir)
1052 return -1;
1053
1054 while ((direntp = readdir(dir))) {
1055 char *pathname;
1056 struct stat mystat;
1057
1058 if (!strcmp(direntp->d_name, ".") ||
1059 !strcmp(direntp->d_name, ".."))
1060 continue;
1061
1062 pathname = must_make_path(dirname, direntp->d_name, NULL);
1063
1064 ret = lstat(pathname, &mystat);
1065 if (ret < 0) {
1066 if (!r)
1067 WARN("Failed to stat \"%s\"", pathname);
1068 r = -1;
1069 goto next;
1070 }
1071
1072 if (!S_ISDIR(mystat.st_mode))
1073 goto next;
1074
1075 ret = recursive_destroy(pathname);
1076 if (ret < 0)
1077 r = -1;
1078 next:
1079 free(pathname);
1080 }
1081
1082 ret = rmdir(dirname);
1083 if (ret < 0) {
1084 if (!r)
1085 SYSWARN("Failed to delete \"%s\"", dirname);
1086 r = -1;
1087 }
1088
1089 ret = closedir(dir);
1090 if (ret < 0) {
1091 if (!r)
1092 SYSWARN("Failed to delete \"%s\"", dirname);
1093 r = -1;
1094 }
1095
1096 return r;
1097 }
1098
1099 static int cgroup_rmdir(struct hierarchy **hierarchies,
1100 const char *container_cgroup)
1101 {
1102 int i;
1103
1104 if (!container_cgroup || !hierarchies)
1105 return 0;
1106
1107 for (i = 0; hierarchies[i]; i++) {
1108 int ret;
1109 struct hierarchy *h = hierarchies[i];
1110
1111 if (!h->fullcgpath)
1112 continue;
1113
1114 ret = recursive_destroy(h->fullcgpath);
1115 if (ret < 0)
1116 WARN("Failed to destroy \"%s\"", h->fullcgpath);
1117
1118 free(h->fullcgpath);
1119 h->fullcgpath = NULL;
1120 }
1121
1122 return 0;
1123 }
1124
1125 struct generic_userns_exec_data {
1126 struct hierarchy **hierarchies;
1127 const char *container_cgroup;
1128 struct lxc_conf *conf;
1129 uid_t origuid; /* target uid in parent namespace */
1130 char *path;
1131 };
1132
1133 static int cgroup_rmdir_wrapper(void *data)
1134 {
1135 int ret;
1136 struct generic_userns_exec_data *arg = data;
1137 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1138 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1139
1140 ret = setresgid(nsgid, nsgid, nsgid);
1141 if (ret < 0) {
1142 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
1143 (int)nsgid, (int)nsgid);
1144 return -1;
1145 }
1146
1147 ret = setresuid(nsuid, nsuid, nsuid);
1148 if (ret < 0) {
1149 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
1150 (int)nsuid, (int)nsuid);
1151 return -1;
1152 }
1153
1154 ret = setgroups(0, NULL);
1155 if (ret < 0 && errno != EPERM) {
1156 SYSERROR("Failed to setgroups(0, NULL)");
1157 return -1;
1158 }
1159
1160 return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
1161 }
1162
1163 static void cgfsng_destroy(struct cgroup_ops *ops, struct lxc_handler *handler)
1164 {
1165 int ret;
1166 struct generic_userns_exec_data wrap;
1167
1168 wrap.origuid = 0;
1169 wrap.container_cgroup = ops->container_cgroup;
1170 wrap.hierarchies = ops->hierarchies;
1171 wrap.conf = handler->conf;
1172
1173 if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
1174 ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
1175 "cgroup_rmdir_wrapper");
1176 else
1177 ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
1178 if (ret < 0) {
1179 WARN("Failed to destroy cgroups");
1180 return;
1181 }
1182 }
1183
1184 static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
1185 {
1186 size_t i, parts_len;
1187 char **it;
1188 size_t full_len = 0;
1189 char *add_controllers = NULL, *cgroup = NULL;
1190 char **parts = NULL;
1191 bool bret = false;
1192
1193 if (h->version != CGROUP2_SUPER_MAGIC)
1194 return true;
1195
1196 if (!h->controllers)
1197 return true;
1198
1199 /* For now we simply enable all controllers that we have detected by
1200 * creating a string like "+memory +pids +cpu +io".
1201 * TODO: In the near future we might want to support "-<controller>"
1202 * etc. but whether supporting semantics like this make sense will need
1203 * some thinking.
1204 */
1205 for (it = h->controllers; it && *it; it++) {
1206 full_len += strlen(*it) + 2;
1207 add_controllers = must_realloc(add_controllers, full_len + 1);
1208
1209 if (h->controllers[0] == *it)
1210 add_controllers[0] = '\0';
1211
1212 (void)strlcat(add_controllers, "+", full_len + 1);
1213 (void)strlcat(add_controllers, *it, full_len + 1);
1214
1215 if ((it + 1) && *(it + 1))
1216 (void)strlcat(add_controllers, " ", full_len + 1);
1217 }
1218
1219 parts = lxc_string_split(cgname, '/');
1220 if (!parts)
1221 goto on_error;
1222
1223 parts_len = lxc_array_len((void **)parts);
1224 if (parts_len > 0)
1225 parts_len--;
1226
1227 cgroup = must_make_path(h->mountpoint, h->base_cgroup, NULL);
1228 for (i = 0; i < parts_len; i++) {
1229 int ret;
1230 char *target;
1231
1232 cgroup = must_append_path(cgroup, parts[i], NULL);
1233 target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
1234 ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
1235 free(target);
1236 if (ret < 0) {
1237 SYSERROR("Could not enable \"%s\" controllers in the "
1238 "unified cgroup \"%s\"", add_controllers, cgroup);
1239 goto on_error;
1240 }
1241 }
1242
1243 bret = true;
1244
1245 on_error:
1246 lxc_free_array((void **)parts, free);
1247 free(add_controllers);
1248 free(cgroup);
1249 return bret;
1250 }
1251
1252 static bool create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1253 {
1254 int ret;
1255
1256 h->fullcgpath = must_make_path(h->mountpoint, h->base_cgroup, cgname, NULL);
1257 if (dir_exists(h->fullcgpath)) {
1258 ERROR("The cgroup \"%s\" already existed", h->fullcgpath);
1259 return false;
1260 }
1261
1262 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1263 ERROR("Failed to handle legacy cpuset controller");
1264 return false;
1265 }
1266
1267 ret = mkdir_p(h->fullcgpath, 0755);
1268 if (ret < 0) {
1269 ERROR("Failed to create cgroup \"%s\"", h->fullcgpath);
1270 return false;
1271 }
1272
1273 return cg_unified_create_cgroup(h, cgname);
1274 }
1275
1276 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname)
1277 {
1278 int ret;
1279
1280 ret = rmdir(h->fullcgpath);
1281 if (ret < 0)
1282 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", h->fullcgpath);
1283
1284 free(h->fullcgpath);
1285 h->fullcgpath = NULL;
1286 }
1287
1288 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1289 * next cgroup_pattern-1, -2, ..., -999.
1290 */
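/* For example (assuming a default pattern along the lines of "lxc/%n"), a
 * container named "c1" first tries the cgroup "lxc/c1"; if that already exists
 * in some hierarchy it retries with "lxc/c1-1", "lxc/c1-2", and so on.
 */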
1291 static inline bool cgfsng_create(struct cgroup_ops *ops,
1292 struct lxc_handler *handler)
1293 {
1294 int i;
1295 size_t len;
1296 char *container_cgroup, *offset, *tmp;
1297 int idx = 0;
1298 struct lxc_conf *conf = handler->conf;
1299
1300 if (ops->container_cgroup) {
1301 WARN("cgfsng_create called a second time: %s", ops->container_cgroup);
1302 return false;
1303 }
1304
1305 if (!conf)
1306 return false;
1307
1308 if (conf->cgroup_meta.dir)
1309 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1310 else
1311 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1312 if (!tmp) {
1313 ERROR("Failed expanding cgroup name pattern");
1314 return false;
1315 }
1316
1317 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1318 container_cgroup = must_alloc(len);
1319 (void)strlcpy(container_cgroup, tmp, len);
1320 free(tmp);
1321 offset = container_cgroup + len - 5;
1322
1323 again:
1324 if (idx == 1000) {
1325 ERROR("Too many conflicting cgroup names");
1326 goto out_free;
1327 }
1328
1329 if (idx) {
1330 int ret;
1331
1332 ret = snprintf(offset, 5, "-%d", idx);
1333 if (ret < 0 || (size_t)ret >= 5) {
1334 FILE *f = fopen("/dev/null", "w");
1335 if (f) {
1336 fprintf(f, "Workaround for GCC7 bug: "
1337 "https://gcc.gnu.org/bugzilla/"
1338 "show_bug.cgi?id=78969");
1339 fclose(f);
1340 }
1341 }
1342 }
1343
1344 for (i = 0; ops->hierarchies[i]; i++) {
1345 if (!create_path_for_hierarchy(ops->hierarchies[i], container_cgroup)) {
1346 int j;
1347 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->fullcgpath);
1348 free(ops->hierarchies[i]->fullcgpath);
1349 ops->hierarchies[i]->fullcgpath = NULL;
1350 for (j = 0; j < i; j++)
1351 remove_path_for_hierarchy(ops->hierarchies[j], container_cgroup);
1352 idx++;
1353 goto again;
1354 }
1355 }
1356
1357 ops->container_cgroup = container_cgroup;
1358
1359 return true;
1360
1361 out_free:
1362 free(container_cgroup);
1363
1364 return false;
1365 }
1366
1367 static bool cgfsng_enter(struct cgroup_ops *ops, pid_t pid)
1368 {
1369 int i, len;
1370 char pidstr[25];
1371
1372 len = snprintf(pidstr, 25, "%d", pid);
1373 if (len < 0 || len >= 25)
1374 return false;
1375
1376 for (i = 0; ops->hierarchies[i]; i++) {
1377 int ret;
1378 char *fullpath;
1379
1380 fullpath = must_make_path(ops->hierarchies[i]->fullcgpath,
1381 "cgroup.procs", NULL);
1382 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
1383 if (ret != 0) {
1384 SYSERROR("Failed to enter cgroup \"%s\"", fullpath);
1385 free(fullpath);
1386 return false;
1387 }
1388 free(fullpath);
1389 }
1390
1391 return true;
1392 }
1393
1394 static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
1395 mode_t chmod_mode)
1396 {
1397 int ret;
1398
1399 ret = chown(path, chown_uid, chown_gid);
1400 if (ret < 0) {
1401 SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
1402 return -1;
1403 }
1404
1405 ret = chmod(path, chmod_mode);
1406 if (ret < 0) {
1407 SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
1408 return -1;
1409 }
1410
1411 return 0;
1412 }
1413
1414 /* chgrp the container cgroups to container group. We leave
1415 * the container owner as cgroup owner. So we must make the
1416 * directories 775 so that the container can create sub-cgroups.
1417 *
1418 * Also chown the tasks and cgroup.procs files. Those may not
1419 * exist depending on kernel version.
1420 */
1421 static int chown_cgroup_wrapper(void *data)
1422 {
1423 int i, ret;
1424 uid_t destuid;
1425 struct generic_userns_exec_data *arg = data;
1426 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1427 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1428
1429 ret = setresgid(nsgid, nsgid, nsgid);
1430 if (ret < 0) {
1431 SYSERROR("Failed to setresgid(%d, %d, %d)",
1432 (int)nsgid, (int)nsgid, (int)nsgid);
1433 return -1;
1434 }
1435
1436 ret = setresuid(nsuid, nsuid, nsuid);
1437 if (ret < 0) {
1438 SYSERROR("Failed to setresuid(%d, %d, %d)",
1439 (int)nsuid, (int)nsuid, (int)nsuid);
1440 return -1;
1441 }
1442
1443 ret = setgroups(0, NULL);
1444 if (ret < 0 && errno != EPERM) {
1445 SYSERROR("Failed to setgroups(0, NULL)");
1446 return -1;
1447 }
1448
1449 destuid = get_ns_uid(arg->origuid);
1450
1451 for (i = 0; arg->hierarchies[i]; i++) {
1452 char *fullpath;
1453 char *path = arg->hierarchies[i]->fullcgpath;
1454
1455 ret = chowmod(path, destuid, nsgid, 0775);
1456 if (ret < 0)
1457 return -1;
1458
1459 /* Failures to chown() these are inconvenient but not
1460 * detrimental. We leave these owned by the container launcher,
1461 * so that container root can write to the files to attach. We
1462 * chmod() them 664 so that container systemd can write to the
1463 * files (which systemd in wily insists on doing).
1464 */
1465
1466 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1467 fullpath = must_make_path(path, "tasks", NULL);
1468 (void)chowmod(fullpath, destuid, nsgid, 0664);
1469 free(fullpath);
1470 }
1471
1472 fullpath = must_make_path(path, "cgroup.procs", NULL);
1473 (void)chowmod(fullpath, destuid, nsgid, 0664);
1474 free(fullpath);
1475
1476 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1477 continue;
1478
1479 fullpath = must_make_path(path, "cgroup.subtree_control", NULL);
1480 (void)chowmod(fullpath, destuid, nsgid, 0664);
1481 free(fullpath);
1482
1483 fullpath = must_make_path(path, "cgroup.threads", NULL);
1484 (void)chowmod(fullpath, destuid, nsgid, 0664);
1485 free(fullpath);
1486 }
1487
1488 return 0;
1489 }
1490
1491 static bool cgfsng_chown(struct cgroup_ops *ops, struct lxc_conf *conf)
1492 {
1493 struct generic_userns_exec_data wrap;
1494
1495 if (lxc_list_empty(&conf->id_map))
1496 return true;
1497
1498 wrap.origuid = geteuid();
1499 wrap.path = NULL;
1500 wrap.hierarchies = ops->hierarchies;
1501 wrap.conf = conf;
1502
1503 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1504 "chown_cgroup_wrapper") < 0) {
1505 ERROR("Error requesting cgroup chown in new user namespace");
1506 return false;
1507 }
1508
1509 return true;
1510 }
1511
1512 /* cgroup-full:* is done, no need to create subdirs */
1513 static bool cg_mount_needs_subdirs(int type)
1514 {
1515 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1516 return false;
1517
1518 return true;
1519 }
1520
1521 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1522 * remount controller ro if needed and bindmount the cgroupfs onto
1523 * controller/the/cg/path.
1524 */
1525 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1526 char *controllerpath, char *cgpath,
1527 const char *container_cgroup)
1528 {
1529 int ret, remount_flags;
1530 char *sourcepath;
1531 int flags = MS_BIND;
1532
1533 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1534 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1535 if (ret < 0) {
1536 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1537 controllerpath, controllerpath);
1538 return -1;
1539 }
1540
1541 remount_flags = add_required_remount_flags(controllerpath,
1542 controllerpath,
1543 flags | MS_REMOUNT);
1544 ret = mount(controllerpath, controllerpath, "cgroup",
1545 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1546 NULL);
1547 if (ret < 0) {
1548 SYSERROR("Failed to remount \"%s\" ro", controllerpath);
1549 return -1;
1550 }
1551
1552 INFO("Remounted %s read-only", controllerpath);
1553 }
1554
1555 sourcepath = must_make_path(h->mountpoint, h->base_cgroup,
1556 container_cgroup, NULL);
1557 if (type == LXC_AUTO_CGROUP_RO)
1558 flags |= MS_RDONLY;
1559
1560 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1561 if (ret < 0) {
1562 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1563 free(sourcepath);
1564 return -1;
1565 }
1566 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1567
1568 if (flags & MS_RDONLY) {
1569 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1570 flags | MS_REMOUNT);
1571 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1572 if (ret < 0) {
1573 SYSERROR("Failed to remount \"%s\" ro", cgpath);
1574 free(sourcepath);
1575 return -1;
1576 }
1577 INFO("Remounted %s read-only", cgpath);
1578 }
1579
1580 free(sourcepath);
1581 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1582 return 0;
1583 }
1584
1585 /* __cg_mount_direct
1586 *
1587 * Mount cgroup hierarchies directly without using bind-mounts. The main
1588 * use cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1589 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1590 */
1591 static int __cg_mount_direct(int type, struct hierarchy *h,
1592 const char *controllerpath)
1593 {
1594 int ret;
1595 char *controllers = NULL;
1596 char *fstype = "cgroup2";
1597 unsigned long flags = 0;
1598
1599 flags |= MS_NOSUID;
1600 flags |= MS_NOEXEC;
1601 flags |= MS_NODEV;
1602 flags |= MS_RELATIME;
1603
1604 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1605 flags |= MS_RDONLY;
1606
1607 if (h->version != CGROUP2_SUPER_MAGIC) {
1608 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1609 if (!controllers)
1610 return -ENOMEM;
1611 fstype = "cgroup";
1612 }
1613
1614 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1615 free(controllers);
1616 if (ret < 0) {
1617 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1618 return -1;
1619 }
1620
1621 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1622 return 0;
1623 }
1624
1625 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1626 const char *controllerpath)
1627 {
1628 return __cg_mount_direct(type, h, controllerpath);
1629 }
1630
1631 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1632 const char *controllerpath)
1633 {
1634 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1635 return 0;
1636
1637 return __cg_mount_direct(type, h, controllerpath);
1638 }
1639
1640 static bool cgfsng_mount(struct cgroup_ops *ops, struct lxc_handler *handler,
1641 const char *root, int type)
1642 {
1643 int i, ret;
1644 char *tmpfspath = NULL;
1645 bool has_cgns = false, retval = false, wants_force_mount = false;
1646
1647 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1648 return true;
1649
1650 if (type & LXC_AUTO_CGROUP_FORCE) {
1651 type &= ~LXC_AUTO_CGROUP_FORCE;
1652 wants_force_mount = true;
1653 }
1654
1655 if (!wants_force_mount) {
1656 if (!lxc_list_empty(&handler->conf->keepcaps))
1657 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1658 else
1659 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1660 }
1661
1662 has_cgns = cgns_supported();
1663 if (has_cgns && !wants_force_mount)
1664 return true;
1665
1666 if (type == LXC_AUTO_CGROUP_NOSPEC)
1667 type = LXC_AUTO_CGROUP_MIXED;
1668 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1669 type = LXC_AUTO_CGROUP_FULL_MIXED;
1670
1671 /* Mount tmpfs */
1672 tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
1673 ret = safe_mount(NULL, tmpfspath, "tmpfs",
1674 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1675 "size=10240k,mode=755", root);
1676 if (ret < 0)
1677 goto on_error;
1678
1679 for (i = 0; ops->hierarchies[i]; i++) {
1680 char *controllerpath, *path2;
1681 struct hierarchy *h = ops->hierarchies[i];
1682 char *controller = strrchr(h->mountpoint, '/');
1683
1684 if (!controller)
1685 continue;
1686 controller++;
1687
1688 controllerpath = must_make_path(tmpfspath, controller, NULL);
1689 if (dir_exists(controllerpath)) {
1690 free(controllerpath);
1691 continue;
1692 }
1693
1694 ret = mkdir(controllerpath, 0755);
1695 if (ret < 0) {
1696 SYSERROR("Error creating cgroup path: %s", controllerpath);
1697 free(controllerpath);
1698 goto on_error;
1699 }
1700
1701 if (has_cgns && wants_force_mount) {
1702 /* If cgroup namespaces are supported but the container
1703 * will not have CAP_SYS_ADMIN after it has started we
1704 * need to mount the cgroups manually.
1705 */
1706 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1707 free(controllerpath);
1708 if (ret < 0)
1709 goto on_error;
1710
1711 continue;
1712 }
1713
1714 ret = cg_mount_cgroup_full(type, h, controllerpath);
1715 if (ret < 0) {
1716 free(controllerpath);
1717 goto on_error;
1718 }
1719
1720 if (!cg_mount_needs_subdirs(type)) {
1721 free(controllerpath);
1722 continue;
1723 }
1724
1725 path2 = must_make_path(controllerpath, h->base_cgroup,
1726 ops->container_cgroup, NULL);
1727 ret = mkdir_p(path2, 0755);
1728 if (ret < 0) {
1729 free(controllerpath);
1730 free(path2);
1731 goto on_error;
1732 }
1733
1734 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1735 path2, ops->container_cgroup);
1736 free(controllerpath);
1737 free(path2);
1738 if (ret < 0)
1739 goto on_error;
1740 }
1741 retval = true;
1742
1743 on_error:
1744 free(tmpfspath);
1745 return retval;
1746 }
1747
1748 static int recursive_count_nrtasks(char *dirname)
1749 {
1750 struct dirent *direntp;
1751 DIR *dir;
1752 int count = 0, ret;
1753 char *path;
1754
1755 dir = opendir(dirname);
1756 if (!dir)
1757 return 0;
1758
1759 while ((direntp = readdir(dir))) {
1760 struct stat mystat;
1761
1762 if (!strcmp(direntp->d_name, ".") ||
1763 !strcmp(direntp->d_name, ".."))
1764 continue;
1765
1766 path = must_make_path(dirname, direntp->d_name, NULL);
1767
1768 if (lstat(path, &mystat))
1769 goto next;
1770
1771 if (!S_ISDIR(mystat.st_mode))
1772 goto next;
1773
1774 count += recursive_count_nrtasks(path);
1775 next:
1776 free(path);
1777 }
1778
1779 path = must_make_path(dirname, "cgroup.procs", NULL);
1780 ret = lxc_count_file_lines(path);
1781 if (ret != -1)
1782 count += ret;
1783 free(path);
1784
1785 (void)closedir(dir);
1786
1787 return count;
1788 }
1789
1790 static int cgfsng_nrtasks(struct cgroup_ops *ops)
1791 {
1792 int count;
1793 char *path;
1794
1795 if (!ops->container_cgroup || !ops->hierarchies)
1796 return -1;
1797
1798 path = must_make_path(ops->hierarchies[0]->fullcgpath, NULL);
1799 count = recursive_count_nrtasks(path);
1800 free(path);
1801 return count;
1802 }
1803
1804 /* Only root needs to escape to the cgroup of its init. */
1805 static bool cgfsng_escape(const struct cgroup_ops *ops)
1806 {
1807 int i;
1808
1809 if (geteuid())
1810 return true;
1811
1812 for (i = 0; ops->hierarchies[i]; i++) {
1813 int ret;
1814 char *fullpath;
1815
1816 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1817 ops->hierarchies[i]->base_cgroup,
1818 "cgroup.procs", NULL);
1819 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1820 if (ret != 0) {
1821 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1822 free(fullpath);
1823 return false;
1824 }
1825 free(fullpath);
1826 }
1827
1828 return true;
1829 }
1830
1831 static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1832 {
1833 int i;
1834
1835 for (i = 0; ops->hierarchies[i]; i++)
1836 ;
1837
1838 return i;
1839 }
1840
1841 static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
1842 {
1843 int i;
1844
1845 /* sanity check n */
1846 for (i = 0; i <= n; i++)
1847 if (!ops->hierarchies[i])
1848 return false;
1849
1850 *out = ops->hierarchies[n]->controllers;
1851
1852 return true;
1853 }
1854
1855 #define THAWED "THAWED"
1856 #define THAWED_LEN (strlen(THAWED))
1857
1858 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
1859 * to be adapted.
1860 */
1861 static bool cgfsng_unfreeze(struct cgroup_ops *ops)
1862 {
1863 int ret;
1864 char *fullpath;
1865 struct hierarchy *h;
1866
1867 h = get_hierarchy(ops, "freezer");
1868 if (!h)
1869 return false;
1870
1871 fullpath = must_make_path(h->fullcgpath, "freezer.state", NULL);
1872 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
1873 free(fullpath);
1874 if (ret < 0)
1875 return false;
1876
1877 return true;
1878 }
1879
1880 static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
1881 const char *controller)
1882 {
1883 struct hierarchy *h;
1884
1885 h = get_hierarchy(ops, controller);
1886 if (!h) {
1887 WARN("Failed to find hierarchy for controller \"%s\"",
1888 controller ? controller : "(null)");
1889 return NULL;
1890 }
1891
1892 return h->fullcgpath ? h->fullcgpath + strlen(h->mountpoint) : NULL;
1893 }
1894
1895 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
1896 * which must be freed by the caller.
1897 */
1898 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
1899 const char *inpath,
1900 const char *filename)
1901 {
1902 return must_make_path(h->mountpoint, inpath, filename, NULL);
1903 }
1904
1905 /* Technically, we're always at a delegation boundary here (This is especially
1906 * true when cgroup namespaces are available.). The reasoning is that in order
1907 * for us to have been able to start a container in the first place the root
1908 * cgroup must have been a leaf node. Now, either the container's init system
1909 * has populated the cgroup and kept it as a leaf node or it has created
1910 * subtrees. In the former case we will simply attach to the leaf node we
1911 * created when we started the container; in the latter case we create our own
1912 * cgroup for the attaching process.
1913 */
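/* Concretely: we first try to write the pid to <container-cgroup>/cgroup.procs;
 * if that fails with EBUSY because the cgroup has become a non-leaf node, we
 * create <container-cgroup>/lxc, then lxc-1, lxc-2, ..., and attach there
 * instead.
 */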
1914 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
1915 const char *lxcpath, const char *pidstr,
1916 size_t pidstr_len, const char *controller)
1917 {
1918 int ret;
1919 size_t len;
1920 int fret = -1, idx = 0;
1921 char *base_path = NULL, *container_cgroup = NULL, *full_path = NULL;
1922
1923 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
1924 /* not running */
1925 if (!container_cgroup)
1926 return 0;
1927
1928 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
1929 full_path = must_make_path(base_path, "cgroup.procs", NULL);
1930 /* cgroup is populated */
1931 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
1932 if (ret < 0 && errno != EBUSY)
1933 goto on_error;
1934
1935 if (ret == 0)
1936 goto on_success;
1937
1938 free(full_path);
1939
1940 len = strlen(base_path) + sizeof("/lxc-1000") - 1 +
1941 sizeof("/cgroup-procs") - 1;
1942 full_path = must_alloc(len + 1);
1943 do {
1944 if (idx)
1945 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
1946 base_path, idx);
1947 else
1948 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
1949 if (ret < 0 || (size_t)ret >= len + 1)
1950 goto on_error;
1951
1952 ret = mkdir_p(full_path, 0755);
1953 if (ret < 0 && errno != EEXIST)
1954 goto on_error;
1955
1956 (void)strlcat(full_path, "/cgroup.procs", len + 1);
1957 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
1958 if (ret == 0)
1959 goto on_success;
1960
1961 /* this is a non-leaf node */
1962 if (errno != EBUSY)
1963 goto on_error;
1964
1965 } while (++idx > 0 && idx < 1000);
1966
1967 on_success:
1968 if (idx < 1000)
1969 fret = 0;
1970
1971 on_error:
1972 free(base_path);
1973 free(container_cgroup);
1974 free(full_path);
1975
1976 return fret;
1977 }
1978
1979 static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
1980 const char *lxcpath, pid_t pid)
1981 {
1982 int i, len, ret;
1983 char pidstr[25];
1984
1985 len = snprintf(pidstr, 25, "%d", pid);
1986 if (len < 0 || len >= 25)
1987 return false;
1988
1989 for (i = 0; ops->hierarchies[i]; i++) {
1990 char *path;
1991 char *fullpath = NULL;
1992 struct hierarchy *h = ops->hierarchies[i];
1993
1994 if (h->version == CGROUP2_SUPER_MAGIC) {
1995 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
1996 h->controllers[0]);
1997 if (ret < 0)
1998 return false;
1999
2000 continue;
2001 }
2002
2003 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
2004 /* not running */
2005 if (!path)
2006 continue;
2007
2008 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2009 free(path);
2010 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2011 if (ret < 0) {
2012 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
2013 free(fullpath);
2014 return false;
2015 }
2016 free(fullpath);
2017 }
2018
2019 return true;
2020 }
2021
2022 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2023 * don't have a cgroup_data set up, so we ask the running container through the
2024 * commands API for the cgroup path.
2025 */
2026 static int cgfsng_get(struct cgroup_ops *ops, const char *filename, char *value,
2027 size_t len, const char *name, const char *lxcpath)
2028 {
2029 int ret = -1;
2030 size_t controller_len;
2031 char *controller, *p, *path;
2032 struct hierarchy *h;
2033
2034 controller_len = strlen(filename);
2035 controller = alloca(controller_len + 1);
2036 (void)strlcpy(controller, filename, controller_len + 1);
2037
2038 p = strchr(controller, '.');
2039 if (p)
2040 *p = '\0';
2041
2042 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2043 /* not running */
2044 if (!path)
2045 return -1;
2046
2047 h = get_hierarchy(ops, controller);
2048 if (h) {
2049 char *fullpath;
2050
2051 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2052 ret = lxc_read_from_file(fullpath, value, len);
2053 free(fullpath);
2054 }
2055 free(path);
2056
2057 return ret;
2058 }
2059
2060 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2061 * don't have a cgroup_data set up, so we ask the running container through the
2062 * commands API for the cgroup path.
2063 */
2064 static int cgfsng_set(struct cgroup_ops *ops, const char *filename,
2065 const char *value, const char *name, const char *lxcpath)
2066 {
2067 int ret = -1;
2068 size_t controller_len;
2069 char *controller, *p, *path;
2070 struct hierarchy *h;
2071
2072 controller_len = strlen(filename);
2073 controller = alloca(controller_len + 1);
2074 (void)strlcpy(controller, filename, controller_len + 1);
2075
2076 p = strchr(controller, '.');
2077 if (p)
2078 *p = '\0';
2079
2080 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2081 /* not running */
2082 if (!path)
2083 return -1;
2084
2085 h = get_hierarchy(ops, controller);
2086 if (h) {
2087 char *fullpath;
2088
2089 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2090 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2091 free(fullpath);
2092 }
2093 free(path);
2094
2095 return ret;
2096 }
2097
2098 /* take devices cgroup line
2099 * /dev/foo rwx
2100 * and convert it to a valid
2101 * type major:minor mode
2102 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2103 * the output.
2104 */
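/* For example, "/dev/null rwm" becomes "c 1:3 rwm" (assuming the conventional
 * major/minor numbers for /dev/null).
 */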
2105 static int convert_devpath(const char *invalue, char *dest)
2106 {
2107 int n_parts;
2108 char *p, *path, type;
2109 unsigned long minor, major;
2110 struct stat sb;
2111 int ret = -EINVAL;
2112 char *mode = NULL;
2113
2114 path = must_copy_string(invalue);
2115
2116 /* Read path followed by mode. Ignore any trailing text.
2117 * A ' # comment' would be legal. Technically other text is not
2118 * legal; we could check for that if we cared to.
2119 */
2120 for (n_parts = 1, p = path; *p && n_parts < 3; p++) {
2121 if (*p != ' ')
2122 continue;
2123 *p = '\0';
2124
2125 if (n_parts != 1)
2126 break;
2127 p++;
2128 n_parts++;
2129
2130 while (*p == ' ')
2131 p++;
2132
2133 mode = p;
2134
2135 if (*p == '\0')
2136 goto out;
2137 }
2138
2139 if (n_parts == 1)
2140 goto out;
2141
2142 ret = stat(path, &sb);
2143 if (ret < 0)
2144 goto out;
2145
2146 mode_t m = sb.st_mode & S_IFMT;
2147 switch (m) {
2148 case S_IFBLK:
2149 type = 'b';
2150 break;
2151 case S_IFCHR:
2152 type = 'c';
2153 break;
2154 default:
2155 ERROR("Unsupported device type %i for \"%s\"", m, path);
2156 ret = -EINVAL;
2157 goto out;
2158 }
2159
2160 major = MAJOR(sb.st_rdev);
2161 minor = MINOR(sb.st_rdev);
2162 ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
2163 if (ret < 0 || ret >= 50) {
2164 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2165 "chars)", type, major, minor, mode);
2166 ret = -ENAMETOOLONG;
2167 goto out;
2168 }
2169 ret = 0;
2170
2171 out:
2172 free(path);
2173 return ret;
2174 }
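
/* Worked example (illustrative): given the input line
 *
 *	"/dev/null rwm"
 *
 * stat() reports /dev/null as a character device with major 1 and minor 3 on
 * typical Linux systems, so convert_devpath() would write
 *
 *	"c 1:3 rwm"
 *
 * into @dest, which is the format the legacy devices controller expects in
 * devices.allow and devices.deny.
 */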
2175
2176 /* Called from setup_limits - here we have the container's cgroup_data because
2177 * we created the cgroups.
2178 */
2179 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2180 const char *value)
2181 {
2182 size_t len;
2183 char *fullpath, *p;
2184 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2185 char converted_value[50];
2186 struct hierarchy *h;
2187 int ret = 0;
2188 char *controller = NULL;
2189
2190 len = strlen(filename);
2191 controller = alloca(len + 1);
2192 (void)strlcpy(controller, filename, len + 1);
2193
2194 p = strchr(controller, '.');
2195 if (p)
2196 *p = '\0';
2197
2198 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2199 ret = convert_devpath(value, converted_value);
2200 if (ret < 0)
2201 return ret;
2202 value = converted_value;
2203 }
2204
2205 h = get_hierarchy(ops, controller);
2206 if (!h) {
2207 ERROR("Failed to setup limits for the \"%s\" controller. "
2208 "The controller seems to be unused by \"cgfsng\" cgroup "
2209 "driver or not enabled on the cgroup hierarchy",
2210 controller);
2211 errno = ENOENT;
2212 return -ENOENT;
2213 }
2214
2215 fullpath = must_make_path(h->fullcgpath, filename, NULL);
2216 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2217 free(fullpath);
2218 return ret;
2219 }
2220
2221 static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
2222 struct lxc_list *cgroup_settings,
2223 bool do_devices)
2224 {
2225 struct lxc_list *iterator, *next, *sorted_cgroup_settings;
2226 struct lxc_cgroup *cg;
2227 bool ret = false;
2228
2229 if (lxc_list_empty(cgroup_settings))
2230 return true;
2231
2232 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2233 if (!sorted_cgroup_settings)
2234 return false;
2235
2236 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2237 cg = iterator->elem;
2238
2239 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2240 if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
2241 if (do_devices && (errno == EACCES || errno == EPERM)) {
2242 WARN("Failed to set \"%s\" to \"%s\"",
2243 cg->subsystem, cg->value);
2244 continue;
2245 }
2246 WARN("Failed to set \"%s\" to \"%s\"",
2247 cg->subsystem, cg->value);
2248 goto out;
2249 }
2250 DEBUG("Set controller \"%s\" set to \"%s\"",
2251 cg->subsystem, cg->value);
2252 }
2253 }
2254
2255 ret = true;
2256 INFO("Limits for the legacy cgroup hierarchies have been setup");
2257 out:
2258 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2259 lxc_list_del(iterator);
2260 free(iterator);
2261 }
2262 free(sorted_cgroup_settings);
2263 return ret;
2264 }
2265
2266 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2267 struct lxc_list *cgroup_settings)
2268 {
2269 struct lxc_list *iterator;
2270 struct hierarchy *h = ops->unified;
2271
2272 if (lxc_list_empty(cgroup_settings))
2273 return true;
2274
2275 if (!h)
2276 return false;
2277
2278 lxc_list_for_each(iterator, cgroup_settings) {
2279 int ret;
2280 char *fullpath;
2281 struct lxc_cgroup *cg = iterator->elem;
2282
2283 fullpath = must_make_path(h->fullcgpath, cg->subsystem, NULL);
2284 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2285 free(fullpath);
2286 if (ret < 0) {
2287 SYSERROR("Failed to set \"%s\" to \"%s\"",
2288 cg->subsystem, cg->value);
2289 return false;
2290 }
2291 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2292 }
2293
2294 INFO("Limits for the unified cgroup hierarchy have been setup");
2295 return true;
2296 }
2297
2298 static bool cgfsng_setup_limits(struct cgroup_ops *ops, struct lxc_conf *conf,
2299 bool do_devices)
2300 {
2301 bool bret;
2302
2303 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2304 if (!bret)
2305 return false;
2306
2307 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2308 }
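
/* Illustrative example (assumed container configuration, not taken from this
 * file): entries like the following end up in conf->cgroup (legacy,
 * "lxc.cgroup.") and conf->cgroup2 (unified, "lxc.cgroup2.") and are applied
 * by cgfsng_setup_limits() above:
 *
 *	lxc.cgroup.memory.limit_in_bytes = 256M
 *	lxc.cgroup.devices.deny = a
 *	lxc.cgroup.devices.allow = c 1:3 rwm
 *	lxc.cgroup2.memory.max = 268435456
 *
 * Device entries are filtered by the do_devices flag so the caller can apply
 * them in a separate pass from the other legacy settings.
 */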
2309
2310 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
2311 * mountpoints and current cgroups, and stores it in @ops.
2312 */
2313 static bool cg_hybrid_init(struct cgroup_ops *ops)
2314 {
2315 int ret;
2316 char *basecginfo;
2317 bool will_escape;
2318 FILE *f;
2319 size_t len = 0;
2320 char *line = NULL;
2321 char **klist = NULL, **nlist = NULL;
2322
2323 /* Root-spawned containers escape the current cgroup, so use init's
2324 * cgroups as our base in that case.
2325 */
2326 will_escape = (geteuid() == 0);
2327 if (will_escape)
2328 basecginfo = read_file("/proc/1/cgroup");
2329 else
2330 basecginfo = read_file("/proc/self/cgroup");
2331 if (!basecginfo)
2332 return false;
2333
2334 ret = get_existing_subsystems(&klist, &nlist);
2335 if (ret < 0) {
2336 ERROR("Failed to retrieve available legacy cgroup controllers");
2337 free(basecginfo);
2338 return false;
2339 }
2340
2341 f = fopen("/proc/self/mountinfo", "r");
2342 if (!f) {
2343 ERROR("Failed to open \"/proc/self/mountinfo\"");
2344 free(basecginfo);
2345 return false;
2346 }
2347
2348 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
2349
2350 while (getline(&line, &len, f) != -1) {
2351 int type;
2352 bool writeable;
2353 struct hierarchy *new;
2354 char *base_cgroup = NULL, *mountpoint = NULL;
2355 char **controller_list = NULL;
2356
2357 type = get_cgroup_version(line);
2358 if (type == 0)
2359 continue;
2360
2361 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
2362 continue;
2363
2364 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
2365 if (type == CGROUP2_SUPER_MAGIC)
2366 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2367 else if (type == CGROUP_SUPER_MAGIC)
2368 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
2369 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
2370 if (type == CGROUP_SUPER_MAGIC)
2371 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2372 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
2373 if (type == CGROUP2_SUPER_MAGIC)
2374 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2375 }
2376
2377 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
2378 if (!controller_list && type == CGROUP_SUPER_MAGIC)
2379 continue;
2380
2381 if (type == CGROUP_SUPER_MAGIC)
2382 if (controller_list_is_dup(ops->hierarchies, controller_list))
2383 goto next;
2384
2385 mountpoint = cg_hybrid_get_mountpoint(line);
2386 if (!mountpoint) {
2387 ERROR("Failed parsing mountpoint from \"%s\"", line);
2388 goto next;
2389 }
2390
2391 if (type == CGROUP_SUPER_MAGIC)
2392 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
2393 else
2394 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
2395 if (!base_cgroup) {
2396 ERROR("Failed to find current cgroup");
2397 goto next;
2398 }
2399
2400 trim(base_cgroup);
2401 prune_init_scope(base_cgroup);
2402 if (type == CGROUP2_SUPER_MAGIC)
2403 writeable = test_writeable_v2(mountpoint, base_cgroup);
2404 else
2405 writeable = test_writeable_v1(mountpoint, base_cgroup);
2406 if (!writeable)
2407 goto next;
2408
2409 if (type == CGROUP2_SUPER_MAGIC) {
2410 char *cgv2_ctrl_path;
2411
2412 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
2413 "cgroup.controllers",
2414 NULL);
2415
2416 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
2417 free(cgv2_ctrl_path);
2418 if (!controller_list) {
2419 controller_list = cg_unified_make_empty_controller();
2420 TRACE("No controllers are enabled for "
2421 "delegation in the unified hierarchy");
2422 }
2423 }
2424
2425 new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
2426 if (type == CGROUP2_SUPER_MAGIC && !ops->unified)
2427 ops->unified = new;
2428
2429 continue;
2430
2431 next:
2432 free_string_list(controller_list);
2433 free(mountpoint);
2434 free(base_cgroup);
2435 }
2436
2437 free_string_list(klist);
2438 free_string_list(nlist);
2439
2440 free(basecginfo);
2441
2442 fclose(f);
2443 free(line);
2444
2445 TRACE("Writable cgroup hierarchies:");
2446 lxc_cgfsng_print_hierarchies(ops);
2447
2448 /* Verify that all controllers in lxc.cgroup.use and all crucial
2449 * controllers are accounted for.
2450 */
2451 if (!all_controllers_found(ops))
2452 return false;
2453
2454 return true;
2455 }
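
/* Illustrative input (abridged) that cg_hybrid_init() parses on a hybrid
 * host; the exact fields vary per system:
 *
 * /proc/self/mountinfo:
 *	... /sys/fs/cgroup/memory ... - cgroup cgroup rw,...,memory
 *	... /sys/fs/cgroup/cpu,cpuacct ... - cgroup cgroup rw,...,cpu,cpuacct
 *	... /sys/fs/cgroup/unified ... - cgroup2 cgroup2 rw,...
 *
 * /proc/1/cgroup (or /proc/self/cgroup when unprivileged):
 *	8:memory:/
 *	4:cpu,cpuacct:/
 *	0::/init.scope
 *
 * Each writable legacy mount becomes a struct hierarchy carrying its
 * controller list; a cgroup2 mount, if present, is additionally recorded in
 * ops->unified.
 */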
2456
2457 static int cg_is_pure_unified(void)
2458 {
2459
2460 int ret;
2461 struct statfs fs;
2462
2463 ret = statfs("/sys/fs/cgroup", &fs);
2464 if (ret < 0)
2465 return -ENOMEDIUM;
2466
2467 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2468 return CGROUP2_SUPER_MAGIC;
2469
2470 return 0;
2471 }
2472
2473 /* Get the current cgroup from /proc/self/cgroup (or /proc/1/cgroup when running as root) for the cgroupfs v2 hierarchy. */
2474 static char *cg_unified_get_current_cgroup(void)
2475 {
2476 char *basecginfo, *base_cgroup;
2477 bool will_escape;
2478 char *copy = NULL;
2479
2480 will_escape = (geteuid() == 0);
2481 if (will_escape)
2482 basecginfo = read_file("/proc/1/cgroup");
2483 else
2484 basecginfo = read_file("/proc/self/cgroup");
2485 if (!basecginfo)
2486 return NULL;
2487
2488 base_cgroup = strstr(basecginfo, "0::/");
2489 if (!base_cgroup)
2490 goto cleanup_on_err;
2491
2492 base_cgroup = base_cgroup + 3;
2493 copy = copy_to_eol(base_cgroup);
2494 if (!copy)
2495 goto cleanup_on_err;
2496
2497 cleanup_on_err:
2498 free(basecginfo);
2499 if (copy)
2500 trim(copy);
2501
2502 return copy;
2503 }
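
/* Example (illustrative): on a pure unified host /proc/self/cgroup typically
 * contains a single line such as
 *
 *	0::/user.slice/user-1000.slice/session-1.scope
 *
 * from which cg_unified_get_current_cgroup() returns a copy of
 * "/user.slice/user-1000.slice/session-1.scope".
 */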
2504
2505 static int cg_unified_init(struct cgroup_ops *ops)
2506 {
2507 int ret;
2508 char *mountpoint, *subtree_path;
2509 char **delegatable;
2510 char *base_cgroup = NULL;
2511
2512 ret = cg_is_pure_unified();
2513 if (ret == -ENOMEDIUM)
2514 return -ENOMEDIUM;
2515
2516 if (ret != CGROUP2_SUPER_MAGIC)
2517 return 0;
2518
2519 base_cgroup = cg_unified_get_current_cgroup();
2520 if (!base_cgroup)
2521 return -EINVAL;
2522 prune_init_scope(base_cgroup);
2523
2524 /* We assume that we have already been given controllers to delegate
2525 * further down the hierarchy. If not, it is up to the user to delegate
2526 * them to us.
2527 */
2528 mountpoint = must_copy_string("/sys/fs/cgroup");
2529 subtree_path = must_make_path(mountpoint, base_cgroup,
2530 "cgroup.subtree_control", NULL);
2531 delegatable = cg_unified_get_controllers(subtree_path);
2532 free(subtree_path);
2533 if (!delegatable)
2534 delegatable = cg_unified_make_empty_controller();
2535 if (!delegatable[0])
2536 TRACE("No controllers are enabled for delegation");
2537
2538 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2539 * we should verify that here. The reason I'm not doing it right now is
2540 * that I'm not convinced that lxc.cgroup.use will be the future since
2541 * it is a global property. I would much rather have an option that lets
2542 * you request controllers per container.
2543 */
2544
2545 add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2546
2547 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2548 return CGROUP2_SUPER_MAGIC;
2549 }
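
/* Example (illustrative): if the base cgroup's cgroup.subtree_control reads
 *
 *	cpu io memory pids
 *
 * then those controllers are treated as delegatable and stored with the
 * unified hierarchy; an empty file simply means nothing has been delegated
 * to us yet.
 */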
2550
2551 static bool cg_init(struct cgroup_ops *ops)
2552 {
2553 int ret;
2554 const char *tmp;
2555
2556 tmp = lxc_global_config_value("lxc.cgroup.use");
2557 if (tmp)
2558 ops->cgroup_use = must_copy_string(tmp);
2559
2560 ret = cg_unified_init(ops);
2561 if (ret < 0)
2562 return false;
2563
2564 if (ret == CGROUP2_SUPER_MAGIC)
2565 return true;
2566
2567 return cg_hybrid_init(ops);
2568 }
2569
2570 static bool cgfsng_data_init(struct cgroup_ops *ops)
2571 {
2572 const char *cgroup_pattern;
2573
2574 /* copy system-wide cgroup information */
2575 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2576 if (!cgroup_pattern) {
2577 /* lxc.cgroup.pattern is only NULL on error. */
2578 ERROR("Failed to retrieve cgroup pattern");
2579 return false;
2580 }
2581 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2582
2583 return true;
2584 }
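
/* Illustrative note: lxc.cgroup.pattern commonly defaults to something like
 *
 *	lxc.cgroup.pattern = lxc/%n
 *
 * where %n is replaced with the container name, so a container "c1" would be
 * placed in .../lxc/c1 in each writable hierarchy. The actual default depends
 * on how LXC was configured at build time.
 */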
2585
2586 struct cgroup_ops *cgfsng_ops_init(void)
2587 {
2588 struct cgroup_ops *cgfsng_ops;
2589
2590 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2591 if (!cgfsng_ops)
2592 return NULL;
2593
2594 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2595 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2596
2597 if (!cg_init(cgfsng_ops)) {
2598 free(cgfsng_ops);
2599 return NULL;
2600 }
2601
2602 cgfsng_ops->data_init = cgfsng_data_init;
2603 cgfsng_ops->destroy = cgfsng_destroy;
2604 cgfsng_ops->create = cgfsng_create;
2605 cgfsng_ops->enter = cgfsng_enter;
2606 cgfsng_ops->escape = cgfsng_escape;
2607 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2608 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2609 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2610 cgfsng_ops->get = cgfsng_get;
2611 cgfsng_ops->set = cgfsng_set;
2612 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2613 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2614 cgfsng_ops->driver = "cgfsng";
2615 cgfsng_ops->version = "1.0.0";
2616 cgfsng_ops->attach = cgfsng_attach;
2617 cgfsng_ops->chown = cgfsng_chown;
2618 cgfsng_ops->mount = cgfsng_mount;
2619 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2620
2621 return cgfsng_ops;
2622 }
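
/* Minimal usage sketch (illustrative; error handling and the lxc handler or
 * conf objects come from the caller's context):
 *
 *	struct cgroup_ops *ops = cgfsng_ops_init();
 *	if (!ops)
 *		return -1;
 *
 *	if (!ops->data_init(ops))
 *		return -1;
 *
 *	// During container start the caller would then invoke, in order,
 *	// ops->create(), ops->enter(), ops->chown(), ops->setup_limits(), ...
 */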