/* Source: mirror_lxc.git (git.proxmox.com) — src/lxc/cgroups/cgfsng.c,
 * state after merge of PR #2827 (brauner/2019-02-07/auto_cleanup).
 */
1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
36
37 #ifndef _GNU_SOURCE
38 #define _GNU_SOURCE 1
39 #endif
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <grp.h>
44 #include <linux/kdev_t.h>
45 #include <linux/types.h>
46 #include <stdint.h>
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <string.h>
50 #include <sys/types.h>
51 #include <unistd.h>
52
53 #include "caps.h"
54 #include "cgroup.h"
55 #include "cgroup_utils.h"
56 #include "commands.h"
57 #include "conf.h"
58 #include "config.h"
59 #include "log.h"
60 #include "macro.h"
61 #include "memory_utils.h"
62 #include "storage/storage.h"
63 #include "utils.h"
64
65 #ifndef HAVE_STRLCPY
66 #include "include/strlcpy.h"
67 #endif
68
69 #ifndef HAVE_STRLCAT
70 #include "include/strlcat.h"
71 #endif
72
73 lxc_log_define(cgfsng, cgroup);
74
75 static void free_string_list(char **clist)
76 {
77 int i;
78
79 if (!clist)
80 return;
81
82 for (i = 0; clist[i]; i++)
83 free(clist[i]);
84
85 free(clist);
86 }
87
/* Given a pointer to a null-terminated array of pointers, realloc to add one
 * entry, and point the new entry to NULL. Do not fail. Return the index to the
 * second-to-last entry - that is, the one which is now available for use
 * (keeping the list null-terminated).
 */
static int append_null_to_list(void ***list)
{
	int idx = 0;

	/* Count existing entries up to the NULL terminator. */
	if (*list)
		while ((*list)[idx])
			idx++;

	/* One new usable slot plus the moved NULL terminator. */
	*list = must_realloc(*list, (idx + 2) * sizeof(void *));
	(*list)[idx + 1] = NULL;
	return idx;
}
105
/* Given a null-terminated array of strings, check whether @entry is one of the
 * strings. A NULL @list matches nothing.
 */
static bool string_in_list(char **list, const char *entry)
{
	if (!list)
		return false;

	for (char **it = list; *it; it++)
		if (strcmp(*it, entry) == 0)
			return true;

	return false;
}
122
/* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
 * "name=systemd". Do not fail; the caller owns the returned buffer.
 */
static char *cg_legacy_must_prefix_named(char *entry)
{
	size_t entry_len = strlen(entry);
	char *prefixed;

	prefixed = must_realloc(NULL, entry_len + STRLITERALLEN("name=") + 1);
	memcpy(prefixed, "name=", STRLITERALLEN("name="));
	/* entry_len + 1 also copies entry's NUL terminator. */
	memcpy(prefixed + STRLITERALLEN("name="), entry, entry_len + 1);

	return prefixed;
}
140
/* Append an entry to the clist. Do not fail. @clist must be NULL the first time
 * we are called.
 *
 * We also handle named subsystems here. Any controller which is not a kernel
 * subsystem, we prefix "name=". Any which is both a kernel and named subsystem,
 * we refuse to use because we're not sure which we have here.
 * (TODO: We could work around this in some cases by just remounting to be
 * unambiguous, or by comparing mountpoint contents with current cgroup.)
 *
 * The last entry will always be NULL.
 */
static void must_append_controller(char **klist, char **nlist, char ***clist,
				   char *entry)
{
	int slot;
	char *dup;

	/* Refuse entries that exist both as kernel and named subsystem. */
	if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
		ERROR("Refusing to use ambiguous controller \"%s\"", entry);
		ERROR("It is both a named and kernel subsystem");
		return;
	}

	slot = append_null_to_list((void ***)clist);

	/* Already prefixed or a known kernel subsystem: copy verbatim;
	 * otherwise prepend "name=".
	 */
	if (strncmp(entry, "name=", 5) == 0 || string_in_list(klist, entry))
		dup = must_copy_string(entry);
	else
		dup = cg_legacy_must_prefix_named(entry);

	(*clist)[slot] = dup;
}
175
176 /* Given a handler's cgroup data, return the struct hierarchy for the controller
177 * @c, or NULL if there is none.
178 */
179 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *controller)
180 {
181 int i;
182
183 errno = ENOENT;
184
185 if (!ops->hierarchies) {
186 TRACE("There are no useable cgroup controllers");
187 return NULL;
188 }
189
190 for (i = 0; ops->hierarchies[i]; i++) {
191 if (!controller) {
192 /* This is the empty unified hierarchy. */
193 if (ops->hierarchies[i]->controllers &&
194 !ops->hierarchies[i]->controllers[0])
195 return ops->hierarchies[i];
196
197 continue;
198 }
199
200 if (string_in_list(ops->hierarchies[i]->controllers, controller))
201 return ops->hierarchies[i];
202 }
203
204 if (controller)
205 WARN("There is no useable %s controller", controller);
206 else
207 WARN("There is no empty unified cgroup hierarchy");
208
209 return NULL;
210 }
211
#define BATCH_SIZE 50
/* Grow *mem in BATCH_SIZE-byte steps so append_line() doesn't realloc for
 * every single line appended.
 */
static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
{
	int want = (newlen / BATCH_SIZE) + 1;
	int have = (oldlen / BATCH_SIZE) + 1;

	if (!*mem || want > have)
		*mem = must_realloc(*mem, want * BATCH_SIZE);
}
222
/* Append @new (of length @newlen, plus its terminating NUL) to *dest, which
 * currently holds @oldlen bytes of content.
 */
static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
{
	size_t total = oldlen + newlen;

	batch_realloc(dest, oldlen, total + 1);
	/* newlen + 1 also copies the NUL terminator of @new. */
	memcpy(*dest + oldlen, new, newlen + 1);
}
231
232 /* Slurp in a whole file */
233 static char *read_file(const char *fnam)
234 {
235 __do_free char *line = NULL;
236 __do_fclose FILE *f = NULL;
237 int linelen;
238 char *buf = NULL;
239 size_t len = 0, fulllen = 0;
240
241 f = fopen(fnam, "r");
242 if (!f)
243 return NULL;
244 while ((linelen = getline(&line, &len, f)) != -1) {
245 append_line(&buf, fulllen, line, linelen);
246 fulllen += linelen;
247 }
248 return buf;
249 }
250
/* Taken over modified from the kernel sources. */
#define NBITS 32 /* bits in uint32_t */
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)

/* Set bit @bit in the bit array @bitarr. */
static void set_bit(unsigned bit, uint32_t *bitarr)
{
	/* Use an unsigned constant: left-shifting the signed int 1 into the
	 * sign bit (bit % NBITS == 31) is undefined behavior.
	 */
	bitarr[bit / NBITS] |= (1U << (bit % NBITS));
}

/* Clear bit @bit in the bit array @bitarr. */
static void clear_bit(unsigned bit, uint32_t *bitarr)
{
	bitarr[bit / NBITS] &= ~(1U << (bit % NBITS));
}

/* Return whether bit @bit is set in the bit array @bitarr. */
static bool is_set(unsigned bit, uint32_t *bitarr)
{
	return (bitarr[bit / NBITS] & (1U << (bit % NBITS))) != 0;
}
270
/* Create cpumask from cpulist aka turn:
 *
 * 0,2-3
 *
 * into bit array
 *
 * 1 0 1 1
 *
 * Returns a calloc'd array of BITS_TO_LONGS(nbits) uint32_t words owned by
 * the caller, or NULL on allocation failure, on an inverted range, or when
 * the list references a cpu >= @nbits.
 * NOTE(review): @buf is mutated by lxc_iterate_parts() during tokenization.
 */
static uint32_t *lxc_cpumask(char *buf, size_t nbits)
{
	char *token;
	size_t arrlen;
	uint32_t *bitarr;

	arrlen = BITS_TO_LONGS(nbits);
	bitarr = calloc(arrlen, sizeof(uint32_t));
	if (!bitarr)
		return NULL;

	/* Each comma-separated token is either a single cpu "N" or a range
	 * "N-M".
	 */
	lxc_iterate_parts(token, buf, ",") {
		errno = 0;
		unsigned end, start;
		char *range;

		start = strtoul(token, NULL, 0);
		end = start;
		range = strchr(token, '-');
		if (range)
			end = strtoul(range + 1, NULL, 0);

		/* Reject inverted ranges such as "3-1". */
		if (!(start <= end)) {
			free(bitarr);
			return NULL;
		}

		/* Reject cpus the bit array cannot represent. */
		if (end >= nbits) {
			free(bitarr);
			return NULL;
		}

		while (start <= end)
			set_bit(start++, bitarr);
	}

	return bitarr;
}
317
318 /* Turn cpumask into simple, comma-separated cpulist. */
319 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
320 {
321 int ret;
322 size_t i;
323 char **cpulist = NULL;
324 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
325
326 for (i = 0; i <= nbits; i++) {
327 if (!is_set(i, bitarr))
328 continue;
329
330 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
331 if (ret < 0 || (size_t)ret >= sizeof(numstr)) {
332 lxc_free_array((void **)cpulist, free);
333 return NULL;
334 }
335
336 ret = lxc_append_string(&cpulist, numstr);
337 if (ret < 0) {
338 lxc_free_array((void **)cpulist, free);
339 return NULL;
340 }
341 }
342
343 if (!cpulist)
344 return NULL;
345
346 return lxc_string_join(",", (const char **)cpulist, false);
347 }
348
/* Return the last (i.e. highest) cpu number referenced in the cpulist
 * @cpulist (e.g. "0,2-3" -> 3), or -1 on conversion failure. The last number
 * in the list starts after the final ',' or '-', whichever comes later.
 */
static ssize_t get_max_cpus(char *cpulist)
{
	char *c1, *c2, *last;
	size_t cpus = 0;

	c1 = strrchr(cpulist, ',');
	if (c1)
		c1++;

	c2 = strrchr(cpulist, '-');
	if (c2)
		c2++;

	/* Pick whichever separator came last. Relational comparison between a
	 * NULL pointer and a valid pointer is undefined behavior, so handle
	 * the NULL cases explicitly instead of comparing blindly.
	 */
	if (!c1 && !c2)
		last = cpulist;
	else if (!c1)
		last = c2;
	else if (!c2)
		last = c1;
	else
		last = (c1 > c2) ? c1 : c2;

	errno = 0;
	cpus = strtoul(last, NULL, 0);
	if (errno != 0)
		return -1;

	return cpus;
}
379
380 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
381 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
382 {
383 __do_free char *cpulist = NULL, *fpath = NULL, *isolcpus = NULL,
384 *posscpus;
385 __do_free uint32_t *isolmask = NULL, *possmask = NULL;
386 int ret;
387 ssize_t i;
388 char oldv;
389 char *lastslash, *posscpus_tmp;
390 ssize_t maxisol = 0, maxposs = 0;
391 bool bret = false, flipped_bit = false;
392
393 lastslash = strrchr(path, '/');
394 if (!lastslash) {
395 ERROR("Failed to detect \"/\" in \"%s\"", path);
396 return bret;
397 }
398 oldv = *lastslash;
399 *lastslash = '\0';
400 fpath = must_make_path(path, "cpuset.cpus", NULL);
401 posscpus = read_file(fpath);
402 if (!posscpus) {
403 SYSERROR("Failed to read file \"%s\"", fpath);
404 return false;
405 }
406
407 /* Get maximum number of cpus found in possible cpuset. */
408 maxposs = get_max_cpus(posscpus);
409 if (maxposs < 0 || maxposs >= INT_MAX - 1)
410 return false;
411
412 if (!file_exists(__ISOL_CPUS)) {
413 /* This system doesn't expose isolated cpus. */
414 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
415 /* No isolated cpus but we weren't already initialized by
416 * someone. We should simply copy the parents cpuset.cpus
417 * values.
418 */
419 if (!am_initialized) {
420 DEBUG("Copying cpu settings of parent cgroup");
421 cpulist = posscpus;
422 goto copy_parent;
423 }
424 /* No isolated cpus but we were already initialized by someone.
425 * Nothing more to do for us.
426 */
427 return true;
428 }
429
430 isolcpus = read_file(__ISOL_CPUS);
431 if (!isolcpus) {
432 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
433 return false;
434 }
435 if (!isdigit(isolcpus[0])) {
436 TRACE("No isolated cpus detected");
437 /* No isolated cpus but we weren't already initialized by
438 * someone. We should simply copy the parents cpuset.cpus
439 * values.
440 */
441 if (!am_initialized) {
442 DEBUG("Copying cpu settings of parent cgroup");
443 cpulist = posscpus;
444 goto copy_parent;
445 }
446 /* No isolated cpus but we were already initialized by someone.
447 * Nothing more to do for us.
448 */
449 return true;
450 }
451
452 /* Get maximum number of cpus found in isolated cpuset. */
453 maxisol = get_max_cpus(isolcpus);
454 if (maxisol < 0 || maxisol >= INT_MAX - 1)
455 return false;
456
457 if (maxposs < maxisol)
458 maxposs = maxisol;
459 maxposs++;
460
461 possmask = lxc_cpumask(posscpus, maxposs);
462 if (!possmask) {
463 ERROR("Failed to create cpumask for possible cpus");
464 return false;
465 }
466
467 isolmask = lxc_cpumask(isolcpus, maxposs);
468 if (!isolmask) {
469 ERROR("Failed to create cpumask for isolated cpus");
470 return false;
471 }
472
473 for (i = 0; i <= maxposs; i++) {
474 if (!is_set(i, isolmask) || !is_set(i, possmask))
475 continue;
476
477 flipped_bit = true;
478 clear_bit(i, possmask);
479 }
480
481 if (!flipped_bit) {
482 DEBUG("No isolated cpus present in cpuset");
483 return true;
484 }
485 DEBUG("Removed isolated cpus from cpuset");
486
487 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
488 if (!cpulist) {
489 ERROR("Failed to create cpu list");
490 return false;
491 }
492
493 copy_parent:
494 *lastslash = oldv;
495 fpath = must_make_path(path, "cpuset.cpus", NULL);
496 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
497 if (cpulist == posscpus)
498 cpulist = NULL;
499 if (ret < 0) {
500 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
501 return false;
502 }
503
504 return true;
505 }
506
507 /* Copy contents of parent(@path)/@file to @path/@file */
508 static bool copy_parent_file(char *path, char *file)
509 {
510 __do_free char *child_path = NULL, *parent_path = NULL, *value = NULL;
511 int ret;
512 char oldv;
513 int len = 0;
514 char *lastslash = NULL;
515
516 lastslash = strrchr(path, '/');
517 if (!lastslash) {
518 ERROR("Failed to detect \"/\" in \"%s\"", path);
519 return false;
520 }
521 oldv = *lastslash;
522 *lastslash = '\0';
523 parent_path = must_make_path(path, file, NULL);
524 len = lxc_read_from_file(parent_path, NULL, 0);
525 if (len <= 0)
526 goto on_error;
527
528 value = must_realloc(NULL, len + 1);
529 ret = lxc_read_from_file(parent_path, value, len);
530 if (ret != len)
531 goto on_error;
532
533 *lastslash = oldv;
534 child_path = must_make_path(path, file, NULL);
535 ret = lxc_write_to_file(child_path, value, len, false, 0666);
536 if (ret < 0)
537 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, child_path);
538 return ret >= 0;
539
540 on_error:
541 SYSERROR("Failed to read file \"%s\"", child_path);
542 return false;
543 }
544
/* Initialize the cpuset hierarchy in first directory of @cgname and set
 * cgroup.clone_children so that children inherit settings. Since the
 * h->base_path is populated by init or ourselves, we know it is already
 * initialized. Returns true on success and for hierarchies that don't
 * manage "cpuset" at all.
 */
static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
{
	__do_free char *cgpath = NULL, *clonechildrenpath = NULL;
	int ret;
	char v;
	char *slash;

	/* Nothing to do unless this hierarchy manages cpuset. */
	if (!string_in_list(h->controllers, "cpuset"))
		return true;

	/* Isolate the first path component of @cgname; the '/' is restored
	 * right after the path is built.
	 */
	if (*cgname == '/')
		cgname++;
	slash = strchr(cgname, '/');
	if (slash)
		*slash = '\0';

	cgpath = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
	if (slash)
		*slash = '/';

	ret = mkdir(cgpath, 0755);
	if (ret < 0) {
		/* An already existing directory is fine. */
		if (errno != EEXIST) {
			SYSERROR("Failed to create directory \"%s\"", cgpath);
			return false;
		}
	}

	clonechildrenpath = must_make_path(cgpath, "cgroup.clone_children", NULL);
	/* unified hierarchy doesn't have clone_children */
	if (!file_exists(clonechildrenpath))
		return true;

	ret = lxc_read_from_file(clonechildrenpath, &v, 1);
	if (ret < 0) {
		SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
		return false;
	}

	/* Make sure any isolated cpus are removed from cpuset.cpus. */
	if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
		SYSERROR("Failed to remove isolated cpus");
		return false;
	}

	/* Already set for us by someone else. */
	if (v == '1') {
		DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
		return true;
	}

	/* copy parent's settings */
	if (!copy_parent_file(cgpath, "cpuset.mems")) {
		SYSERROR("Failed to copy \"cpuset.mems\" settings");
		return false;
	}

	/* Set clone_children so children inherit our settings. */
	ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
	if (ret < 0) {
		SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
		return false;
	}

	return true;
}
616
/* Given two null-terminated lists of strings, return true if any string is in
 * both.
 */
static bool controller_lists_intersect(char **l1, char **l2)
{
	char **it;

	if (!l1 || !l2)
		return false;

	for (it = l1; *it; it++)
		if (string_in_list(l2, *it))
			return true;

	return false;
}
634
635 /* For a null-terminated list of controllers @clist, return true if any of those
636 * controllers is already listed the null-terminated list of hierarchies @hlist.
637 * Realistically, if one is present, all must be present.
638 */
639 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
640 {
641 int i;
642
643 if (!hlist)
644 return false;
645
646 for (i = 0; hlist[i]; i++)
647 if (controller_lists_intersect(hlist[i]->controllers, clist))
648 return true;
649
650 return false;
651 }
652
653 /* Return true if the controller @entry is found in the null-terminated list of
654 * hierarchies @hlist.
655 */
656 static bool controller_found(struct hierarchy **hlist, char *entry)
657 {
658 int i;
659
660 if (!hlist)
661 return false;
662
663 for (i = 0; hlist[i]; i++)
664 if (string_in_list(hlist[i]->controllers, entry))
665 return true;
666
667 return false;
668 }
669
670 /* Return true if all of the controllers which we require have been found. The
671 * required list is freezer and anything in lxc.cgroup.use.
672 */
673 static bool all_controllers_found(struct cgroup_ops *ops)
674 {
675 char **cur;
676 struct hierarchy **hlist = ops->hierarchies;
677
678 if (!ops->cgroup_use)
679 return true;
680
681 for (cur = ops->cgroup_use; cur && *cur; cur++)
682 if (!controller_found(hlist, *cur)) {
683 ERROR("No %s controller mountpoint found", *cur);
684 return false;
685 }
686
687 return true;
688 }
689
/* Get the controllers from a mountinfo line There are other ways we could get
 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
 * could parse the mount options. But we simply assume that the mountpoint must
 * be /sys/fs/cgroup/controller-list
 *
 * Returns a freshly allocated NULL-terminated controller list for legacy
 * (CGROUP_SUPER_MAGIC) hierarchies, NULL otherwise (including for any
 * other @type).
 */
static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
					int type)
{
	/* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
	 * for legacy hierarchies.
	 */
	int i;
	char *p2, *tok;
	char *p = line, *sep = ",";
	char **aret = NULL;

	/* Skip the first four space-separated mountinfo fields. */
	for (i = 0; i < 4; i++) {
		p = strchr(p, ' ');
		if (!p)
			return NULL;
		p++;
	}

	/* Note, if we change how mountinfo works, then our caller will need to
	 * verify /sys/fs/cgroup/ in this field.
	 */
	if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
		ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
		return NULL;
	}

	p += 15;
	p2 = strchr(p, ' ');
	if (!p2) {
		ERROR("Corrupt mountinfo");
		return NULL;
	}
	/* Temporarily NUL-terminate the controller list; restored below. */
	*p2 = '\0';

	if (type == CGROUP_SUPER_MAGIC) {
		__do_free char *dup;

		/* strdup() here for v1 hierarchies. Otherwise
		 * lxc_iterate_parts() will destroy mountpoints such as
		 * "/sys/fs/cgroup/cpu,cpuacct".
		 */
		dup = must_copy_string(p);
		if (!dup)
			return NULL;

		lxc_iterate_parts (tok, dup, sep)
			must_append_controller(klist, nlist, &aret, tok);
	}
	*p2 = ' ';

	return aret;
}
747
/* Return a freshly allocated controller list containing no controllers, i.e.
 * a single NULL entry. Used to represent the empty unified hierarchy.
 */
static char **cg_unified_make_empty_controller(void)
{
	char **list = NULL;
	int slot;

	slot = append_null_to_list((void ***)&list);
	list[slot] = NULL;
	return list;
}
757
758 static char **cg_unified_get_controllers(const char *file)
759 {
760 __do_free char *buf = NULL;
761 char *tok;
762 char *sep = " \t\n";
763 char **aret = NULL;
764
765 buf = read_file(file);
766 if (!buf)
767 return NULL;
768
769 lxc_iterate_parts(tok, buf, sep) {
770 int newentry;
771 char *copy;
772
773 newentry = append_null_to_list((void ***)&aret);
774 copy = must_copy_string(tok);
775 aret[newentry] = copy;
776 }
777
778 return aret;
779 }
780
781 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
782 char *container_base_path, int type)
783 {
784 struct hierarchy *new;
785 int newentry;
786
787 new = must_realloc(NULL, sizeof(*new));
788 new->controllers = clist;
789 new->mountpoint = mountpoint;
790 new->container_base_path = container_base_path;
791 new->container_full_path = NULL;
792 new->monitor_full_path = NULL;
793 new->version = type;
794 new->cgroup2_chown = NULL;
795
796 newentry = append_null_to_list((void ***)h);
797 (*h)[newentry] = new;
798 return new;
799 }
800
/* Get a copy of the mountpoint from @line, which is a line from
 * /proc/self/mountinfo. Only mountpoints under /sys/fs/cgroup/ are accepted.
 * The caller owns the returned string. Note: @line is left NUL-terminated at
 * the end of the mountpoint field.
 */
static char *cg_hybrid_get_mountpoint(char *line)
{
	size_t len;
	char *end;
	char *p = line, *copy;

	/* Skip the first four space-separated fields; field five is the
	 * mountpoint.
	 */
	for (int i = 0; i < 4; i++) {
		p = strchr(p, ' ');
		if (!p)
			return NULL;
		p++;
	}

	if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
		return NULL;

	end = strchr(p + 15, ' ');
	if (!end)
		return NULL;
	*end = '\0';

	len = strlen(p);
	copy = must_realloc(NULL, len + 1);
	memcpy(copy, p, len);
	copy[len] = '\0';
	return copy;
}
832
/* Given a multi-line string, return a null-terminated copy of the current line
 * (up to but excluding the next '\n'), or NULL if no newline follows. The
 * caller owns the returned string.
 */
static char *copy_to_eol(char *p)
{
	char *newline = strchr(p, '\n');
	char *copy;
	size_t len;

	if (!newline)
		return NULL;

	len = newline - p;
	copy = must_realloc(NULL, len + 1);
	memcpy(copy, p, len);
	copy[len] = '\0';
	return copy;
}
848
/* cgline: pointer to character after the first ':' in a line in a \n-terminated
 * /proc/self/cgroup file. Check whether controller c is present.
 */
static bool controller_in_clist(char *cgline, char *c)
{
	__do_free char *tmp = NULL;
	char *tok, *eol;
	size_t len;

	eol = strchr(cgline, ':');
	if (!eol)
		return false;

	/* Copy just the comma-separated controller list (up to the next ':')
	 * so lxc_iterate_parts() can tokenize it without mutating @cgline.
	 */
	len = eol - cgline;
	tmp = must_realloc(NULL, len + 1);
	memcpy(tmp, cgline, len);
	tmp[len] = '\0';

	lxc_iterate_parts(tok, tmp, ",")
		if (strcmp(tok, c) == 0)
			return true;

	return false;
}
873
/* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
 * @controller.
 *
 * Each line has the form "id:controller-list:/cgroup/path". For cgroup v2
 * lookups (@type == CGROUP2_SUPER_MAGIC) the "0::/some/path" entry is matched
 * instead of a controller name. Returns a freshly allocated copy of the path
 * portion, or NULL if no matching line exists.
 */
static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
					  int type)
{
	char *p = basecginfo;

	for (;;) {
		bool is_cgv2_base_cgroup = false;

		/* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
		if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
			is_cgv2_base_cgroup = true;

		/* Advance past the hierarchy id field. */
		p = strchr(p, ':');
		if (!p)
			return NULL;
		p++;

		if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
			/* Advance past the controller list to the path. */
			p = strchr(p, ':');
			if (!p)
				return NULL;
			p++;
			return copy_to_eol(p);
		}

		/* No match on this line; move to the next one. */
		p = strchr(p, '\n');
		if (!p)
			return NULL;
		p++;
	}
}
908
/* Append a copy of @entry to the NULL-terminated string array *list. Do not
 * fail.
 */
static void must_append_string(char ***list, char *entry)
{
	int slot;

	slot = append_null_to_list((void ***)list);
	(*list)[slot] = must_copy_string(entry);
}
918
/* Parse /proc/self/cgroup, collecting kernel controllers into @klist and
 * named ("name=...") subsystems into @nlist. A cgroup v2 entry is recorded in
 * @klist under the name "cgroup2". Returns 0 on success, -1 if
 * /proc/self/cgroup cannot be opened.
 */
static int get_existing_subsystems(char ***klist, char ***nlist)
{
	__do_free char *line = NULL;
	__do_fclose FILE *f = NULL;
	size_t len = 0;

	f = fopen("/proc/self/cgroup", "r");
	if (!f)
		return -1;

	while (getline(&line, &len, f) != -1) {
		char *p, *p2, *tok;
		p = strchr(line, ':');
		if (!p)
			continue;
		p++;
		p2 = strchr(p, ':');
		if (!p2)
			continue;
		*p2 = '\0';

		/* If the kernel has cgroup v2 support, then /proc/self/cgroup
		 * contains an entry of the form:
		 *
		 * 0::/some/path
		 *
		 * In this case we use "cgroup2" as controller name.
		 */
		if ((p2 - p) == 0) {
			must_append_string(klist, "cgroup2");
			continue;
		}

		/* Split the comma-separated controller list of a v1 line. */
		lxc_iterate_parts(tok, p, ",") {
			if (strncmp(tok, "name=", 5) == 0)
				must_append_string(nlist, tok);
			else
				must_append_string(klist, tok);
		}
	}

	return 0;
}
962
/* Strip trailing newline characters from @s in place. */
static void trim(char *s)
{
	size_t len;

	len = strlen(s);
	/* Use len > 0 so a string consisting only of "\n" is trimmed too;
	 * the old len > 1 check left the lone newline in place.
	 */
	while ((len > 0) && (s[len - 1] == '\n'))
		s[--len] = '\0';
}
971
/* Dump every detected hierarchy (base cgroup, mountpoint, controller list)
 * at TRACE level for debugging.
 */
static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
{
	int i;
	struct hierarchy **it;

	if (!ops->hierarchies) {
		TRACE(" No hierarchies found");
		return;
	}

	TRACE(" Hierarchies:");
	for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
		int j;
		char **cit;

		TRACE(" %d: base_cgroup: %s", i, (*it)->container_base_path ? (*it)->container_base_path : "(null)");
		TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
		TRACE(" controllers:");
		for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
			TRACE(" %d: %s", j, *cit);
	}
}
994
/* Dump the raw /proc/<pid>/cgroup contents (@basecginfo) plus the parsed
 * kernel (@klist) and named (@nlist) subsystem lists at TRACE level.
 */
static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
					      char **nlist)
{
	int k;
	char **it;

	TRACE("basecginfo is:");
	TRACE("%s", basecginfo);

	for (k = 0, it = klist; it && *it; it++, k++)
		TRACE("kernel subsystem %d: %s", k, *it);

	for (k = 0, it = nlist; it && *it; it++, k++)
		TRACE("named subsystem %d: %s", k, *it);
}
1010
1011 static int cgroup_rmdir(struct hierarchy **hierarchies,
1012 const char *container_cgroup)
1013 {
1014 int i;
1015
1016 if (!container_cgroup || !hierarchies)
1017 return 0;
1018
1019 for (i = 0; hierarchies[i]; i++) {
1020 int ret;
1021 struct hierarchy *h = hierarchies[i];
1022
1023 if (!h->container_full_path)
1024 continue;
1025
1026 ret = recursive_destroy(h->container_full_path);
1027 if (ret < 0)
1028 WARN("Failed to destroy \"%s\"", h->container_full_path);
1029
1030 free(h->container_full_path);
1031 h->container_full_path = NULL;
1032 }
1033
1034 return 0;
1035 }
1036
/* Argument bundle handed to helpers (e.g. cgroup_rmdir_wrapper()) that are
 * run via userns_exec_1() inside the container's user namespace.
 */
struct generic_userns_exec_data {
	struct hierarchy **hierarchies;
	const char *container_cgroup;
	struct lxc_conf *conf;
	uid_t origuid; /* target uid in parent namespace */
	char *path;
};
1044
/* Drop privileges to the container's root ids (0 when a root id mapping
 * exists, otherwise the configured init uid/gid) and remove the container's
 * cgroups. Intended to run as the userns_exec_1() child.
 */
static int cgroup_rmdir_wrapper(void *data)
{
	int ret;
	struct generic_userns_exec_data *arg = data;
	uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
	gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;

	/* Switch the gid before the uid; dropping the uid first could cost
	 * us the privilege needed for setresgid().
	 */
	ret = setresgid(nsgid, nsgid, nsgid);
	if (ret < 0) {
		SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
			 (int)nsgid, (int)nsgid);
		return -1;
	}

	ret = setresuid(nsuid, nsuid, nsuid);
	if (ret < 0) {
		SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
			 (int)nsuid, (int)nsuid);
		return -1;
	}

	/* EPERM is tolerated: dropping supplementary groups may legitimately
	 * be forbidden in unprivileged user namespaces.
	 */
	ret = setgroups(0, NULL);
	if (ret < 0 && errno != EPERM) {
		SYSERROR("Failed to setgroups(0, NULL)");
		return -1;
	}

	return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
}
1074
/* Remove the container's (payload) cgroups in every hierarchy. When the
 * container has an id mapping the removal is performed inside the user
 * namespace via userns_exec_1(); otherwise it happens directly.
 */
__cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
						struct lxc_handler *handler)
{
	int ret;
	struct generic_userns_exec_data wrap;

	if (!ops->hierarchies)
		return;

	wrap.origuid = 0;
	wrap.container_cgroup = ops->container_cgroup;
	wrap.hierarchies = ops->hierarchies;
	wrap.conf = handler->conf;

	if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
		ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
				    "cgroup_rmdir_wrapper");
	else
		ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
	if (ret < 0) {
		/* Best effort: a failed removal is only warned about. */
		WARN("Failed to destroy cgroups");
		return;
	}
}
1099
/* Tear down the monitor's cgroup in every hierarchy. The monitor process is
 * first moved into a "pivot" cgroup (a process cannot remove the cgroup it
 * currently lives in) and its old cgroup is then destroyed recursively.
 */
__cgfsng_ops static void cgfsng_monitor_destroy(struct cgroup_ops *ops,
						struct lxc_handler *handler)
{
	int len;
	struct lxc_conf *conf = handler->conf;
	char pidstr[INTTYPE_TO_STRLEN(pid_t)];

	if (!ops->hierarchies)
		return;

	len = snprintf(pidstr, sizeof(pidstr), "%d", handler->monitor_pid);
	if (len < 0 || (size_t)len >= sizeof(pidstr))
		return;

	for (int i = 0; ops->hierarchies[i]; i++) {
		__do_free char *pivot_path = NULL;
		int ret;
		char *chop;
		char pivot_cgroup[] = PIVOT_CGROUP;
		struct hierarchy *h = ops->hierarchies[i];

		if (!h->monitor_full_path)
			continue;

		/* Build the path to the pivot cgroup's cgroup.procs file. */
		if (conf && conf->cgroup_meta.dir)
			pivot_path = must_make_path(h->mountpoint,
						    h->container_base_path,
						    conf->cgroup_meta.dir,
						    PIVOT_CGROUP,
						    "cgroup.procs", NULL);
		else
			pivot_path = must_make_path(h->mountpoint,
						    h->container_base_path,
						    PIVOT_CGROUP,
						    "cgroup.procs", NULL);

		/* Temporarily chop off "/cgroup.procs" so pivot_path names
		 * the pivot cgroup directory itself.
		 */
		chop = strrchr(pivot_path, '/');
		if (chop)
			*chop = '\0';

		/*
		 * Make sure not to pass in the ro string literal PIVOT_CGROUP
		 * here.
		 */
		if (!cg_legacy_handle_cpuset_hierarchy(h, pivot_cgroup)) {
			WARN("Failed to handle legacy cpuset controller");
			continue;
		}

		ret = mkdir_p(pivot_path, 0755);
		if (ret < 0 && errno != EEXIST) {
			SYSWARN("Failed to create cgroup \"%s\"\n", pivot_path);
			continue;
		}

		/* Restore the full path to cgroup.procs. */
		if (chop)
			*chop = '/';

		/* Move ourselves into the pivot cgroup to delete our own
		 * cgroup.
		 */
		ret = lxc_write_to_file(pivot_path, pidstr, len, false, 0666);
		if (ret != 0) {
			SYSWARN("Failed to move monitor %s to \"%s\"\n", pidstr, pivot_path);
			continue;
		}

		ret = recursive_destroy(h->monitor_full_path);
		if (ret < 0)
			WARN("Failed to destroy \"%s\"", h->monitor_full_path);
	}
}
1172
/* For a cgroup v2 hierarchy, delegate all detected controllers by writing
 * "+controller ..." entries into the cgroup.subtree_control file of every
 * ancestor of @cgname below the base path. No-op (returning true) for v1
 * hierarchies or when no controllers are known.
 */
static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
{
	__do_free char *add_controllers = NULL, *cgroup = NULL;
	size_t i, parts_len;
	char **it;
	size_t full_len = 0;
	char **parts = NULL;
	bool bret = false;

	if (h->version != CGROUP2_SUPER_MAGIC)
		return true;

	if (!h->controllers)
		return true;

	/* For now we simply enable all controllers that we have detected by
	 * creating a string like "+memory +pids +cpu +io".
	 * TODO: In the near future we might want to support "-<controller>"
	 * etc. but whether supporting semantics like this make sense will need
	 * some thinking.
	 */
	for (it = h->controllers; it && *it; it++) {
		full_len += strlen(*it) + 2;
		add_controllers = must_realloc(add_controllers, full_len + 1);

		/* First iteration: the fresh buffer must be NUL-terminated
		 * before strlcat() can append to it.
		 */
		if (h->controllers[0] == *it)
			add_controllers[0] = '\0';

		(void)strlcat(add_controllers, "+", full_len + 1);
		(void)strlcat(add_controllers, *it, full_len + 1);

		if ((it + 1) && *(it + 1))
			(void)strlcat(add_controllers, " ", full_len + 1);
	}

	parts = lxc_string_split(cgname, '/');
	if (!parts)
		goto on_error;

	/* Skip the leaf component: controllers only need to be delegated in
	 * the ancestors.
	 */
	parts_len = lxc_array_len((void **)parts);
	if (parts_len > 0)
		parts_len--;

	cgroup = must_make_path(h->mountpoint, h->container_base_path, NULL);
	for (i = 0; i < parts_len; i++) {
		int ret;
		__do_free char *target;

		cgroup = must_append_path(cgroup, parts[i], NULL);
		target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
		ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
		if (ret < 0) {
			SYSERROR("Could not enable \"%s\" controllers in the "
				 "unified cgroup \"%s\"", add_controllers, cgroup);
			goto on_error;
		}
	}

	bret = true;

on_error:
	lxc_free_array((void **)parts, free);
	return bret;
}
1237
/* Create every component of @dir with @mode, like `mkdir -p`, except that
 * EEXIST is only tolerated for intermediate components: an already-existing
 * FINAL directory is treated as an error so callers (the cgroup creation
 * retry loops) can detect name collisions and pick a fresh name.
 * Returns 0 on success, -1 on error with errno set.
 */
static int mkdir_eexist_on_last(const char *dir, mode_t mode)
{
	const char *tmp = dir;
	const char *orig = dir;
	size_t orig_len;

	orig_len = strlen(dir);
	do {
		__do_free char *makeme;
		int ret;
		size_t cur_len;

		/* Skip any run of '/' to the start of the next component,
		 * then advance tmp to the end of that component.
		 */
		dir = tmp + strspn(tmp, "/");
		tmp = dir + strcspn(dir, "/");

		/* Preset errno so a failing strndup() reports ENOMEM. */
		errno = ENOMEM;
		/* makeme is the prefix of the path up to (and excluding) the
		 * current component; on the final pass cur_len == orig_len
		 * and the full path is created.
		 */
		cur_len = dir - orig;
		makeme = strndup(orig, cur_len);
		if (!makeme)
			return -1;

		ret = mkdir(makeme, mode);
		if (ret < 0) {
			/* EEXIST is fatal only for the complete path. */
			if ((errno != EEXIST) || (orig_len == cur_len)) {
				SYSERROR("Failed to create directory \"%s\"", makeme);
				return -1;
			}
		}
	} while (tmp != dir);

	return 0;
}
1270
1271 static bool monitor_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1272 {
1273 int ret;
1274
1275 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1276 ERROR("Failed to handle legacy cpuset controller");
1277 return false;
1278 }
1279
1280 h->monitor_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1281 ret = mkdir_eexist_on_last(h->monitor_full_path, 0755);
1282 if (ret < 0) {
1283 ERROR("Failed to create cgroup \"%s\"", h->monitor_full_path);
1284 return false;
1285 }
1286
1287 return cg_unified_create_cgroup(h, cgname);
1288 }
1289
1290 static bool container_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1291 {
1292 int ret;
1293
1294 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1295 ERROR("Failed to handle legacy cpuset controller");
1296 return false;
1297 }
1298
1299 h->container_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1300 ret = mkdir_eexist_on_last(h->container_full_path, 0755);
1301 if (ret < 0) {
1302 ERROR("Failed to create cgroup \"%s\"", h->container_full_path);
1303 return false;
1304 }
1305
1306 return cg_unified_create_cgroup(h, cgname);
1307 }
1308
1309 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname, bool monitor)
1310 {
1311 int ret;
1312 char *full_path;
1313
1314 if (monitor)
1315 full_path = h->monitor_full_path;
1316 else
1317 full_path = h->container_full_path;
1318
1319 ret = rmdir(full_path);
1320 if (ret < 0)
1321 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", full_path);
1322
1323 free(full_path);
1324
1325 if (monitor)
1326 h->monitor_full_path = NULL;
1327 else
1328 h->container_full_path = NULL;
1329 }
1330
1331 __cgfsng_ops static inline bool cgfsng_monitor_create(struct cgroup_ops *ops,
1332 struct lxc_handler *handler)
1333 {
1334 __do_free char *monitor_cgroup = NULL;
1335 char *offset, *tmp;
1336 int i, idx = 0;
1337 size_t len;
1338 struct lxc_conf *conf = handler->conf;
1339
1340 if (!conf)
1341 return false;
1342
1343 if (!ops->hierarchies)
1344 return true;
1345
1346 if (conf->cgroup_meta.dir)
1347 tmp = lxc_string_join("/",
1348 (const char *[]){conf->cgroup_meta.dir,
1349 ops->monitor_pattern,
1350 handler->name, NULL},
1351 false);
1352 else
1353 tmp = must_make_path(ops->monitor_pattern, handler->name, NULL);
1354 if (!tmp)
1355 return false;
1356
1357 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1358 monitor_cgroup = must_realloc(tmp, len);
1359 offset = monitor_cgroup + len - 5;
1360 *offset = 0;
1361
1362 do {
1363 if (idx) {
1364 int ret = snprintf(offset, 5, "-%d", idx);
1365 if (ret < 0 || (size_t)ret >= 5)
1366 return false;
1367 }
1368
1369 for (i = 0; ops->hierarchies[i]; i++) {
1370 if (!monitor_create_path_for_hierarchy(ops->hierarchies[i], monitor_cgroup)) {
1371 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->monitor_full_path);
1372 for (int j = 0; j < i; j++)
1373 remove_path_for_hierarchy(ops->hierarchies[j], monitor_cgroup, true);
1374
1375 idx++;
1376 break;
1377 }
1378 }
1379 } while (ops->hierarchies[i] && idx > 0 && idx < 1000);
1380
1381 if (idx == 1000)
1382 return false;
1383
1384 INFO("The monitor process uses \"%s\" as cgroup", monitor_cgroup);
1385 return true;
1386 }
1387
1388 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1389 * next cgroup_pattern-1, -2, ..., -999.
1390 */
1391 __cgfsng_ops static inline bool cgfsng_payload_create(struct cgroup_ops *ops,
1392 struct lxc_handler *handler)
1393 {
1394 __do_free char *container_cgroup = NULL, *tmp = NULL;
1395 int i;
1396 size_t len;
1397 char *offset;
1398 int idx = 0;
1399 struct lxc_conf *conf = handler->conf;
1400
1401 if (ops->container_cgroup)
1402 return false;
1403
1404 if (!conf)
1405 return false;
1406
1407 if (!ops->hierarchies)
1408 return true;
1409
1410 if (conf->cgroup_meta.dir)
1411 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1412 else
1413 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1414 if (!tmp) {
1415 ERROR("Failed expanding cgroup name pattern");
1416 return false;
1417 }
1418
1419 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1420 container_cgroup = must_realloc(NULL, len);
1421 (void)strlcpy(container_cgroup, tmp, len);
1422 offset = container_cgroup + len - 5;
1423
1424 do {
1425 int ret = snprintf(offset, 5, "-%d", idx);
1426 if (ret < 0 || (size_t)ret >= 5)
1427 return false;
1428
1429 for (i = 0; ops->hierarchies[i]; i++) {
1430 if (!container_create_path_for_hierarchy(ops->hierarchies[i], container_cgroup)) {
1431 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->container_full_path);
1432 for (int j = 0; j < i; j++)
1433 remove_path_for_hierarchy(ops->hierarchies[j], container_cgroup, false);
1434 idx++;
1435 break;
1436 }
1437 }
1438
1439 ops->container_cgroup = container_cgroup;
1440 container_cgroup = NULL;
1441 INFO("The container uses \"%s\" as cgroup", ops->container_cgroup);
1442 } while (ops->hierarchies[i] && idx > 0 && idx < 1000);
1443
1444 if (idx == 1000)
1445 return false;
1446
1447 INFO("The container process uses \"%s\" as cgroup", ops->container_cgroup);
1448 return true;
1449 }
1450
1451 __cgfsng_ops static bool __do_cgroup_enter(struct cgroup_ops *ops, pid_t pid,
1452 bool monitor)
1453 {
1454 int len;
1455 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1456
1457 if (!ops->hierarchies)
1458 return true;
1459
1460 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
1461 if (len < 0 || (size_t)len >= sizeof(pidstr))
1462 return false;
1463
1464 for (int i = 0; ops->hierarchies[i]; i++) {
1465 int ret;
1466 __do_free char *path;
1467
1468 if (monitor)
1469 path = must_make_path(ops->hierarchies[i]->monitor_full_path,
1470 "cgroup.procs", NULL);
1471 else
1472 path = must_make_path(ops->hierarchies[i]->container_full_path,
1473 "cgroup.procs", NULL);
1474 ret = lxc_write_to_file(path, pidstr, len, false, 0666);
1475 if (ret != 0) {
1476 SYSERROR("Failed to enter cgroup \"%s\"", path);
1477 return false;
1478 }
1479 }
1480
1481 return true;
1482 }
1483
/* Move the monitor process @pid into the monitor cgroup of every hierarchy. */
__cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops, pid_t pid)
{
	return __do_cgroup_enter(ops, pid, true);
}
1488
1489 static bool cgfsng_payload_enter(struct cgroup_ops *ops, pid_t pid)
1490 {
1491 return __do_cgroup_enter(ops, pid, false);
1492 }
1493
/* chown() @path to @chown_uid:@chown_gid, then chmod() it to @chmod_mode.
 * Failures are logged as warnings; returns -1 on the first failure, 0 on
 * success.
 */
static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
		   mode_t chmod_mode)
{
	if (chown(path, chown_uid, chown_gid) < 0) {
		SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
		return -1;
	}

	if (chmod(path, chmod_mode) < 0) {
		SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
		return -1;
	}

	return 0;
}
1513
1514 /* chgrp the container cgroups to container group. We leave
1515 * the container owner as cgroup owner. So we must make the
1516 * directories 775 so that the container can create sub-cgroups.
1517 *
1518 * Also chown the tasks and cgroup.procs files. Those may not
1519 * exist depending on kernel version.
1520 */
1521 static int chown_cgroup_wrapper(void *data)
1522 {
1523 int i, ret;
1524 uid_t destuid;
1525 struct generic_userns_exec_data *arg = data;
1526 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1527 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1528
1529 ret = setresgid(nsgid, nsgid, nsgid);
1530 if (ret < 0) {
1531 SYSERROR("Failed to setresgid(%d, %d, %d)",
1532 (int)nsgid, (int)nsgid, (int)nsgid);
1533 return -1;
1534 }
1535
1536 ret = setresuid(nsuid, nsuid, nsuid);
1537 if (ret < 0) {
1538 SYSERROR("Failed to setresuid(%d, %d, %d)",
1539 (int)nsuid, (int)nsuid, (int)nsuid);
1540 return -1;
1541 }
1542
1543 ret = setgroups(0, NULL);
1544 if (ret < 0 && errno != EPERM) {
1545 SYSERROR("Failed to setgroups(0, NULL)");
1546 return -1;
1547 }
1548
1549 destuid = get_ns_uid(arg->origuid);
1550 if (destuid == LXC_INVALID_UID)
1551 destuid = 0;
1552
1553 for (i = 0; arg->hierarchies[i]; i++) {
1554 __do_free char *fullpath = NULL;
1555 char *path = arg->hierarchies[i]->container_full_path;
1556
1557 ret = chowmod(path, destuid, nsgid, 0775);
1558 if (ret < 0)
1559 return -1;
1560
1561 /* Failures to chown() these are inconvenient but not
1562 * detrimental We leave these owned by the container launcher,
1563 * so that container root can write to the files to attach. We
1564 * chmod() them 664 so that container systemd can write to the
1565 * files (which systemd in wily insists on doing).
1566 */
1567
1568 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1569 fullpath = must_make_path(path, "tasks", NULL);
1570 (void)chowmod(fullpath, destuid, nsgid, 0664);
1571 }
1572
1573 fullpath = must_make_path(path, "cgroup.procs", NULL);
1574 (void)chowmod(fullpath, destuid, nsgid, 0664);
1575
1576 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1577 continue;
1578
1579 for (char **p = arg->hierarchies[i]->cgroup2_chown; p && *p; p++) {
1580 fullpath = must_make_path(path, *p, NULL);
1581 (void)chowmod(fullpath, destuid, nsgid, 0664);
1582 }
1583 }
1584
1585 return 0;
1586 }
1587
1588 __cgfsng_ops static bool cgfsng_chown(struct cgroup_ops *ops,
1589 struct lxc_conf *conf)
1590 {
1591 struct generic_userns_exec_data wrap;
1592
1593 if (lxc_list_empty(&conf->id_map))
1594 return true;
1595
1596 if (!ops->hierarchies)
1597 return true;
1598
1599 wrap.origuid = geteuid();
1600 wrap.path = NULL;
1601 wrap.hierarchies = ops->hierarchies;
1602 wrap.conf = conf;
1603
1604 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1605 "chown_cgroup_wrapper") < 0) {
1606 ERROR("Error requesting cgroup chown in new user namespace");
1607 return false;
1608 }
1609
1610 return true;
1611 }
1612
1613 /* cgroup-full:* is done, no need to create subdirs */
1614 static bool cg_mount_needs_subdirs(int type)
1615 {
1616 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1617 return false;
1618
1619 return true;
1620 }
1621
/* After $rootfs/sys/fs/container/controller/the/cg/path has been created,
 * remount controller ro if needed and bindmount the cgroupfs onto
 * control/the/cg/path.
 *
 * @type is one of the LXC_AUTO_CGROUP_* mount modes; @h is the hierarchy
 * being mounted; @controllerpath is the controller mountpoint inside the
 * container's /sys/fs/cgroup tmpfs; @cgpath is the already-created target
 * directory; @container_cgroup is the container's cgroup name.
 * Returns 0 on success, -1 on error.
 */
static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
				       char *controllerpath, char *cgpath,
				       const char *container_cgroup)
{
	__do_free char *sourcepath = NULL;
	int ret, remount_flags;
	int flags = MS_BIND;

	/* For ro and mixed modes the controller mountpoint itself is made
	 * read-only: bind it over itself first (a remount requires an
	 * existing bind mount), then remount read-only preserving the flags
	 * the source mount requires.
	 */
	if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
		ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
		if (ret < 0) {
			SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
				 controllerpath, controllerpath);
			return -1;
		}

		remount_flags = add_required_remount_flags(controllerpath,
							   controllerpath,
							   flags | MS_REMOUNT);
		ret = mount(controllerpath, controllerpath, "cgroup",
			    remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
			    NULL);
		if (ret < 0) {
			SYSERROR("Failed to remount \"%s\" ro", controllerpath);
			return -1;
		}

		INFO("Remounted %s read-only", controllerpath);
	}

	/* Bind the container's own cgroup directory from the host hierarchy
	 * onto @cgpath. In pure-ro mode even the container's own cgroup is
	 * read-only; in mixed mode it stays writable.
	 */
	sourcepath = must_make_path(h->mountpoint, h->container_base_path,
				    container_cgroup, NULL);
	if (type == LXC_AUTO_CGROUP_RO)
		flags |= MS_RDONLY;

	ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
	if (ret < 0) {
		SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
		return -1;
	}
	INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);

	/* MS_RDONLY on a fresh bind mount is ignored by the kernel; a second
	 * remount pass is needed to actually make it read-only.
	 */
	if (flags & MS_RDONLY) {
		remount_flags = add_required_remount_flags(sourcepath, cgpath,
							   flags | MS_REMOUNT);
		ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
		if (ret < 0) {
			SYSERROR("Failed to remount \"%s\" ro", cgpath);
			return -1;
		}
		INFO("Remounted %s read-only", cgpath);
	}

	INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
	return 0;
}
1682
1683 /* __cg_mount_direct
1684 *
1685 * Mount cgroup hierarchies directly without using bind-mounts. The main
1686 * uses-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1687 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1688 */
1689 static int __cg_mount_direct(int type, struct hierarchy *h,
1690 const char *controllerpath)
1691 {
1692 int ret;
1693 __do_free char *controllers = NULL;
1694 char *fstype = "cgroup2";
1695 unsigned long flags = 0;
1696
1697 flags |= MS_NOSUID;
1698 flags |= MS_NOEXEC;
1699 flags |= MS_NODEV;
1700 flags |= MS_RELATIME;
1701
1702 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1703 flags |= MS_RDONLY;
1704
1705 if (h->version != CGROUP2_SUPER_MAGIC) {
1706 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1707 if (!controllers)
1708 return -ENOMEM;
1709 fstype = "cgroup";
1710 }
1711
1712 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1713 if (ret < 0) {
1714 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1715 return -1;
1716 }
1717
1718 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1719 return 0;
1720 }
1721
/* Mount a hierarchy for a container that runs in its own cgroup namespace:
 * a direct (non-bind) mount suffices there.
 */
static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
					       const char *controllerpath)
{
	return __cg_mount_direct(type, h, controllerpath);
}
1727
1728 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1729 const char *controllerpath)
1730 {
1731 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1732 return 0;
1733
1734 return __cg_mount_direct(type, h, controllerpath);
1735 }
1736
/* Automount cgroup filesystems below @root/sys/fs/cgroup according to the
 * LXC_AUTO_CGROUP_* bits in @type. Skipped entirely when cgroup namespaces
 * are supported and no forced mount is required, since the kernel will then
 * provide correct cgroup mounts inside the namespace.
 */
__cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
				      struct lxc_handler *handler,
				      const char *root, int type)
{
	__do_free char *tmpfspath = NULL;
	int i, ret;
	bool has_cgns = false, retval = false, wants_force_mount = false;

	if (!ops->hierarchies)
		return true;

	if ((type & LXC_AUTO_CGROUP_MASK) == 0)
		return true;

	if (type & LXC_AUTO_CGROUP_FORCE) {
		type &= ~LXC_AUTO_CGROUP_FORCE;
		wants_force_mount = true;
	}

	/* A container that will not retain CAP_SYS_ADMIN cannot mount
	 * cgroups itself, so we must mount them on its behalf.
	 */
	if (!wants_force_mount){
		if (!lxc_list_empty(&handler->conf->keepcaps))
			wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
		else
			wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
	}

	has_cgns = cgns_supported();
	if (has_cgns && !wants_force_mount)
		return true;

	/* Resolve the "nospec" defaults to concrete mixed modes. */
	if (type == LXC_AUTO_CGROUP_NOSPEC)
		type = LXC_AUTO_CGROUP_MIXED;
	else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
		type = LXC_AUTO_CGROUP_FULL_MIXED;

	/* Mount tmpfs */
	tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
	ret = safe_mount(NULL, tmpfspath, "tmpfs",
			 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
			 "size=10240k,mode=755", root);
	if (ret < 0)
		goto on_error;

	for (i = 0; ops->hierarchies[i]; i++) {
		__do_free char *controllerpath = NULL, *path2 = NULL;
		struct hierarchy *h = ops->hierarchies[i];
		/* The controller name is the last component of the host
		 * mountpoint, e.g. /sys/fs/cgroup/memory -> "memory".
		 */
		char *controller = strrchr(h->mountpoint, '/');

		if (!controller)
			continue;
		controller++;

		controllerpath = must_make_path(tmpfspath, controller, NULL);
		if (dir_exists(controllerpath))
			continue;

		ret = mkdir(controllerpath, 0755);
		if (ret < 0) {
			SYSERROR("Error creating cgroup path: %s", controllerpath);
			goto on_error;
		}

		if (has_cgns && wants_force_mount) {
			/* If cgroup namespaces are supported but the container
			 * will not have CAP_SYS_ADMIN after it has started we
			 * need to mount the cgroups manually.
			 */
			ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
			if (ret < 0)
				goto on_error;

			continue;
		}

		/* No-op unless @type is one of the FULL modes. */
		ret = cg_mount_cgroup_full(type, h, controllerpath);
		if (ret < 0)
			goto on_error;

		if (!cg_mount_needs_subdirs(type))
			continue;

		/* Create the container's cgroup path under the controller
		 * mountpoint and bind-mount its host cgroup onto it.
		 */
		path2 = must_make_path(controllerpath, h->container_base_path,
				       ops->container_cgroup, NULL);
		ret = mkdir_p(path2, 0755);
		if (ret < 0)
			goto on_error;

		ret = cg_legacy_mount_controllers(type, h, controllerpath,
						  path2, ops->container_cgroup);
		if (ret < 0)
			goto on_error;
	}
	retval = true;

on_error:
	return retval;
}
1834
1835 static int recursive_count_nrtasks(char *dirname)
1836 {
1837 __do_free char *path = NULL;
1838 __do_closedir DIR *dir;
1839 struct dirent *direntp;
1840 int count = 0, ret;
1841
1842 dir = opendir(dirname);
1843 if (!dir)
1844 return 0;
1845
1846 while ((direntp = readdir(dir))) {
1847 struct stat mystat;
1848
1849 if (!strcmp(direntp->d_name, ".") ||
1850 !strcmp(direntp->d_name, ".."))
1851 continue;
1852
1853 path = must_make_path(dirname, direntp->d_name, NULL);
1854
1855 if (lstat(path, &mystat))
1856 continue;
1857
1858 if (!S_ISDIR(mystat.st_mode))
1859 continue;
1860
1861 count += recursive_count_nrtasks(path);
1862 }
1863
1864 path = must_make_path(dirname, "cgroup.procs", NULL);
1865 ret = lxc_count_file_lines(path);
1866 if (ret != -1)
1867 count += ret;
1868
1869 return count;
1870 }
1871
1872 __cgfsng_ops static int cgfsng_nrtasks(struct cgroup_ops *ops)
1873 {
1874 __do_free char *path = NULL;
1875 int count;
1876
1877 if (!ops->container_cgroup || !ops->hierarchies)
1878 return -1;
1879
1880 path = must_make_path(ops->hierarchies[0]->container_full_path, NULL);
1881 count = recursive_count_nrtasks(path);
1882 return count;
1883 }
1884
1885 /* Only root needs to escape to the cgroup of its init. */
1886 __cgfsng_ops static bool cgfsng_escape(const struct cgroup_ops *ops,
1887 struct lxc_conf *conf)
1888 {
1889 int i;
1890
1891 if (conf->cgroup_meta.relative || geteuid() || !ops->hierarchies)
1892 return true;
1893
1894 for (i = 0; ops->hierarchies[i]; i++) {
1895 int ret;
1896 __do_free char *fullpath;
1897
1898 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1899 ops->hierarchies[i]->container_base_path,
1900 "cgroup.procs", NULL);
1901 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1902 if (ret != 0) {
1903 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1904 return false;
1905 }
1906 }
1907
1908 return true;
1909 }
1910
1911 __cgfsng_ops static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1912 {
1913 int i = 0;
1914
1915 if (!ops->hierarchies)
1916 return 0;
1917
1918 for (; ops->hierarchies[i]; i++)
1919 ;
1920
1921 return i;
1922 }
1923
1924 __cgfsng_ops static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
1925 {
1926 int i;
1927
1928 if (!ops->hierarchies)
1929 return false;
1930
1931 /* sanity check n */
1932 for (i = 0; i < n; i++)
1933 if (!ops->hierarchies[i])
1934 return false;
1935
1936 *out = ops->hierarchies[i]->controllers;
1937
1938 return true;
1939 }
1940
1941 #define THAWED "THAWED"
1942 #define THAWED_LEN (strlen(THAWED))
1943
1944 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
1945 * to be adapted.
1946 */
1947 __cgfsng_ops static bool cgfsng_unfreeze(struct cgroup_ops *ops)
1948 {
1949 int ret;
1950 __do_free char *fullpath = NULL;
1951 struct hierarchy *h;
1952
1953 h = get_hierarchy(ops, "freezer");
1954 if (!h)
1955 return false;
1956
1957 fullpath = must_make_path(h->container_full_path, "freezer.state", NULL);
1958 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
1959 if (ret < 0)
1960 return false;
1961
1962 return true;
1963 }
1964
1965 __cgfsng_ops static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
1966 const char *controller)
1967 {
1968 struct hierarchy *h;
1969
1970 h = get_hierarchy(ops, controller);
1971 if (!h) {
1972 WARN("Failed to find hierarchy for controller \"%s\"",
1973 controller ? controller : "(null)");
1974 return NULL;
1975 }
1976
1977 return h->container_full_path ? h->container_full_path + strlen(h->mountpoint) : NULL;
1978 }
1979
/* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
 * which must be freed by the caller.
 *
 * The result is "<h->mountpoint>/<inpath>/<filename>".
 */
static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
						       const char *inpath,
						       const char *filename)
{
	return must_make_path(h->mountpoint, inpath, filename, NULL);
}
1989
1990 /* Technically, we're always at a delegation boundary here (This is especially
1991 * true when cgroup namespaces are available.). The reasoning is that in order
1992 * for us to have been able to start a container in the first place the root
1993 * cgroup must have been a leaf node. Now, either the container's init system
1994 * has populated the cgroup and kept it as a leaf node or it has created
1995 * subtrees. In the former case we will simply attach to the leaf node we
1996 * created when we started the container in the latter case we create our own
1997 * cgroup for the attaching process.
1998 */
1999 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
2000 const char *lxcpath, const char *pidstr,
2001 size_t pidstr_len, const char *controller)
2002 {
2003 __do_free char *base_path = NULL, *container_cgroup = NULL,
2004 *full_path = NULL;
2005 int ret;
2006 size_t len;
2007 int fret = -1, idx = 0;
2008
2009 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2010 /* not running */
2011 if (!container_cgroup)
2012 return 0;
2013
2014 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
2015 full_path = must_make_path(base_path, "cgroup.procs", NULL);
2016 /* cgroup is populated */
2017 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2018 if (ret < 0 && errno != EBUSY)
2019 goto on_error;
2020
2021 if (ret == 0)
2022 goto on_success;
2023
2024 len = strlen(base_path) + STRLITERALLEN("/lxc-1000") +
2025 STRLITERALLEN("/cgroup-procs");
2026 full_path = must_realloc(NULL, len + 1);
2027 do {
2028 if (idx)
2029 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
2030 base_path, idx);
2031 else
2032 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
2033 if (ret < 0 || (size_t)ret >= len + 1)
2034 goto on_error;
2035
2036 ret = mkdir_p(full_path, 0755);
2037 if (ret < 0 && errno != EEXIST)
2038 goto on_error;
2039
2040 (void)strlcat(full_path, "/cgroup.procs", len + 1);
2041 ret = lxc_write_to_file(full_path, pidstr, len, false, 0666);
2042 if (ret == 0)
2043 goto on_success;
2044
2045 /* this is a non-leaf node */
2046 if (errno != EBUSY)
2047 goto on_error;
2048
2049 idx++;
2050 } while (idx < 1000);
2051
2052 on_success:
2053 if (idx < 1000)
2054 fret = 0;
2055
2056 on_error:
2057 return fret;
2058 }
2059
2060 __cgfsng_ops static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
2061 const char *lxcpath, pid_t pid)
2062 {
2063 int i, len, ret;
2064 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
2065
2066 if (!ops->hierarchies)
2067 return true;
2068
2069 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
2070 if (len < 0 || (size_t)len >= sizeof(pidstr))
2071 return false;
2072
2073 for (i = 0; ops->hierarchies[i]; i++) {
2074 __do_free char *path = NULL;
2075 char *fullpath = NULL;
2076 struct hierarchy *h = ops->hierarchies[i];
2077
2078 if (h->version == CGROUP2_SUPER_MAGIC) {
2079 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
2080 h->controllers[0]);
2081 if (ret < 0)
2082 return false;
2083
2084 continue;
2085 }
2086
2087 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
2088 /* not running */
2089 if (!path)
2090 continue;
2091
2092 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2093 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2094 if (ret < 0) {
2095 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
2096 return false;
2097 }
2098 }
2099
2100 return true;
2101 }
2102
2103 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2104 * don't have a cgroup_data set up, so we ask the running container through the
2105 * commands API for the cgroup path.
2106 */
2107 __cgfsng_ops static int cgfsng_get(struct cgroup_ops *ops, const char *filename,
2108 char *value, size_t len, const char *name,
2109 const char *lxcpath)
2110 {
2111 __do_free char *path = NULL;
2112 __do_free char *controller;
2113 char *p;
2114 struct hierarchy *h;
2115 int ret = -1;
2116
2117 controller = must_copy_string(filename);
2118 p = strchr(controller, '.');
2119 if (p)
2120 *p = '\0';
2121
2122 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2123 /* not running */
2124 if (!path)
2125 return -1;
2126
2127 h = get_hierarchy(ops, controller);
2128 if (h) {
2129 __do_free char *fullpath;
2130
2131 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2132 ret = lxc_read_from_file(fullpath, value, len);
2133 }
2134
2135 return ret;
2136 }
2137
2138 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2139 * don't have a cgroup_data set up, so we ask the running container through the
2140 * commands API for the cgroup path.
2141 */
2142 __cgfsng_ops static int cgfsng_set(struct cgroup_ops *ops,
2143 const char *filename, const char *value,
2144 const char *name, const char *lxcpath)
2145 {
2146 __do_free char *path = NULL;
2147 __do_free char *controller;
2148 char *p;
2149 struct hierarchy *h;
2150 int ret = -1;
2151
2152 controller = must_copy_string(filename);
2153 p = strchr(controller, '.');
2154 if (p)
2155 *p = '\0';
2156
2157 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2158 /* not running */
2159 if (!path)
2160 return -1;
2161
2162 h = get_hierarchy(ops, controller);
2163 if (h) {
2164 __do_free char *fullpath;
2165
2166 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2167 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2168 }
2169
2170 return ret;
2171 }
2172
/* take devices cgroup line
 * /dev/foo rwx
 * and convert it to a valid
 * type major:minor mode
 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
 * the output.
 */
static int convert_devpath(const char *invalue, char *dest)
{
	__do_free char *path;
	int n_parts;
	char *p, type;
	unsigned long minor, major;
	struct stat sb;
	int ret = -EINVAL;
	char *mode = NULL;

	path = must_copy_string(invalue);

	/* Read path followed by mode. Ignore any trailing text.
	 * A ' # comment' would be legal. Technically other text is not
	 * legal, we could check for that if we cared to.
	 */
	for (n_parts = 1, p = path; *p && n_parts < 3; p++) {
		if (*p != ' ')
			continue;
		/* Terminate the current token in place. */
		*p = '\0';

		if (n_parts != 1)
			break;
		p++;
		n_parts++;

		/* Skip any run of spaces between path and mode. */
		while (*p == ' ')
			p++;

		mode = p;

		/* A trailing space with no mode following it is invalid. */
		if (*p == '\0')
			goto out;
	}

	/* No mode token at all. */
	if (n_parts == 1)
		goto out;

	/* The device node must exist so we can read its type and numbers. */
	ret = stat(path, &sb);
	if (ret < 0)
		goto out;

	mode_t m = sb.st_mode & S_IFMT;
	switch (m) {
	case S_IFBLK:
		type = 'b';
		break;
	case S_IFCHR:
		type = 'c';
		break;
	default:
		ERROR("Unsupported device type %i for \"%s\"", m, path);
		ret = -EINVAL;
		goto out;
	}

	major = MAJOR(sb.st_rdev);
	minor = MINOR(sb.st_rdev);
	/* @dest is documented as at least 50 bytes (see callers). */
	ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
	if (ret < 0 || ret >= 50) {
		ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
		      "chars)", type, major, minor, mode);
		ret = -ENAMETOOLONG;
		goto out;
	}
	ret = 0;

out:
	return ret;
}
2250
2251 /* Called from setup_limits - here we have the container's cgroup_data because
2252 * we created the cgroups.
2253 */
2254 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2255 const char *value)
2256 {
2257 __do_free char *controller;
2258 __do_free char *fullpath = NULL;
2259 char *p;
2260 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2261 char converted_value[50];
2262 struct hierarchy *h;
2263 int ret = 0;
2264
2265 controller = must_copy_string(filename);
2266 p = strchr(controller, '.');
2267 if (p)
2268 *p = '\0';
2269
2270 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2271 ret = convert_devpath(value, converted_value);
2272 if (ret < 0)
2273 return ret;
2274 value = converted_value;
2275 }
2276
2277 h = get_hierarchy(ops, controller);
2278 if (!h) {
2279 ERROR("Failed to setup limits for the \"%s\" controller. "
2280 "The controller seems to be unused by \"cgfsng\" cgroup "
2281 "driver or not enabled on the cgroup hierarchy",
2282 controller);
2283 errno = ENOENT;
2284 return -ENOENT;
2285 }
2286
2287 fullpath = must_make_path(h->container_full_path, filename, NULL);
2288 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2289 return ret;
2290 }
2291
/* Apply the legacy-hierarchy settings from @cgroup_settings. When
 * @do_devices is true only "devices.*" keys are applied, otherwise only the
 * non-devices keys (the comparison `do_devices == !strncmp(...)` selects one
 * half per call). Returns true on success.
 */
static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
				     struct lxc_list *cgroup_settings,
				     bool do_devices)
{
	__do_free struct lxc_list *sorted_cgroup_settings = NULL;
	struct lxc_list *iterator, *next;
	struct lxc_cgroup *cg;
	bool ret = false;

	if (lxc_list_empty(cgroup_settings))
		return true;

	if (!ops->hierarchies)
		return false;

	sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
	if (!sorted_cgroup_settings)
		return false;

	lxc_list_for_each(iterator, sorted_cgroup_settings) {
		cg = iterator->elem;

		if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
			if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
				/* errno must be inspected before the WARN()
				 * call below can clobber it: EACCES/EPERM on
				 * device entries is tolerated (e.g. running
				 * unprivileged).
				 */
				if (do_devices && (errno == EACCES || errno == EPERM)) {
					WARN("Failed to set \"%s\" to \"%s\"",
					     cg->subsystem, cg->value);
					continue;
				}
				WARN("Failed to set \"%s\" to \"%s\"",
				     cg->subsystem, cg->value);
				goto out;
			}
			DEBUG("Set controller \"%s\" set to \"%s\"",
			      cg->subsystem, cg->value);
		}
	}

	ret = true;
	INFO("Limits for the legacy cgroup hierarchies have been setup");
out:
	/* Free the sorted copy's nodes; the list head itself is released by
	 * the __do_free cleanup above.
	 */
	lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
		lxc_list_del(iterator);
		free(iterator);
	}

	return ret;
}
2340
2341 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2342 struct lxc_list *cgroup_settings)
2343 {
2344 struct lxc_list *iterator;
2345 struct hierarchy *h = ops->unified;
2346
2347 if (lxc_list_empty(cgroup_settings))
2348 return true;
2349
2350 if (!h)
2351 return false;
2352
2353 lxc_list_for_each(iterator, cgroup_settings) {
2354 __do_free char *fullpath;
2355 int ret;
2356 struct lxc_cgroup *cg = iterator->elem;
2357
2358 fullpath = must_make_path(h->container_full_path, cg->subsystem, NULL);
2359 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2360 if (ret < 0) {
2361 SYSERROR("Failed to set \"%s\" to \"%s\"",
2362 cg->subsystem, cg->value);
2363 return false;
2364 }
2365 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2366 }
2367
2368 INFO("Limits for the unified cgroup hierarchy have been setup");
2369 return true;
2370 }
2371
2372 __cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
2373 struct lxc_conf *conf,
2374 bool do_devices)
2375 {
2376 bool bret;
2377
2378 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2379 if (!bret)
2380 return false;
2381
2382 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2383 }
2384
2385 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
2386 char **controllers)
2387 {
2388 char **cur_ctrl, **cur_use;
2389
2390 if (!ops->cgroup_use)
2391 return true;
2392
2393 for (cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
2394 bool found = false;
2395
2396 for (cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
2397 if (strcmp(*cur_use, *cur_ctrl) != 0)
2398 continue;
2399
2400 found = true;
2401 break;
2402 }
2403
2404 if (found)
2405 continue;
2406
2407 return false;
2408 }
2409
2410 return true;
2411 }
2412
2413 static void cg_unified_delegate(char ***delegate)
2414 {
2415 __do_free char *tmp;
2416 int idx;
2417 char *standard[] = {"cgroup.subtree_control", "cgroup.threads", NULL};
2418
2419 tmp = read_file("/sys/kernel/cgroup/delegate");
2420 if (!tmp) {
2421 for (char **p = standard; p && *p; p++) {
2422 idx = append_null_to_list((void ***)delegate);
2423 (*delegate)[idx] = must_copy_string(*p);
2424 }
2425 } else {
2426 char *token;
2427 lxc_iterate_parts (token, tmp, " \t\n") {
2428 /*
2429 * We always need to chown this for both cgroup and
2430 * cgroup2.
2431 */
2432 if (strcmp(token, "cgroup.procs") == 0)
2433 continue;
2434
2435 idx = append_null_to_list((void ***)delegate);
2436 (*delegate)[idx] = must_copy_string(token);
2437 }
2438 }
2439 }
2440
/* At startup, cg_hybrid_init finds all the info we need about cgroup
 * mountpoints and current cgroups, and stores it in @ops->hierarchies.
 *
 * It walks /proc/self/mountinfo, registering every writable cgroup (v1) and
 * cgroup2 mount it finds, and classifies the overall layout as legacy,
 * unified, or hybrid. Returns false if the base cgroup info cannot be read
 * or if required controllers (per lxc.cgroup.use) are missing.
 */
static bool cg_hybrid_init(struct cgroup_ops *ops, bool relative,
			   bool unprivileged)
{
	__do_free char *basecginfo;
	__do_free char *line = NULL;
	__do_fclose FILE *f = NULL;
	int ret;
	size_t len = 0;
	char **klist = NULL, **nlist = NULL;

	/* Root spawned containers escape the current cgroup, so use init's
	 * cgroups as our base in that case.
	 */
	if (!relative && (geteuid() == 0))
		basecginfo = read_file("/proc/1/cgroup");
	else
		basecginfo = read_file("/proc/self/cgroup");
	if (!basecginfo)
		return false;

	/* klist: kernel-known controllers; nlist: named (name=) hierarchies. */
	ret = get_existing_subsystems(&klist, &nlist);
	if (ret < 0) {
		ERROR("Failed to retrieve available legacy cgroup controllers");
		return false;
	}

	f = fopen("/proc/self/mountinfo", "r");
	if (!f) {
		ERROR("Failed to open \"/proc/self/mountinfo\"");
		return false;
	}

	lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);

	/* One mountinfo entry per line; skip anything that is not a cgroup
	 * mount, and bail to "next" to release per-entry allocations on any
	 * per-entry failure. */
	while (getline(&line, &len, f) != -1) {
		int type;
		bool writeable;
		struct hierarchy *new;
		char *base_cgroup = NULL, *mountpoint = NULL;
		char **controller_list = NULL;

		/* 0 = not a cgroup mount; otherwise CGROUP_SUPER_MAGIC (v1)
		 * or CGROUP2_SUPER_MAGIC (v2). */
		type = get_cgroup_version(line);
		if (type == 0)
			continue;

		/* Only one unified hierarchy can exist; ignore duplicates. */
		if (type == CGROUP2_SUPER_MAGIC && ops->unified)
			continue;

		/* Classify the layout: pure v1, pure v2, or hybrid once both
		 * kinds have been seen. */
		if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
			if (type == CGROUP2_SUPER_MAGIC)
				ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
			else if (type == CGROUP_SUPER_MAGIC)
				ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
		} else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
			if (type == CGROUP_SUPER_MAGIC)
				ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
		} else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
			if (type == CGROUP2_SUPER_MAGIC)
				ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
		}

		controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
		if (!controller_list && type == CGROUP_SUPER_MAGIC)
			continue;

		/* Skip v1 mounts whose controller set we already track. */
		if (type == CGROUP_SUPER_MAGIC)
			if (controller_list_is_dup(ops->hierarchies, controller_list))
				goto next;

		mountpoint = cg_hybrid_get_mountpoint(line);
		if (!mountpoint) {
			ERROR("Failed parsing mountpoint from \"%s\"", line);
			goto next;
		}

		if (type == CGROUP_SUPER_MAGIC)
			base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
		else
			base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
		if (!base_cgroup) {
			ERROR("Failed to find current cgroup");
			goto next;
		}

		trim(base_cgroup);
		prune_init_scope(base_cgroup);
		/* Hierarchies we cannot write to are useless to us. */
		if (type == CGROUP2_SUPER_MAGIC)
			writeable = test_writeable_v2(mountpoint, base_cgroup);
		else
			writeable = test_writeable_v1(mountpoint, base_cgroup);
		if (!writeable)
			goto next;

		/* For v2, the controller list comes from cgroup.controllers
		 * in our base cgroup rather than from the mountinfo line. */
		if (type == CGROUP2_SUPER_MAGIC) {
			char *cgv2_ctrl_path;

			cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
							"cgroup.controllers",
							NULL);

			controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
			free(cgv2_ctrl_path);
			if (!controller_list) {
				controller_list = cg_unified_make_empty_controller();
				TRACE("No controllers are enabled for "
				      "delegation in the unified hierarchy");
			}
		}

		/* Exclude all controllers that cgroup use does not want. */
		if (!cgroup_use_wants_controllers(ops, controller_list))
			goto next;

		/* Ownership of controller_list, mountpoint, and base_cgroup
		 * transfers to the hierarchy on success. */
		new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
		if (type == CGROUP2_SUPER_MAGIC && !ops->unified) {
			if (unprivileged)
				cg_unified_delegate(&new->cgroup2_chown);
			ops->unified = new;
		}

		continue;

	next:
		free_string_list(controller_list);
		free(mountpoint);
		free(base_cgroup);
	}

	free_string_list(klist);
	free_string_list(nlist);

	TRACE("Writable cgroup hierarchies:");
	lxc_cgfsng_print_hierarchies(ops);

	/* verify that all controllers in cgroup.use and all crucial
	 * controllers are accounted for
	 */
	if (!all_controllers_found(ops))
		return false;

	return true;
}
2586
2587 static int cg_is_pure_unified(void)
2588 {
2589
2590 int ret;
2591 struct statfs fs;
2592
2593 ret = statfs("/sys/fs/cgroup", &fs);
2594 if (ret < 0)
2595 return -ENOMEDIUM;
2596
2597 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2598 return CGROUP2_SUPER_MAGIC;
2599
2600 return 0;
2601 }
2602
2603 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
2604 static char *cg_unified_get_current_cgroup(bool relative)
2605 {
2606 __do_free char *basecginfo;
2607 char *base_cgroup;
2608 char *copy = NULL;
2609
2610 if (!relative && (geteuid() == 0))
2611 basecginfo = read_file("/proc/1/cgroup");
2612 else
2613 basecginfo = read_file("/proc/self/cgroup");
2614 if (!basecginfo)
2615 return NULL;
2616
2617 base_cgroup = strstr(basecginfo, "0::/");
2618 if (!base_cgroup)
2619 goto cleanup_on_err;
2620
2621 base_cgroup = base_cgroup + 3;
2622 copy = copy_to_eol(base_cgroup);
2623 if (!copy)
2624 goto cleanup_on_err;
2625
2626 cleanup_on_err:
2627 if (copy)
2628 trim(copy);
2629
2630 return copy;
2631 }
2632
2633 static int cg_unified_init(struct cgroup_ops *ops, bool relative,
2634 bool unprivileged)
2635 {
2636 __do_free char *subtree_path = NULL;
2637 int ret;
2638 char *mountpoint, *tmp;
2639 char **delegatable;
2640 struct hierarchy *new;
2641 char *base_cgroup = NULL;
2642
2643 ret = cg_is_pure_unified();
2644 if (ret == -ENOMEDIUM)
2645 return -ENOMEDIUM;
2646
2647 if (ret != CGROUP2_SUPER_MAGIC)
2648 return 0;
2649
2650 base_cgroup = cg_unified_get_current_cgroup(relative);
2651 if (!base_cgroup)
2652 return -EINVAL;
2653 prune_init_scope(base_cgroup);
2654
2655 /* We assume that we have already been given controllers to delegate
2656 * further down the hierarchy. If not it is up to the user to delegate
2657 * them to us.
2658 */
2659 mountpoint = must_copy_string("/sys/fs/cgroup");
2660 subtree_path = must_make_path(mountpoint, base_cgroup,
2661 "cgroup.subtree_control", NULL);
2662 delegatable = cg_unified_get_controllers(subtree_path);
2663 if (!delegatable)
2664 delegatable = cg_unified_make_empty_controller();
2665 if (!delegatable[0])
2666 TRACE("No controllers are enabled for delegation");
2667
2668 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2669 * we should verify here. The reason I'm not doing it right is that I'm
2670 * not convinced that lxc.cgroup.use will be the future since it is a
2671 * global property. I much rather have an option that lets you request
2672 * controllers per container.
2673 */
2674
2675 new = add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2676 if (!unprivileged)
2677 cg_unified_delegate(&new->cgroup2_chown);
2678
2679 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2680 return CGROUP2_SUPER_MAGIC;
2681 }
2682
2683 static bool cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
2684 {
2685 int ret;
2686 const char *tmp;
2687 bool relative = conf->cgroup_meta.relative;
2688
2689 tmp = lxc_global_config_value("lxc.cgroup.use");
2690 if (tmp) {
2691 __do_free char *pin;
2692 char *chop, *cur;
2693
2694 pin = must_copy_string(tmp);
2695 chop = pin;
2696
2697 lxc_iterate_parts(cur, chop, ",")
2698 must_append_string(&ops->cgroup_use, cur);
2699 }
2700
2701 ret = cg_unified_init(ops, relative, !lxc_list_empty(&conf->id_map));
2702 if (ret < 0)
2703 return false;
2704
2705 if (ret == CGROUP2_SUPER_MAGIC)
2706 return true;
2707
2708 return cg_hybrid_init(ops, relative, !lxc_list_empty(&conf->id_map));
2709 }
2710
2711 __cgfsng_ops static bool cgfsng_data_init(struct cgroup_ops *ops)
2712 {
2713 const char *cgroup_pattern;
2714
2715 /* copy system-wide cgroup information */
2716 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2717 if (!cgroup_pattern) {
2718 /* lxc.cgroup.pattern is only NULL on error. */
2719 ERROR("Failed to retrieve cgroup pattern");
2720 return false;
2721 }
2722 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2723 ops->monitor_pattern = MONITOR_CGROUP;
2724
2725 return true;
2726 }
2727
2728 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
2729 {
2730 struct cgroup_ops *cgfsng_ops;
2731
2732 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2733 if (!cgfsng_ops)
2734 return NULL;
2735
2736 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2737 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2738
2739 if (!cg_init(cgfsng_ops, conf)) {
2740 free(cgfsng_ops);
2741 return NULL;
2742 }
2743
2744 cgfsng_ops->data_init = cgfsng_data_init;
2745 cgfsng_ops->payload_destroy = cgfsng_payload_destroy;
2746 cgfsng_ops->monitor_destroy = cgfsng_monitor_destroy;
2747 cgfsng_ops->monitor_create = cgfsng_monitor_create;
2748 cgfsng_ops->monitor_enter = cgfsng_monitor_enter;
2749 cgfsng_ops->payload_create = cgfsng_payload_create;
2750 cgfsng_ops->payload_enter = cgfsng_payload_enter;
2751 cgfsng_ops->escape = cgfsng_escape;
2752 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2753 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2754 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2755 cgfsng_ops->get = cgfsng_get;
2756 cgfsng_ops->set = cgfsng_set;
2757 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2758 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2759 cgfsng_ops->driver = "cgfsng";
2760 cgfsng_ops->version = "1.0.0";
2761 cgfsng_ops->attach = cgfsng_attach;
2762 cgfsng_ops->chown = cgfsng_chown;
2763 cgfsng_ops->mount = cgfsng_mount;
2764 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2765
2766 return cgfsng_ops;
2767 }