1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
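/* For example, a legacy mount such as /sys/fs/cgroup/cpu,cpuacct satisfies
 * this assumption; the actual layout is discovered from /proc/self/mountinfo
 * below.
 */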
36
37 #ifndef _GNU_SOURCE
38 #define _GNU_SOURCE 1
39 #endif
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <grp.h>
44 #include <linux/kdev_t.h>
45 #include <linux/types.h>
46 #include <stdint.h>
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <string.h>
50 #include <sys/types.h>
51 #include <unistd.h>
52
53 #include "caps.h"
54 #include "cgroup.h"
55 #include "cgroup_utils.h"
56 #include "commands.h"
57 #include "conf.h"
58 #include "config.h"
59 #include "log.h"
60 #include "macro.h"
61 #include "memory_utils.h"
62 #include "storage/storage.h"
63 #include "utils.h"
64
65 #ifndef HAVE_STRLCPY
66 #include "include/strlcpy.h"
67 #endif
68
69 #ifndef HAVE_STRLCAT
70 #include "include/strlcat.h"
71 #endif
72
73 lxc_log_define(cgfsng, cgroup);
74
75 static void free_string_list(char **clist)
76 {
77 int i;
78
79 if (!clist)
80 return;
81
82 for (i = 0; clist[i]; i++)
83 free(clist[i]);
84
85 free(clist);
86 }
87
88 /* Given a pointer to a null-terminated array of pointers, realloc to add one
89 * entry, and point the new entry to NULL. Do not fail. Return the index to the
90 * second-to-last entry - that is, the one which is now available for use
91 * (keeping the list null-terminated).
92 */
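/* Illustrative usage sketch, e.g. building a controller list:
 *
 *	char **list = NULL;
 *	int idx = append_null_to_list((void ***)&list);
 *	list[idx] = must_copy_string("cpu");	// list is now { "cpu", NULL }
 */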
93 static int append_null_to_list(void ***list)
94 {
95 int newentry = 0;
96
97 if (*list)
98 for (; (*list)[newentry]; newentry++)
99 ;
100
101 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
102 (*list)[newentry + 1] = NULL;
103 return newentry;
104 }
105
106 /* Given a null-terminated array of strings, check whether @entry is one of the
107 * strings.
108 */
109 static bool string_in_list(char **list, const char *entry)
110 {
111 int i;
112
113 if (!list)
114 return false;
115
116 for (i = 0; list[i]; i++)
117 if (strcmp(list[i], entry) == 0)
118 return true;
119
120 return false;
121 }
122
123 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
124 * "name=systemd". Do not fail.
125 */
126 static char *cg_legacy_must_prefix_named(char *entry)
127 {
128 size_t len;
129 char *prefixed;
130
131 len = strlen(entry);
132 prefixed = must_realloc(NULL, len + 6);
133
134 memcpy(prefixed, "name=", STRLITERALLEN("name="));
135 memcpy(prefixed + STRLITERALLEN("name="), entry, len);
136 prefixed[len + 5] = '\0';
137
138 return prefixed;
139 }
140
141 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
142 * we are called.
143 *
144 * We also handle named subsystems here. Any controller which is not a kernel
145 * subsystem is prefixed with "name=". Any controller which is both a kernel and
146 * a named subsystem is refused, because we cannot tell which of the two we have.
147 * (TODO: We could work around this in some cases by just remounting to be
148 * unambiguous, or by comparing mountpoint contents with current cgroup.)
149 *
150 * The last entry will always be NULL.
151 */
152 static void must_append_controller(char **klist, char **nlist, char ***clist,
153 char *entry)
154 {
155 int newentry;
156 char *copy;
157
158 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
159 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
160 ERROR("It is both a named and kernel subsystem");
161 return;
162 }
163
164 newentry = append_null_to_list((void ***)clist);
165
166 if (strncmp(entry, "name=", 5) == 0)
167 copy = must_copy_string(entry);
168 else if (string_in_list(klist, entry))
169 copy = must_copy_string(entry);
170 else
171 copy = cg_legacy_must_prefix_named(entry);
172
173 (*clist)[newentry] = copy;
174 }
175
176 /* Given a cgroup_ops handle, return the struct hierarchy for the controller
177 * @controller, or NULL if there is none.
178 */
179 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *controller)
180 {
181 int i;
182
183 errno = ENOENT;
184
185 if (!ops->hierarchies) {
186 TRACE("There are no useable cgroup controllers");
187 return NULL;
188 }
189
190 for (i = 0; ops->hierarchies[i]; i++) {
191 if (!controller) {
192 /* This is the empty unified hierarchy. */
193 if (ops->hierarchies[i]->controllers &&
194 !ops->hierarchies[i]->controllers[0])
195 return ops->hierarchies[i];
196
197 continue;
198 }
199
200 if (string_in_list(ops->hierarchies[i]->controllers, controller))
201 return ops->hierarchies[i];
202 }
203
204 if (controller)
205 WARN("There is no useable %s controller", controller);
206 else
207 WARN("There is no empty unified cgroup hierarchy");
208
209 return NULL;
210 }
211
212 #define BATCH_SIZE 50
213 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
214 {
215 int newbatches = (newlen / BATCH_SIZE) + 1;
216 int oldbatches = (oldlen / BATCH_SIZE) + 1;
217
218 if (!*mem || newbatches > oldbatches) {
219 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
220 }
221 }
222
223 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
224 {
225 size_t full = oldlen + newlen;
226
227 batch_realloc(dest, oldlen, full + 1);
228
229 memcpy(*dest + oldlen, new, newlen + 1);
230 }
231
232 /* Slurp in a whole file */
233 static char *read_file(const char *fnam)
234 {
235 __do_free char *line = NULL;
236 __do_fclose FILE *f = NULL;
237 int linelen;
238 char *buf = NULL;
239 size_t len = 0, fulllen = 0;
240
241 f = fopen(fnam, "r");
242 if (!f)
243 return NULL;
244 while ((linelen = getline(&line, &len, f)) != -1) {
245 append_line(&buf, fulllen, line, linelen);
246 fulllen += linelen;
247 }
248 return buf;
249 }
250
251 /* Taken over and modified from the kernel sources. */
252 #define NBITS 32 /* bits in uint32_t */
253 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
254 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
255
256 static void set_bit(unsigned bit, uint32_t *bitarr)
257 {
258 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
259 }
260
261 static void clear_bit(unsigned bit, uint32_t *bitarr)
262 {
263 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
264 }
265
266 static bool is_set(unsigned bit, uint32_t *bitarr)
267 {
268 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
269 }
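/* Worked example for the helpers above: with NBITS == 32, bit 33 lives in
 * word 1, so set_bit(33, arr) does arr[1] |= (1 << 1), and BITS_TO_LONGS(33)
 * rounds up to 2 words.
 */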
270
271 /* Create cpumask from cpulist aka turn:
272 *
273 * 0,2-3
274 *
275 * into bit array
276 *
277 * 1 0 1 1
278 */
279 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
280 {
281 char *token;
282 size_t arrlen;
283 uint32_t *bitarr;
284
285 arrlen = BITS_TO_LONGS(nbits);
286 bitarr = calloc(arrlen, sizeof(uint32_t));
287 if (!bitarr)
288 return NULL;
289
290 lxc_iterate_parts(token, buf, ",") {
291 errno = 0;
292 unsigned end, start;
293 char *range;
294
295 start = strtoul(token, NULL, 0);
296 end = start;
297 range = strchr(token, '-');
298 if (range)
299 end = strtoul(range + 1, NULL, 0);
300
301 if (start > end) {
302 free(bitarr);
303 return NULL;
304 }
305
306 if (end >= nbits) {
307 free(bitarr);
308 return NULL;
309 }
310
311 while (start <= end)
312 set_bit(start++, bitarr);
313 }
314
315 return bitarr;
316 }
317
318 /* Turn cpumask into simple, comma-separated cpulist. */
319 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
320 {
321 int ret;
322 size_t i;
323 char **cpulist = NULL;
324 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
325
326 for (i = 0; i < nbits; i++) {
327 if (!is_set(i, bitarr))
328 continue;
329
330 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
331 if (ret < 0 || (size_t)ret >= sizeof(numstr)) {
332 lxc_free_array((void **)cpulist, free);
333 return NULL;
334 }
335
336 ret = lxc_append_string(&cpulist, numstr);
337 if (ret < 0) {
338 lxc_free_array((void **)cpulist, free);
339 return NULL;
340 }
341 }
342
343 if (!cpulist)
344 return NULL;
345
346 return lxc_string_join(",", (const char **)cpulist, false);
347 }
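/* E.g. a mask with bits 0, 2 and 3 set is rendered as "0,2,3"; ranges are
 * not re-collapsed into "2-3".
 */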
348
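/* Return the highest cpu index mentioned in a cpulist (not a count), e.g.
 * "0-3,7" yields 7: only the token after the later of the last ',' and the
 * last '-' is parsed.
 */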
349 static ssize_t get_max_cpus(char *cpulist)
350 {
351 char *c1, *c2;
352 char *maxcpus = cpulist;
353 size_t cpus = 0;
354
355 c1 = strrchr(maxcpus, ',');
356 if (c1)
357 c1++;
358
359 c2 = strrchr(maxcpus, '-');
360 if (c2)
361 c2++;
362
363 if (!c1 && !c2)
364 c1 = maxcpus;
365 else if (!c1)
366 c1 = c2;
367 else if (c2 && c2 > c1)
368 c1 = c2;
371
372 errno = 0;
373 cpus = strtoul(c1, NULL, 0);
374 if (errno != 0)
375 return -1;
376
377 return cpus;
378 }
379
380 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
381 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
382 {
383 __do_free char *cpulist = NULL, *fpath = NULL, *isolcpus = NULL,
384 *posscpus = NULL;
385 __do_free uint32_t *isolmask = NULL, *possmask = NULL;
386 int ret;
387 ssize_t i;
388 char oldv;
389 char *lastslash;
390 ssize_t maxisol = 0, maxposs = 0;
391 bool bret = false, flipped_bit = false;
392
393 lastslash = strrchr(path, '/');
394 if (!lastslash) {
395 ERROR("Failed to detect \"/\" in \"%s\"", path);
396 return bret;
397 }
398 oldv = *lastslash;
399 *lastslash = '\0';
400 fpath = must_make_path(path, "cpuset.cpus", NULL);
401 posscpus = read_file(fpath);
402 if (!posscpus) {
403 SYSERROR("Failed to read file \"%s\"", fpath);
404 return false;
405 }
406
407 /* Get maximum number of cpus found in possible cpuset. */
408 maxposs = get_max_cpus(posscpus);
409 if (maxposs < 0 || maxposs >= INT_MAX - 1)
410 return false;
411
412 if (!file_exists(__ISOL_CPUS)) {
413 /* This system doesn't expose isolated cpus. */
414 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
415 /* No isolated cpus but we weren't already initialized by
416 * someone. We should simply copy the parent's cpuset.cpus
417 * values.
418 */
419 if (!am_initialized) {
420 DEBUG("Copying cpu settings of parent cgroup");
421 cpulist = posscpus;
422 goto copy_parent;
423 }
424 /* No isolated cpus but we were already initialized by someone.
425 * Nothing more to do for us.
426 */
427 return true;
428 }
429
430 isolcpus = read_file(__ISOL_CPUS);
431 if (!isolcpus) {
432 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
433 return false;
434 }
435 if (!isdigit(isolcpus[0])) {
436 TRACE("No isolated cpus detected");
437 /* No isolated cpus but we weren't already initialized by
438 * someone. We should simply copy the parent's cpuset.cpus
439 * values.
440 */
441 if (!am_initialized) {
442 DEBUG("Copying cpu settings of parent cgroup");
443 cpulist = posscpus;
444 goto copy_parent;
445 }
446 /* No isolated cpus but we were already initialized by someone.
447 * Nothing more to do for us.
448 */
449 return true;
450 }
451
452 /* Get maximum number of cpus found in isolated cpuset. */
453 maxisol = get_max_cpus(isolcpus);
454 if (maxisol < 0 || maxisol >= INT_MAX - 1)
455 return false;
456
457 if (maxposs < maxisol)
458 maxposs = maxisol;
459 maxposs++;
460
461 possmask = lxc_cpumask(posscpus, maxposs);
462 if (!possmask) {
463 ERROR("Failed to create cpumask for possible cpus");
464 return false;
465 }
466
467 isolmask = lxc_cpumask(isolcpus, maxposs);
468 if (!isolmask) {
469 ERROR("Failed to create cpumask for isolated cpus");
470 return false;
471 }
472
473 for (i = 0; i < maxposs; i++) {
474 if (!is_set(i, isolmask) || !is_set(i, possmask))
475 continue;
476
477 flipped_bit = true;
478 clear_bit(i, possmask);
479 }
480
481 if (!flipped_bit) {
482 DEBUG("No isolated cpus present in cpuset");
483 return true;
484 }
485 DEBUG("Removed isolated cpus from cpuset");
486
487 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
488 if (!cpulist) {
489 ERROR("Failed to create cpu list");
490 return false;
491 }
492
493 copy_parent:
494 *lastslash = oldv;
495 fpath = must_make_path(path, "cpuset.cpus", NULL);
496 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
497 if (cpulist == posscpus)
498 cpulist = NULL;
499 if (ret < 0) {
500 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
501 return false;
502 }
503
504 return true;
505 }
506
507 /* Copy contents of parent(@path)/@file to @path/@file */
508 static bool copy_parent_file(char *path, char *file)
509 {
510 __do_free char *child_path = NULL, *parent_path = NULL, *value = NULL;
511 int ret;
512 char oldv;
513 int len = 0;
514 char *lastslash = NULL;
515
516 lastslash = strrchr(path, '/');
517 if (!lastslash) {
518 ERROR("Failed to detect \"/\" in \"%s\"", path);
519 return false;
520 }
521 oldv = *lastslash;
522 *lastslash = '\0';
523 parent_path = must_make_path(path, file, NULL);
524 len = lxc_read_from_file(parent_path, NULL, 0);
525 if (len <= 0)
526 goto on_error;
527
528 value = must_realloc(NULL, len + 1);
529 ret = lxc_read_from_file(parent_path, value, len);
530 if (ret != len)
531 goto on_error;
532
533 *lastslash = oldv;
534 child_path = must_make_path(path, file, NULL);
535 ret = lxc_write_to_file(child_path, value, len, false, 0666);
536 if (ret < 0)
537 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, child_path);
538 return ret >= 0;
539
540 on_error:
541 SYSERROR("Failed to read file \"%s\"", child_path);
542 return false;
543 }
544
545 /* Initialize the cpuset hierarchy in the first directory of @cgname and set
546 * cgroup.clone_children so that children inherit settings. Since the
547 * h->container_base_path is populated by init or ourselves, we know it is
548 * already initialized.
549 */
550 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
551 {
552 __do_free char *cgpath = NULL, *clonechildrenpath = NULL;
553 int ret;
554 char v;
555 char *slash;
556
557 if (!string_in_list(h->controllers, "cpuset"))
558 return true;
559
560 if (*cgname == '/')
561 cgname++;
562 slash = strchr(cgname, '/');
563 if (slash)
564 *slash = '\0';
565
566 cgpath = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
567 if (slash)
568 *slash = '/';
569
570 ret = mkdir(cgpath, 0755);
571 if (ret < 0) {
572 if (errno != EEXIST) {
573 SYSERROR("Failed to create directory \"%s\"", cgpath);
574 return false;
575 }
576 }
577
578 clonechildrenpath = must_make_path(cgpath, "cgroup.clone_children", NULL);
579 /* unified hierarchy doesn't have clone_children */
580 if (!file_exists(clonechildrenpath))
581 return true;
582
583 ret = lxc_read_from_file(clonechildrenpath, &v, 1);
584 if (ret < 0) {
585 SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
586 return false;
587 }
588
589 /* Make sure any isolated cpus are removed from cpuset.cpus. */
590 if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
591 SYSERROR("Failed to remove isolated cpus");
592 return false;
593 }
594
595 /* Already set for us by someone else. */
596 if (v == '1') {
597 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
598 return true;
599 }
600
601 /* copy parent's settings */
602 if (!copy_parent_file(cgpath, "cpuset.mems")) {
603 SYSERROR("Failed to copy \"cpuset.mems\" settings");
604 return false;
605 }
606
607 ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
608 if (ret < 0) {
609 /* Set clone_children so children inherit our settings */
610 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
611 return false;
612 }
613
614 return true;
615 }
616
617 /* Given two null-terminated lists of strings, return true if any string is in
618 * both.
619 */
620 static bool controller_lists_intersect(char **l1, char **l2)
621 {
622 int i;
623
624 if (!l1 || !l2)
625 return false;
626
627 for (i = 0; l1[i]; i++) {
628 if (string_in_list(l2, l1[i]))
629 return true;
630 }
631
632 return false;
633 }
634
635 /* For a null-terminated list of controllers @clist, return true if any of those
636 * controllers is already listed in the null-terminated list of hierarchies @hlist.
637 * Realistically, if one is present, all must be present.
638 */
639 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
640 {
641 int i;
642
643 if (!hlist)
644 return false;
645
646 for (i = 0; hlist[i]; i++)
647 if (controller_lists_intersect(hlist[i]->controllers, clist))
648 return true;
649
650 return false;
651 }
652
653 /* Return true if the controller @entry is found in the null-terminated list of
654 * hierarchies @hlist.
655 */
656 static bool controller_found(struct hierarchy **hlist, char *entry)
657 {
658 int i;
659
660 if (!hlist)
661 return false;
662
663 for (i = 0; hlist[i]; i++)
664 if (string_in_list(hlist[i]->controllers, entry))
665 return true;
666
667 return false;
668 }
669
670 /* Return true if all of the controllers which we require have been found. The
671 * required list is freezer and anything in lxc.cgroup.use.
672 */
673 static bool all_controllers_found(struct cgroup_ops *ops)
674 {
675 char **cur;
676 struct hierarchy **hlist = ops->hierarchies;
677
678 if (!ops->cgroup_use)
679 return true;
680
681 for (cur = ops->cgroup_use; cur && *cur; cur++)
682 if (!controller_found(hlist, *cur)) {
683 ERROR("No %s controller mountpoint found", *cur);
684 return false;
685 }
686
687 return true;
688 }
689
690 /* Get the controllers from a mountinfo line. There are other ways we could get
691 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
692 * could parse the mount options. But we simply assume that the mountpoint must
693 * be /sys/fs/cgroup/controller-list
694 */
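/* For illustration, a legacy cgroup mountinfo line looks roughly like
 *
 *	30 24 0:26 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,... - cgroup cgroup rw,cpu,cpuacct
 *
 * (the numeric values are made up). Skipping four spaces lands on the mount
 * point field, whose trailing "cpu,cpuacct" part is split into the controller
 * list.
 */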
695 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
696 int type)
697 {
698 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
699 * for legacy hierarchies.
700 */
701 int i;
702 char *p2, *tok;
703 char *p = line, *sep = ",";
704 char **aret = NULL;
705
706 for (i = 0; i < 4; i++) {
707 p = strchr(p, ' ');
708 if (!p)
709 return NULL;
710 p++;
711 }
712
713 /* Note, if we change how mountinfo works, then our caller will need to
714 * verify /sys/fs/cgroup/ in this field.
715 */
716 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
717 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
718 return NULL;
719 }
720
721 p += 15;
722 p2 = strchr(p, ' ');
723 if (!p2) {
724 ERROR("Corrupt mountinfo");
725 return NULL;
726 }
727 *p2 = '\0';
728
729 if (type == CGROUP_SUPER_MAGIC) {
730 __do_free char *dup = NULL;
731
732 /* strdup() here for v1 hierarchies. Otherwise
733 * lxc_iterate_parts() will destroy mountpoints such as
734 * "/sys/fs/cgroup/cpu,cpuacct".
735 */
736 dup = must_copy_string(p);
737 if (!dup)
738 return NULL;
739
740 lxc_iterate_parts (tok, dup, sep)
741 must_append_controller(klist, nlist, &aret, tok);
742 }
743 *p2 = ' ';
744
745 return aret;
746 }
747
748 static char **cg_unified_make_empty_controller(void)
749 {
750 int newentry;
751 char **aret = NULL;
752
753 newentry = append_null_to_list((void ***)&aret);
754 aret[newentry] = NULL;
755 return aret;
756 }
757
758 static char **cg_unified_get_controllers(const char *file)
759 {
760 __do_free char *buf = NULL;
761 char *tok;
762 char *sep = " \t\n";
763 char **aret = NULL;
764
765 buf = read_file(file);
766 if (!buf)
767 return NULL;
768
769 lxc_iterate_parts(tok, buf, sep) {
770 int newentry;
771 char *copy;
772
773 newentry = append_null_to_list((void ***)&aret);
774 copy = must_copy_string(tok);
775 aret[newentry] = copy;
776 }
777
778 return aret;
779 }
780
781 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
782 char *container_base_path, int type)
783 {
784 struct hierarchy *new;
785 int newentry;
786
787 new = must_realloc(NULL, sizeof(*new));
788 new->controllers = clist;
789 new->mountpoint = mountpoint;
790 new->container_base_path = container_base_path;
791 new->container_full_path = NULL;
792 new->monitor_full_path = NULL;
793 new->version = type;
794 new->cgroup2_chown = NULL;
795
796 newentry = append_null_to_list((void ***)h);
797 (*h)[newentry] = new;
798 return new;
799 }
800
801 /* Get a copy of the mountpoint from @line, which is a line from
802 * /proc/self/mountinfo.
803 */
804 static char *cg_hybrid_get_mountpoint(char *line)
805 {
806 int i;
807 size_t len;
808 char *p2;
809 char *p = line, *sret = NULL;
810
811 for (i = 0; i < 4; i++) {
812 p = strchr(p, ' ');
813 if (!p)
814 return NULL;
815 p++;
816 }
817
818 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
819 return NULL;
820
821 p2 = strchr(p + 15, ' ');
822 if (!p2)
823 return NULL;
824 *p2 = '\0';
825
826 len = strlen(p);
827 sret = must_realloc(NULL, len + 1);
828 memcpy(sret, p, len);
829 sret[len] = '\0';
830 return sret;
831 }
832
833 /* Given a multi-line string, return a null-terminated copy of the current line. */
834 static char *copy_to_eol(char *p)
835 {
836 char *p2 = strchr(p, '\n'), *sret;
837 size_t len;
838
839 if (!p2)
840 return NULL;
841
842 len = p2 - p;
843 sret = must_realloc(NULL, len + 1);
844 memcpy(sret, p, len);
845 sret[len] = '\0';
846 return sret;
847 }
848
849 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
850 * /proc/self/cgroup file. Check whether controller c is present.
851 */
852 static bool controller_in_clist(char *cgline, char *c)
853 {
854 __do_free char *tmp = NULL;
855 char *tok, *eol;
856 size_t len;
857
858 eol = strchr(cgline, ':');
859 if (!eol)
860 return false;
861
862 len = eol - cgline;
863 tmp = must_realloc(NULL, len + 1);
864 memcpy(tmp, cgline, len);
865 tmp[len] = '\0';
866
867 lxc_iterate_parts(tok, tmp, ",")
868 if (strcmp(tok, c) == 0)
869 return true;
870
871 return false;
872 }
873
874 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
875 * @controller.
876 */
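/* Illustrative /proc/<pid>/cgroup entries ("hierarchy-id:controllers:path"):
 *
 *	4:cpu,cpuacct:/lxc/c1
 *	0::/lxc/c1
 *
 * Asked for the legacy "cpu" controller this would return "/lxc/c1"; the
 * "0::" line is the cgroup v2 entry handled via CGROUP2_SUPER_MAGIC.
 */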
877 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
878 int type)
879 {
880 char *p = basecginfo;
881
882 for (;;) {
883 bool is_cgv2_base_cgroup = false;
884
885 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
886 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
887 is_cgv2_base_cgroup = true;
888
889 p = strchr(p, ':');
890 if (!p)
891 return NULL;
892 p++;
893
894 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
895 p = strchr(p, ':');
896 if (!p)
897 return NULL;
898 p++;
899 return copy_to_eol(p);
900 }
901
902 p = strchr(p, '\n');
903 if (!p)
904 return NULL;
905 p++;
906 }
907 }
908
909 static void must_append_string(char ***list, char *entry)
910 {
911 int newentry;
912 char *copy;
913
914 newentry = append_null_to_list((void ***)list);
915 copy = must_copy_string(entry);
916 (*list)[newentry] = copy;
917 }
918
919 static int get_existing_subsystems(char ***klist, char ***nlist)
920 {
921 __do_free char *line = NULL;
922 __do_fclose FILE *f = NULL;
923 size_t len = 0;
924
925 f = fopen("/proc/self/cgroup", "r");
926 if (!f)
927 return -1;
928
929 while (getline(&line, &len, f) != -1) {
930 char *p, *p2, *tok;
931 p = strchr(line, ':');
932 if (!p)
933 continue;
934 p++;
935 p2 = strchr(p, ':');
936 if (!p2)
937 continue;
938 *p2 = '\0';
939
940 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
941 * contains an entry of the form:
942 *
943 * 0::/some/path
944 *
945 * In this case we use "cgroup2" as controller name.
946 */
947 if ((p2 - p) == 0) {
948 must_append_string(klist, "cgroup2");
949 continue;
950 }
951
952 lxc_iterate_parts(tok, p, ",") {
953 if (strncmp(tok, "name=", 5) == 0)
954 must_append_string(nlist, tok);
955 else
956 must_append_string(klist, tok);
957 }
958 }
959
960 return 0;
961 }
962
963 static void trim(char *s)
964 {
965 size_t len;
966
967 len = strlen(s);
968 while ((len > 1) && (s[len - 1] == '\n'))
969 s[--len] = '\0';
970 }
971
972 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
973 {
974 int i;
975 struct hierarchy **it;
976
977 if (!ops->hierarchies) {
978 TRACE(" No hierarchies found");
979 return;
980 }
981
982 TRACE(" Hierarchies:");
983 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
984 int j;
985 char **cit;
986
987 TRACE(" %d: base_cgroup: %s", i, (*it)->container_base_path ? (*it)->container_base_path : "(null)");
988 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
989 TRACE(" controllers:");
990 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
991 TRACE(" %d: %s", j, *cit);
992 }
993 }
994
995 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
996 char **nlist)
997 {
998 int k;
999 char **it;
1000
1001 TRACE("basecginfo is:");
1002 TRACE("%s", basecginfo);
1003
1004 for (k = 0, it = klist; it && *it; it++, k++)
1005 TRACE("kernel subsystem %d: %s", k, *it);
1006
1007 for (k = 0, it = nlist; it && *it; it++, k++)
1008 TRACE("named subsystem %d: %s", k, *it);
1009 }
1010
1011 static int cgroup_rmdir(struct hierarchy **hierarchies,
1012 const char *container_cgroup)
1013 {
1014 int i;
1015
1016 if (!container_cgroup || !hierarchies)
1017 return 0;
1018
1019 for (i = 0; hierarchies[i]; i++) {
1020 int ret;
1021 struct hierarchy *h = hierarchies[i];
1022
1023 if (!h->container_full_path)
1024 continue;
1025
1026 ret = recursive_destroy(h->container_full_path);
1027 if (ret < 0)
1028 WARN("Failed to destroy \"%s\"", h->container_full_path);
1029
1030 free(h->container_full_path);
1031 h->container_full_path = NULL;
1032 }
1033
1034 return 0;
1035 }
1036
1037 struct generic_userns_exec_data {
1038 struct hierarchy **hierarchies;
1039 const char *container_cgroup;
1040 struct lxc_conf *conf;
1041 uid_t origuid; /* target uid in parent namespace */
1042 char *path;
1043 };
1044
1045 static int cgroup_rmdir_wrapper(void *data)
1046 {
1047 int ret;
1048 struct generic_userns_exec_data *arg = data;
1049 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1050 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1051
1052 ret = setresgid(nsgid, nsgid, nsgid);
1053 if (ret < 0) {
1054 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
1055 (int)nsgid, (int)nsgid);
1056 return -1;
1057 }
1058
1059 ret = setresuid(nsuid, nsuid, nsuid);
1060 if (ret < 0) {
1061 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
1062 (int)nsuid, (int)nsuid);
1063 return -1;
1064 }
1065
1066 ret = setgroups(0, NULL);
1067 if (ret < 0 && errno != EPERM) {
1068 SYSERROR("Failed to setgroups(0, NULL)");
1069 return -1;
1070 }
1071
1072 return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
1073 }
1074
1075 __cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
1076 struct lxc_handler *handler)
1077 {
1078 int ret;
1079 struct generic_userns_exec_data wrap;
1080
1081 if (!ops->hierarchies)
1082 return;
1083
1084 wrap.origuid = 0;
1085 wrap.container_cgroup = ops->container_cgroup;
1086 wrap.hierarchies = ops->hierarchies;
1087 wrap.conf = handler->conf;
1088
1089 if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
1090 ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
1091 "cgroup_rmdir_wrapper");
1092 else
1093 ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
1094 if (ret < 0) {
1095 WARN("Failed to destroy cgroups");
1096 return;
1097 }
1098 }
1099
1100 __cgfsng_ops static void cgfsng_monitor_destroy(struct cgroup_ops *ops,
1101 struct lxc_handler *handler)
1102 {
1103 int len;
1104 struct lxc_conf *conf = handler->conf;
1105 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1106
1107 if (!ops->hierarchies)
1108 return;
1109
1110 len = snprintf(pidstr, sizeof(pidstr), "%d", handler->monitor_pid);
1111 if (len < 0 || (size_t)len >= sizeof(pidstr))
1112 return;
1113
1114 for (int i = 0; ops->hierarchies[i]; i++) {
1115 __do_free char *pivot_path = NULL;
1116 int ret;
1117 char *chop;
1118 char pivot_cgroup[] = PIVOT_CGROUP;
1119 struct hierarchy *h = ops->hierarchies[i];
1120
1121 if (!h->monitor_full_path)
1122 continue;
1123
1124 if (conf && conf->cgroup_meta.dir)
1125 pivot_path = must_make_path(h->mountpoint,
1126 h->container_base_path,
1127 conf->cgroup_meta.dir,
1128 PIVOT_CGROUP,
1129 "cgroup.procs", NULL);
1130 else
1131 pivot_path = must_make_path(h->mountpoint,
1132 h->container_base_path,
1133 PIVOT_CGROUP,
1134 "cgroup.procs", NULL);
1135
1136 chop = strrchr(pivot_path, '/');
1137 if (chop)
1138 *chop = '\0';
1139
1140 /*
1141 * Make sure not to pass in the ro string literal PIVOT_CGROUP
1142 * here.
1143 */
1144 if (!cg_legacy_handle_cpuset_hierarchy(h, pivot_cgroup)) {
1145 WARN("Failed to handle legacy cpuset controller");
1146 continue;
1147 }
1148
1149 ret = mkdir_p(pivot_path, 0755);
1150 if (ret < 0 && errno != EEXIST) {
1151 SYSWARN("Failed to create cgroup \"%s\"\n", pivot_path);
1152 continue;
1153 }
1154
1155 if (chop)
1156 *chop = '/';
1157
1158 /* Move ourselves into the pivot cgroup to delete our own
1159 * cgroup.
1160 */
1161 ret = lxc_write_to_file(pivot_path, pidstr, len, false, 0666);
1162 if (ret != 0) {
1163 SYSWARN("Failed to move monitor %s to \"%s\"\n", pidstr, pivot_path);
1164 continue;
1165 }
1166
1167 ret = recursive_destroy(h->monitor_full_path);
1168 if (ret < 0)
1169 WARN("Failed to destroy \"%s\"", h->monitor_full_path);
1170 }
1171 }
1172
1173 static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
1174 {
1175 __do_free char *add_controllers = NULL, *cgroup = NULL;
1176 size_t i, parts_len;
1177 char **it;
1178 size_t full_len = 0;
1179 char **parts = NULL;
1180 bool bret = false;
1181
1182 if (h->version != CGROUP2_SUPER_MAGIC)
1183 return true;
1184
1185 if (!h->controllers)
1186 return true;
1187
1188 /* For now we simply enable all controllers that we have detected by
1189 * creating a string like "+memory +pids +cpu +io".
1190 * TODO: In the near future we might want to support "-<controller>"
1191 * etc. but whether supporting semantics like this make sense will need
1192 * some thinking.
1193 */
1194 for (it = h->controllers; it && *it; it++) {
1195 full_len += strlen(*it) + 2;
1196 add_controllers = must_realloc(add_controllers, full_len + 1);
1197
1198 if (h->controllers[0] == *it)
1199 add_controllers[0] = '\0';
1200
1201 (void)strlcat(add_controllers, "+", full_len + 1);
1202 (void)strlcat(add_controllers, *it, full_len + 1);
1203
1204 if (*(it + 1))
1205 (void)strlcat(add_controllers, " ", full_len + 1);
1206 }
1207
1208 parts = lxc_string_split(cgname, '/');
1209 if (!parts)
1210 goto on_error;
1211
1212 parts_len = lxc_array_len((void **)parts);
1213 if (parts_len > 0)
1214 parts_len--;
1215
1216 cgroup = must_make_path(h->mountpoint, h->container_base_path, NULL);
1217 for (i = 0; i < parts_len; i++) {
1218 int ret;
1219 __do_free char *target = NULL;
1220
1221 cgroup = must_append_path(cgroup, parts[i], NULL);
1222 target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
1223 ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
1224 if (ret < 0) {
1225 SYSERROR("Could not enable \"%s\" controllers in the "
1226 "unified cgroup \"%s\"", add_controllers, cgroup);
1227 goto on_error;
1228 }
1229 }
1230
1231 bret = true;
1232
1233 on_error:
1234 lxc_free_array((void **)parts, free);
1235 return bret;
1236 }
1237
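/* Like mkdir -p, but EEXIST is only tolerated for intermediate path
 * components: if the final component already exists the call fails. E.g.
 * when creating "lxc/c1", an existing "lxc" is fine, an existing "c1" is an
 * error.
 */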
1238 static int mkdir_eexist_on_last(const char *dir, mode_t mode)
1239 {
1240 const char *tmp = dir;
1241 const char *orig = dir;
1242 size_t orig_len;
1243
1244 orig_len = strlen(dir);
1245 do {
1246 __do_free char *makeme;
1247 int ret;
1248 size_t cur_len;
1249
1250 dir = tmp + strspn(tmp, "/");
1251 tmp = dir + strcspn(dir, "/");
1252
1253 errno = ENOMEM;
1254 cur_len = dir - orig;
1255 makeme = strndup(orig, cur_len);
1256 if (!makeme)
1257 return -1;
1258
1259 ret = mkdir(makeme, mode);
1260 if (ret < 0) {
1261 if ((errno != EEXIST) || (orig_len == cur_len)) {
1262 SYSERROR("Failed to create directory \"%s\"", makeme);
1263 return -1;
1264 }
1265 }
1266 } while (tmp != dir);
1267
1268 return 0;
1269 }
1270
1271 static bool monitor_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1272 {
1273 int ret;
1274
1275 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1276 ERROR("Failed to handle legacy cpuset controller");
1277 return false;
1278 }
1279
1280 h->monitor_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1281 ret = mkdir_eexist_on_last(h->monitor_full_path, 0755);
1282 if (ret < 0) {
1283 ERROR("Failed to create cgroup \"%s\"", h->monitor_full_path);
1284 return false;
1285 }
1286
1287 return cg_unified_create_cgroup(h, cgname);
1288 }
1289
1290 static bool container_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1291 {
1292 int ret;
1293
1294 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1295 ERROR("Failed to handle legacy cpuset controller");
1296 return false;
1297 }
1298
1299 h->container_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1300 ret = mkdir_eexist_on_last(h->container_full_path, 0755);
1301 if (ret < 0) {
1302 ERROR("Failed to create cgroup \"%s\"", h->container_full_path);
1303 return false;
1304 }
1305
1306 return cg_unified_create_cgroup(h, cgname);
1307 }
1308
1309 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname, bool monitor)
1310 {
1311 int ret;
1312 char *full_path;
1313
1314 if (monitor)
1315 full_path = h->monitor_full_path;
1316 else
1317 full_path = h->container_full_path;
1318
1319 ret = rmdir(full_path);
1320 if (ret < 0)
1321 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", full_path);
1322
1323 free(full_path);
1324
1325 if (monitor)
1326 h->monitor_full_path = NULL;
1327 else
1328 h->container_full_path = NULL;
1329 }
1330
1331 __cgfsng_ops static inline bool cgfsng_monitor_create(struct cgroup_ops *ops,
1332 struct lxc_handler *handler)
1333 {
1334 __do_free char *monitor_cgroup = NULL;
1335 char *offset, *tmp;
1336 int i, idx = 0;
1337 size_t len;
1338 struct lxc_conf *conf = handler->conf;
1339
1340 if (!conf)
1341 return false;
1342
1343 if (!ops->hierarchies)
1344 return true;
1345
1346 if (conf->cgroup_meta.dir)
1347 tmp = lxc_string_join("/",
1348 (const char *[]){conf->cgroup_meta.dir,
1349 ops->monitor_pattern,
1350 handler->name, NULL},
1351 false);
1352 else
1353 tmp = must_make_path(ops->monitor_pattern, handler->name, NULL);
1354 if (!tmp)
1355 return false;
1356
1357 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1358 monitor_cgroup = must_realloc(tmp, len);
1359 offset = monitor_cgroup + len - 5;
1360 *offset = 0;
1361
1362 do {
1363 if (idx) {
1364 int ret = snprintf(offset, 5, "-%d", idx);
1365 if (ret < 0 || (size_t)ret >= 5)
1366 return false;
1367 }
1368
1369 for (i = 0; ops->hierarchies[i]; i++) {
1370 if (!monitor_create_path_for_hierarchy(ops->hierarchies[i],
1371 monitor_cgroup)) {
1372 ERROR("Failed to create cgroup \"%s\"",
1373 ops->hierarchies[i]->monitor_full_path);
1374 for (int j = 0; j < i; j++)
1375 remove_path_for_hierarchy(ops->hierarchies[j],
1376 monitor_cgroup,
1377 true);
1378
1379 idx++;
1380 break;
1381 }
1382 }
1383 } while (ops->hierarchies[i] && idx > 0 && idx < 1000);
1384
1385 if (idx == 1000)
1386 return false;
1387
1388 INFO("The monitor process uses \"%s\" as cgroup", monitor_cgroup);
1389 return true;
1390 }
1391
1392 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1393 * next cgroup_pattern-1, -2, ..., -999.
1394 */
1395 __cgfsng_ops static inline bool cgfsng_payload_create(struct cgroup_ops *ops,
1396 struct lxc_handler *handler)
1397 {
1398 __do_free char *container_cgroup = NULL, *tmp = NULL;
1399 int i;
1400 size_t len;
1401 char *offset;
1402 int idx = 0;
1403 struct lxc_conf *conf = handler->conf;
1404
1405 if (ops->container_cgroup)
1406 return false;
1407
1408 if (!conf)
1409 return false;
1410
1411 if (!ops->hierarchies)
1412 return true;
1413
1414 if (conf->cgroup_meta.dir)
1415 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1416 else
1417 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1418 if (!tmp) {
1419 ERROR("Failed expanding cgroup name pattern");
1420 return false;
1421 }
1422
1423 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1424 container_cgroup = must_realloc(NULL, len);
1425 (void)strlcpy(container_cgroup, tmp, len);
1426 offset = container_cgroup + len - 5;
1427
1428 do {
1429 if (idx) {
1430 int ret = snprintf(offset, 5, "-%d", idx);
1431 if (ret < 0 || (size_t)ret >= 5)
1432 return false;
1433 }
1434
1435 for (i = 0; ops->hierarchies[i]; i++) {
1436 if (!container_create_path_for_hierarchy(ops->hierarchies[i],
1437 container_cgroup)) {
1438 ERROR("Failed to create cgroup \"%s\"",
1439 ops->hierarchies[i]->container_full_path);
1440 for (int j = 0; j < i; j++)
1441 remove_path_for_hierarchy(ops->hierarchies[j],
1442 container_cgroup,
1443 false);
1444 idx++;
1445 break;
1446 }
1447 }
1448 } while (ops->hierarchies[i] && idx > 0 && idx < 1000);
1449
1450 if (idx == 1000)
1451 return false;
1452
1453 INFO("The container process uses \"%s\" as cgroup", container_cgroup);
1454 ops->container_cgroup = move_ptr(container_cgroup);
1455 return true;
1456 }
1457
1458 __cgfsng_ops static bool __do_cgroup_enter(struct cgroup_ops *ops, pid_t pid,
1459 bool monitor)
1460 {
1461 int len;
1462 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1463
1464 if (!ops->hierarchies)
1465 return true;
1466
1467 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
1468 if (len < 0 || (size_t)len >= sizeof(pidstr))
1469 return false;
1470
1471 for (int i = 0; ops->hierarchies[i]; i++) {
1472 int ret;
1473 __do_free char *path = NULL;
1474
1475 if (monitor)
1476 path = must_make_path(ops->hierarchies[i]->monitor_full_path,
1477 "cgroup.procs", NULL);
1478 else
1479 path = must_make_path(ops->hierarchies[i]->container_full_path,
1480 "cgroup.procs", NULL);
1481 ret = lxc_write_to_file(path, pidstr, len, false, 0666);
1482 if (ret != 0) {
1483 SYSERROR("Failed to enter cgroup \"%s\"", path);
1484 return false;
1485 }
1486 }
1487
1488 return true;
1489 }
1490
1491 __cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops, pid_t pid)
1492 {
1493 return __do_cgroup_enter(ops, pid, true);
1494 }
1495
1496 static bool cgfsng_payload_enter(struct cgroup_ops *ops, pid_t pid)
1497 {
1498 return __do_cgroup_enter(ops, pid, false);
1499 }
1500
1501 static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
1502 mode_t chmod_mode)
1503 {
1504 int ret;
1505
1506 ret = chown(path, chown_uid, chown_gid);
1507 if (ret < 0) {
1508 SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
1509 return -1;
1510 }
1511
1512 ret = chmod(path, chmod_mode);
1513 if (ret < 0) {
1514 SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
1515 return -1;
1516 }
1517
1518 return 0;
1519 }
1520
1521 /* chgrp the container cgroups to container group. We leave
1522 * the container owner as cgroup owner. So we must make the
1523 * directories 775 so that the container can create sub-cgroups.
1524 *
1525 * Also chown the tasks and cgroup.procs files. Those may not
1526 * exist depending on kernel version.
1527 */
1528 static int chown_cgroup_wrapper(void *data)
1529 {
1530 int i, ret;
1531 uid_t destuid;
1532 struct generic_userns_exec_data *arg = data;
1533 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1534 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1535
1536 ret = setresgid(nsgid, nsgid, nsgid);
1537 if (ret < 0) {
1538 SYSERROR("Failed to setresgid(%d, %d, %d)",
1539 (int)nsgid, (int)nsgid, (int)nsgid);
1540 return -1;
1541 }
1542
1543 ret = setresuid(nsuid, nsuid, nsuid);
1544 if (ret < 0) {
1545 SYSERROR("Failed to setresuid(%d, %d, %d)",
1546 (int)nsuid, (int)nsuid, (int)nsuid);
1547 return -1;
1548 }
1549
1550 ret = setgroups(0, NULL);
1551 if (ret < 0 && errno != EPERM) {
1552 SYSERROR("Failed to setgroups(0, NULL)");
1553 return -1;
1554 }
1555
1556 destuid = get_ns_uid(arg->origuid);
1557 if (destuid == LXC_INVALID_UID)
1558 destuid = 0;
1559
1560 for (i = 0; arg->hierarchies[i]; i++) {
1561 __do_free char *fullpath = NULL;
1562 char *path = arg->hierarchies[i]->container_full_path;
1563
1564 ret = chowmod(path, destuid, nsgid, 0775);
1565 if (ret < 0)
1566 return -1;
1567
1568 /* Failures to chown() these are inconvenient but not
1569 * detrimental. We leave these owned by the container launcher,
1570 * so that container root can write to the files to attach. We
1571 * chmod() them 664 so that container systemd can write to the
1572 * files (which systemd in wily insists on doing).
1573 */
1574
1575 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1576 fullpath = must_make_path(path, "tasks", NULL);
1577 (void)chowmod(fullpath, destuid, nsgid, 0664);
1578 }
1579
1580 fullpath = must_make_path(path, "cgroup.procs", NULL);
1581 (void)chowmod(fullpath, destuid, nsgid, 0664);
1582
1583 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1584 continue;
1585
1586 for (char **p = arg->hierarchies[i]->cgroup2_chown; p && *p; p++) {
1587 fullpath = must_make_path(path, *p, NULL);
1588 (void)chowmod(fullpath, destuid, nsgid, 0664);
1589 }
1590 }
1591
1592 return 0;
1593 }
1594
1595 __cgfsng_ops static bool cgfsng_chown(struct cgroup_ops *ops,
1596 struct lxc_conf *conf)
1597 {
1598 struct generic_userns_exec_data wrap;
1599
1600 if (lxc_list_empty(&conf->id_map))
1601 return true;
1602
1603 if (!ops->hierarchies)
1604 return true;
1605
1606 wrap.origuid = geteuid();
1607 wrap.path = NULL;
1608 wrap.hierarchies = ops->hierarchies;
1609 wrap.conf = conf;
1610
1611 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1612 "chown_cgroup_wrapper") < 0) {
1613 ERROR("Error requesting cgroup chown in new user namespace");
1614 return false;
1615 }
1616
1617 return true;
1618 }
1619
1620 /* cgroup-full:* is done, no need to create subdirs */
1621 static bool cg_mount_needs_subdirs(int type)
1622 {
1623 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1624 return false;
1625
1626 return true;
1627 }
1628
1629 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1630 * remount the controller ro if needed and bindmount the cgroupfs onto
1631 * controller/the/cg/path.
1632 */
1633 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1634 char *controllerpath, char *cgpath,
1635 const char *container_cgroup)
1636 {
1637 __do_free char *sourcepath = NULL;
1638 int ret, remount_flags;
1639 int flags = MS_BIND;
1640
1641 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1642 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1643 if (ret < 0) {
1644 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1645 controllerpath, controllerpath);
1646 return -1;
1647 }
1648
1649 remount_flags = add_required_remount_flags(controllerpath,
1650 controllerpath,
1651 flags | MS_REMOUNT);
1652 ret = mount(controllerpath, controllerpath, "cgroup",
1653 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1654 NULL);
1655 if (ret < 0) {
1656 SYSERROR("Failed to remount \"%s\" ro", controllerpath);
1657 return -1;
1658 }
1659
1660 INFO("Remounted %s read-only", controllerpath);
1661 }
1662
1663 sourcepath = must_make_path(h->mountpoint, h->container_base_path,
1664 container_cgroup, NULL);
1665 if (type == LXC_AUTO_CGROUP_RO)
1666 flags |= MS_RDONLY;
1667
1668 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1669 if (ret < 0) {
1670 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1671 return -1;
1672 }
1673 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1674
1675 if (flags & MS_RDONLY) {
1676 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1677 flags | MS_REMOUNT);
1678 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1679 if (ret < 0) {
1680 SYSERROR("Failed to remount \"%s\" ro", cgpath);
1681 return -1;
1682 }
1683 INFO("Remounted %s read-only", cgpath);
1684 }
1685
1686 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1687 return 0;
1688 }
1689
1690 /* __cg_mount_direct
1691 *
1692 * Mount cgroup hierarchies directly without using bind-mounts. The main
1693 * use-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1694 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1695 */
1696 static int __cg_mount_direct(int type, struct hierarchy *h,
1697 const char *controllerpath)
1698 {
1699 int ret;
1700 __do_free char *controllers = NULL;
1701 char *fstype = "cgroup2";
1702 unsigned long flags = 0;
1703
1704 flags |= MS_NOSUID;
1705 flags |= MS_NOEXEC;
1706 flags |= MS_NODEV;
1707 flags |= MS_RELATIME;
1708
1709 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1710 flags |= MS_RDONLY;
1711
1712 if (h->version != CGROUP2_SUPER_MAGIC) {
1713 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1714 if (!controllers)
1715 return -ENOMEM;
1716 fstype = "cgroup";
1717 }
1718
1719 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1720 if (ret < 0) {
1721 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1722 return -1;
1723 }
1724
1725 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1726 return 0;
1727 }
1728
1729 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1730 const char *controllerpath)
1731 {
1732 return __cg_mount_direct(type, h, controllerpath);
1733 }
1734
1735 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1736 const char *controllerpath)
1737 {
1738 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1739 return 0;
1740
1741 return __cg_mount_direct(type, h, controllerpath);
1742 }
1743
1744 __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
1745 struct lxc_handler *handler,
1746 const char *root, int type)
1747 {
1748 __do_free char *tmpfspath = NULL;
1749 int i, ret;
1750 bool has_cgns = false, retval = false, wants_force_mount = false;
1751
1752 if (!ops->hierarchies)
1753 return true;
1754
1755 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1756 return true;
1757
1758 if (type & LXC_AUTO_CGROUP_FORCE) {
1759 type &= ~LXC_AUTO_CGROUP_FORCE;
1760 wants_force_mount = true;
1761 }
1762
1763 if (!wants_force_mount){
1764 if (!lxc_list_empty(&handler->conf->keepcaps))
1765 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1766 else
1767 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1768 }
1769
1770 has_cgns = cgns_supported();
1771 if (has_cgns && !wants_force_mount)
1772 return true;
1773
1774 if (type == LXC_AUTO_CGROUP_NOSPEC)
1775 type = LXC_AUTO_CGROUP_MIXED;
1776 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1777 type = LXC_AUTO_CGROUP_FULL_MIXED;
1778
1779 /* Mount tmpfs */
1780 tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
1781 ret = safe_mount(NULL, tmpfspath, "tmpfs",
1782 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1783 "size=10240k,mode=755", root);
1784 if (ret < 0)
1785 goto on_error;
1786
1787 for (i = 0; ops->hierarchies[i]; i++) {
1788 __do_free char *controllerpath = NULL, *path2 = NULL;
1789 struct hierarchy *h = ops->hierarchies[i];
1790 char *controller = strrchr(h->mountpoint, '/');
1791
1792 if (!controller)
1793 continue;
1794 controller++;
1795
1796 controllerpath = must_make_path(tmpfspath, controller, NULL);
1797 if (dir_exists(controllerpath))
1798 continue;
1799
1800 ret = mkdir(controllerpath, 0755);
1801 if (ret < 0) {
1802 SYSERROR("Error creating cgroup path: %s", controllerpath);
1803 goto on_error;
1804 }
1805
1806 if (has_cgns && wants_force_mount) {
1807 /* If cgroup namespaces are supported but the container
1808 * will not have CAP_SYS_ADMIN after it has started we
1809 * need to mount the cgroups manually.
1810 */
1811 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1812 if (ret < 0)
1813 goto on_error;
1814
1815 continue;
1816 }
1817
1818 ret = cg_mount_cgroup_full(type, h, controllerpath);
1819 if (ret < 0)
1820 goto on_error;
1821
1822 if (!cg_mount_needs_subdirs(type))
1823 continue;
1824
1825 path2 = must_make_path(controllerpath, h->container_base_path,
1826 ops->container_cgroup, NULL);
1827 ret = mkdir_p(path2, 0755);
1828 if (ret < 0)
1829 goto on_error;
1830
1831 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1832 path2, ops->container_cgroup);
1833 if (ret < 0)
1834 goto on_error;
1835 }
1836 retval = true;
1837
1838 on_error:
1839 return retval;
1840 }
1841
1842 static int recursive_count_nrtasks(char *dirname)
1843 {
1844 __do_free char *path = NULL;
1845 __do_closedir DIR *dir = NULL;
1846 struct dirent *direntp;
1847 int count = 0, ret;
1848
1849 dir = opendir(dirname);
1850 if (!dir)
1851 return 0;
1852
1853 while ((direntp = readdir(dir))) {
1854 struct stat mystat;
1855
1856 if (!strcmp(direntp->d_name, ".") ||
1857 !strcmp(direntp->d_name, ".."))
1858 continue;
1859
1860 path = must_make_path(dirname, direntp->d_name, NULL);
1861
1862 if (lstat(path, &mystat))
1863 continue;
1864
1865 if (!S_ISDIR(mystat.st_mode))
1866 continue;
1867
1868 count += recursive_count_nrtasks(path);
1869 }
1870
1871 path = must_make_path(dirname, "cgroup.procs", NULL);
1872 ret = lxc_count_file_lines(path);
1873 if (ret != -1)
1874 count += ret;
1875
1876 return count;
1877 }
1878
1879 __cgfsng_ops static int cgfsng_nrtasks(struct cgroup_ops *ops)
1880 {
1881 __do_free char *path = NULL;
1882 int count;
1883
1884 if (!ops->container_cgroup || !ops->hierarchies)
1885 return -1;
1886
1887 path = must_make_path(ops->hierarchies[0]->container_full_path, NULL);
1888 count = recursive_count_nrtasks(path);
1889 return count;
1890 }
1891
1892 /* Only root needs to escape to the cgroup of its init. */
1893 __cgfsng_ops static bool cgfsng_escape(const struct cgroup_ops *ops,
1894 struct lxc_conf *conf)
1895 {
1896 int i;
1897
1898 if (conf->cgroup_meta.relative || geteuid() || !ops->hierarchies)
1899 return true;
1900
1901 for (i = 0; ops->hierarchies[i]; i++) {
1902 int ret;
1903 __do_free char *fullpath = NULL;
1904
1905 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1906 ops->hierarchies[i]->container_base_path,
1907 "cgroup.procs", NULL);
1908 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1909 if (ret != 0) {
1910 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1911 return false;
1912 }
1913 }
1914
1915 return true;
1916 }
1917
1918 __cgfsng_ops static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1919 {
1920 int i = 0;
1921
1922 if (!ops->hierarchies)
1923 return 0;
1924
1925 for (; ops->hierarchies[i]; i++)
1926 ;
1927
1928 return i;
1929 }
1930
1931 __cgfsng_ops static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
1932 {
1933 int i;
1934
1935 if (!ops->hierarchies)
1936 return false;
1937
1938 /* sanity check that hierarchy n actually exists */
1939 for (i = 0; i <= n; i++)
1940 if (!ops->hierarchies[i])
1941 return false;
1942
1943 *out = ops->hierarchies[n]->controllers;
1944
1945 return true;
1946 }
1947
1948 #define THAWED "THAWED"
1949 #define THAWED_LEN (strlen(THAWED))
1950
1951 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
1952 * to be adapted.
1953 */
1954 __cgfsng_ops static bool cgfsng_unfreeze(struct cgroup_ops *ops)
1955 {
1956 int ret;
1957 __do_free char *fullpath = NULL;
1958 struct hierarchy *h;
1959
1960 h = get_hierarchy(ops, "freezer");
1961 if (!h)
1962 return false;
1963
1964 fullpath = must_make_path(h->container_full_path, "freezer.state", NULL);
1965 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
1966 if (ret < 0)
1967 return false;
1968
1969 return true;
1970 }
1971
1972 __cgfsng_ops static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
1973 const char *controller)
1974 {
1975 struct hierarchy *h;
1976
1977 h = get_hierarchy(ops, controller);
1978 if (!h) {
1979 WARN("Failed to find hierarchy for controller \"%s\"",
1980 controller ? controller : "(null)");
1981 return NULL;
1982 }
1983
1984 return h->container_full_path ? h->container_full_path + strlen(h->mountpoint) : NULL;
1985 }
1986
1987 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
1988 * which must be freed by the caller.
1989 */
1990 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
1991 const char *inpath,
1992 const char *filename)
1993 {
1994 return must_make_path(h->mountpoint, inpath, filename, NULL);
1995 }
1996
1997 /* Technically, we're always at a delegation boundary here (this is especially
1998 * true when cgroup namespaces are available). The reasoning is that in order
1999 * for us to have been able to start a container in the first place, the root
2000 * cgroup must have been a leaf node. Now, either the container's init system
2001 * has populated the cgroup and kept it as a leaf node or it has created
2002 * subtrees. In the former case we simply attach to the leaf node we created
2003 * when we started the container; in the latter case we create our own
2004 * cgroup for the attaching process.
2005 */
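/* Sketch of the fallback below: if writing the pid into the container's own
 * cgroup.procs fails with EBUSY (non-leaf node), an attach cgroup named
 * "lxc", then "lxc-1", "lxc-2", ... up to "lxc-999" is created underneath it
 * and the pid is moved there instead.
 */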
2006 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
2007 const char *lxcpath, const char *pidstr,
2008 size_t pidstr_len, const char *controller)
2009 {
2010 __do_free char *base_path = NULL, *container_cgroup = NULL,
2011 *full_path = NULL;
2012 int ret;
2013 size_t len;
2014 int fret = -1, idx = 0;
2015
2016 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2017 /* not running */
2018 if (!container_cgroup)
2019 return 0;
2020
2021 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
2022 full_path = must_make_path(base_path, "cgroup.procs", NULL);
2023 /* cgroup is populated */
2024 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2025 if (ret < 0 && errno != EBUSY)
2026 goto on_error;
2027
2028 if (ret == 0)
2029 goto on_success;
2030
2031 len = strlen(base_path) + STRLITERALLEN("/lxc-1000") +
2032 STRLITERALLEN("/cgroup-procs");
2033 full_path = must_realloc(NULL, len + 1);
2034 do {
2035 if (idx)
2036 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
2037 base_path, idx);
2038 else
2039 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
2040 if (ret < 0 || (size_t)ret >= len + 1)
2041 goto on_error;
2042
2043 ret = mkdir_p(full_path, 0755);
2044 if (ret < 0 && errno != EEXIST)
2045 goto on_error;
2046
2047 (void)strlcat(full_path, "/cgroup.procs", len + 1);
2048 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2049 if (ret == 0)
2050 goto on_success;
2051
2052 /* this is a non-leaf node */
2053 if (errno != EBUSY)
2054 goto on_error;
2055
2056 idx++;
2057 } while (idx < 1000);
2058
2059 on_success:
2060 if (idx < 1000)
2061 fret = 0;
2062
2063 on_error:
2064 return fret;
2065 }
2066
2067 __cgfsng_ops static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
2068 const char *lxcpath, pid_t pid)
2069 {
2070 int i, len, ret;
2071 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
2072
2073 if (!ops->hierarchies)
2074 return true;
2075
2076 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
2077 if (len < 0 || (size_t)len >= sizeof(pidstr))
2078 return false;
2079
2080 for (i = 0; ops->hierarchies[i]; i++) {
2081 __do_free char *path = NULL;
2082 char *fullpath = NULL;
2083 struct hierarchy *h = ops->hierarchies[i];
2084
2085 if (h->version == CGROUP2_SUPER_MAGIC) {
2086 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
2087 h->controllers[0]);
2088 if (ret < 0)
2089 return false;
2090
2091 continue;
2092 }
2093
2094 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
2095 /* not running */
2096 if (!path)
2097 continue;
2098
2099 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2100 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2101 if (ret < 0) {
2102 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
2103 return false;
2104 }
2105 }
2106
2107 return true;
2108 }
2109
2110 /* Called externally (e.g. from 'lxc-cgroup') to query cgroup limits. Here we
2111 * don't have a cgroup_data set up, so we ask the running container through the
2112 * commands API for the cgroup path.
2113 */
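/*
 * Worked example (container name and value are hypothetical): a call like
 *
 *	cgfsng_get(ops, "memory.limit_in_bytes", buf, sizeof(buf), "c1", lxcpath);
 *
 * cuts the controller name "memory" out of the filename, asks the running
 * container "c1" for its memory cgroup path via the commands API, and reads
 * <mountpoint>/<cgroup-path>/memory.limit_in_bytes into the caller's buffer.
 */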
2114 __cgfsng_ops static int cgfsng_get(struct cgroup_ops *ops, const char *filename,
2115 char *value, size_t len, const char *name,
2116 const char *lxcpath)
2117 {
2118 __do_free char *path = NULL;
2119 __do_free char *controller = NULL;
2120 char *p;
2121 struct hierarchy *h;
2122 int ret = -1;
2123
2124 controller = must_copy_string(filename);
2125 p = strchr(controller, '.');
2126 if (p)
2127 *p = '\0';
2128
2129 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2130 /* not running */
2131 if (!path)
2132 return -1;
2133
2134 h = get_hierarchy(ops, controller);
2135 if (h) {
2136 __do_free char *fullpath = NULL;
2137
2138 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2139 ret = lxc_read_from_file(fullpath, value, len);
2140 }
2141
2142 return ret;
2143 }
2144
2145 /* Called externally (e.g. from 'lxc-cgroup') to set new cgroup limits. Here we
2146 * don't have a cgroup_data set up, so we ask the running container through the
2147 * commands API for the cgroup path.
2148 */
2149 __cgfsng_ops static int cgfsng_set(struct cgroup_ops *ops,
2150 const char *filename, const char *value,
2151 const char *name, const char *lxcpath)
2152 {
2153 __do_free char *path = NULL;
2154 __do_free char *controller = NULL;
2155 char *p;
2156 struct hierarchy *h;
2157 int ret = -1;
2158
2159 controller = must_copy_string(filename);
2160 p = strchr(controller, '.');
2161 if (p)
2162 *p = '\0';
2163
2164 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2165 /* not running */
2166 if (!path)
2167 return -1;
2168
2169 h = get_hierarchy(ops, controller);
2170 if (h) {
2171 __do_free char *fullpath = NULL;
2172
2173 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2174 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2175 }
2176
2177 return ret;
2178 }
2179
2180 /* Take a devices cgroup line of the form
2181 * /dev/foo rwx
2182 * and convert it to a valid
2183 * type major:minor mode
2184 * line. Return <0 on error. @dest is a preallocated buffer long enough to hold
2185 * the output.
2186 */
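/*
 * Example (assuming the conventional device numbers): the input line
 *
 *	/dev/null rwm
 *
 * stats /dev/null, a character device with major 1 and minor 3 on a typical
 * Linux system, and is rewritten as
 *
 *	c 1:3 rwm
 *
 * which is the form devices.allow and devices.deny expect.
 */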
2187 static int convert_devpath(const char *invalue, char *dest)
2188 {
2189 __do_free char *path = NULL;
2190 int n_parts;
2191 char *p, type;
2192 unsigned long minor, major;
2193 struct stat sb;
2194 int ret = -EINVAL;
2195 char *mode = NULL;
2196
2197 path = must_copy_string(invalue);
2198
2199 /* Read path followed by mode. Ignore any trailing text.
2200 * A ' # comment' would be legal. Technically other text is not
2201 * legal; we could check for that if we cared to.
2202 */
2203 for (n_parts = 1, p = path; *p; p++) {
2204 if (*p != ' ')
2205 continue;
2206 *p = '\0';
2207
2208 if (n_parts != 1)
2209 break;
2210 p++;
2211 n_parts++;
2212
2213 while (*p == ' ')
2214 p++;
2215
2216 mode = p;
2217
2218 if (*p == '\0')
2219 goto out;
2220 }
2221
2222 if (n_parts == 1)
2223 goto out;
2224
2225 ret = stat(path, &sb);
2226 if (ret < 0)
2227 goto out;
2228
2229 mode_t m = sb.st_mode & S_IFMT;
2230 switch (m) {
2231 case S_IFBLK:
2232 type = 'b';
2233 break;
2234 case S_IFCHR:
2235 type = 'c';
2236 break;
2237 default:
2238 ERROR("Unsupported device type %i for \"%s\"", m, path);
2239 ret = -EINVAL;
2240 goto out;
2241 }
2242
2243 major = MAJOR(sb.st_rdev);
2244 minor = MINOR(sb.st_rdev);
2245 ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
2246 if (ret < 0 || ret >= 50) {
2247 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2248 "chars)", type, major, minor, mode);
2249 ret = -ENAMETOOLONG;
2250 goto out;
2251 }
2252 ret = 0;
2253
2254 out:
2255 return ret;
2256 }
2257
2258 /* Called from setup_limits - here we already know the container's cgroup
2259 * paths because we created the cgroups ourselves.
2260 */
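/*
 * A minimal sketch of what this amounts to: setting "memory.limit_in_bytes"
 * to "268435456" resolves the "memory" hierarchy and writes the value to
 *
 *	<container_full_path>/memory.limit_in_bytes
 *
 * "devices.allow" values that start with a path (e.g. "/dev/null rwm") are
 * first rewritten by convert_devpath() above.
 */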
2261 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2262 const char *value)
2263 {
2264 __do_free char *controller = NULL;
2265 __do_free char *fullpath = NULL;
2266 char *p;
2267 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2268 char converted_value[50];
2269 struct hierarchy *h;
2270 int ret = 0;
2271
2272 controller = must_copy_string(filename);
2273 p = strchr(controller, '.');
2274 if (p)
2275 *p = '\0';
2276
2277 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2278 ret = convert_devpath(value, converted_value);
2279 if (ret < 0)
2280 return ret;
2281 value = converted_value;
2282 }
2283
2284 h = get_hierarchy(ops, controller);
2285 if (!h) {
2286 ERROR("Failed to set up limits for the \"%s\" controller. "
2287 "The controller seems to be unused by the \"cgfsng\" cgroup "
2288 "driver or not enabled on the cgroup hierarchy",
2289 controller);
2290 errno = ENOENT;
2291 return -ENOENT;
2292 }
2293
2294 fullpath = must_make_path(h->container_full_path, filename, NULL);
2295 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2296 return ret;
2297 }
2298
2299 static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
2300 struct lxc_list *cgroup_settings,
2301 bool do_devices)
2302 {
2303 __do_free struct lxc_list *sorted_cgroup_settings = NULL;
2304 struct lxc_list *iterator, *next;
2305 struct lxc_cgroup *cg;
2306 bool ret = false;
2307
2308 if (lxc_list_empty(cgroup_settings))
2309 return true;
2310
2311 if (!ops->hierarchies)
2312 return false;
2313
2314 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2315 if (!sorted_cgroup_settings)
2316 return false;
2317
2318 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2319 cg = iterator->elem;
2320
2321 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2322 if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
2323 if (do_devices && (errno == EACCES || errno == EPERM)) {
2324 WARN("Failed to set \"%s\" to \"%s\"",
2325 cg->subsystem, cg->value);
2326 continue;
2327 }
2328 WARN("Failed to set \"%s\" to \"%s\"",
2329 cg->subsystem, cg->value);
2330 goto out;
2331 }
2332 DEBUG("Set controller \"%s\" to \"%s\"",
2333 cg->subsystem, cg->value);
2334 }
2335 }
2336
2337 ret = true;
2338 INFO("Limits for the legacy cgroup hierarchies have been set up");
2339 out:
2340 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2341 lxc_list_del(iterator);
2342 free(iterator);
2343 }
2344
2345 return ret;
2346 }
2347
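/*
 * Each entry in @cgroup_settings stems from an lxc.cgroup2.* config key and
 * is written verbatim to the matching file in the container's unified
 * cgroup. For illustration (hypothetical config value):
 *
 *	lxc.cgroup2.memory.max = 512M
 *
 * ends up writing "512M" to <container_full_path>/memory.max.
 */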
2348 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2349 struct lxc_list *cgroup_settings)
2350 {
2351 struct lxc_list *iterator;
2352 struct hierarchy *h = ops->unified;
2353
2354 if (lxc_list_empty(cgroup_settings))
2355 return true;
2356
2357 if (!h)
2358 return false;
2359
2360 lxc_list_for_each(iterator, cgroup_settings) {
2361 __do_free char *fullpath = NULL;
2362 int ret;
2363 struct lxc_cgroup *cg = iterator->elem;
2364
2365 fullpath = must_make_path(h->container_full_path, cg->subsystem, NULL);
2366 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2367 if (ret < 0) {
2368 SYSERROR("Failed to set \"%s\" to \"%s\"",
2369 cg->subsystem, cg->value);
2370 return false;
2371 }
2372 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2373 }
2374
2375 INFO("Limits for the unified cgroup hierarchy have been set up");
2376 return true;
2377 }
2378
2379 __cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
2380 struct lxc_conf *conf,
2381 bool do_devices)
2382 {
2383 bool bret;
2384
2385 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2386 if (!bret)
2387 return false;
2388
2389 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2390 }
2391
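/*
 * Sketch of the lxc.cgroup.use filter (hypothetical value): with
 *
 *	lxc.cgroup.use = memory,pids
 *
 * any hierarchy whose controller list contains something outside that set,
 * say "cpuset", makes this function return false and is skipped by
 * cg_hybrid_init().
 */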
2392 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
2393 char **controllers)
2394 {
2395 char **cur_ctrl, **cur_use;
2396
2397 if (!ops->cgroup_use)
2398 return true;
2399
2400 for (cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
2401 bool found = false;
2402
2403 for (cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
2404 if (strcmp(*cur_use, *cur_ctrl) != 0)
2405 continue;
2406
2407 found = true;
2408 break;
2409 }
2410
2411 if (found)
2412 continue;
2413
2414 return false;
2415 }
2416
2417 return true;
2418 }
2419
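/*
 * Collect the cgroup2 files that need to be chowned into the container so
 * that its init can manage delegation. The kernel publishes the list in
 * /sys/kernel/cgroup/delegate; on a reasonably recent kernel it might read
 * (contents vary by kernel version):
 *
 *	cgroup.procs
 *	cgroup.threads
 *	cgroup.subtree_control
 *	memory.pressure
 *
 * If the file is absent we fall back to the hard-coded standard set below.
 */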
2420 static void cg_unified_delegate(char ***delegate)
2421 {
2422 __do_free char *tmp = NULL;
2423 int idx;
2424 char *standard[] = {"cgroup.subtree_control", "cgroup.threads", NULL};
2425
2426 tmp = read_file("/sys/kernel/cgroup/delegate");
2427 if (!tmp) {
2428 for (char **p = standard; p && *p; p++) {
2429 idx = append_null_to_list((void ***)delegate);
2430 (*delegate)[idx] = must_copy_string(*p);
2431 }
2432 } else {
2433 char *token;
2434 lxc_iterate_parts (token, tmp, " \t\n") {
2435 /*
2436 * We always need to chown this for both cgroup and
2437 * cgroup2.
2438 */
2439 if (strcmp(token, "cgroup.procs") == 0)
2440 continue;
2441
2442 idx = append_null_to_list((void ***)delegate);
2443 (*delegate)[idx] = must_copy_string(token);
2444 }
2445 }
2446 }
2447
2448 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
2449 * mountpoints and current cgroups, and stores it in @ops.
2450 */
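/*
 * For orientation, the kind of input parsed here (illustrative lines, not
 * from a real system): a legacy controller appears in /proc/self/mountinfo
 * roughly as
 *
 *	... /sys/fs/cgroup/memory ... - cgroup cgroup rw,memory
 *
 * while /proc/1/cgroup (or /proc/self/cgroup) reports the current cgroup per
 * hierarchy:
 *
 *	7:memory:/
 *	0::/init.scope
 *
 * The "0::" line belongs to the unified (cgroup2) hierarchy.
 */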
2451 static bool cg_hybrid_init(struct cgroup_ops *ops, bool relative,
2452 bool unprivileged)
2453 {
2454 __do_free char *basecginfo = NULL;
2455 __do_free char *line = NULL;
2456 __do_fclose FILE *f = NULL;
2457 int ret;
2458 size_t len = 0;
2459 char **klist = NULL, **nlist = NULL;
2460
2461 /* Root spawned containers escape the current cgroup, so use init's
2462 * cgroups as our base in that case.
2463 */
2464 if (!relative && (geteuid() == 0))
2465 basecginfo = read_file("/proc/1/cgroup");
2466 else
2467 basecginfo = read_file("/proc/self/cgroup");
2468 if (!basecginfo)
2469 return false;
2470
2471 ret = get_existing_subsystems(&klist, &nlist);
2472 if (ret < 0) {
2473 ERROR("Failed to retrieve available legacy cgroup controllers");
2474 return false;
2475 }
2476
2477 f = fopen("/proc/self/mountinfo", "r");
2478 if (!f) {
2479 ERROR("Failed to open \"/proc/self/mountinfo\"");
2480 return false;
2481 }
2482
2483 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
2484
2485 while (getline(&line, &len, f) != -1) {
2486 int type;
2487 bool writeable;
2488 struct hierarchy *new;
2489 char *base_cgroup = NULL, *mountpoint = NULL;
2490 char **controller_list = NULL;
2491
2492 type = get_cgroup_version(line);
2493 if (type == 0)
2494 continue;
2495
2496 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
2497 continue;
2498
2499 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
2500 if (type == CGROUP2_SUPER_MAGIC)
2501 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2502 else if (type == CGROUP_SUPER_MAGIC)
2503 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
2504 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
2505 if (type == CGROUP_SUPER_MAGIC)
2506 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2507 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
2508 if (type == CGROUP2_SUPER_MAGIC)
2509 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2510 }
2511
2512 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
2513 if (!controller_list && type == CGROUP_SUPER_MAGIC)
2514 continue;
2515
2516 if (type == CGROUP_SUPER_MAGIC)
2517 if (controller_list_is_dup(ops->hierarchies, controller_list))
2518 goto next;
2519
2520 mountpoint = cg_hybrid_get_mountpoint(line);
2521 if (!mountpoint) {
2522 ERROR("Failed parsing mountpoint from \"%s\"", line);
2523 goto next;
2524 }
2525
2526 if (type == CGROUP_SUPER_MAGIC)
2527 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
2528 else
2529 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
2530 if (!base_cgroup) {
2531 ERROR("Failed to find current cgroup");
2532 goto next;
2533 }
2534
2535 trim(base_cgroup);
2536 prune_init_scope(base_cgroup);
2537 if (type == CGROUP2_SUPER_MAGIC)
2538 writeable = test_writeable_v2(mountpoint, base_cgroup);
2539 else
2540 writeable = test_writeable_v1(mountpoint, base_cgroup);
2541 if (!writeable)
2542 goto next;
2543
2544 if (type == CGROUP2_SUPER_MAGIC) {
2545 char *cgv2_ctrl_path;
2546
2547 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
2548 "cgroup.controllers",
2549 NULL);
2550
2551 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
2552 free(cgv2_ctrl_path);
2553 if (!controller_list) {
2554 controller_list = cg_unified_make_empty_controller();
2555 TRACE("No controllers are enabled for "
2556 "delegation in the unified hierarchy");
2557 }
2558 }
2559
2560 /* Exclude all controllers that cgroup use does not want. */
2561 if (!cgroup_use_wants_controllers(ops, controller_list))
2562 goto next;
2563
2564 new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
2565 if (type == CGROUP2_SUPER_MAGIC && !ops->unified) {
2566 if (unprivileged)
2567 cg_unified_delegate(&new->cgroup2_chown);
2568 ops->unified = new;
2569 }
2570
2571 continue;
2572
2573 next:
2574 free_string_list(controller_list);
2575 free(mountpoint);
2576 free(base_cgroup);
2577 }
2578
2579 free_string_list(klist);
2580 free_string_list(nlist);
2581
2582 TRACE("Writable cgroup hierarchies:");
2583 lxc_cgfsng_print_hierarchies(ops);
2584
2585 /* Verify that all controllers listed in lxc.cgroup.use and all crucial
2586 * controllers are accounted for.
2587 */
2588 if (!all_controllers_found(ops))
2589 return false;
2590
2591 return true;
2592 }
2593
2594 static int cg_is_pure_unified(void)
2595 {
2596
2597 int ret;
2598 struct statfs fs;
2599
2600 ret = statfs("/sys/fs/cgroup", &fs);
2601 if (ret < 0)
2602 return -ENOMEDIUM;
2603
2604 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2605 return CGROUP2_SUPER_MAGIC;
2606
2607 return 0;
2608 }
2609
2610 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
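/*
 * On a pure cgroup2 host /proc/self/cgroup holds a single line of the form
 * (path is illustrative):
 *
 *	0::/user.slice/user-1000.slice/session-1.scope
 *
 * We return a trimmed copy of everything after "0::", i.e. "/user.slice/...".
 */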
2611 static char *cg_unified_get_current_cgroup(bool relative)
2612 {
2613 __do_free char *basecginfo = NULL;
2614 char *base_cgroup;
2615 char *copy = NULL;
2616
2617 if (!relative && (geteuid() == 0))
2618 basecginfo = read_file("/proc/1/cgroup");
2619 else
2620 basecginfo = read_file("/proc/self/cgroup");
2621 if (!basecginfo)
2622 return NULL;
2623
2624 base_cgroup = strstr(basecginfo, "0::/");
2625 if (!base_cgroup)
2626 goto cleanup_on_err;
2627
2628 base_cgroup = base_cgroup + 3;
2629 copy = copy_to_eol(base_cgroup);
2630 if (!copy)
2631 goto cleanup_on_err;
2632
2633 cleanup_on_err:
2634 if (copy)
2635 trim(copy);
2636
2637 return copy;
2638 }
2639
2640 static int cg_unified_init(struct cgroup_ops *ops, bool relative,
2641 bool unprivileged)
2642 {
2643 __do_free char *subtree_path = NULL;
2644 int ret;
2645 char *mountpoint;
2646 char **delegatable;
2647 struct hierarchy *new;
2648 char *base_cgroup = NULL;
2649
2650 ret = cg_is_pure_unified();
2651 if (ret == -ENOMEDIUM)
2652 return -ENOMEDIUM;
2653
2654 if (ret != CGROUP2_SUPER_MAGIC)
2655 return 0;
2656
2657 base_cgroup = cg_unified_get_current_cgroup(relative);
2658 if (!base_cgroup)
2659 return -EINVAL;
2660 prune_init_scope(base_cgroup);
2661
2662 /* We assume that we have already been given controllers to delegate
2663 * further down the hierarchy. If not it is up to the user to delegate
2664 * them to us.
2665 */
2666 mountpoint = must_copy_string("/sys/fs/cgroup");
2667 subtree_path = must_make_path(mountpoint, base_cgroup,
2668 "cgroup.subtree_control", NULL);
2669 delegatable = cg_unified_get_controllers(subtree_path);
2670 if (!delegatable)
2671 delegatable = cg_unified_make_empty_controller();
2672 if (!delegatable[0])
2673 TRACE("No controllers are enabled for delegation");
2674
2675 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2676 * we should verify that here. The reason I'm not doing it right now is
2677 * that I'm not convinced that lxc.cgroup.use is the way forward, since it
2678 * is a global property. I'd much rather have an option that lets you
2679 * request controllers per container.
2680 */
2681
2682 new = add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2683 if (unprivileged)
2684 cg_unified_delegate(&new->cgroup2_chown);
2685
2686 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2687 return CGROUP2_SUPER_MAGIC;
2688 }
2689
2690 static bool cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
2691 {
2692 int ret;
2693 const char *tmp;
2694 bool relative = conf->cgroup_meta.relative;
2695
2696 tmp = lxc_global_config_value("lxc.cgroup.use");
2697 if (tmp) {
2698 __do_free char *pin = NULL;
2699 char *chop, *cur;
2700
2701 pin = must_copy_string(tmp);
2702 chop = pin;
2703
2704 lxc_iterate_parts(cur, chop, ",")
2705 must_append_string(&ops->cgroup_use, cur);
2706 }
2707
2708 ret = cg_unified_init(ops, relative, !lxc_list_empty(&conf->id_map));
2709 if (ret < 0)
2710 return false;
2711
2712 if (ret == CGROUP2_SUPER_MAGIC)
2713 return true;
2714
2715 return cg_hybrid_init(ops, relative, !lxc_list_empty(&conf->id_map));
2716 }
2717
2718 __cgfsng_ops static bool cgfsng_data_init(struct cgroup_ops *ops)
2719 {
2720 const char *cgroup_pattern;
2721
2722 /* copy system-wide cgroup information */
2723 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2724 if (!cgroup_pattern) {
2725 /* lxc.cgroup.pattern is only NULL on error. */
2726 ERROR("Failed to retrieve cgroup pattern");
2727 return false;
2728 }
2729 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2730 ops->monitor_pattern = MONITOR_CGROUP;
2731
2732 return true;
2733 }
2734
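/*
 * Rough usage sketch (hypothetical caller; the actual driver selection lives
 * elsewhere in LXC):
 *
 *	struct cgroup_ops *ops = cgfsng_ops_init(conf);
 *	if (!ops || !ops->data_init(ops))
 *		return NULL;
 *
 * i.e. construct the ops table first, then let data_init() pull in the
 * system-wide cgroup pattern.
 */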
2735 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
2736 {
2737 struct cgroup_ops *cgfsng_ops;
2738
2739 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2740 if (!cgfsng_ops)
2741 return NULL;
2742
2743 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2744 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2745
2746 if (!cg_init(cgfsng_ops, conf)) {
2747 free(cgfsng_ops);
2748 return NULL;
2749 }
2750
2751 cgfsng_ops->data_init = cgfsng_data_init;
2752 cgfsng_ops->payload_destroy = cgfsng_payload_destroy;
2753 cgfsng_ops->monitor_destroy = cgfsng_monitor_destroy;
2754 cgfsng_ops->monitor_create = cgfsng_monitor_create;
2755 cgfsng_ops->monitor_enter = cgfsng_monitor_enter;
2756 cgfsng_ops->payload_create = cgfsng_payload_create;
2757 cgfsng_ops->payload_enter = cgfsng_payload_enter;
2758 cgfsng_ops->escape = cgfsng_escape;
2759 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2760 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2761 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2762 cgfsng_ops->get = cgfsng_get;
2763 cgfsng_ops->set = cgfsng_set;
2764 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2765 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2766 cgfsng_ops->driver = "cgfsng";
2767 cgfsng_ops->version = "1.0.0";
2768 cgfsng_ops->attach = cgfsng_attach;
2769 cgfsng_ops->chown = cgfsng_chown;
2770 cgfsng_ops->mount = cgfsng_mount;
2771 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2772
2773 return cgfsng_ops;
2774 }