src/lxc/cgroups/cgfsng.c (mirror_lxc.git; commit: "cgfsng: coding style for cgfsng_monitor_create()")
1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
36
37 #ifndef _GNU_SOURCE
38 #define _GNU_SOURCE 1
39 #endif
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <grp.h>
44 #include <linux/kdev_t.h>
45 #include <linux/types.h>
46 #include <stdint.h>
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <string.h>
50 #include <sys/types.h>
51 #include <unistd.h>
52
53 #include "caps.h"
54 #include "cgroup.h"
55 #include "cgroup_utils.h"
56 #include "commands.h"
57 #include "conf.h"
58 #include "config.h"
59 #include "log.h"
60 #include "macro.h"
61 #include "memory_utils.h"
62 #include "storage/storage.h"
63 #include "utils.h"
64
65 #ifndef HAVE_STRLCPY
66 #include "include/strlcpy.h"
67 #endif
68
69 #ifndef HAVE_STRLCAT
70 #include "include/strlcat.h"
71 #endif
72
73 lxc_log_define(cgfsng, cgroup);
74
75 static void free_string_list(char **clist)
76 {
77 int i;
78
79 if (!clist)
80 return;
81
82 for (i = 0; clist[i]; i++)
83 free(clist[i]);
84
85 free(clist);
86 }
87
88 /* Given a pointer to a null-terminated array of pointers, realloc to add one
89 * entry, and point the new entry to NULL. Do not fail. Return the index to the
90 * second-to-last entry - that is, the one which is now available for use
91 * (keeping the list null-terminated).
92 */
93 static int append_null_to_list(void ***list)
94 {
95 int newentry = 0;
96
97 if (*list)
98 for (; (*list)[newentry]; newentry++)
99 ;
100
101 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
102 (*list)[newentry + 1] = NULL;
103 return newentry;
104 }
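/* A short usage sketch for append_null_to_list() (illustrative only, no caller
 * in this file does exactly this):
 *
 *	char **list = NULL;
 *	int idx = append_null_to_list((void ***)&list);
 *	list[idx] = must_copy_string("memory");
 *
 * After the call idx is 0 and list[1] is the NULL terminator; the caller is
 * expected to fill list[idx], which is what must_append_controller() and
 * must_append_string() do.
 */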
105
106 /* Given a null-terminated array of strings, check whether @entry is one of the
107 * strings.
108 */
109 static bool string_in_list(char **list, const char *entry)
110 {
111 int i;
112
113 if (!list)
114 return false;
115
116 for (i = 0; list[i]; i++)
117 if (strcmp(list[i], entry) == 0)
118 return true;
119
120 return false;
121 }
122
123 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
124 * "name=systemd". Do not fail.
125 */
126 static char *cg_legacy_must_prefix_named(char *entry)
127 {
128 size_t len;
129 char *prefixed;
130
131 len = strlen(entry);
132 prefixed = must_realloc(NULL, len + 6);
133
134 memcpy(prefixed, "name=", STRLITERALLEN("name="));
135 memcpy(prefixed + STRLITERALLEN("name="), entry, len);
136 prefixed[len + 5] = '\0';
137
138 return prefixed;
139 }
140
141 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
142 * we are called.
143 *
144 * We also handle named subsystems here. Any controller which is not a kernel
145 * subsystem we prefix with "name=". Any controller which is both a kernel and a
146 * named subsystem we refuse to use, because we're not sure which one we have here.
147 * (TODO: We could work around this in some cases by just remounting to be
148 * unambiguous, or by comparing mountpoint contents with current cgroup.)
149 *
150 * The last entry will always be NULL.
151 */
152 static void must_append_controller(char **klist, char **nlist, char ***clist,
153 char *entry)
154 {
155 int newentry;
156 char *copy;
157
158 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
159 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
160 ERROR("It is both a named and kernel subsystem");
161 return;
162 }
163
164 newentry = append_null_to_list((void ***)clist);
165
166 if (strncmp(entry, "name=", 5) == 0)
167 copy = must_copy_string(entry);
168 else if (string_in_list(klist, entry))
169 copy = must_copy_string(entry);
170 else
171 copy = cg_legacy_must_prefix_named(entry);
172
173 (*clist)[newentry] = copy;
174 }
175
176 /* Given a handler's cgroup data, return the struct hierarchy for the controller
177 * @c, or NULL if there is none.
178 */
179 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *controller)
180 {
181 int i;
182
183 errno = ENOENT;
184
185 if (!ops->hierarchies) {
186 TRACE("There are no useable cgroup controllers");
187 return NULL;
188 }
189
190 for (i = 0; ops->hierarchies[i]; i++) {
191 if (!controller) {
192 /* This is the empty unified hierarchy. */
193 if (ops->hierarchies[i]->controllers &&
194 !ops->hierarchies[i]->controllers[0])
195 return ops->hierarchies[i];
196
197 continue;
198 }
199
200 if (string_in_list(ops->hierarchies[i]->controllers, controller))
201 return ops->hierarchies[i];
202 }
203
204 if (controller)
205 WARN("There is no useable %s controller", controller);
206 else
207 WARN("There is no empty unified cgroup hierarchy");
208
209 return NULL;
210 }
211
212 #define BATCH_SIZE 50
213 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
214 {
215 int newbatches = (newlen / BATCH_SIZE) + 1;
216 int oldbatches = (oldlen / BATCH_SIZE) + 1;
217
218 if (!*mem || newbatches > oldbatches) {
219 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
220 }
221 }
222
223 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
224 {
225 size_t full = oldlen + newlen;
226
227 batch_realloc(dest, oldlen, full + 1);
228
229 memcpy(*dest + oldlen, new, newlen + 1);
230 }
231
232 /* Slurp in a whole file */
233 static char *read_file(const char *fnam)
234 {
235 __do_free char *line = NULL;
236 __do_fclose FILE *f = NULL;
237 int linelen;
238 char *buf = NULL;
239 size_t len = 0, fulllen = 0;
240
241 f = fopen(fnam, "r");
242 if (!f)
243 return NULL;
244 while ((linelen = getline(&line, &len, f)) != -1) {
245 append_line(&buf, fulllen, line, linelen);
246 fulllen += linelen;
247 }
248 return buf;
249 }
250
251 /* Taken over, with modifications, from the kernel sources. */
252 #define NBITS 32 /* bits in uint32_t */
253 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
254 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
255
256 static void set_bit(unsigned bit, uint32_t *bitarr)
257 {
258 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
259 }
260
261 static void clear_bit(unsigned bit, uint32_t *bitarr)
262 {
263 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
264 }
265
266 static bool is_set(unsigned bit, uint32_t *bitarr)
267 {
268 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
269 }
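/* Worked example for set_bit()/clear_bit()/is_set(): bit 35 lives in
 * bitarr[35 / 32] == bitarr[1] and corresponds to mask 1 << (35 % 32) == 1 << 3,
 * so set_bit(35, bitarr) ORs 0x8 into the second word. The callers below size
 * the array via BITS_TO_LONGS(nbits), so every bit index < nbits has a word to
 * land in.
 */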
270
271 /* Create cpumask from cpulist aka turn:
272 *
273 * 0,2-3
274 *
275 * into bit array
276 *
277 * 1 0 1 1
278 */
279 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
280 {
281 char *token;
282 size_t arrlen;
283 uint32_t *bitarr;
284
285 arrlen = BITS_TO_LONGS(nbits);
286 bitarr = calloc(arrlen, sizeof(uint32_t));
287 if (!bitarr)
288 return NULL;
289
290 lxc_iterate_parts(token, buf, ",") {
291 errno = 0;
292 unsigned end, start;
293 char *range;
294
295 start = strtoul(token, NULL, 0);
296 end = start;
297 range = strchr(token, '-');
298 if (range)
299 end = strtoul(range + 1, NULL, 0);
300
301 if (!(start <= end)) {
302 free(bitarr);
303 return NULL;
304 }
305
306 if (end >= nbits) {
307 free(bitarr);
308 return NULL;
309 }
310
311 while (start <= end)
312 set_bit(start++, bitarr);
313 }
314
315 return bitarr;
316 }
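/* Worked example for lxc_cpumask(): with nbits == 4 the cpulist "0,2-3" sets
 * bits 0, 2 and 3, i.e. the single uint32_t word becomes 0xd (binary 1101, the
 * "1 0 1 1" picture above read from bit 0 upwards). A reversed range such as
 * "3-2" or a cpu number >= nbits makes the function bail out and return NULL.
 */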
317
318 /* Turn cpumask into simple, comma-separated cpulist. */
319 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
320 {
321 int ret;
322 size_t i;
323 char **cpulist = NULL;
324 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
325
326 for (i = 0; i <= nbits; i++) {
327 if (!is_set(i, bitarr))
328 continue;
329
330 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
331 if (ret < 0 || (size_t)ret >= sizeof(numstr)) {
332 lxc_free_array((void **)cpulist, free);
333 return NULL;
334 }
335
336 ret = lxc_append_string(&cpulist, numstr);
337 if (ret < 0) {
338 lxc_free_array((void **)cpulist, free);
339 return NULL;
340 }
341 }
342
343 if (!cpulist)
344 return NULL;
345
346 return lxc_string_join(",", (const char **)cpulist, false);
347 }
348
349 static ssize_t get_max_cpus(char *cpulist)
350 {
351 char *c1, *c2;
352 char *maxcpus = cpulist;
353 size_t cpus = 0;
354
355 c1 = strrchr(maxcpus, ',');
356 if (c1)
357 c1++;
358
359 c2 = strrchr(maxcpus, '-');
360 if (c2)
361 c2++;
362
363 if (!c1 && !c2)
364 c1 = maxcpus;
365 else if (c1 > c2)
366 c2 = c1;
367 else if (c1 < c2)
368 c1 = c2;
369 else if (!c1 && c2)
370 c1 = c2;
371
372 errno = 0;
373 cpus = strtoul(c1, NULL, 0);
374 if (errno != 0)
375 return -1;
376
377 return cpus;
378 }
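/* Worked example for get_max_cpus(): for the cpulist "0-3,7" the characters
 * after the last ',' and the last '-' are located, the later of the two
 * positions wins, and strtoul() parses "7", so the function returns 7. For a
 * single cpu such as "2" neither separator exists and the whole string is
 * parsed, returning 2.
 */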
379
380 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
381 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
382 {
383 __do_free char *cpulist = NULL, *fpath = NULL, *isolcpus = NULL,
384 *posscpus = NULL;
385 __do_free uint32_t *isolmask = NULL, *possmask = NULL;
386 int ret;
387 ssize_t i;
388 char oldv;
389 char *lastslash, *posscpus_tmp;
390 ssize_t maxisol = 0, maxposs = 0;
391 bool bret = false, flipped_bit = false;
392
393 lastslash = strrchr(path, '/');
394 if (!lastslash) {
395 ERROR("Failed to detect \"/\" in \"%s\"", path);
396 return bret;
397 }
398 oldv = *lastslash;
399 *lastslash = '\0';
400 fpath = must_make_path(path, "cpuset.cpus", NULL);
401 posscpus = read_file(fpath);
402 if (!posscpus) {
403 SYSERROR("Failed to read file \"%s\"", fpath);
404 return false;
405 }
406
407 /* Get maximum number of cpus found in possible cpuset. */
408 maxposs = get_max_cpus(posscpus);
409 if (maxposs < 0 || maxposs >= INT_MAX - 1)
410 return false;
411
412 if (!file_exists(__ISOL_CPUS)) {
413 /* This system doesn't expose isolated cpus. */
414 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
415 /* No isolated cpus but we weren't already initialized by
416 * someone. We should simply copy the parent's cpuset.cpus
417 * values.
418 */
419 if (!am_initialized) {
420 DEBUG("Copying cpu settings of parent cgroup");
421 cpulist = posscpus;
422 goto copy_parent;
423 }
424 /* No isolated cpus but we were already initialized by someone.
425 * Nothing more to do for us.
426 */
427 return true;
428 }
429
430 isolcpus = read_file(__ISOL_CPUS);
431 if (!isolcpus) {
432 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
433 return false;
434 }
435 if (!isdigit(isolcpus[0])) {
436 TRACE("No isolated cpus detected");
437 /* No isolated cpus but we weren't already initialized by
438 * someone. We should simply copy the parent's cpuset.cpus
439 * values.
440 */
441 if (!am_initialized) {
442 DEBUG("Copying cpu settings of parent cgroup");
443 cpulist = posscpus;
444 goto copy_parent;
445 }
446 /* No isolated cpus but we were already initialized by someone.
447 * Nothing more to do for us.
448 */
449 return true;
450 }
451
452 /* Get maximum number of cpus found in isolated cpuset. */
453 maxisol = get_max_cpus(isolcpus);
454 if (maxisol < 0 || maxisol >= INT_MAX - 1)
455 return false;
456
457 if (maxposs < maxisol)
458 maxposs = maxisol;
459 maxposs++;
460
461 possmask = lxc_cpumask(posscpus, maxposs);
462 if (!possmask) {
463 ERROR("Failed to create cpumask for possible cpus");
464 return false;
465 }
466
467 isolmask = lxc_cpumask(isolcpus, maxposs);
468 if (!isolmask) {
469 ERROR("Failed to create cpumask for isolated cpus");
470 return false;
471 }
472
473 for (i = 0; i <= maxposs; i++) {
474 if (!is_set(i, isolmask) || !is_set(i, possmask))
475 continue;
476
477 flipped_bit = true;
478 clear_bit(i, possmask);
479 }
480
481 if (!flipped_bit) {
482 DEBUG("No isolated cpus present in cpuset");
483 return true;
484 }
485 DEBUG("Removed isolated cpus from cpuset");
486
487 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
488 if (!cpulist) {
489 ERROR("Failed to create cpu list");
490 return false;
491 }
492
493 copy_parent:
494 *lastslash = oldv;
495 fpath = must_make_path(path, "cpuset.cpus", NULL);
496 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
497 if (cpulist == posscpus)
498 cpulist = NULL;
499 if (ret < 0) {
500 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
501 return false;
502 }
503
504 return true;
505 }
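/* Worked example for cg_legacy_filter_and_set_cpus() (hypothetical host): if
 * the parent cgroup's cpuset.cpus reads "0-7" and
 * /sys/devices/system/cpu/isolated reads "3-4", both lists are converted to
 * bitmasks, bits 3 and 4 are cleared from the possible mask, and
 * "0,1,2,5,6,7" is written to this cgroup's cpuset.cpus. With no isolated
 * cpus the parent's value is only copied when the cgroup has not already been
 * initialized by someone else.
 */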
506
507 /* Copy contents of parent(@path)/@file to @path/@file */
508 static bool copy_parent_file(char *path, char *file)
509 {
510 __do_free char *child_path = NULL, *parent_path = NULL, *value = NULL;
511 int ret;
512 char oldv;
513 int len = 0;
514 char *lastslash = NULL;
515
516 lastslash = strrchr(path, '/');
517 if (!lastslash) {
518 ERROR("Failed to detect \"/\" in \"%s\"", path);
519 return false;
520 }
521 oldv = *lastslash;
522 *lastslash = '\0';
523 parent_path = must_make_path(path, file, NULL);
524 len = lxc_read_from_file(parent_path, NULL, 0);
525 if (len <= 0)
526 goto on_error;
527
528 value = must_realloc(NULL, len + 1);
529 ret = lxc_read_from_file(parent_path, value, len);
530 if (ret != len)
531 goto on_error;
532
533 *lastslash = oldv;
534 child_path = must_make_path(path, file, NULL);
535 ret = lxc_write_to_file(child_path, value, len, false, 0666);
536 if (ret < 0)
537 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, child_path);
538 return ret >= 0;
539
540 on_error:
541 SYSERROR("Failed to read file \"%s\"", parent_path);
542 return false;
543 }
544
545 /* Initialize the cpuset hierarchy in the first directory of @cgname and set
546 * cgroup.clone_children so that children inherit settings. Since the
547 * h->container_base_path is populated by init or ourselves, we know it is already
548 * initialized.
549 */
550 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
551 {
552 __do_free char *cgpath = NULL, *clonechildrenpath = NULL;
553 int ret;
554 char v;
555 char *slash;
556
557 if (!string_in_list(h->controllers, "cpuset"))
558 return true;
559
560 if (*cgname == '/')
561 cgname++;
562 slash = strchr(cgname, '/');
563 if (slash)
564 *slash = '\0';
565
566 cgpath = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
567 if (slash)
568 *slash = '/';
569
570 ret = mkdir(cgpath, 0755);
571 if (ret < 0) {
572 if (errno != EEXIST) {
573 SYSERROR("Failed to create directory \"%s\"", cgpath);
574 return false;
575 }
576 }
577
578 clonechildrenpath = must_make_path(cgpath, "cgroup.clone_children", NULL);
579 /* unified hierarchy doesn't have clone_children */
580 if (!file_exists(clonechildrenpath))
581 return true;
582
583 ret = lxc_read_from_file(clonechildrenpath, &v, 1);
584 if (ret < 0) {
585 SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
586 return false;
587 }
588
589 /* Make sure any isolated cpus are removed from cpuset.cpus. */
590 if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
591 SYSERROR("Failed to remove isolated cpus");
592 return false;
593 }
594
595 /* Already set for us by someone else. */
596 if (v == '1') {
597 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
598 return true;
599 }
600
601 /* copy parent's settings */
602 if (!copy_parent_file(cgpath, "cpuset.mems")) {
603 SYSERROR("Failed to copy \"cpuset.mems\" settings");
604 return false;
605 }
606
607 ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
608 if (ret < 0) {
609 /* Set clone_children so children inherit our settings */
610 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
611 return false;
612 }
613
614 return true;
615 }
616
617 /* Given two null-terminated lists of strings, return true if any string is in
618 * both.
619 */
620 static bool controller_lists_intersect(char **l1, char **l2)
621 {
622 int i;
623
624 if (!l1 || !l2)
625 return false;
626
627 for (i = 0; l1[i]; i++) {
628 if (string_in_list(l2, l1[i]))
629 return true;
630 }
631
632 return false;
633 }
634
635 /* For a null-terminated list of controllers @clist, return true if any of those
636 * controllers is already listed in the null-terminated list of hierarchies @hlist.
637 * Realistically, if one is present, all must be present.
638 */
639 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
640 {
641 int i;
642
643 if (!hlist)
644 return false;
645
646 for (i = 0; hlist[i]; i++)
647 if (controller_lists_intersect(hlist[i]->controllers, clist))
648 return true;
649
650 return false;
651 }
652
653 /* Return true if the controller @entry is found in the null-terminated list of
654 * hierarchies @hlist.
655 */
656 static bool controller_found(struct hierarchy **hlist, char *entry)
657 {
658 int i;
659
660 if (!hlist)
661 return false;
662
663 for (i = 0; hlist[i]; i++)
664 if (string_in_list(hlist[i]->controllers, entry))
665 return true;
666
667 return false;
668 }
669
670 /* Return true if all of the controllers which we require have been found. The
671 * required list is freezer and anything in lxc.cgroup.use.
672 */
673 static bool all_controllers_found(struct cgroup_ops *ops)
674 {
675 char **cur;
676 struct hierarchy **hlist = ops->hierarchies;
677
678 if (!ops->cgroup_use)
679 return true;
680
681 for (cur = ops->cgroup_use; cur && *cur; cur++)
682 if (!controller_found(hlist, *cur)) {
683 ERROR("No %s controller mountpoint found", *cur);
684 return false;
685 }
686
687 return true;
688 }
689
690 /* Get the controllers from a mountinfo line. There are other ways we could get
691 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
692 * could parse the mount options. But we simply assume that the mountpoint must
693 * be /sys/fs/cgroup/controller-list.
694 */
695 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
696 int type)
697 {
698 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
699 * for legacy hierarchies.
700 */
701 int i;
702 char *p2, *tok;
703 char *p = line, *sep = ",";
704 char **aret = NULL;
705
706 for (i = 0; i < 4; i++) {
707 p = strchr(p, ' ');
708 if (!p)
709 return NULL;
710 p++;
711 }
712
713 /* Note, if we change how mountinfo works, then our caller will need to
714 * verify /sys/fs/cgroup/ in this field.
715 */
716 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
717 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
718 return NULL;
719 }
720
721 p += 15;
722 p2 = strchr(p, ' ');
723 if (!p2) {
724 ERROR("Corrupt mountinfo");
725 return NULL;
726 }
727 *p2 = '\0';
728
729 if (type == CGROUP_SUPER_MAGIC) {
730 __do_free char *dup;
731
732 /* strdup() here for v1 hierarchies. Otherwise
733 * lxc_iterate_parts() will destroy mountpoints such as
734 * "/sys/fs/cgroup/cpu,cpuacct".
735 */
736 dup = must_copy_string(p);
737 if (!dup)
738 return NULL;
739
740 lxc_iterate_parts (tok, dup, sep)
741 must_append_controller(klist, nlist, &aret, tok);
742 }
743 *p2 = ' ';
744
745 return aret;
746 }
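/* Worked example for cg_hybrid_get_controllers() (field values are made up):
 * a legacy mountinfo line such as
 *
 *	34 25 0:29 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid ... - cgroup cgroup rw,cpu,cpuacct
 *
 * is parsed by skipping four spaces to reach the mount point, stripping the
 * "/sys/fs/cgroup/" prefix and splitting the remainder on ',', yielding the
 * controller list { "cpu", "cpuacct", NULL }.
 */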
747
748 static char **cg_unified_make_empty_controller(void)
749 {
750 int newentry;
751 char **aret = NULL;
752
753 newentry = append_null_to_list((void ***)&aret);
754 aret[newentry] = NULL;
755 return aret;
756 }
757
758 static char **cg_unified_get_controllers(const char *file)
759 {
760 __do_free char *buf = NULL;
761 char *tok;
762 char *sep = " \t\n";
763 char **aret = NULL;
764
765 buf = read_file(file);
766 if (!buf)
767 return NULL;
768
769 lxc_iterate_parts(tok, buf, sep) {
770 int newentry;
771 char *copy;
772
773 newentry = append_null_to_list((void ***)&aret);
774 copy = must_copy_string(tok);
775 aret[newentry] = copy;
776 }
777
778 return aret;
779 }
780
781 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
782 char *container_base_path, int type)
783 {
784 struct hierarchy *new;
785 int newentry;
786
787 new = must_realloc(NULL, sizeof(*new));
788 new->controllers = clist;
789 new->mountpoint = mountpoint;
790 new->container_base_path = container_base_path;
791 new->container_full_path = NULL;
792 new->monitor_full_path = NULL;
793 new->version = type;
794 new->cgroup2_chown = NULL;
795
796 newentry = append_null_to_list((void ***)h);
797 (*h)[newentry] = new;
798 return new;
799 }
800
801 /* Get a copy of the mountpoint from @line, which is a line from
802 * /proc/self/mountinfo.
803 */
804 static char *cg_hybrid_get_mountpoint(char *line)
805 {
806 int i;
807 size_t len;
808 char *p2;
809 char *p = line, *sret = NULL;
810
811 for (i = 0; i < 4; i++) {
812 p = strchr(p, ' ');
813 if (!p)
814 return NULL;
815 p++;
816 }
817
818 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
819 return NULL;
820
821 p2 = strchr(p + 15, ' ');
822 if (!p2)
823 return NULL;
824 *p2 = '\0';
825
826 len = strlen(p);
827 sret = must_realloc(NULL, len + 1);
828 memcpy(sret, p, len);
829 sret[len] = '\0';
830 return sret;
831 }
832
833 /* Given a multi-line string, return a null-terminated copy of the current line. */
834 static char *copy_to_eol(char *p)
835 {
836 char *p2 = strchr(p, '\n'), *sret;
837 size_t len;
838
839 if (!p2)
840 return NULL;
841
842 len = p2 - p;
843 sret = must_realloc(NULL, len + 1);
844 memcpy(sret, p, len);
845 sret[len] = '\0';
846 return sret;
847 }
848
849 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
850 * /proc/self/cgroup file. Check whether controller c is present.
851 */
852 static bool controller_in_clist(char *cgline, char *c)
853 {
854 __do_free char *tmp = NULL;
855 char *tok, *eol;
856 size_t len;
857
858 eol = strchr(cgline, ':');
859 if (!eol)
860 return false;
861
862 len = eol - cgline;
863 tmp = must_realloc(NULL, len + 1);
864 memcpy(tmp, cgline, len);
865 tmp[len] = '\0';
866
867 lxc_iterate_parts(tok, tmp, ",")
868 if (strcmp(tok, c) == 0)
869 return true;
870
871 return false;
872 }
873
874 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
875 * @controller.
876 */
877 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
878 int type)
879 {
880 char *p = basecginfo;
881
882 for (;;) {
883 bool is_cgv2_base_cgroup = false;
884
885 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
886 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
887 is_cgv2_base_cgroup = true;
888
889 p = strchr(p, ':');
890 if (!p)
891 return NULL;
892 p++;
893
894 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
895 p = strchr(p, ':');
896 if (!p)
897 return NULL;
898 p++;
899 return copy_to_eol(p);
900 }
901
902 p = strchr(p, '\n');
903 if (!p)
904 return NULL;
905 p++;
906 }
907 }
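/* Worked example for cg_hybrid_get_current_cgroup(): given a /proc/self/cgroup
 * containing
 *
 *	12:cpu,cpuacct:/lxc/c1
 *	0::/init.scope
 *
 * a legacy lookup for controller "cpu" returns "/lxc/c1", while a lookup with
 * type CGROUP2_SUPER_MAGIC matches the "0::" entry and returns "/init.scope".
 */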
908
909 static void must_append_string(char ***list, char *entry)
910 {
911 int newentry;
912 char *copy;
913
914 newentry = append_null_to_list((void ***)list);
915 copy = must_copy_string(entry);
916 (*list)[newentry] = copy;
917 }
918
919 static int get_existing_subsystems(char ***klist, char ***nlist)
920 {
921 __do_free char *line = NULL;
922 __do_fclose FILE *f = NULL;
923 size_t len = 0;
924
925 f = fopen("/proc/self/cgroup", "r");
926 if (!f)
927 return -1;
928
929 while (getline(&line, &len, f) != -1) {
930 char *p, *p2, *tok;
931 p = strchr(line, ':');
932 if (!p)
933 continue;
934 p++;
935 p2 = strchr(p, ':');
936 if (!p2)
937 continue;
938 *p2 = '\0';
939
940 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
941 * contains an entry of the form:
942 *
943 * 0::/some/path
944 *
945 * In this case we use "cgroup2" as controller name.
946 */
947 if ((p2 - p) == 0) {
948 must_append_string(klist, "cgroup2");
949 continue;
950 }
951
952 lxc_iterate_parts(tok, p, ",") {
953 if (strncmp(tok, "name=", 5) == 0)
954 must_append_string(nlist, tok);
955 else
956 must_append_string(klist, tok);
957 }
958 }
959
960 return 0;
961 }
962
963 static void trim(char *s)
964 {
965 size_t len;
966
967 len = strlen(s);
968 while ((len > 1) && (s[len - 1] == '\n'))
969 s[--len] = '\0';
970 }
971
972 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
973 {
974 int i;
975 struct hierarchy **it;
976
977 if (!ops->hierarchies) {
978 TRACE(" No hierarchies found");
979 return;
980 }
981
982 TRACE(" Hierarchies:");
983 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
984 int j;
985 char **cit;
986
987 TRACE(" %d: base_cgroup: %s", i, (*it)->container_base_path ? (*it)->container_base_path : "(null)");
988 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
989 TRACE(" controllers:");
990 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
991 TRACE(" %d: %s", j, *cit);
992 }
993 }
994
995 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
996 char **nlist)
997 {
998 int k;
999 char **it;
1000
1001 TRACE("basecginfo is:");
1002 TRACE("%s", basecginfo);
1003
1004 for (k = 0, it = klist; it && *it; it++, k++)
1005 TRACE("kernel subsystem %d: %s", k, *it);
1006
1007 for (k = 0, it = nlist; it && *it; it++, k++)
1008 TRACE("named subsystem %d: %s", k, *it);
1009 }
1010
1011 static int cgroup_rmdir(struct hierarchy **hierarchies,
1012 const char *container_cgroup)
1013 {
1014 int i;
1015
1016 if (!container_cgroup || !hierarchies)
1017 return 0;
1018
1019 for (i = 0; hierarchies[i]; i++) {
1020 int ret;
1021 struct hierarchy *h = hierarchies[i];
1022
1023 if (!h->container_full_path)
1024 continue;
1025
1026 ret = recursive_destroy(h->container_full_path);
1027 if (ret < 0)
1028 WARN("Failed to destroy \"%s\"", h->container_full_path);
1029
1030 free(h->container_full_path);
1031 h->container_full_path = NULL;
1032 }
1033
1034 return 0;
1035 }
1036
1037 struct generic_userns_exec_data {
1038 struct hierarchy **hierarchies;
1039 const char *container_cgroup;
1040 struct lxc_conf *conf;
1041 uid_t origuid; /* target uid in parent namespace */
1042 char *path;
1043 };
1044
1045 static int cgroup_rmdir_wrapper(void *data)
1046 {
1047 int ret;
1048 struct generic_userns_exec_data *arg = data;
1049 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1050 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1051
1052 ret = setresgid(nsgid, nsgid, nsgid);
1053 if (ret < 0) {
1054 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
1055 (int)nsgid, (int)nsgid);
1056 return -1;
1057 }
1058
1059 ret = setresuid(nsuid, nsuid, nsuid);
1060 if (ret < 0) {
1061 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
1062 (int)nsuid, (int)nsuid);
1063 return -1;
1064 }
1065
1066 ret = setgroups(0, NULL);
1067 if (ret < 0 && errno != EPERM) {
1068 SYSERROR("Failed to setgroups(0, NULL)");
1069 return -1;
1070 }
1071
1072 return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
1073 }
1074
1075 __cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
1076 struct lxc_handler *handler)
1077 {
1078 int ret;
1079 struct generic_userns_exec_data wrap;
1080
1081 if (!ops->hierarchies)
1082 return;
1083
1084 wrap.origuid = 0;
1085 wrap.container_cgroup = ops->container_cgroup;
1086 wrap.hierarchies = ops->hierarchies;
1087 wrap.conf = handler->conf;
1088
1089 if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
1090 ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
1091 "cgroup_rmdir_wrapper");
1092 else
1093 ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
1094 if (ret < 0) {
1095 WARN("Failed to destroy cgroups");
1096 return;
1097 }
1098 }
1099
1100 __cgfsng_ops static void cgfsng_monitor_destroy(struct cgroup_ops *ops,
1101 struct lxc_handler *handler)
1102 {
1103 int len;
1104 struct lxc_conf *conf = handler->conf;
1105 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1106
1107 if (!ops->hierarchies)
1108 return;
1109
1110 len = snprintf(pidstr, sizeof(pidstr), "%d", handler->monitor_pid);
1111 if (len < 0 || (size_t)len >= sizeof(pidstr))
1112 return;
1113
1114 for (int i = 0; ops->hierarchies[i]; i++) {
1115 __do_free char *pivot_path = NULL;
1116 int ret;
1117 char *chop;
1118 char pivot_cgroup[] = PIVOT_CGROUP;
1119 struct hierarchy *h = ops->hierarchies[i];
1120
1121 if (!h->monitor_full_path)
1122 continue;
1123
1124 if (conf && conf->cgroup_meta.dir)
1125 pivot_path = must_make_path(h->mountpoint,
1126 h->container_base_path,
1127 conf->cgroup_meta.dir,
1128 PIVOT_CGROUP,
1129 "cgroup.procs", NULL);
1130 else
1131 pivot_path = must_make_path(h->mountpoint,
1132 h->container_base_path,
1133 PIVOT_CGROUP,
1134 "cgroup.procs", NULL);
1135
1136 chop = strrchr(pivot_path, '/');
1137 if (chop)
1138 *chop = '\0';
1139
1140 /*
1141 * Make sure not to pass in the ro string literal PIVOT_CGROUP
1142 * here.
1143 */
1144 if (!cg_legacy_handle_cpuset_hierarchy(h, pivot_cgroup)) {
1145 WARN("Failed to handle legacy cpuset controller");
1146 continue;
1147 }
1148
1149 ret = mkdir_p(pivot_path, 0755);
1150 if (ret < 0 && errno != EEXIST) {
1151 SYSWARN("Failed to create cgroup \"%s\"\n", pivot_path);
1152 continue;
1153 }
1154
1155 if (chop)
1156 *chop = '/';
1157
1158 /* Move ourselves into the pivot cgroup to delete our own
1159 * cgroup.
1160 */
1161 ret = lxc_write_to_file(pivot_path, pidstr, len, false, 0666);
1162 if (ret != 0) {
1163 SYSWARN("Failed to move monitor %s to \"%s\"\n", pidstr, pivot_path);
1164 continue;
1165 }
1166
1167 ret = recursive_destroy(h->monitor_full_path);
1168 if (ret < 0)
1169 WARN("Failed to destroy \"%s\"", h->monitor_full_path);
1170 }
1171 }
1172
1173 static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
1174 {
1175 __do_free char *add_controllers = NULL, *cgroup = NULL;
1176 size_t i, parts_len;
1177 char **it;
1178 size_t full_len = 0;
1179 char **parts = NULL;
1180 bool bret = false;
1181
1182 if (h->version != CGROUP2_SUPER_MAGIC)
1183 return true;
1184
1185 if (!h->controllers)
1186 return true;
1187
1188 /* For now we simply enable all controllers that we have detected by
1189 * creating a string like "+memory +pids +cpu +io".
1190 * TODO: In the near future we might want to support "-<controller>"
1191 * etc. but whether supporting semantics like this make sense will need
1192 * some thinking.
1193 */
1194 for (it = h->controllers; it && *it; it++) {
1195 full_len += strlen(*it) + 2;
1196 add_controllers = must_realloc(add_controllers, full_len + 1);
1197
1198 if (h->controllers[0] == *it)
1199 add_controllers[0] = '\0';
1200
1201 (void)strlcat(add_controllers, "+", full_len + 1);
1202 (void)strlcat(add_controllers, *it, full_len + 1);
1203
1204 if ((it + 1) && *(it + 1))
1205 (void)strlcat(add_controllers, " ", full_len + 1);
1206 }
1207
1208 parts = lxc_string_split(cgname, '/');
1209 if (!parts)
1210 goto on_error;
1211
1212 parts_len = lxc_array_len((void **)parts);
1213 if (parts_len > 0)
1214 parts_len--;
1215
1216 cgroup = must_make_path(h->mountpoint, h->container_base_path, NULL);
1217 for (i = 0; i < parts_len; i++) {
1218 int ret;
1219 __do_free char *target;
1220
1221 cgroup = must_append_path(cgroup, parts[i], NULL);
1222 target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
1223 ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
1224 if (ret < 0) {
1225 SYSERROR("Could not enable \"%s\" controllers in the "
1226 "unified cgroup \"%s\"", add_controllers, cgroup);
1227 goto on_error;
1228 }
1229 }
1230
1231 bret = true;
1232
1233 on_error:
1234 lxc_free_array((void **)parts, free);
1235 return bret;
1236 }
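/* Worked example for cg_unified_create_cgroup() (hypothetical cgroup name):
 * with the unified controllers { "memory", "pids" } and a cgname of "lxc/c1",
 * the string "+memory +pids" is written to cgroup.subtree_control of every
 * ancestor of the final component (here only ".../lxc"), delegating those
 * controllers down to the cgroup that will actually hold the container.
 */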
1237
1238 static int mkdir_eexist_on_last(const char *dir, mode_t mode)
1239 {
1240 const char *tmp = dir;
1241 const char *orig = dir;
1242 size_t orig_len;
1243
1244 orig_len = strlen(dir);
1245 do {
1246 __do_free char *makeme;
1247 int ret;
1248 size_t cur_len;
1249
1250 dir = tmp + strspn(tmp, "/");
1251 tmp = dir + strcspn(dir, "/");
1252
1253 errno = ENOMEM;
1254 cur_len = dir - orig;
1255 makeme = strndup(orig, cur_len);
1256 if (!makeme)
1257 return -1;
1258
1259 ret = mkdir(makeme, mode);
1260 if (ret < 0) {
1261 if ((errno != EEXIST) || (orig_len == cur_len)) {
1262 SYSERROR("Failed to create directory \"%s\"", makeme);
1263 return -1;
1264 }
1265 }
1266 } while (tmp != dir);
1267
1268 return 0;
1269 }
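/* Note on mkdir_eexist_on_last(): the path is created component by component
 * and EEXIST is ignored for every intermediate directory; only an already
 * existing final component is treated as an error. That is what lets the
 * *_create_path_for_hierarchy() helpers below detect a name collision and
 * lets their callers retry with a "-NNN" suffix.
 */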
1270
1271 static bool monitor_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1272 {
1273 int ret;
1274
1275 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1276 ERROR("Failed to handle legacy cpuset controller");
1277 return false;
1278 }
1279
1280 h->monitor_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1281 ret = mkdir_eexist_on_last(h->monitor_full_path, 0755);
1282 if (ret < 0) {
1283 ERROR("Failed to create cgroup \"%s\"", h->monitor_full_path);
1284 return false;
1285 }
1286
1287 return cg_unified_create_cgroup(h, cgname);
1288 }
1289
1290 static bool container_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1291 {
1292 int ret;
1293
1294 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1295 ERROR("Failed to handle legacy cpuset controller");
1296 return false;
1297 }
1298
1299 h->container_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1300 ret = mkdir_eexist_on_last(h->container_full_path, 0755);
1301 if (ret < 0) {
1302 ERROR("Failed to create cgroup \"%s\"", h->container_full_path);
1303 return false;
1304 }
1305
1306 return cg_unified_create_cgroup(h, cgname);
1307 }
1308
1309 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname, bool monitor)
1310 {
1311 int ret;
1312 char *full_path;
1313
1314 if (monitor)
1315 full_path = h->monitor_full_path;
1316 else
1317 full_path = h->container_full_path;
1318
1319 ret = rmdir(full_path);
1320 if (ret < 0)
1321 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", full_path);
1322
1323 free(full_path);
1324
1325 if (monitor)
1326 h->monitor_full_path = NULL;
1327 else
1328 h->container_full_path = NULL;
1329 }
1330
1331 __cgfsng_ops static inline bool cgfsng_monitor_create(struct cgroup_ops *ops,
1332 struct lxc_handler *handler)
1333 {
1334 __do_free char *monitor_cgroup = NULL;
1335 char *offset, *tmp;
1336 int i, idx = 0;
1337 size_t len;
1338 struct lxc_conf *conf = handler->conf;
1339
1340 if (!conf)
1341 return false;
1342
1343 if (!ops->hierarchies)
1344 return true;
1345
1346 if (conf->cgroup_meta.dir)
1347 tmp = lxc_string_join("/",
1348 (const char *[]){conf->cgroup_meta.dir,
1349 ops->monitor_pattern,
1350 handler->name, NULL},
1351 false);
1352 else
1353 tmp = must_make_path(ops->monitor_pattern, handler->name, NULL);
1354 if (!tmp)
1355 return false;
1356
1357 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1358 monitor_cgroup = must_realloc(tmp, len);
1359 offset = monitor_cgroup + len - 5;
1360 *offset = 0;
1361
1362 do {
1363 if (idx) {
1364 int ret = snprintf(offset, 5, "-%d", idx);
1365 if (ret < 0 || (size_t)ret >= 5)
1366 return false;
1367 }
1368
1369 for (i = 0; ops->hierarchies[i]; i++) {
1370 if (!monitor_create_path_for_hierarchy(ops->hierarchies[i],
1371 monitor_cgroup)) {
1372 ERROR("Failed to create cgroup \"%s\"",
1373 ops->hierarchies[i]->monitor_full_path);
1374 for (int j = 0; j < i; j++)
1375 remove_path_for_hierarchy(ops->hierarchies[j],
1376 monitor_cgroup,
1377 true);
1378
1379 idx++;
1380 break;
1381 }
1382 }
1383 } while (ops->hierarchies[i] && idx > 0 && idx < 1000);
1384
1385 if (idx == 1000)
1386 return false;
1387
1388 INFO("The monitor process uses \"%s\" as cgroup", monitor_cgroup);
1389 return true;
1390 }
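/* If cgfsng_monitor_create() collides with an existing directory in any
 * hierarchy, the partially created paths are removed and the whole set is
 * retried as "<name>-1", "<name>-2", ... using the five bytes reserved above,
 * giving up after "-999". The payload variant below follows the same pattern.
 */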
1391
1392 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1393 * next cgroup_pattern-1, -2, ..., -999.
1394 */
1395 __cgfsng_ops static inline bool cgfsng_payload_create(struct cgroup_ops *ops,
1396 struct lxc_handler *handler)
1397 {
1398 __do_free char *container_cgroup = NULL, *tmp = NULL;
1399 int i;
1400 size_t len;
1401 char *offset;
1402 int idx = 0;
1403 struct lxc_conf *conf = handler->conf;
1404
1405 if (ops->container_cgroup)
1406 return false;
1407
1408 if (!conf)
1409 return false;
1410
1411 if (!ops->hierarchies)
1412 return true;
1413
1414 if (conf->cgroup_meta.dir)
1415 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1416 else
1417 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1418 if (!tmp) {
1419 ERROR("Failed expanding cgroup name pattern");
1420 return false;
1421 }
1422
1423 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1424 container_cgroup = must_realloc(NULL, len);
1425 (void)strlcpy(container_cgroup, tmp, len);
1426 offset = container_cgroup + len - 5;
1427
1428 do {
1429 int ret = snprintf(offset, 5, "-%d", idx);
1430 if (ret < 0 || (size_t)ret >= 5)
1431 return false;
1432
1433 for (i = 0; ops->hierarchies[i]; i++) {
1434 if (!container_create_path_for_hierarchy(ops->hierarchies[i], container_cgroup)) {
1435 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->container_full_path);
1436 for (int j = 0; j < i; j++)
1437 remove_path_for_hierarchy(ops->hierarchies[j], container_cgroup, false);
1438 idx++;
1439 break;
1440 }
1441 }
1442
1443 } while (ops->hierarchies[i] && idx > 0 && idx < 1000);
1444
1445 if (idx == 1000)
1446 return false;
1447
1448 ops->container_cgroup = container_cgroup;
1449 container_cgroup = NULL;
1450 INFO("The container process uses \"%s\" as cgroup", ops->container_cgroup);
1451
1452 return true;
1453 }
1454
1455 __cgfsng_ops static bool __do_cgroup_enter(struct cgroup_ops *ops, pid_t pid,
1456 bool monitor)
1457 {
1458 int len;
1459 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1460
1461 if (!ops->hierarchies)
1462 return true;
1463
1464 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
1465 if (len < 0 || (size_t)len >= sizeof(pidstr))
1466 return false;
1467
1468 for (int i = 0; ops->hierarchies[i]; i++) {
1469 int ret;
1470 __do_free char *path;
1471
1472 if (monitor)
1473 path = must_make_path(ops->hierarchies[i]->monitor_full_path,
1474 "cgroup.procs", NULL);
1475 else
1476 path = must_make_path(ops->hierarchies[i]->container_full_path,
1477 "cgroup.procs", NULL);
1478 ret = lxc_write_to_file(path, pidstr, len, false, 0666);
1479 if (ret != 0) {
1480 SYSERROR("Failed to enter cgroup \"%s\"", path);
1481 return false;
1482 }
1483 }
1484
1485 return true;
1486 }
1487
1488 __cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops, pid_t pid)
1489 {
1490 return __do_cgroup_enter(ops, pid, true);
1491 }
1492
1493 static bool cgfsng_payload_enter(struct cgroup_ops *ops, pid_t pid)
1494 {
1495 return __do_cgroup_enter(ops, pid, false);
1496 }
1497
1498 static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
1499 mode_t chmod_mode)
1500 {
1501 int ret;
1502
1503 ret = chown(path, chown_uid, chown_gid);
1504 if (ret < 0) {
1505 SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
1506 return -1;
1507 }
1508
1509 ret = chmod(path, chmod_mode);
1510 if (ret < 0) {
1511 SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
1512 return -1;
1513 }
1514
1515 return 0;
1516 }
1517
1518 /* chgrp the container cgroups to container group. We leave
1519 * the container owner as cgroup owner. So we must make the
1520 * directories 775 so that the container can create sub-cgroups.
1521 *
1522 * Also chown the tasks and cgroup.procs files. Those may not
1523 * exist depending on kernel version.
1524 */
1525 static int chown_cgroup_wrapper(void *data)
1526 {
1527 int i, ret;
1528 uid_t destuid;
1529 struct generic_userns_exec_data *arg = data;
1530 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1531 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1532
1533 ret = setresgid(nsgid, nsgid, nsgid);
1534 if (ret < 0) {
1535 SYSERROR("Failed to setresgid(%d, %d, %d)",
1536 (int)nsgid, (int)nsgid, (int)nsgid);
1537 return -1;
1538 }
1539
1540 ret = setresuid(nsuid, nsuid, nsuid);
1541 if (ret < 0) {
1542 SYSERROR("Failed to setresuid(%d, %d, %d)",
1543 (int)nsuid, (int)nsuid, (int)nsuid);
1544 return -1;
1545 }
1546
1547 ret = setgroups(0, NULL);
1548 if (ret < 0 && errno != EPERM) {
1549 SYSERROR("Failed to setgroups(0, NULL)");
1550 return -1;
1551 }
1552
1553 destuid = get_ns_uid(arg->origuid);
1554 if (destuid == LXC_INVALID_UID)
1555 destuid = 0;
1556
1557 for (i = 0; arg->hierarchies[i]; i++) {
1558 __do_free char *fullpath = NULL;
1559 char *path = arg->hierarchies[i]->container_full_path;
1560
1561 ret = chowmod(path, destuid, nsgid, 0775);
1562 if (ret < 0)
1563 return -1;
1564
1565 /* Failures to chown() these are inconvenient but not
1566 * detrimental. We leave these owned by the container launcher,
1567 * so that container root can write to the files to attach. We
1568 * chmod() them 664 so that container systemd can write to the
1569 * files (which systemd in wily insists on doing).
1570 */
1571
1572 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1573 fullpath = must_make_path(path, "tasks", NULL);
1574 (void)chowmod(fullpath, destuid, nsgid, 0664);
1575 }
1576
1577 fullpath = must_make_path(path, "cgroup.procs", NULL);
1578 (void)chowmod(fullpath, destuid, nsgid, 0664);
1579
1580 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1581 continue;
1582
1583 for (char **p = arg->hierarchies[i]->cgroup2_chown; p && *p; p++) {
1584 fullpath = must_make_path(path, *p, NULL);
1585 (void)chowmod(fullpath, destuid, nsgid, 0664);
1586 }
1587 }
1588
1589 return 0;
1590 }
1591
1592 __cgfsng_ops static bool cgfsng_chown(struct cgroup_ops *ops,
1593 struct lxc_conf *conf)
1594 {
1595 struct generic_userns_exec_data wrap;
1596
1597 if (lxc_list_empty(&conf->id_map))
1598 return true;
1599
1600 if (!ops->hierarchies)
1601 return true;
1602
1603 wrap.origuid = geteuid();
1604 wrap.path = NULL;
1605 wrap.hierarchies = ops->hierarchies;
1606 wrap.conf = conf;
1607
1608 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1609 "chown_cgroup_wrapper") < 0) {
1610 ERROR("Error requesting cgroup chown in new user namespace");
1611 return false;
1612 }
1613
1614 return true;
1615 }
1616
1617 /* cgroup-full:* is done, no need to create subdirs */
1618 static bool cg_mount_needs_subdirs(int type)
1619 {
1620 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1621 return false;
1622
1623 return true;
1624 }
1625
1626 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1627 * remount controller ro if needed and bindmount the cgroupfs onto
1628 * controller/the/cg/path.
1629 */
1630 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1631 char *controllerpath, char *cgpath,
1632 const char *container_cgroup)
1633 {
1634 __do_free char *sourcepath = NULL;
1635 int ret, remount_flags;
1636 int flags = MS_BIND;
1637
1638 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1639 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1640 if (ret < 0) {
1641 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1642 controllerpath, controllerpath);
1643 return -1;
1644 }
1645
1646 remount_flags = add_required_remount_flags(controllerpath,
1647 controllerpath,
1648 flags | MS_REMOUNT);
1649 ret = mount(controllerpath, controllerpath, "cgroup",
1650 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1651 NULL);
1652 if (ret < 0) {
1653 SYSERROR("Failed to remount \"%s\" ro", controllerpath);
1654 return -1;
1655 }
1656
1657 INFO("Remounted %s read-only", controllerpath);
1658 }
1659
1660 sourcepath = must_make_path(h->mountpoint, h->container_base_path,
1661 container_cgroup, NULL);
1662 if (type == LXC_AUTO_CGROUP_RO)
1663 flags |= MS_RDONLY;
1664
1665 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1666 if (ret < 0) {
1667 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1668 return -1;
1669 }
1670 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1671
1672 if (flags & MS_RDONLY) {
1673 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1674 flags | MS_REMOUNT);
1675 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1676 if (ret < 0) {
1677 SYSERROR("Failed to remount \"%s\" ro", cgpath);
1678 return -1;
1679 }
1680 INFO("Remounted %s read-only", cgpath);
1681 }
1682
1683 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1684 return 0;
1685 }
1686
1687 /* __cg_mount_direct
1688 *
1689 * Mount cgroup hierarchies directly without using bind-mounts. The main
1690 * use-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1691 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1692 */
1693 static int __cg_mount_direct(int type, struct hierarchy *h,
1694 const char *controllerpath)
1695 {
1696 int ret;
1697 __do_free char *controllers = NULL;
1698 char *fstype = "cgroup2";
1699 unsigned long flags = 0;
1700
1701 flags |= MS_NOSUID;
1702 flags |= MS_NOEXEC;
1703 flags |= MS_NODEV;
1704 flags |= MS_RELATIME;
1705
1706 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1707 flags |= MS_RDONLY;
1708
1709 if (h->version != CGROUP2_SUPER_MAGIC) {
1710 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1711 if (!controllers)
1712 return -ENOMEM;
1713 fstype = "cgroup";
1714 }
1715
1716 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1717 if (ret < 0) {
1718 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1719 return -1;
1720 }
1721
1722 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1723 return 0;
1724 }
1725
1726 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1727 const char *controllerpath)
1728 {
1729 return __cg_mount_direct(type, h, controllerpath);
1730 }
1731
1732 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1733 const char *controllerpath)
1734 {
1735 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1736 return 0;
1737
1738 return __cg_mount_direct(type, h, controllerpath);
1739 }
1740
1741 __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
1742 struct lxc_handler *handler,
1743 const char *root, int type)
1744 {
1745 __do_free char *tmpfspath = NULL;
1746 int i, ret;
1747 bool has_cgns = false, retval = false, wants_force_mount = false;
1748
1749 if (!ops->hierarchies)
1750 return true;
1751
1752 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1753 return true;
1754
1755 if (type & LXC_AUTO_CGROUP_FORCE) {
1756 type &= ~LXC_AUTO_CGROUP_FORCE;
1757 wants_force_mount = true;
1758 }
1759
1760 if (!wants_force_mount){
1761 if (!lxc_list_empty(&handler->conf->keepcaps))
1762 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1763 else
1764 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1765 }
1766
1767 has_cgns = cgns_supported();
1768 if (has_cgns && !wants_force_mount)
1769 return true;
1770
1771 if (type == LXC_AUTO_CGROUP_NOSPEC)
1772 type = LXC_AUTO_CGROUP_MIXED;
1773 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1774 type = LXC_AUTO_CGROUP_FULL_MIXED;
1775
1776 /* Mount tmpfs */
1777 tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
1778 ret = safe_mount(NULL, tmpfspath, "tmpfs",
1779 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1780 "size=10240k,mode=755", root);
1781 if (ret < 0)
1782 goto on_error;
1783
1784 for (i = 0; ops->hierarchies[i]; i++) {
1785 __do_free char *controllerpath = NULL, *path2 = NULL;
1786 struct hierarchy *h = ops->hierarchies[i];
1787 char *controller = strrchr(h->mountpoint, '/');
1788
1789 if (!controller)
1790 continue;
1791 controller++;
1792
1793 controllerpath = must_make_path(tmpfspath, controller, NULL);
1794 if (dir_exists(controllerpath))
1795 continue;
1796
1797 ret = mkdir(controllerpath, 0755);
1798 if (ret < 0) {
1799 SYSERROR("Error creating cgroup path: %s", controllerpath);
1800 goto on_error;
1801 }
1802
1803 if (has_cgns && wants_force_mount) {
1804 /* If cgroup namespaces are supported but the container
1805 * will not have CAP_SYS_ADMIN after it has started we
1806 * need to mount the cgroups manually.
1807 */
1808 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1809 if (ret < 0)
1810 goto on_error;
1811
1812 continue;
1813 }
1814
1815 ret = cg_mount_cgroup_full(type, h, controllerpath);
1816 if (ret < 0)
1817 goto on_error;
1818
1819 if (!cg_mount_needs_subdirs(type))
1820 continue;
1821
1822 path2 = must_make_path(controllerpath, h->container_base_path,
1823 ops->container_cgroup, NULL);
1824 ret = mkdir_p(path2, 0755);
1825 if (ret < 0)
1826 goto on_error;
1827
1828 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1829 path2, ops->container_cgroup);
1830 if (ret < 0)
1831 goto on_error;
1832 }
1833 retval = true;
1834
1835 on_error:
1836 return retval;
1837 }
1838
1839 static int recursive_count_nrtasks(char *dirname)
1840 {
1841 __do_free char *path = NULL;
1842 __do_closedir DIR *dir;
1843 struct dirent *direntp;
1844 int count = 0, ret;
1845
1846 dir = opendir(dirname);
1847 if (!dir)
1848 return 0;
1849
1850 while ((direntp = readdir(dir))) {
1851 struct stat mystat;
1852
1853 if (!strcmp(direntp->d_name, ".") ||
1854 !strcmp(direntp->d_name, ".."))
1855 continue;
1856
1857 path = must_make_path(dirname, direntp->d_name, NULL);
1858
1859 if (lstat(path, &mystat))
1860 continue;
1861
1862 if (!S_ISDIR(mystat.st_mode))
1863 continue;
1864
1865 count += recursive_count_nrtasks(path);
1866 }
1867
1868 path = must_make_path(dirname, "cgroup.procs", NULL);
1869 ret = lxc_count_file_lines(path);
1870 if (ret != -1)
1871 count += ret;
1872
1873 return count;
1874 }
1875
1876 __cgfsng_ops static int cgfsng_nrtasks(struct cgroup_ops *ops)
1877 {
1878 __do_free char *path = NULL;
1879 int count;
1880
1881 if (!ops->container_cgroup || !ops->hierarchies)
1882 return -1;
1883
1884 path = must_make_path(ops->hierarchies[0]->container_full_path, NULL);
1885 count = recursive_count_nrtasks(path);
1886 return count;
1887 }
1888
1889 /* Only root needs to escape to the cgroup of its init. */
1890 __cgfsng_ops static bool cgfsng_escape(const struct cgroup_ops *ops,
1891 struct lxc_conf *conf)
1892 {
1893 int i;
1894
1895 if (conf->cgroup_meta.relative || geteuid() || !ops->hierarchies)
1896 return true;
1897
1898 for (i = 0; ops->hierarchies[i]; i++) {
1899 int ret;
1900 __do_free char *fullpath;
1901
1902 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1903 ops->hierarchies[i]->container_base_path,
1904 "cgroup.procs", NULL);
1905 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1906 if (ret != 0) {
1907 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1908 return false;
1909 }
1910 }
1911
1912 return true;
1913 }
1914
1915 __cgfsng_ops static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1916 {
1917 int i = 0;
1918
1919 if (!ops->hierarchies)
1920 return 0;
1921
1922 for (; ops->hierarchies[i]; i++)
1923 ;
1924
1925 return i;
1926 }
1927
1928 __cgfsng_ops static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
1929 {
1930 int i;
1931
1932 if (!ops->hierarchies)
1933 return false;
1934
1935 /* sanity check n */
1936 for (i = 0; i < n; i++)
1937 if (!ops->hierarchies[i])
1938 return false;
1939
1940 *out = ops->hierarchies[i]->controllers;
1941
1942 return true;
1943 }
1944
1945 #define THAWED "THAWED"
1946 #define THAWED_LEN (strlen(THAWED))
1947
1948 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
1949 * to be adapted.
1950 */
1951 __cgfsng_ops static bool cgfsng_unfreeze(struct cgroup_ops *ops)
1952 {
1953 int ret;
1954 __do_free char *fullpath = NULL;
1955 struct hierarchy *h;
1956
1957 h = get_hierarchy(ops, "freezer");
1958 if (!h)
1959 return false;
1960
1961 fullpath = must_make_path(h->container_full_path, "freezer.state", NULL);
1962 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
1963 if (ret < 0)
1964 return false;
1965
1966 return true;
1967 }
1968
1969 __cgfsng_ops static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
1970 const char *controller)
1971 {
1972 struct hierarchy *h;
1973
1974 h = get_hierarchy(ops, controller);
1975 if (!h) {
1976 WARN("Failed to find hierarchy for controller \"%s\"",
1977 controller ? controller : "(null)");
1978 return NULL;
1979 }
1980
1981 return h->container_full_path ? h->container_full_path + strlen(h->mountpoint) : NULL;
1982 }
1983
1984 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
1985 * which must be freed by the caller.
1986 */
1987 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
1988 const char *inpath,
1989 const char *filename)
1990 {
1991 return must_make_path(h->mountpoint, inpath, filename, NULL);
1992 }
1993
1994 /* Technically, we're always at a delegation boundary here (This is especially
1995 * true when cgroup namespaces are available.). The reasoning is that in order
1996 * for us to have been able to start a container in the first place the root
1997 * cgroup must have been a leaf node. Now, either the container's init system
1998 * has populated the cgroup and kept it as a leaf node or it has created
1999 * subtrees. In the former case we will simply attach to the leaf node we
2000 * created when we started the container; in the latter case we create our own
2001 * cgroup for the attaching process.
2002 */
2003 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
2004 const char *lxcpath, const char *pidstr,
2005 size_t pidstr_len, const char *controller)
2006 {
2007 __do_free char *base_path = NULL, *container_cgroup = NULL,
2008 *full_path = NULL;
2009 int ret;
2010 size_t len;
2011 int fret = -1, idx = 0;
2012
2013 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2014 /* not running */
2015 if (!container_cgroup)
2016 return 0;
2017
2018 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
2019 full_path = must_make_path(base_path, "cgroup.procs", NULL);
2020 /* cgroup is populated */
2021 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2022 if (ret < 0 && errno != EBUSY)
2023 goto on_error;
2024
2025 if (ret == 0)
2026 goto on_success;
2027
2028 len = strlen(base_path) + STRLITERALLEN("/lxc-1000") +
2029 STRLITERALLEN("/cgroup.procs");
2030 full_path = must_realloc(NULL, len + 1);
2031 do {
2032 if (idx)
2033 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
2034 base_path, idx);
2035 else
2036 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
2037 if (ret < 0 || (size_t)ret >= len + 1)
2038 goto on_error;
2039
2040 ret = mkdir_p(full_path, 0755);
2041 if (ret < 0 && errno != EEXIST)
2042 goto on_error;
2043
2044 (void)strlcat(full_path, "/cgroup.procs", len + 1);
2045 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2046 if (ret == 0)
2047 goto on_success;
2048
2049 /* this is a non-leaf node */
2050 if (errno != EBUSY)
2051 goto on_error;
2052
2053 idx++;
2054 } while (idx < 1000);
2055
2056 on_success:
2057 if (idx < 1000)
2058 fret = 0;
2059
2060 on_error:
2061 return fret;
2062 }
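/* Worked example for __cg_unified_attach() (hypothetical paths): if the
 * container's unified cgroup is ".../lxc/c1" and its init has enabled
 * controllers for sub-cgroups, writing the attaching pid straight into
 * ".../lxc/c1/cgroup.procs" fails with EBUSY because of the
 * no-internal-process rule. The fallback then creates ".../lxc/c1/lxc",
 * ".../lxc/c1/lxc-1", ... and attaches the pid to the first of those leaf
 * cgroups that accepts it.
 */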
2063
2064 __cgfsng_ops static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
2065 const char *lxcpath, pid_t pid)
2066 {
2067 int i, len, ret;
2068 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
2069
2070 if (!ops->hierarchies)
2071 return true;
2072
2073 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
2074 if (len < 0 || (size_t)len >= sizeof(pidstr))
2075 return false;
2076
2077 for (i = 0; ops->hierarchies[i]; i++) {
2078 __do_free char *path = NULL;
2079 char *fullpath = NULL;
2080 struct hierarchy *h = ops->hierarchies[i];
2081
2082 if (h->version == CGROUP2_SUPER_MAGIC) {
2083 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
2084 h->controllers[0]);
2085 if (ret < 0)
2086 return false;
2087
2088 continue;
2089 }
2090
2091 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
2092 /* not running */
2093 if (!path)
2094 continue;
2095
2096 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2097 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2098 if (ret < 0) {
2099 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
2100 return false;
2101 }
2102 }
2103
2104 return true;
2105 }
2106
2107 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2108 * don't have a cgroup_data set up, so we ask the running container through the
2109 * commands API for the cgroup path.
2110 */
2111 __cgfsng_ops static int cgfsng_get(struct cgroup_ops *ops, const char *filename,
2112 char *value, size_t len, const char *name,
2113 const char *lxcpath)
2114 {
2115 __do_free char *path = NULL;
2116 __do_free char *controller;
2117 char *p;
2118 struct hierarchy *h;
2119 int ret = -1;
2120
2121 controller = must_copy_string(filename);
2122 p = strchr(controller, '.');
2123 if (p)
2124 *p = '\0';
2125
2126 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2127 /* not running */
2128 if (!path)
2129 return -1;
2130
2131 h = get_hierarchy(ops, controller);
2132 if (h) {
2133 __do_free char *fullpath;
2134
2135 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2136 ret = lxc_read_from_file(fullpath, value, len);
2137 }
2138
2139 return ret;
2140 }
2141
2142 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2143 * don't have a cgroup_data set up, so we ask the running container through the
2144 * commands API for the cgroup path.
2145 */
2146 __cgfsng_ops static int cgfsng_set(struct cgroup_ops *ops,
2147 const char *filename, const char *value,
2148 const char *name, const char *lxcpath)
2149 {
2150 __do_free char *path = NULL;
2151 __do_free char *controller;
2152 char *p;
2153 struct hierarchy *h;
2154 int ret = -1;
2155
2156 controller = must_copy_string(filename);
2157 p = strchr(controller, '.');
2158 if (p)
2159 *p = '\0';
2160
2161 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2162 /* not running */
2163 if (!path)
2164 return -1;
2165
2166 h = get_hierarchy(ops, controller);
2167 if (h) {
2168 __do_free char *fullpath;
2169
2170 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2171 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2172 }
2173
2174 return ret;
2175 }
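
/*
 * Illustrative sketch (not part of the LXC build): how cgfsng_get() and
 * cgfsng_set() derive the controller and the final file path from a limit
 * name. The mountpoint and cgroup path below are made-up example values;
 * in the real code they come from the hierarchy and the commands API.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char controller[64], path[256];
 *		const char *filename = "memory.limit_in_bytes";
 *		const char *mountpoint = "/sys/fs/cgroup/memory";
 *		const char *cgroup = "lxc.payload.c1";
 *		char *dot;
 *
 *		// The controller is everything before the first '.'.
 *		snprintf(controller, sizeof(controller), "%s", filename);
 *		dot = strchr(controller, '.');
 *		if (dot)
 *			*dot = '\0';
 *
 *		// Final file: <mountpoint>/<container cgroup>/<filename>.
 *		snprintf(path, sizeof(path), "%s/%s/%s", mountpoint, cgroup, filename);
 *		printf("controller=%s file=%s\n", controller, path);
 *		return 0;
 *	}
 *
 * With the values above this prints:
 *	controller=memory file=/sys/fs/cgroup/memory/lxc.payload.c1/memory.limit_in_bytes
 */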
2176
2177 /* Take a devices cgroup line of the form
2178 * /dev/foo rwx
2179 * and convert it to a valid
2180 * type major:minor mode
2181 * line. Return <0 on error. @dest is a preallocated buffer long enough to
2182 * hold the output.
2183 */
2184 static int convert_devpath(const char *invalue, char *dest)
2185 {
2186 __do_free char *path;
2187 int n_parts;
2188 char *p, type;
2189 unsigned long minor, major;
2190 struct stat sb;
2191 int ret = -EINVAL;
2192 char *mode = NULL;
2193
2194 path = must_copy_string(invalue);
2195
2196 /* Read the path followed by the mode. Ignore any trailing text.
2197 * A ' # comment' would be legal. Technically other text is not
2198 * legal; we could check for that if we cared to.
2199 */
2200 for (n_parts = 1, p = path; *p; p++) {
2201 if (*p != ' ')
2202 continue;
2203 *p = '\0';
2204
2205 if (n_parts != 1)
2206 break;
2207 p++;
2208 n_parts++;
2209
2210 while (*p == ' ')
2211 p++;
2212
2213 mode = p;
2214
2215 if (*p == '\0')
2216 goto out;
2217 }
2218
2219 if (n_parts == 1)
2220 goto out;
2221
2222 ret = stat(path, &sb);
2223 if (ret < 0)
2224 goto out;
2225
2226 mode_t m = sb.st_mode & S_IFMT;
2227 switch (m) {
2228 case S_IFBLK:
2229 type = 'b';
2230 break;
2231 case S_IFCHR:
2232 type = 'c';
2233 break;
2234 default:
2235 ERROR("Unsupported device type %i for \"%s\"", m, path);
2236 ret = -EINVAL;
2237 goto out;
2238 }
2239
2240 major = MAJOR(sb.st_rdev);
2241 minor = MINOR(sb.st_rdev);
2242 ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
2243 if (ret < 0 || ret >= 50) {
2244 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2245 "chars)", type, major, minor, mode);
2246 ret = -ENAMETOOLONG;
2247 goto out;
2248 }
2249 ret = 0;
2250
2251 out:
2252 return ret;
2253 }
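
/*
 * Illustrative sketch (not part of the LXC build): the conversion performed
 * by convert_devpath() for a single entry, e.g. "/dev/null rwm" becomes
 * "c 1:3 rwm" on a typical system.
 *
 *	#include <linux/kdev_t.h>
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *
 *	int main(void)
 *	{
 *		struct stat sb;
 *		char line[50];
 *
 *		if (stat("/dev/null", &sb) < 0)
 *			return 1;
 *
 *		// Character devices become 'c', block devices 'b'.
 *		snprintf(line, sizeof(line), "%c %lu:%lu %s",
 *			 S_ISCHR(sb.st_mode) ? 'c' : 'b',
 *			 (unsigned long)MAJOR(sb.st_rdev),
 *			 (unsigned long)MINOR(sb.st_rdev), "rwm");
 *		puts(line);	// typically "c 1:3 rwm"
 *		return 0;
 *	}
 */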
2254
2255 /* Called from setup_limits - here we have the container's cgroup_data because
2256 * we created the cgroups.
2257 */
2258 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2259 const char *value)
2260 {
2261 __do_free char *controller;
2262 __do_free char *fullpath = NULL;
2263 char *p;
2264 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2265 char converted_value[50];
2266 struct hierarchy *h;
2267 int ret = 0;
2268
2269 controller = must_copy_string(filename);
2270 p = strchr(controller, '.');
2271 if (p)
2272 *p = '\0';
2273
2274 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2275 ret = convert_devpath(value, converted_value);
2276 if (ret < 0)
2277 return ret;
2278 value = converted_value;
2279 }
2280
2281 h = get_hierarchy(ops, controller);
2282 if (!h) {
2283 ERROR("Failed to set up limits for the \"%s\" controller. "
2284 "The controller seems to be unused by the \"cgfsng\" cgroup "
2285 "driver or not enabled on the cgroup hierarchy",
2286 controller);
2287 errno = ENOENT;
2288 return -ENOENT;
2289 }
2290
2291 fullpath = must_make_path(h->container_full_path, filename, NULL);
2292 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2293 return ret;
2294 }
2295
2296 static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
2297 struct lxc_list *cgroup_settings,
2298 bool do_devices)
2299 {
2300 __do_free struct lxc_list *sorted_cgroup_settings = NULL;
2301 struct lxc_list *iterator, *next;
2302 struct lxc_cgroup *cg;
2303 bool ret = false;
2304
2305 if (lxc_list_empty(cgroup_settings))
2306 return true;
2307
2308 if (!ops->hierarchies)
2309 return false;
2310
2311 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2312 if (!sorted_cgroup_settings)
2313 return false;
2314
2315 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2316 cg = iterator->elem;
2317
2318 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2319 if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
2320 if (do_devices && (errno == EACCES || errno == EPERM)) {
2321 WARN("Failed to set \"%s\" to \"%s\"",
2322 cg->subsystem, cg->value);
2323 continue;
2324 }
2325 WARN("Failed to set \"%s\" to \"%s\"",
2326 cg->subsystem, cg->value);
2327 goto out;
2328 }
2329 DEBUG("Set controller \"%s\" to \"%s\"",
2330 cg->subsystem, cg->value);
2331 }
2332 }
2333
2334 ret = true;
2335 INFO("Limits for the legacy cgroup hierarchies have been set up");
2336 out:
2337 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2338 lxc_list_del(iterator);
2339 free(iterator);
2340 }
2341
2342 return ret;
2343 }
2344
2345 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2346 struct lxc_list *cgroup_settings)
2347 {
2348 struct lxc_list *iterator;
2349 struct hierarchy *h = ops->unified;
2350
2351 if (lxc_list_empty(cgroup_settings))
2352 return true;
2353
2354 if (!h)
2355 return false;
2356
2357 lxc_list_for_each(iterator, cgroup_settings) {
2358 __do_free char *fullpath;
2359 int ret;
2360 struct lxc_cgroup *cg = iterator->elem;
2361
2362 fullpath = must_make_path(h->container_full_path, cg->subsystem, NULL);
2363 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2364 if (ret < 0) {
2365 SYSERROR("Failed to set \"%s\" to \"%s\"",
2366 cg->subsystem, cg->value);
2367 return false;
2368 }
2369 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2370 }
2371
2372 INFO("Limits for the unified cgroup hierarchy have been set up");
2373 return true;
2374 }
2375
2376 __cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
2377 struct lxc_conf *conf,
2378 bool do_devices)
2379 {
2380 bool bret;
2381
2382 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2383 if (!bret)
2384 return false;
2385
2386 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2387 }
2388
2389 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
2390 char **controllers)
2391 {
2392 char **cur_ctrl, **cur_use;
2393
2394 if (!ops->cgroup_use)
2395 return true;
2396
2397 for (cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
2398 bool found = false;
2399
2400 for (cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
2401 if (strcmp(*cur_use, *cur_ctrl) != 0)
2402 continue;
2403
2404 found = true;
2405 break;
2406 }
2407
2408 if (found)
2409 continue;
2410
2411 return false;
2412 }
2413
2414 return true;
2415 }
2416
2417 static void cg_unified_delegate(char ***delegate)
2418 {
2419 __do_free char *tmp;
2420 int idx;
2421 char *standard[] = {"cgroup.subtree_control", "cgroup.threads", NULL};
2422
2423 tmp = read_file("/sys/kernel/cgroup/delegate");
2424 if (!tmp) {
2425 for (char **p = standard; p && *p; p++) {
2426 idx = append_null_to_list((void ***)delegate);
2427 (*delegate)[idx] = must_copy_string(*p);
2428 }
2429 } else {
2430 char *token;
2431 lxc_iterate_parts (token, tmp, " \t\n") {
2432 /*
2433 * We always need to chown this for both cgroup and
2434 * cgroup2.
2435 */
2436 if (strcmp(token, "cgroup.procs") == 0)
2437 continue;
2438
2439 idx = append_null_to_list((void ***)delegate);
2440 (*delegate)[idx] = must_copy_string(token);
2441 }
2442 }
2443 }
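
/*
 * Illustrative note (not part of the LXC build): /sys/kernel/cgroup/delegate
 * lists one delegatable file per line; recent kernels typically include at
 * least cgroup.procs, cgroup.threads and cgroup.subtree_control. A minimal
 * reader, assuming the file exists:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char name[64];
 *		FILE *f = fopen("/sys/kernel/cgroup/delegate", "re");
 *
 *		if (!f)
 *			return 1;
 *		// Each token names a file that must be chowned to the delegatee.
 *		while (fscanf(f, "%63s", name) == 1)
 *			puts(name);
 *		fclose(f);
 *		return 0;
 *	}
 */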
2444
2445 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
2446 * mountpoints and current cgroups, and stores it in @ops.
2447 */
2448 static bool cg_hybrid_init(struct cgroup_ops *ops, bool relative,
2449 bool unprivileged)
2450 {
2451 __do_free char *basecginfo;
2452 __do_free char *line = NULL;
2453 __do_fclose FILE *f = NULL;
2454 int ret;
2455 size_t len = 0;
2456 char **klist = NULL, **nlist = NULL;
2457
2458 /* Root spawned containers escape the current cgroup, so use init's
2459 * cgroups as our base in that case.
2460 */
2461 if (!relative && (geteuid() == 0))
2462 basecginfo = read_file("/proc/1/cgroup");
2463 else
2464 basecginfo = read_file("/proc/self/cgroup");
2465 if (!basecginfo)
2466 return false;
2467
2468 ret = get_existing_subsystems(&klist, &nlist);
2469 if (ret < 0) {
2470 ERROR("Failed to retrieve available legacy cgroup controllers");
2471 return false;
2472 }
2473
2474 f = fopen("/proc/self/mountinfo", "r");
2475 if (!f) {
2476 ERROR("Failed to open \"/proc/self/mountinfo\"");
2477 return false;
2478 }
2479
2480 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
2481
2482 while (getline(&line, &len, f) != -1) {
2483 int type;
2484 bool writeable;
2485 struct hierarchy *new;
2486 char *base_cgroup = NULL, *mountpoint = NULL;
2487 char **controller_list = NULL;
2488
2489 type = get_cgroup_version(line);
2490 if (type == 0)
2491 continue;
2492
2493 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
2494 continue;
2495
2496 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
2497 if (type == CGROUP2_SUPER_MAGIC)
2498 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2499 else if (type == CGROUP_SUPER_MAGIC)
2500 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
2501 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
2502 if (type == CGROUP_SUPER_MAGIC)
2503 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2504 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
2505 if (type == CGROUP2_SUPER_MAGIC)
2506 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2507 }
2508
2509 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
2510 if (!controller_list && type == CGROUP_SUPER_MAGIC)
2511 continue;
2512
2513 if (type == CGROUP_SUPER_MAGIC)
2514 if (controller_list_is_dup(ops->hierarchies, controller_list))
2515 goto next;
2516
2517 mountpoint = cg_hybrid_get_mountpoint(line);
2518 if (!mountpoint) {
2519 ERROR("Failed parsing mountpoint from \"%s\"", line);
2520 goto next;
2521 }
2522
2523 if (type == CGROUP_SUPER_MAGIC)
2524 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
2525 else
2526 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
2527 if (!base_cgroup) {
2528 ERROR("Failed to find current cgroup");
2529 goto next;
2530 }
2531
2532 trim(base_cgroup);
2533 prune_init_scope(base_cgroup);
2534 if (type == CGROUP2_SUPER_MAGIC)
2535 writeable = test_writeable_v2(mountpoint, base_cgroup);
2536 else
2537 writeable = test_writeable_v1(mountpoint, base_cgroup);
2538 if (!writeable)
2539 goto next;
2540
2541 if (type == CGROUP2_SUPER_MAGIC) {
2542 char *cgv2_ctrl_path;
2543
2544 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
2545 "cgroup.controllers",
2546 NULL);
2547
2548 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
2549 free(cgv2_ctrl_path);
2550 if (!controller_list) {
2551 controller_list = cg_unified_make_empty_controller();
2552 TRACE("No controllers are enabled for "
2553 "delegation in the unified hierarchy");
2554 }
2555 }
2556
2557 /* Exclude all controllers that cgroup use does not want. */
2558 if (!cgroup_use_wants_controllers(ops, controller_list))
2559 goto next;
2560
2561 new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
2562 if (type == CGROUP2_SUPER_MAGIC && !ops->unified) {
2563 if (unprivileged)
2564 cg_unified_delegate(&new->cgroup2_chown);
2565 ops->unified = new;
2566 }
2567
2568 continue;
2569
2570 next:
2571 free_string_list(controller_list);
2572 free(mountpoint);
2573 free(base_cgroup);
2574 }
2575
2576 free_string_list(klist);
2577 free_string_list(nlist);
2578
2579 TRACE("Writable cgroup hierarchies:");
2580 lxc_cgfsng_print_hierarchies(ops);
2581
2582 /* Verify that all controllers in lxc.cgroup.use and all crucial
2583 * controllers are accounted for.
2584 */
2585 if (!all_controllers_found(ops))
2586 return false;
2587
2588 return true;
2589 }
2590
2591 static int cg_is_pure_unified(void)
2592 {
2593
2594 int ret;
2595 struct statfs fs;
2596
2597 ret = statfs("/sys/fs/cgroup", &fs);
2598 if (ret < 0)
2599 return -ENOMEDIUM;
2600
2601 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2602 return CGROUP2_SUPER_MAGIC;
2603
2604 return 0;
2605 }
2606
2607 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
2608 static char *cg_unified_get_current_cgroup(bool relative)
2609 {
2610 __do_free char *basecginfo;
2611 char *base_cgroup;
2612 char *copy = NULL;
2613
2614 if (!relative && (geteuid() == 0))
2615 basecginfo = read_file("/proc/1/cgroup");
2616 else
2617 basecginfo = read_file("/proc/self/cgroup");
2618 if (!basecginfo)
2619 return NULL;
2620
2621 base_cgroup = strstr(basecginfo, "0::/");
2622 if (!base_cgroup)
2623 goto cleanup_on_err;
2624
2625 base_cgroup = base_cgroup + 3;
2626 copy = copy_to_eol(base_cgroup);
2627 if (!copy)
2628 goto cleanup_on_err;
2629
2630 cleanup_on_err:
2631 if (copy)
2632 trim(copy);
2633
2634 return copy;
2635 }
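
/*
 * Illustrative sketch (not part of the LXC build): on the unified hierarchy
 * the entry in /proc/{1,self}/cgroup has the form "0::<path>". Parsing a
 * sample line the same way cg_unified_get_current_cgroup() does (the example
 * path is made up):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		const char *cginfo = "0::/user.slice/user-1000.slice/session-2.scope\n";
 *		char current[256];
 *		const char *entry = strstr(cginfo, "0::/");
 *
 *		if (!entry)
 *			return 1;
 *		entry += 3;	// skip "0::" but keep the leading '/'
 *		snprintf(current, sizeof(current), "%.*s",
 *			 (int)strcspn(entry, "\n"), entry);
 *		puts(current);	// "/user.slice/user-1000.slice/session-2.scope"
 *		return 0;
 *	}
 */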
2636
2637 static int cg_unified_init(struct cgroup_ops *ops, bool relative,
2638 bool unprivileged)
2639 {
2640 __do_free char *subtree_path = NULL;
2641 int ret;
2642 char *mountpoint;
2643 char **delegatable;
2644 struct hierarchy *new;
2645 char *base_cgroup = NULL;
2646
2647 ret = cg_is_pure_unified();
2648 if (ret == -ENOMEDIUM)
2649 return -ENOMEDIUM;
2650
2651 if (ret != CGROUP2_SUPER_MAGIC)
2652 return 0;
2653
2654 base_cgroup = cg_unified_get_current_cgroup(relative);
2655 if (!base_cgroup)
2656 return -EINVAL;
2657 prune_init_scope(base_cgroup);
2658
2659 /* We assume that we have already been given controllers to delegate
2660 * further down the hierarchy. If not, it is up to the user to delegate
2661 * them to us.
2662 */
2663 mountpoint = must_copy_string("/sys/fs/cgroup");
2664 subtree_path = must_make_path(mountpoint, base_cgroup,
2665 "cgroup.subtree_control", NULL);
2666 delegatable = cg_unified_get_controllers(subtree_path);
2667 if (!delegatable)
2668 delegatable = cg_unified_make_empty_controller();
2669 if (!delegatable[0])
2670 TRACE("No controllers are enabled for delegation");
2671
2672 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2673 * we should verify them here. The reason I'm not doing it right now is
2674 * that I'm not convinced that lxc.cgroup.use will be the future since it
2675 * is a global property. I'd much rather have an option that lets you
2676 * request controllers per container.
2677 */
2678
2679 new = add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2680 if (unprivileged)
2681 cg_unified_delegate(&new->cgroup2_chown);
2682
2683 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2684 return CGROUP2_SUPER_MAGIC;
2685 }
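
/*
 * Illustrative sketch (not part of the LXC build): delegating controllers
 * further down the unified hierarchy means writing "+<controller>" tokens
 * into the parent cgroup's cgroup.subtree_control file. The cgroup path and
 * controller names below are made-up example values.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *ctrl = "+memory +pids";
 *		int fd = open("/sys/fs/cgroup/lxc/cgroup.subtree_control",
 *			      O_WRONLY | O_CLOEXEC);
 *
 *		if (fd < 0)
 *			return 1;
 *		// The kernel rejects the write if the requested controllers are
 *		// not available in the parent's cgroup.controllers.
 *		if (write(fd, ctrl, strlen(ctrl)) < 0) {
 *			close(fd);
 *			return 1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */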
2686
2687 static bool cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
2688 {
2689 int ret;
2690 const char *tmp;
2691 bool relative = conf->cgroup_meta.relative;
2692
2693 tmp = lxc_global_config_value("lxc.cgroup.use");
2694 if (tmp) {
2695 __do_free char *pin;
2696 char *chop, *cur;
2697
2698 pin = must_copy_string(tmp);
2699 chop = pin;
2700
2701 lxc_iterate_parts(cur, chop, ",")
2702 must_append_string(&ops->cgroup_use, cur);
2703 }
2704
2705 ret = cg_unified_init(ops, relative, !lxc_list_empty(&conf->id_map));
2706 if (ret < 0)
2707 return false;
2708
2709 if (ret == CGROUP2_SUPER_MAGIC)
2710 return true;
2711
2712 return cg_hybrid_init(ops, relative, !lxc_list_empty(&conf->id_map));
2713 }
2714
2715 __cgfsng_ops static bool cgfsng_data_init(struct cgroup_ops *ops)
2716 {
2717 const char *cgroup_pattern;
2718
2719 /* copy system-wide cgroup information */
2720 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2721 if (!cgroup_pattern) {
2722 /* lxc.cgroup.pattern is only NULL on error. */
2723 ERROR("Failed to retrieve cgroup pattern");
2724 return false;
2725 }
2726 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2727 ops->monitor_pattern = MONITOR_CGROUP;
2728
2729 return true;
2730 }
2731
2732 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
2733 {
2734 struct cgroup_ops *cgfsng_ops;
2735
2736 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2737 if (!cgfsng_ops)
2738 return NULL;
2739
2740 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2741 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2742
2743 if (!cg_init(cgfsng_ops, conf)) {
2744 free(cgfsng_ops);
2745 return NULL;
2746 }
2747
2748 cgfsng_ops->data_init = cgfsng_data_init;
2749 cgfsng_ops->payload_destroy = cgfsng_payload_destroy;
2750 cgfsng_ops->monitor_destroy = cgfsng_monitor_destroy;
2751 cgfsng_ops->monitor_create = cgfsng_monitor_create;
2752 cgfsng_ops->monitor_enter = cgfsng_monitor_enter;
2753 cgfsng_ops->payload_create = cgfsng_payload_create;
2754 cgfsng_ops->payload_enter = cgfsng_payload_enter;
2755 cgfsng_ops->escape = cgfsng_escape;
2756 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2757 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2758 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2759 cgfsng_ops->get = cgfsng_get;
2760 cgfsng_ops->set = cgfsng_set;
2761 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2762 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2763 cgfsng_ops->driver = "cgfsng";
2764 cgfsng_ops->version = "1.0.0";
2765 cgfsng_ops->attach = cgfsng_attach;
2766 cgfsng_ops->chown = cgfsng_chown;
2767 cgfsng_ops->mount = cgfsng_mount;
2768 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2769
2770 return cgfsng_ops;
2771 }
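
/*
 * Illustrative sketch (not part of the LXC build): how a caller would use the
 * ops table returned above, with the signatures defined in this file. Assumes
 * the usual LXC headers are included and that conf and pid are in scope; the
 * container name and lxcpath are made-up example values.
 *
 *	struct cgroup_ops *ops = cgfsng_ops_init(conf);
 *	if (!ops)
 *		return -1;
 *
 *	if (!ops->data_init(ops))
 *		return -1;
 *
 *	// Read a limit from a running container "c1" under /var/lib/lxc.
 *	char value[64];
 *	if (ops->get(ops, "memory.limit_in_bytes", value, sizeof(value),
 *		     "c1", "/var/lib/lxc") < 0)
 *		return -1;
 *
 *	// Move a process into the container's cgroups.
 *	if (!ops->attach(ops, "c1", "/var/lib/lxc", pid))
 *		return -1;
 */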