1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
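/* For example, on a typical hybrid host the layout this file expects looks
 * roughly like the following (illustrative only; the exact grouping of
 * controllers varies between distributions):
 *
 *     /sys/fs/cgroup/cpu,cpuacct
 *     /sys/fs/cgroup/memory
 *     /sys/fs/cgroup/systemd          <- named hierarchy, i.e. "name=systemd"
 *     /sys/fs/cgroup/unified          <- cgroup2 hierarchy on hybrid setups
 */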
36
37 #ifndef _GNU_SOURCE
38 #define _GNU_SOURCE 1
39 #endif
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <grp.h>
44 #include <linux/kdev_t.h>
45 #include <linux/types.h>
46 #include <stdint.h>
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <string.h>
50 #include <sys/types.h>
51 #include <unistd.h>
52
53 #include "caps.h"
54 #include "cgroup.h"
55 #include "cgroup_utils.h"
56 #include "commands.h"
57 #include "conf.h"
58 #include "config.h"
59 #include "log.h"
60 #include "macro.h"
61 #include "memory_utils.h"
62 #include "storage/storage.h"
63 #include "utils.h"
64
65 #ifndef HAVE_STRLCPY
66 #include "include/strlcpy.h"
67 #endif
68
69 #ifndef HAVE_STRLCAT
70 #include "include/strlcat.h"
71 #endif
72
73 lxc_log_define(cgfsng, cgroup);
74
75 static void free_string_list(char **clist)
76 {
77 int i;
78
79 if (!clist)
80 return;
81
82 for (i = 0; clist[i]; i++)
83 free(clist[i]);
84
85 free(clist);
86 }
87
88 /* Given a pointer to a null-terminated array of pointers, realloc to add one
89 * entry, and point the new entry to NULL. Do not fail. Return the index to the
90 * second-to-last entry - that is, the one which is now available for use
91 * (keeping the list null-terminated).
92 */
93 static int append_null_to_list(void ***list)
94 {
95 int newentry = 0;
96
97 if (*list)
98 for (; (*list)[newentry]; newentry++)
99 ;
100
101 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
102 (*list)[newentry + 1] = NULL;
103 return newentry;
104 }
105
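/* Illustrative sketch (not part of the build) of how the NULL-terminated
 * lists used throughout this file are grown. append_null_to_list() only
 * reserves the slot and keeps the terminator; the caller fills the slot in:
 *
 *     char **list = NULL;
 *     int idx;
 *
 *     idx = append_null_to_list((void ***)&list);   // idx == 0
 *     list[idx] = must_copy_string("cpu");          // {"cpu", NULL}
 *     idx = append_null_to_list((void ***)&list);   // idx == 1
 *     list[idx] = must_copy_string("cpuacct");      // {"cpu", "cpuacct", NULL}
 *     free_string_list(list);
 */
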
106 /* Given a null-terminated array of strings, check whether @entry is one of the
107 * strings.
108 */
109 static bool string_in_list(char **list, const char *entry)
110 {
111 int i;
112
113 if (!list)
114 return false;
115
116 for (i = 0; list[i]; i++)
117 if (strcmp(list[i], entry) == 0)
118 return true;
119
120 return false;
121 }
122
123 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
124 * "name=systemd". Do not fail.
125 */
126 static char *cg_legacy_must_prefix_named(char *entry)
127 {
128 size_t len;
129 char *prefixed;
130
131 len = strlen(entry);
132 prefixed = must_realloc(NULL, len + 6);
133
134 memcpy(prefixed, "name=", STRLITERALLEN("name="));
135 memcpy(prefixed + STRLITERALLEN("name="), entry, len);
136 prefixed[len + 5] = '\0';
137
138 return prefixed;
139 }
140
141 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
142 * we are called.
143 *
144 * We also handle named subsystems here. Any controller which is not a kernel
145 * subsystem is prefixed with "name=". Any controller which is both a kernel and
146 * a named subsystem is refused, because we cannot tell which of the two we have.
147 * (TODO: We could work around this in some cases by just remounting to be
148 * unambiguous, or by comparing mountpoint contents with current cgroup.)
149 *
150 * The last entry will always be NULL.
151 */
152 static void must_append_controller(char **klist, char **nlist, char ***clist,
153 char *entry)
154 {
155 int newentry;
156 char *copy;
157
158 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
159 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
160 ERROR("It is both a named and kernel subsystem");
161 return;
162 }
163
164 newentry = append_null_to_list((void ***)clist);
165
166 if (strncmp(entry, "name=", 5) == 0)
167 copy = must_copy_string(entry);
168 else if (string_in_list(klist, entry))
169 copy = must_copy_string(entry);
170 else
171 copy = cg_legacy_must_prefix_named(entry);
172
173 (*clist)[newentry] = copy;
174 }
175
176 /* Given a handler's cgroup data, return the struct hierarchy for the controller
177 * @controller, or NULL if there is none.
178 */
179 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *controller)
180 {
181 int i;
182
183 errno = ENOENT;
184
185 if (!ops->hierarchies) {
186 TRACE("There are no useable cgroup controllers");
187 return NULL;
188 }
189
190 for (i = 0; ops->hierarchies[i]; i++) {
191 if (!controller) {
192 /* This is the empty unified hierarchy. */
193 if (ops->hierarchies[i]->controllers &&
194 !ops->hierarchies[i]->controllers[0])
195 return ops->hierarchies[i];
196
197 continue;
198 }
199
200 if (string_in_list(ops->hierarchies[i]->controllers, controller))
201 return ops->hierarchies[i];
202 }
203
204 if (controller)
205 WARN("There is no useable %s controller", controller);
206 else
207 WARN("There is no empty unified cgroup hierarchy");
208
209 return NULL;
210 }
211
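/* Usage sketch (illustrative only): a caller that needs the mountpoint of the
 * memory controller would do something like
 *
 *     struct hierarchy *h = get_hierarchy(ops, "memory");
 *     if (!h)
 *             return false;
 *     TRACE("memory controller is mounted at %s", h->mountpoint);
 *
 * Passing controller == NULL selects the empty unified (cgroup2) hierarchy.
 */
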
212 #define BATCH_SIZE 50
213 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
214 {
215 int newbatches = (newlen / BATCH_SIZE) + 1;
216 int oldbatches = (oldlen / BATCH_SIZE) + 1;
217
218 if (!*mem || newbatches > oldbatches) {
219 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
220 }
221 }
222
223 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
224 {
225 size_t full = oldlen + newlen;
226
227 batch_realloc(dest, oldlen, full + 1);
228
229 memcpy(*dest + oldlen, new, newlen + 1);
230 }
231
232 /* Slurp in a whole file */
233 static char *read_file(const char *fnam)
234 {
235 FILE *f;
236 char *line = NULL, *buf = NULL;
237 size_t len = 0, fulllen = 0;
238 int linelen;
239
240 f = fopen(fnam, "r");
241 if (!f)
242 return NULL;
243 while ((linelen = getline(&line, &len, f)) != -1) {
244 append_line(&buf, fulllen, line, linelen);
245 fulllen += linelen;
246 }
247 fclose(f);
248 free(line);
249 return buf;
250 }
251
252 /* Adapted from the kernel sources. */
253 #define NBITS 32 /* bits in uint32_t */
254 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
255 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
256
257 static void set_bit(unsigned bit, uint32_t *bitarr)
258 {
259 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
260 }
261
262 static void clear_bit(unsigned bit, uint32_t *bitarr)
263 {
264 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
265 }
266
267 static bool is_set(unsigned bit, uint32_t *bitarr)
268 {
269 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
270 }
271
272 /* Create cpumask from cpulist aka turn:
273 *
274 * 0,2-3
275 *
276 * into bit array
277 *
278 * 1 0 1 1
279 */
280 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
281 {
282 char *token;
283 size_t arrlen;
284 uint32_t *bitarr;
285
286 arrlen = BITS_TO_LONGS(nbits);
287 bitarr = calloc(arrlen, sizeof(uint32_t));
288 if (!bitarr)
289 return NULL;
290
291 lxc_iterate_parts(token, buf, ",") {
292 errno = 0;
293 unsigned end, start;
294 char *range;
295
296 start = strtoul(token, NULL, 0);
297 end = start;
298 range = strchr(token, '-');
299 if (range)
300 end = strtoul(range + 1, NULL, 0);
301
302 if (!(start <= end)) {
303 free(bitarr);
304 return NULL;
305 }
306
307 if (end >= nbits) {
308 free(bitarr);
309 return NULL;
310 }
311
312 while (start <= end)
313 set_bit(start++, bitarr);
314 }
315
316 return bitarr;
317 }
318
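/* Worked example (a sketch, not part of the build): for the cpulist "0,2-3"
 * and nbits == 4, lxc_cpumask() sets bits 0, 2 and 3:
 *
 *     char buf[] = "0,2-3";
 *     uint32_t *mask = lxc_cpumask(buf, 4);
 *     // is_set(0, mask) -> true, is_set(1, mask) -> false
 *     // is_set(2, mask) -> true, is_set(3, mask) -> true
 *
 * Note that lxc_cpumask() tokenizes @buf in place via lxc_iterate_parts(),
 * so callers must pass a writable string.
 */
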
319 /* Turn cpumask into simple, comma-separated cpulist. */
320 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
321 {
322 int ret;
323 size_t i;
324 char **cpulist = NULL;
325 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
326
327 for (i = 0; i <= nbits; i++) {
328 if (!is_set(i, bitarr))
329 continue;
330
331 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
332 if (ret < 0 || (size_t)ret >= sizeof(numstr)) {
333 lxc_free_array((void **)cpulist, free);
334 return NULL;
335 }
336
337 ret = lxc_append_string(&cpulist, numstr);
338 if (ret < 0) {
339 lxc_free_array((void **)cpulist, free);
340 return NULL;
341 }
342 }
343
344 if (!cpulist)
345 return NULL;
346
347 return lxc_string_join(",", (const char **)cpulist, false);
348 }
349
350 static ssize_t get_max_cpus(char *cpulist)
351 {
352 char *c1, *c2;
353 char *maxcpus = cpulist;
354 size_t cpus = 0;
355
356 c1 = strrchr(maxcpus, ',');
357 if (c1)
358 c1++;
359
360 c2 = strrchr(maxcpus, '-');
361 if (c2)
362 c2++;
363
364 if (!c1 && !c2)
365 c1 = maxcpus;
366 else if (c1 > c2)
367 c2 = c1;
368 else if (c1 < c2)
369 c1 = c2;
370 else if (!c1 && c2)
371 c1 = c2;
372
373 errno = 0;
374 cpus = strtoul(c1, NULL, 0);
375 if (errno != 0)
376 return -1;
377
378 return cpus;
379 }
380
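/* Worked examples for get_max_cpus() (illustrative only):
 *
 *     get_max_cpus("0")      ->  0
 *     get_max_cpus("0,2-3")  ->  3
 *     get_max_cpus("0-7,9")  ->  9
 *
 * The function parses the number following whichever separator (',' or '-')
 * occurs last, i.e. the highest cpu number, assuming the kernel keeps
 * cpulists sorted in ascending order.
 */
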
381 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
382 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
383 {
384 int ret;
385 ssize_t i;
386 char *lastslash, *fpath, oldv;
387 ssize_t maxisol = 0, maxposs = 0;
388 char *cpulist = NULL, *isolcpus = NULL, *posscpus = NULL;
389 uint32_t *isolmask = NULL, *possmask = NULL;
390 bool bret = false, flipped_bit = false;
391
392 lastslash = strrchr(path, '/');
393 if (!lastslash) {
394 ERROR("Failed to detect \"/\" in \"%s\"", path);
395 return bret;
396 }
397 oldv = *lastslash;
398 *lastslash = '\0';
399 fpath = must_make_path(path, "cpuset.cpus", NULL);
400 posscpus = read_file(fpath);
401 if (!posscpus) {
402 SYSERROR("Failed to read file \"%s\"", fpath);
403 goto on_error;
404 }
405
406 /* Get maximum number of cpus found in possible cpuset. */
407 maxposs = get_max_cpus(posscpus);
408 if (maxposs < 0 || maxposs >= INT_MAX - 1)
409 goto on_error;
410
411 if (!file_exists(__ISOL_CPUS)) {
412 /* This system doesn't expose isolated cpus. */
413 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
414 cpulist = posscpus;
415 /* No isolated cpus but we weren't already initialized by
416 * someone. We should simply copy the parent's cpuset.cpus
417 * values.
418 */
419 if (!am_initialized) {
420 DEBUG("Copying cpu settings of parent cgroup");
421 goto copy_parent;
422 }
423 /* No isolated cpus but we were already initialized by someone.
424 * Nothing more to do for us.
425 */
426 goto on_success;
427 }
428
429 isolcpus = read_file(__ISOL_CPUS);
430 if (!isolcpus) {
431 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
432 goto on_error;
433 }
434 if (!isdigit(isolcpus[0])) {
435 TRACE("No isolated cpus detected");
436 cpulist = posscpus;
437 /* No isolated cpus but we weren't already initialized by
438 * someone. We should simply copy the parent's cpuset.cpus
439 * values.
440 */
441 if (!am_initialized) {
442 DEBUG("Copying cpu settings of parent cgroup");
443 goto copy_parent;
444 }
445 /* No isolated cpus but we were already initialized by someone.
446 * Nothing more to do for us.
447 */
448 goto on_success;
449 }
450
451 /* Get maximum number of cpus found in isolated cpuset. */
452 maxisol = get_max_cpus(isolcpus);
453 if (maxisol < 0 || maxisol >= INT_MAX - 1)
454 goto on_error;
455
456 if (maxposs < maxisol)
457 maxposs = maxisol;
458 maxposs++;
459
460 possmask = lxc_cpumask(posscpus, maxposs);
461 if (!possmask) {
462 ERROR("Failed to create cpumask for possible cpus");
463 goto on_error;
464 }
465
466 isolmask = lxc_cpumask(isolcpus, maxposs);
467 if (!isolmask) {
468 ERROR("Failed to create cpumask for isolated cpus");
469 goto on_error;
470 }
471
472 for (i = 0; i <= maxposs; i++) {
473 if (!is_set(i, isolmask) || !is_set(i, possmask))
474 continue;
475
476 flipped_bit = true;
477 clear_bit(i, possmask);
478 }
479
480 if (!flipped_bit) {
481 DEBUG("No isolated cpus present in cpuset");
482 goto on_success;
483 }
484 DEBUG("Removed isolated cpus from cpuset");
485
486 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
487 if (!cpulist) {
488 ERROR("Failed to create cpu list");
489 goto on_error;
490 }
491
492 copy_parent:
493 *lastslash = oldv;
494 free(fpath);
495 fpath = must_make_path(path, "cpuset.cpus", NULL);
496 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
497 if (ret < 0) {
498 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
499 goto on_error;
500 }
501
502 on_success:
503 bret = true;
504
505 on_error:
506 free(fpath);
507
508 free(isolcpus);
509 free(isolmask);
510
511 if (posscpus != cpulist)
512 free(posscpus);
513 free(possmask);
514
515 free(cpulist);
516 return bret;
517 }
518
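/* Worked example (illustrative only): if the parent's cpuset.cpus contains
 * "0-7" and /sys/devices/system/cpu/isolated contains "3-4", the function
 * above writes "0,1,2,5,6,7" into the new cgroup's cpuset.cpus, since
 * lxc_cpumask_to_cpulist() emits a plain comma-separated list rather than
 * ranges. If no cpus are isolated, the parent's value is copied verbatim
 * (the "copy_parent" path), or nothing is done at all if the cgroup was
 * already initialized via cgroup.clone_children.
 */
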
519 /* Copy contents of parent(@path)/@file to @path/@file */
520 static bool copy_parent_file(char *path, char *file)
521 {
522 int ret;
523 char *fpath, *lastslash, oldv;
524 int len = 0;
525 char *value = NULL;
526
527 lastslash = strrchr(path, '/');
528 if (!lastslash) {
529 ERROR("Failed to detect \"/\" in \"%s\"", path);
530 return false;
531 }
532 oldv = *lastslash;
533 *lastslash = '\0';
534 fpath = must_make_path(path, file, NULL);
535 len = lxc_read_from_file(fpath, NULL, 0);
536 if (len <= 0)
537 goto on_error;
538
539 value = must_realloc(NULL, len + 1);
540 ret = lxc_read_from_file(fpath, value, len);
541 if (ret != len)
542 goto on_error;
543 free(fpath);
544
545 *lastslash = oldv;
546 fpath = must_make_path(path, file, NULL);
547 ret = lxc_write_to_file(fpath, value, len, false, 0666);
548 if (ret < 0)
549 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, fpath);
550 free(fpath);
551 free(value);
552 return ret >= 0;
553
554 on_error:
555 SYSERROR("Failed to read file \"%s\"", fpath);
556 free(fpath);
557 free(value);
558 return false;
559 }
560
561 /* Initialize the cpuset hierarchy in the first directory of @cgname and set
562 * cgroup.clone_children so that children inherit settings. Since the
563 * h->container_base_path is populated by init or ourselves, we know it is already
564 * initialized.
565 */
566 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
567 {
568 int ret;
569 char v;
570 char *cgpath, *clonechildrenpath, *slash;
571
572 if (!string_in_list(h->controllers, "cpuset"))
573 return true;
574
575 if (*cgname == '/')
576 cgname++;
577 slash = strchr(cgname, '/');
578 if (slash)
579 *slash = '\0';
580
581 cgpath = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
582 if (slash)
583 *slash = '/';
584
585 ret = mkdir(cgpath, 0755);
586 if (ret < 0) {
587 if (errno != EEXIST) {
588 SYSERROR("Failed to create directory \"%s\"", cgpath);
589 free(cgpath);
590 return false;
591 }
592 }
593
594 clonechildrenpath = must_make_path(cgpath, "cgroup.clone_children", NULL);
595 /* unified hierarchy doesn't have clone_children */
596 if (!file_exists(clonechildrenpath)) {
597 free(clonechildrenpath);
598 free(cgpath);
599 return true;
600 }
601
602 ret = lxc_read_from_file(clonechildrenpath, &v, 1);
603 if (ret < 0) {
604 SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
605 free(clonechildrenpath);
606 free(cgpath);
607 return false;
608 }
609
610 /* Make sure any isolated cpus are removed from cpuset.cpus. */
611 if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
612 SYSERROR("Failed to remove isolated cpus");
613 free(clonechildrenpath);
614 free(cgpath);
615 return false;
616 }
617
618 /* Already set for us by someone else. */
619 if (v == '1') {
620 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
621 free(clonechildrenpath);
622 free(cgpath);
623 return true;
624 }
625
626 /* copy parent's settings */
627 if (!copy_parent_file(cgpath, "cpuset.mems")) {
628 SYSERROR("Failed to copy \"cpuset.mems\" settings");
629 free(cgpath);
630 free(clonechildrenpath);
631 return false;
632 }
633 free(cgpath);
634
635 ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
636 if (ret < 0) {
637 /* Set clone_children so children inherit our settings */
638 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
639 free(clonechildrenpath);
640 return false;
641 }
642 free(clonechildrenpath);
643 return true;
644 }
645
646 /* Given two null-terminated lists of strings, return true if any string is in
647 * both.
648 */
649 static bool controller_lists_intersect(char **l1, char **l2)
650 {
651 int i;
652
653 if (!l1 || !l2)
654 return false;
655
656 for (i = 0; l1[i]; i++) {
657 if (string_in_list(l2, l1[i]))
658 return true;
659 }
660
661 return false;
662 }
663
664 /* For a null-terminated list of controllers @clist, return true if any of those
665 * controllers is already listed in the null-terminated list of hierarchies @hlist.
666 * Realistically, if one is present, all must be present.
667 */
668 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
669 {
670 int i;
671
672 if (!hlist)
673 return false;
674
675 for (i = 0; hlist[i]; i++)
676 if (controller_lists_intersect(hlist[i]->controllers, clist))
677 return true;
678
679 return false;
680 }
681
682 /* Return true if the controller @entry is found in the null-terminated list of
683 * hierarchies @hlist.
684 */
685 static bool controller_found(struct hierarchy **hlist, char *entry)
686 {
687 int i;
688
689 if (!hlist)
690 return false;
691
692 for (i = 0; hlist[i]; i++)
693 if (string_in_list(hlist[i]->controllers, entry))
694 return true;
695
696 return false;
697 }
698
699 /* Return true if all of the controllers which we require have been found. The
700 * required list is anything listed in lxc.cgroup.use.
701 */
702 static bool all_controllers_found(struct cgroup_ops *ops)
703 {
704 char **cur;
705 struct hierarchy **hlist = ops->hierarchies;
706
707 if (!ops->cgroup_use)
708 return true;
709
710 for (cur = ops->cgroup_use; cur && *cur; cur++)
711 if (!controller_found(hlist, *cur)) {
712 ERROR("No %s controller mountpoint found", *cur);
713 return false;
714 }
715
716 return true;
717 }
718
719 /* Get the controllers from a mountinfo line. There are other ways we could get
720 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
721 * could parse the mount options. But we simply assume that the mountpoint must
722 * be /sys/fs/cgroup/controller-list
723 */
724 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
725 int type)
726 {
727 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
728 * for legacy hierarchies.
729 */
730 int i;
731 char *dup, *p2, *tok;
732 char *p = line, *sep = ",";
733 char **aret = NULL;
734
735 for (i = 0; i < 4; i++) {
736 p = strchr(p, ' ');
737 if (!p)
738 return NULL;
739 p++;
740 }
741
742 /* Note, if we change how mountinfo works, then our caller will need to
743 * verify /sys/fs/cgroup/ in this field.
744 */
745 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
746 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
747 return NULL;
748 }
749
750 p += 15;
751 p2 = strchr(p, ' ');
752 if (!p2) {
753 ERROR("Corrupt mountinfo");
754 return NULL;
755 }
756 *p2 = '\0';
757
758 if (type == CGROUP_SUPER_MAGIC) {
759 /* strdup() here for v1 hierarchies. Otherwise
760 * lxc_iterate_parts() will destroy mountpoints such as
761 * "/sys/fs/cgroup/cpu,cpuacct".
762 */
763 dup = strdup(p);
764 if (!dup)
765 return NULL;
766
767 lxc_iterate_parts(tok, dup, sep) {
768 must_append_controller(klist, nlist, &aret, tok);
769 }
770
771 free(dup);
772 }
773 *p2 = ' ';
774
775 return aret;
776 }
777
778 static char **cg_unified_make_empty_controller(void)
779 {
780 int newentry;
781 char **aret = NULL;
782
783 newentry = append_null_to_list((void ***)&aret);
784 aret[newentry] = NULL;
785 return aret;
786 }
787
788 static char **cg_unified_get_controllers(const char *file)
789 {
790 char *buf, *tok;
791 char *sep = " \t\n";
792 char **aret = NULL;
793
794 buf = read_file(file);
795 if (!buf)
796 return NULL;
797
798 lxc_iterate_parts(tok, buf, sep) {
799 int newentry;
800 char *copy;
801
802 newentry = append_null_to_list((void ***)&aret);
803 copy = must_copy_string(tok);
804 aret[newentry] = copy;
805 }
806
807 free(buf);
808 return aret;
809 }
810
811 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
812 char *container_base_path, int type)
813 {
814 struct hierarchy *new;
815 int newentry;
816
817 new = must_realloc(NULL, sizeof(*new));
818 new->controllers = clist;
819 new->mountpoint = mountpoint;
820 new->container_base_path = container_base_path;
821 new->container_full_path = NULL;
822 new->monitor_full_path = NULL;
823 new->version = type;
824 new->cgroup2_chown = NULL;
825
826 newentry = append_null_to_list((void ***)h);
827 (*h)[newentry] = new;
828 return new;
829 }
830
831 /* Get a copy of the mountpoint from @line, which is a line from
832 * /proc/self/mountinfo.
833 */
834 static char *cg_hybrid_get_mountpoint(char *line)
835 {
836 int i;
837 size_t len;
838 char *p2;
839 char *p = line, *sret = NULL;
840
841 for (i = 0; i < 4; i++) {
842 p = strchr(p, ' ');
843 if (!p)
844 return NULL;
845 p++;
846 }
847
848 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
849 return NULL;
850
851 p2 = strchr(p + 15, ' ');
852 if (!p2)
853 return NULL;
854 *p2 = '\0';
855
856 len = strlen(p);
857 sret = must_realloc(NULL, len + 1);
858 memcpy(sret, p, len);
859 sret[len] = '\0';
860 return sret;
861 }
862
863 /* Given a multi-line string, return a null-terminated copy of the current line. */
864 static char *copy_to_eol(char *p)
865 {
866 char *p2 = strchr(p, '\n'), *sret;
867 size_t len;
868
869 if (!p2)
870 return NULL;
871
872 len = p2 - p;
873 sret = must_realloc(NULL, len + 1);
874 memcpy(sret, p, len);
875 sret[len] = '\0';
876 return sret;
877 }
878
879 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
880 * /proc/self/cgroup file. Check whether controller c is present.
881 */
882 static bool controller_in_clist(char *cgline, char *c)
883 {
884 char *tok, *eol, *tmp;
885 size_t len;
886
887 eol = strchr(cgline, ':');
888 if (!eol)
889 return false;
890
891 len = eol - cgline;
892 tmp = must_realloc(NULL, len + 1);
893 memcpy(tmp, cgline, len);
894 tmp[len] = '\0';
895
896 lxc_iterate_parts(tok, tmp, ",") {
897 if (strcmp(tok, c) == 0) {
898 free(tmp);
899 return true;
900 }
901 }
902
903 free(tmp);
904 return false;
905 }
906
907 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
908 * @controller.
909 */
910 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
911 int type)
912 {
913 char *p = basecginfo;
914
915 for (;;) {
916 bool is_cgv2_base_cgroup = false;
917
918 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
919 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
920 is_cgv2_base_cgroup = true;
921
922 p = strchr(p, ':');
923 if (!p)
924 return NULL;
925 p++;
926
927 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
928 p = strchr(p, ':');
929 if (!p)
930 return NULL;
931 p++;
932 return copy_to_eol(p);
933 }
934
935 p = strchr(p, '\n');
936 if (!p)
937 return NULL;
938 p++;
939 }
940 }
941
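/* Worked example (illustrative only): for a /proc/self/cgroup copy containing
 *
 *     12:memory:/lxc/c1
 *     11:cpu,cpuacct:/lxc/c1
 *     0::/init.scope
 *
 * cg_hybrid_get_current_cgroup(buf, "memory", CGROUP_SUPER_MAGIC) returns
 * "/lxc/c1", while cg_hybrid_get_current_cgroup(buf, NULL, CGROUP2_SUPER_MAGIC)
 * returns "/init.scope" from the "0::" entry.
 */
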
942 static void must_append_string(char ***list, char *entry)
943 {
944 int newentry;
945 char *copy;
946
947 newentry = append_null_to_list((void ***)list);
948 copy = must_copy_string(entry);
949 (*list)[newentry] = copy;
950 }
951
952 static int get_existing_subsystems(char ***klist, char ***nlist)
953 {
954 FILE *f;
955 char *line = NULL;
956 size_t len = 0;
957
958 f = fopen("/proc/self/cgroup", "r");
959 if (!f)
960 return -1;
961
962 while (getline(&line, &len, f) != -1) {
963 char *p, *p2, *tok;
964 p = strchr(line, ':');
965 if (!p)
966 continue;
967 p++;
968 p2 = strchr(p, ':');
969 if (!p2)
970 continue;
971 *p2 = '\0';
972
973 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
974 * contains an entry of the form:
975 *
976 * 0::/some/path
977 *
978 * In this case we use "cgroup2" as controller name.
979 */
980 if ((p2 - p) == 0) {
981 must_append_string(klist, "cgroup2");
982 continue;
983 }
984
985 lxc_iterate_parts(tok, p, ",") {
986 if (strncmp(tok, "name=", 5) == 0)
987 must_append_string(nlist, tok);
988 else
989 must_append_string(klist, tok);
990 }
991 }
992
993 free(line);
994 fclose(f);
995 return 0;
996 }
997
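/* Illustrative sketch: for a /proc/self/cgroup of the form
 *
 *     12:memory:/lxc/c1
 *     11:cpu,cpuacct:/lxc/c1
 *     1:name=systemd:/init.scope
 *     0::/init.scope
 *
 * get_existing_subsystems() fills in
 *
 *     klist = { "memory", "cpu", "cpuacct", "cgroup2", NULL }
 *     nlist = { "name=systemd", NULL }
 *
 * where the "0::" cgroup2 entry is recorded under the fixed name "cgroup2".
 */
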
998 static void trim(char *s)
999 {
1000 size_t len;
1001
1002 len = strlen(s);
1003 while ((len > 1) && (s[len - 1] == '\n'))
1004 s[--len] = '\0';
1005 }
1006
1007 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
1008 {
1009 int i;
1010 struct hierarchy **it;
1011
1012 if (!ops->hierarchies) {
1013 TRACE(" No hierarchies found");
1014 return;
1015 }
1016
1017 TRACE(" Hierarchies:");
1018 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
1019 int j;
1020 char **cit;
1021
1022 TRACE(" %d: base_cgroup: %s", i, (*it)->container_base_path ? (*it)->container_base_path : "(null)");
1023 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
1024 TRACE(" controllers:");
1025 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
1026 TRACE(" %d: %s", j, *cit);
1027 }
1028 }
1029
1030 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
1031 char **nlist)
1032 {
1033 int k;
1034 char **it;
1035
1036 TRACE("basecginfo is:");
1037 TRACE("%s", basecginfo);
1038
1039 for (k = 0, it = klist; it && *it; it++, k++)
1040 TRACE("kernel subsystem %d: %s", k, *it);
1041
1042 for (k = 0, it = nlist; it && *it; it++, k++)
1043 TRACE("named subsystem %d: %s", k, *it);
1044 }
1045
1046 static int cgroup_rmdir(struct hierarchy **hierarchies,
1047 const char *container_cgroup)
1048 {
1049 int i;
1050
1051 if (!container_cgroup || !hierarchies)
1052 return 0;
1053
1054 for (i = 0; hierarchies[i]; i++) {
1055 int ret;
1056 struct hierarchy *h = hierarchies[i];
1057
1058 if (!h->container_full_path)
1059 continue;
1060
1061 ret = recursive_destroy(h->container_full_path);
1062 if (ret < 0)
1063 WARN("Failed to destroy \"%s\"", h->container_full_path);
1064
1065 free(h->container_full_path);
1066 h->container_full_path = NULL;
1067 }
1068
1069 return 0;
1070 }
1071
1072 struct generic_userns_exec_data {
1073 struct hierarchy **hierarchies;
1074 const char *container_cgroup;
1075 struct lxc_conf *conf;
1076 uid_t origuid; /* target uid in parent namespace */
1077 char *path;
1078 };
1079
1080 static int cgroup_rmdir_wrapper(void *data)
1081 {
1082 int ret;
1083 struct generic_userns_exec_data *arg = data;
1084 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1085 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1086
1087 ret = setresgid(nsgid, nsgid, nsgid);
1088 if (ret < 0) {
1089 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
1090 (int)nsgid, (int)nsgid);
1091 return -1;
1092 }
1093
1094 ret = setresuid(nsuid, nsuid, nsuid);
1095 if (ret < 0) {
1096 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
1097 (int)nsuid, (int)nsuid);
1098 return -1;
1099 }
1100
1101 ret = setgroups(0, NULL);
1102 if (ret < 0 && errno != EPERM) {
1103 SYSERROR("Failed to setgroups(0, NULL)");
1104 return -1;
1105 }
1106
1107 return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
1108 }
1109
1110 __cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
1111 struct lxc_handler *handler)
1112 {
1113 int ret;
1114 struct generic_userns_exec_data wrap;
1115
1116 if (!ops->hierarchies)
1117 return;
1118
1119 wrap.origuid = 0;
1120 wrap.container_cgroup = ops->container_cgroup;
1121 wrap.hierarchies = ops->hierarchies;
1122 wrap.conf = handler->conf;
1123
1124 if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
1125 ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
1126 "cgroup_rmdir_wrapper");
1127 else
1128 ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
1129 if (ret < 0) {
1130 WARN("Failed to destroy cgroups");
1131 return;
1132 }
1133 }
1134
1135 __cgfsng_ops static void cgfsng_monitor_destroy(struct cgroup_ops *ops,
1136 struct lxc_handler *handler)
1137 {
1138 int len;
1139 char *pivot_path;
1140 struct lxc_conf *conf = handler->conf;
1141 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1142
1143 if (!ops->hierarchies)
1144 return;
1145
1146 len = snprintf(pidstr, sizeof(pidstr), "%d", handler->monitor_pid);
1147 if (len < 0 || (size_t)len >= sizeof(pidstr))
1148 return;
1149
1150 for (int i = 0; ops->hierarchies[i]; i++) {
1151 int ret;
1152 char *chop;
1153 char pivot_cgroup[] = PIVOT_CGROUP;
1154 struct hierarchy *h = ops->hierarchies[i];
1155
1156 if (!h->monitor_full_path)
1157 continue;
1158
1159 if (conf && conf->cgroup_meta.dir)
1160 pivot_path = must_make_path(h->mountpoint,
1161 h->container_base_path,
1162 conf->cgroup_meta.dir,
1163 PIVOT_CGROUP,
1164 "cgroup.procs", NULL);
1165 else
1166 pivot_path = must_make_path(h->mountpoint,
1167 h->container_base_path,
1168 PIVOT_CGROUP,
1169 "cgroup.procs", NULL);
1170
1171 chop = strrchr(pivot_path, '/');
1172 if (chop)
1173 *chop = '\0';
1174
1175 /*
1176 * Make sure not to pass in the ro string literal PIVOT_CGROUP
1177 * here.
1178 */
1179 if (!cg_legacy_handle_cpuset_hierarchy(h, pivot_cgroup)) {
1180 WARN("Failed to handle legacy cpuset controller");
1181 goto next;
1182 }
1183
1184 ret = mkdir_p(pivot_path, 0755);
1185 if (ret < 0 && errno != EEXIST) {
1186 SYSWARN("Failed to create cgroup \"%s\"\n", pivot_path);
1187 goto next;
1188 }
1189
1190 if (chop)
1191 *chop = '/';
1192
1193 /* Move ourselves into the pivot cgroup to delete our own
1194 * cgroup.
1195 */
1196 ret = lxc_write_to_file(pivot_path, pidstr, len, false, 0666);
1197 if (ret != 0) {
1198 SYSWARN("Failed to move monitor %s to \"%s\"\n", pidstr, pivot_path);
1199 goto next;
1200 }
1201
1202 ret = recursive_destroy(h->monitor_full_path);
1203 if (ret < 0)
1204 WARN("Failed to destroy \"%s\"", h->monitor_full_path);
1205
1206 next:
1207 free(pivot_path);
1208 }
1209 }
1210
1211 static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
1212 {
1213 size_t i, parts_len;
1214 char **it;
1215 size_t full_len = 0;
1216 char *add_controllers = NULL, *cgroup = NULL;
1217 char **parts = NULL;
1218 bool bret = false;
1219
1220 if (h->version != CGROUP2_SUPER_MAGIC)
1221 return true;
1222
1223 if (!h->controllers)
1224 return true;
1225
1226 /* For now we simply enable all controllers that we have detected by
1227 * creating a string like "+memory +pids +cpu +io".
1228 * TODO: In the near future we might want to support "-<controller>"
1229 * etc. but whether supporting semantics like this make sense will need
1230 * some thinking.
1231 */
1232 for (it = h->controllers; it && *it; it++) {
1233 full_len += strlen(*it) + 2;
1234 add_controllers = must_realloc(add_controllers, full_len + 1);
1235
1236 if (h->controllers[0] == *it)
1237 add_controllers[0] = '\0';
1238
1239 (void)strlcat(add_controllers, "+", full_len + 1);
1240 (void)strlcat(add_controllers, *it, full_len + 1);
1241
1242 if ((it + 1) && *(it + 1))
1243 (void)strlcat(add_controllers, " ", full_len + 1);
1244 }
1245
1246 parts = lxc_string_split(cgname, '/');
1247 if (!parts)
1248 goto on_error;
1249
1250 parts_len = lxc_array_len((void **)parts);
1251 if (parts_len > 0)
1252 parts_len--;
1253
1254 cgroup = must_make_path(h->mountpoint, h->container_base_path, NULL);
1255 for (i = 0; i < parts_len; i++) {
1256 int ret;
1257 char *target;
1258
1259 cgroup = must_append_path(cgroup, parts[i], NULL);
1260 target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
1261 ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
1262 free(target);
1263 if (ret < 0) {
1264 SYSERROR("Could not enable \"%s\" controllers in the "
1265 "unified cgroup \"%s\"", add_controllers, cgroup);
1266 goto on_error;
1267 }
1268 }
1269
1270 bret = true;
1271
1272 on_error:
1273 lxc_free_array((void **)parts, free);
1274 free(add_controllers);
1275 free(cgroup);
1276 return bret;
1277 }
1278
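/* Illustrative sketch: if the unified hierarchy advertises the controllers
 * { "cpuset", "cpu", "io", "memory", "pids" }, the string built above is
 *
 *     "+cpuset +cpu +io +memory +pids"
 *
 * and it is written into cgroup.subtree_control for every component of
 * @cgname except the final one, so that the controllers are delegated down
 * to the container's own cgroup.
 */
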
1279 static int mkdir_eexist_on_last(const char *dir, mode_t mode)
1280 {
1281 const char *tmp = dir;
1282 const char *orig = dir;
1283 size_t orig_len;
1284
1285 orig_len = strlen(dir);
1286 do {
1287 int ret;
1288 size_t cur_len;
1289 char *makeme;
1290
1291 dir = tmp + strspn(tmp, "/");
1292 tmp = dir + strcspn(dir, "/");
1293
1294 errno = ENOMEM;
1295 cur_len = dir - orig;
1296 makeme = strndup(orig, cur_len);
1297 if (!makeme)
1298 return -1;
1299
1300 ret = mkdir(makeme, mode);
1301 if (ret < 0) {
1302 if ((errno != EEXIST) || (orig_len == cur_len)) {
1303 SYSERROR("Failed to create directory \"%s\"", makeme);
1304 free(makeme);
1305 return -1;
1306 }
1307 }
1308 free(makeme);
1309
1310 } while (tmp != dir);
1311
1312 return 0;
1313 }
1314
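/* Illustrative example: mkdir_eexist_on_last("/sys/fs/cgroup/memory/lxc/c1", 0755)
 * silently skips over already existing intermediate components such as
 * "memory" or "lxc", but fails if the final component "c1" already exists.
 * This is what lets the callers below detect a conflicting cgroup name and
 * retry with a "-NNN" suffix.
 */
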
1315 static bool monitor_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1316 {
1317 int ret;
1318
1319 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1320 ERROR("Failed to handle legacy cpuset controller");
1321 return false;
1322 }
1323
1324 h->monitor_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1325 ret = mkdir_eexist_on_last(h->monitor_full_path, 0755);
1326 if (ret < 0) {
1327 ERROR("Failed to create cgroup \"%s\"", h->monitor_full_path);
1328 return false;
1329 }
1330
1331 return cg_unified_create_cgroup(h, cgname);
1332 }
1333
1334 static bool container_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1335 {
1336 int ret;
1337
1338 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1339 ERROR("Failed to handle legacy cpuset controller");
1340 return false;
1341 }
1342
1343 h->container_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1344 ret = mkdir_eexist_on_last(h->container_full_path, 0755);
1345 if (ret < 0) {
1346 ERROR("Failed to create cgroup \"%s\"", h->container_full_path);
1347 return false;
1348 }
1349
1350 return cg_unified_create_cgroup(h, cgname);
1351 }
1352
1353 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname, bool monitor)
1354 {
1355 int ret;
1356 char *full_path;
1357
1358 if (monitor)
1359 full_path = h->monitor_full_path;
1360 else
1361 full_path = h->container_full_path;
1362
1363 ret = rmdir(full_path);
1364 if (ret < 0)
1365 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", full_path);
1366
1367 free(full_path);
1368
1369 if (monitor)
1370 h->monitor_full_path = NULL;
1371 else
1372 h->container_full_path = NULL;
1373 }
1374
1375 __cgfsng_ops static inline bool cgfsng_monitor_create(struct cgroup_ops *ops,
1376 struct lxc_handler *handler)
1377 {
1378 char *monitor_cgroup, *offset, *tmp;
1379 int i, idx = 0;
1380 size_t len;
1381 bool bret = false;
1382 struct lxc_conf *conf = handler->conf;
1383
1384 if (!conf)
1385 return bret;
1386
1387 if (!ops->hierarchies)
1388 return true;
1389
1390 if (conf->cgroup_meta.dir)
1391 tmp = lxc_string_join("/",
1392 (const char *[]){conf->cgroup_meta.dir,
1393 ops->monitor_pattern,
1394 handler->name, NULL},
1395 false);
1396 else
1397 tmp = must_make_path(ops->monitor_pattern, handler->name, NULL);
1398 if (!tmp)
1399 return bret;
1400
1401 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1402 monitor_cgroup = must_realloc(tmp, len);
1403 offset = monitor_cgroup + len - 5;
1404 *offset = 0;
1405
1406 do {
1407 if (idx) {
1408 int ret = snprintf(offset, 5, "-%d", idx);
1409 if (ret < 0 || (size_t)ret >= 5)
1410 goto on_error;
1411 }
1412
1413 for (i = 0; ops->hierarchies[i]; i++) {
1414 if (!monitor_create_path_for_hierarchy(ops->hierarchies[i], monitor_cgroup)) {
1415 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->monitor_full_path);
1416 for (int j = 0; j < i; j++)
1417 remove_path_for_hierarchy(ops->hierarchies[j], monitor_cgroup, true);
1418
1419 idx++;
1420 break;
1421 }
1422 }
1423 } while (ops->hierarchies[i] && idx > 0 && idx < 1000);
1424
1425 if (idx < 1000) {
1426 bret = true;
1427 INFO("The monitor process uses \"%s\" as cgroup", monitor_cgroup);
1428 }
1429
1430 on_error:
1431 free(monitor_cgroup);
1432
1433 return bret;
1434 }
1435
1436 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1437 * next cgroup_pattern-1, -2, ..., -999.
1438 */
1439 __cgfsng_ops static inline bool cgfsng_payload_create(struct cgroup_ops *ops,
1440 struct lxc_handler *handler)
1441 {
1442 int i;
1443 size_t len;
1444 char *container_cgroup, *offset, *tmp;
1445 int idx = 0;
1446 struct lxc_conf *conf = handler->conf;
1447
1448 if (ops->container_cgroup) {
1449 WARN("cgfsng_create called a second time: %s", ops->container_cgroup);
1450 return false;
1451 }
1452
1453 if (!conf)
1454 return false;
1455
1456 if (!ops->hierarchies)
1457 return true;
1458
1459 if (conf->cgroup_meta.dir)
1460 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1461 else
1462 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1463 if (!tmp) {
1464 ERROR("Failed expanding cgroup name pattern");
1465 return false;
1466 }
1467
1468 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1469 container_cgroup = must_realloc(NULL, len);
1470 (void)strlcpy(container_cgroup, tmp, len);
1471 free(tmp);
1472 offset = container_cgroup + len - 5;
1473
1474 again:
1475 if (idx == 1000) {
1476 ERROR("Too many conflicting cgroup names");
1477 goto out_free;
1478 }
1479
1480 if (idx) {
1481 int ret;
1482
1483 ret = snprintf(offset, 5, "-%d", idx);
1484 if (ret < 0 || (size_t)ret >= 5) {
1485 FILE *f = fopen("/dev/null", "w");
1486 if (f) {
1487 fprintf(f, "Workaround for GCC7 bug: "
1488 "https://gcc.gnu.org/bugzilla/"
1489 "show_bug.cgi?id=78969");
1490 fclose(f);
1491 }
1492 }
1493 }
1494
1495 for (i = 0; ops->hierarchies[i]; i++) {
1496 if (!container_create_path_for_hierarchy(ops->hierarchies[i], container_cgroup)) {
1497 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->container_full_path);
1498 for (int j = 0; j < i; j++)
1499 remove_path_for_hierarchy(ops->hierarchies[j], container_cgroup, false);
1500 idx++;
1501 goto again;
1502 }
1503 }
1504
1505 ops->container_cgroup = container_cgroup;
1506 INFO("The container uses \"%s\" as cgroup", container_cgroup);
1507
1508 return true;
1509
1510 out_free:
1511 free(container_cgroup);
1512
1513 return false;
1514 }
1515
1516 __cgfsng_ops static bool __do_cgroup_enter(struct cgroup_ops *ops, pid_t pid,
1517 bool monitor)
1518 {
1519 int len;
1520 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1521
1522 if (!ops->hierarchies)
1523 return true;
1524
1525 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
1526 if (len < 0 || (size_t)len >= sizeof(pidstr))
1527 return false;
1528
1529 for (int i = 0; ops->hierarchies[i]; i++) {
1530 int ret;
1531 char *path;
1532
1533 if (monitor)
1534 path = must_make_path(ops->hierarchies[i]->monitor_full_path,
1535 "cgroup.procs", NULL);
1536 else
1537 path = must_make_path(ops->hierarchies[i]->container_full_path,
1538 "cgroup.procs", NULL);
1539 ret = lxc_write_to_file(path, pidstr, len, false, 0666);
1540 if (ret != 0) {
1541 SYSERROR("Failed to enter cgroup \"%s\"", path);
1542 free(path);
1543 return false;
1544 }
1545 free(path);
1546 }
1547
1548 return true;
1549 }
1550
1551 __cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops, pid_t pid)
1552 {
1553 return __do_cgroup_enter(ops, pid, true);
1554 }
1555
1556 static bool cgfsng_payload_enter(struct cgroup_ops *ops, pid_t pid)
1557 {
1558 return __do_cgroup_enter(ops, pid, false);
1559 }
1560
1561 static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
1562 mode_t chmod_mode)
1563 {
1564 int ret;
1565
1566 ret = chown(path, chown_uid, chown_gid);
1567 if (ret < 0) {
1568 SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
1569 return -1;
1570 }
1571
1572 ret = chmod(path, chmod_mode);
1573 if (ret < 0) {
1574 SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
1575 return -1;
1576 }
1577
1578 return 0;
1579 }
1580
1581 /* chgrp the container cgroups to container group. We leave
1582 * the container owner as cgroup owner. So we must make the
1583 * directories 775 so that the container can create sub-cgroups.
1584 *
1585 * Also chown the tasks and cgroup.procs files. Those may not
1586 * exist depending on kernel version.
1587 */
1588 static int chown_cgroup_wrapper(void *data)
1589 {
1590 int i, ret;
1591 uid_t destuid;
1592 struct generic_userns_exec_data *arg = data;
1593 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1594 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1595
1596 ret = setresgid(nsgid, nsgid, nsgid);
1597 if (ret < 0) {
1598 SYSERROR("Failed to setresgid(%d, %d, %d)",
1599 (int)nsgid, (int)nsgid, (int)nsgid);
1600 return -1;
1601 }
1602
1603 ret = setresuid(nsuid, nsuid, nsuid);
1604 if (ret < 0) {
1605 SYSERROR("Failed to setresuid(%d, %d, %d)",
1606 (int)nsuid, (int)nsuid, (int)nsuid);
1607 return -1;
1608 }
1609
1610 ret = setgroups(0, NULL);
1611 if (ret < 0 && errno != EPERM) {
1612 SYSERROR("Failed to setgroups(0, NULL)");
1613 return -1;
1614 }
1615
1616 destuid = get_ns_uid(arg->origuid);
1617 if (destuid == LXC_INVALID_UID)
1618 destuid = 0;
1619
1620 for (i = 0; arg->hierarchies[i]; i++) {
1621 char *fullpath;
1622 char *path = arg->hierarchies[i]->container_full_path;
1623
1624 ret = chowmod(path, destuid, nsgid, 0775);
1625 if (ret < 0)
1626 return -1;
1627
1628 /* Failures to chown() these are inconvenient but not
1629 * detrimental. We leave these owned by the container launcher,
1630 * so that container root can write to the files to attach. We
1631 * chmod() them 664 so that container systemd can write to the
1632 * files (which systemd in wily insists on doing).
1633 */
1634
1635 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1636 fullpath = must_make_path(path, "tasks", NULL);
1637 (void)chowmod(fullpath, destuid, nsgid, 0664);
1638 free(fullpath);
1639 }
1640
1641 fullpath = must_make_path(path, "cgroup.procs", NULL);
1642 (void)chowmod(fullpath, destuid, nsgid, 0664);
1643 free(fullpath);
1644
1645 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1646 continue;
1647
1648 for (char **p = arg->hierarchies[i]->cgroup2_chown; p && *p; p++) {
1649 fullpath = must_make_path(path, *p, NULL);
1650 (void)chowmod(fullpath, destuid, nsgid, 0664);
1651 free(fullpath);
1652 }
1653 }
1654
1655 return 0;
1656 }
1657
1658 __cgfsng_ops static bool cgfsng_chown(struct cgroup_ops *ops,
1659 struct lxc_conf *conf)
1660 {
1661 struct generic_userns_exec_data wrap;
1662
1663 if (lxc_list_empty(&conf->id_map))
1664 return true;
1665
1666 if (!ops->hierarchies)
1667 return true;
1668
1669 wrap.origuid = geteuid();
1670 wrap.path = NULL;
1671 wrap.hierarchies = ops->hierarchies;
1672 wrap.conf = conf;
1673
1674 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1675 "chown_cgroup_wrapper") < 0) {
1676 ERROR("Error requesting cgroup chown in new user namespace");
1677 return false;
1678 }
1679
1680 return true;
1681 }
1682
1683 /* cgroup-full:* is done, no need to create subdirs */
1684 static bool cg_mount_needs_subdirs(int type)
1685 {
1686 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1687 return false;
1688
1689 return true;
1690 }
1691
1692 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1693 * remount the controller read-only if needed and bind-mount the cgroupfs onto
1694 * controller/the/cg/path.
1695 */
1696 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1697 char *controllerpath, char *cgpath,
1698 const char *container_cgroup)
1699 {
1700 int ret, remount_flags;
1701 char *sourcepath;
1702 int flags = MS_BIND;
1703
1704 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1705 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1706 if (ret < 0) {
1707 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1708 controllerpath, controllerpath);
1709 return -1;
1710 }
1711
1712 remount_flags = add_required_remount_flags(controllerpath,
1713 controllerpath,
1714 flags | MS_REMOUNT);
1715 ret = mount(controllerpath, controllerpath, "cgroup",
1716 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1717 NULL);
1718 if (ret < 0) {
1719 SYSERROR("Failed to remount \"%s\" ro", controllerpath);
1720 return -1;
1721 }
1722
1723 INFO("Remounted %s read-only", controllerpath);
1724 }
1725
1726 sourcepath = must_make_path(h->mountpoint, h->container_base_path,
1727 container_cgroup, NULL);
1728 if (type == LXC_AUTO_CGROUP_RO)
1729 flags |= MS_RDONLY;
1730
1731 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1732 if (ret < 0) {
1733 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1734 free(sourcepath);
1735 return -1;
1736 }
1737 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1738
1739 if (flags & MS_RDONLY) {
1740 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1741 flags | MS_REMOUNT);
1742 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1743 if (ret < 0) {
1744 SYSERROR("Failed to remount \"%s\" ro", cgpath);
1745 free(sourcepath);
1746 return -1;
1747 }
1748 INFO("Remounted %s read-only", cgpath);
1749 }
1750
1751 free(sourcepath);
1752 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1753 return 0;
1754 }
1755
1756 /* __cg_mount_direct
1757 *
1758 * Mount cgroup hierarchies directly without using bind-mounts. The main
1759 * use-cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1760 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1761 */
1762 static int __cg_mount_direct(int type, struct hierarchy *h,
1763 const char *controllerpath)
1764 {
1765 int ret;
1766 char *controllers = NULL;
1767 char *fstype = "cgroup2";
1768 unsigned long flags = 0;
1769
1770 flags |= MS_NOSUID;
1771 flags |= MS_NOEXEC;
1772 flags |= MS_NODEV;
1773 flags |= MS_RELATIME;
1774
1775 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1776 flags |= MS_RDONLY;
1777
1778 if (h->version != CGROUP2_SUPER_MAGIC) {
1779 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1780 if (!controllers)
1781 return -ENOMEM;
1782 fstype = "cgroup";
1783 }
1784
1785 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1786 free(controllers);
1787 if (ret < 0) {
1788 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1789 return -1;
1790 }
1791
1792 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1793 return 0;
1794 }
1795
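/* Illustrative sketch of the effective mount(2) calls made above, assuming
 * MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_RELATIME in @flags:
 *
 *     // legacy hierarchy with controllers { "cpu", "cpuacct" }:
 *     mount("cgroup", controllerpath, "cgroup", flags, "cpu,cpuacct");
 *
 *     // unified (cgroup2) hierarchy:
 *     mount("cgroup", controllerpath, "cgroup2", flags, NULL);
 *
 * with MS_RDONLY added for the read-only automount variants.
 */
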
1796 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1797 const char *controllerpath)
1798 {
1799 return __cg_mount_direct(type, h, controllerpath);
1800 }
1801
1802 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1803 const char *controllerpath)
1804 {
1805 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1806 return 0;
1807
1808 return __cg_mount_direct(type, h, controllerpath);
1809 }
1810
1811 __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
1812 struct lxc_handler *handler,
1813 const char *root, int type)
1814 {
1815 int i, ret;
1816 char *tmpfspath = NULL;
1817 bool has_cgns = false, retval = false, wants_force_mount = false;
1818
1819 if (!ops->hierarchies)
1820 return true;
1821
1822 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1823 return true;
1824
1825 if (type & LXC_AUTO_CGROUP_FORCE) {
1826 type &= ~LXC_AUTO_CGROUP_FORCE;
1827 wants_force_mount = true;
1828 }
1829
1830 if (!wants_force_mount){
1831 if (!lxc_list_empty(&handler->conf->keepcaps))
1832 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1833 else
1834 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1835 }
1836
1837 has_cgns = cgns_supported();
1838 if (has_cgns && !wants_force_mount)
1839 return true;
1840
1841 if (type == LXC_AUTO_CGROUP_NOSPEC)
1842 type = LXC_AUTO_CGROUP_MIXED;
1843 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1844 type = LXC_AUTO_CGROUP_FULL_MIXED;
1845
1846 /* Mount tmpfs */
1847 tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
1848 ret = safe_mount(NULL, tmpfspath, "tmpfs",
1849 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1850 "size=10240k,mode=755", root);
1851 if (ret < 0)
1852 goto on_error;
1853
1854 for (i = 0; ops->hierarchies[i]; i++) {
1855 char *controllerpath, *path2;
1856 struct hierarchy *h = ops->hierarchies[i];
1857 char *controller = strrchr(h->mountpoint, '/');
1858
1859 if (!controller)
1860 continue;
1861 controller++;
1862
1863 controllerpath = must_make_path(tmpfspath, controller, NULL);
1864 if (dir_exists(controllerpath)) {
1865 free(controllerpath);
1866 continue;
1867 }
1868
1869 ret = mkdir(controllerpath, 0755);
1870 if (ret < 0) {
1871 SYSERROR("Error creating cgroup path: %s", controllerpath);
1872 free(controllerpath);
1873 goto on_error;
1874 }
1875
1876 if (has_cgns && wants_force_mount) {
1877 /* If cgroup namespaces are supported but the container
1878 * will not have CAP_SYS_ADMIN after it has started we
1879 * need to mount the cgroups manually.
1880 */
1881 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1882 free(controllerpath);
1883 if (ret < 0)
1884 goto on_error;
1885
1886 continue;
1887 }
1888
1889 ret = cg_mount_cgroup_full(type, h, controllerpath);
1890 if (ret < 0) {
1891 free(controllerpath);
1892 goto on_error;
1893 }
1894
1895 if (!cg_mount_needs_subdirs(type)) {
1896 free(controllerpath);
1897 continue;
1898 }
1899
1900 path2 = must_make_path(controllerpath, h->container_base_path,
1901 ops->container_cgroup, NULL);
1902 ret = mkdir_p(path2, 0755);
1903 if (ret < 0) {
1904 free(controllerpath);
1905 free(path2);
1906 goto on_error;
1907 }
1908
1909 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1910 path2, ops->container_cgroup);
1911 free(controllerpath);
1912 free(path2);
1913 if (ret < 0)
1914 goto on_error;
1915 }
1916 retval = true;
1917
1918 on_error:
1919 free(tmpfspath);
1920 return retval;
1921 }
1922
1923 static int recursive_count_nrtasks(char *dirname)
1924 {
1925 struct dirent *direntp;
1926 DIR *dir;
1927 int count = 0, ret;
1928 char *path;
1929
1930 dir = opendir(dirname);
1931 if (!dir)
1932 return 0;
1933
1934 while ((direntp = readdir(dir))) {
1935 struct stat mystat;
1936
1937 if (!strcmp(direntp->d_name, ".") ||
1938 !strcmp(direntp->d_name, ".."))
1939 continue;
1940
1941 path = must_make_path(dirname, direntp->d_name, NULL);
1942
1943 if (lstat(path, &mystat))
1944 goto next;
1945
1946 if (!S_ISDIR(mystat.st_mode))
1947 goto next;
1948
1949 count += recursive_count_nrtasks(path);
1950 next:
1951 free(path);
1952 }
1953
1954 path = must_make_path(dirname, "cgroup.procs", NULL);
1955 ret = lxc_count_file_lines(path);
1956 if (ret != -1)
1957 count += ret;
1958 free(path);
1959
1960 (void)closedir(dir);
1961
1962 return count;
1963 }
1964
1965 __cgfsng_ops static int cgfsng_nrtasks(struct cgroup_ops *ops)
1966 {
1967 int count;
1968 char *path;
1969
1970 if (!ops->container_cgroup || !ops->hierarchies)
1971 return -1;
1972
1973 path = must_make_path(ops->hierarchies[0]->container_full_path, NULL);
1974 count = recursive_count_nrtasks(path);
1975 free(path);
1976 return count;
1977 }
1978
1979 /* Only root needs to escape to the cgroup of its init. */
1980 __cgfsng_ops static bool cgfsng_escape(const struct cgroup_ops *ops,
1981 struct lxc_conf *conf)
1982 {
1983 int i;
1984
1985 if (conf->cgroup_meta.relative || geteuid() || !ops->hierarchies)
1986 return true;
1987
1988 for (i = 0; ops->hierarchies[i]; i++) {
1989 int ret;
1990 char *fullpath;
1991
1992 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1993 ops->hierarchies[i]->container_base_path,
1994 "cgroup.procs", NULL);
1995 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1996 if (ret != 0) {
1997 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1998 free(fullpath);
1999 return false;
2000 }
2001 free(fullpath);
2002 }
2003
2004 return true;
2005 }
2006
2007 __cgfsng_ops static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
2008 {
2009 int i = 0;
2010
2011 if (!ops->hierarchies)
2012 return 0;
2013
2014 for (; ops->hierarchies[i]; i++)
2015 ;
2016
2017 return i;
2018 }
2019
2020 __cgfsng_ops static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
2021 {
2022 int i;
2023
2024 if (!ops->hierarchies)
2025 return false;
2026
2027 /* sanity check n */
2028 for (i = 0; i < n; i++)
2029 if (!ops->hierarchies[i])
2030 return false;
2031
2032 *out = ops->hierarchies[i]->controllers;
2033
2034 return true;
2035 }
2036
2037 #define THAWED "THAWED"
2038 #define THAWED_LEN (strlen(THAWED))
2039
2040 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
2041 * to be adapted.
2042 */
2043 __cgfsng_ops static bool cgfsng_unfreeze(struct cgroup_ops *ops)
2044 {
2045 int ret;
2046 char *fullpath;
2047 struct hierarchy *h;
2048
2049 h = get_hierarchy(ops, "freezer");
2050 if (!h)
2051 return false;
2052
2053 fullpath = must_make_path(h->container_full_path, "freezer.state", NULL);
2054 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
2055 free(fullpath);
2056 if (ret < 0)
2057 return false;
2058
2059 return true;
2060 }
2061
2062 __cgfsng_ops static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
2063 const char *controller)
2064 {
2065 struct hierarchy *h;
2066
2067 h = get_hierarchy(ops, controller);
2068 if (!h) {
2069 WARN("Failed to find hierarchy for controller \"%s\"",
2070 controller ? controller : "(null)");
2071 return NULL;
2072 }
2073
2074 return h->container_full_path ? h->container_full_path + strlen(h->mountpoint) : NULL;
2075 }
2076
2077 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2078 * which must be freed by the caller.
2079 */
2080 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
2081 const char *inpath,
2082 const char *filename)
2083 {
2084 return must_make_path(h->mountpoint, inpath, filename, NULL);
2085 }
2086
2087 /* Technically, we're always at a delegation boundary here (this is especially
2088 * true when cgroup namespaces are available). The reasoning is that in order
2089 * for us to have been able to start a container in the first place the root
2090 * cgroup must have been a leaf node. Now, either the container's init system
2091 * has populated the cgroup and kept it as a leaf node or it has created
2092 * subtrees. In the former case we will simply attach to the leaf node we
2093 * created when we started the container; in the latter case we create our own
2094 * cgroup for the attaching process.
2095 */
2096 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
2097 const char *lxcpath, const char *pidstr,
2098 size_t pidstr_len, const char *controller)
2099 {
2100 int ret;
2101 size_t len;
2102 int fret = -1, idx = 0;
2103 char *base_path = NULL, *container_cgroup = NULL, *full_path = NULL;
2104
2105 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2106 /* not running */
2107 if (!container_cgroup)
2108 return 0;
2109
2110 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
2111 full_path = must_make_path(base_path, "cgroup.procs", NULL);
2112 /* cgroup is populated */
2113 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2114 if (ret < 0 && errno != EBUSY)
2115 goto on_error;
2116
2117 if (ret == 0)
2118 goto on_success;
2119
2120 free(full_path);
2121
2122 len = strlen(base_path) + STRLITERALLEN("/lxc-1000") +
2123 STRLITERALLEN("/cgroup-procs");
2124 full_path = must_realloc(NULL, len + 1);
2125 do {
2126 if (idx)
2127 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
2128 base_path, idx);
2129 else
2130 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
2131 if (ret < 0 || (size_t)ret >= len + 1)
2132 goto on_error;
2133
2134 ret = mkdir_p(full_path, 0755);
2135 if (ret < 0 && errno != EEXIST)
2136 goto on_error;
2137
2138 (void)strlcat(full_path, "/cgroup.procs", len + 1);
2139 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2140 if (ret == 0)
2141 goto on_success;
2142
2143 /* this is a non-leaf node */
2144 if (errno != EBUSY)
2145 goto on_error;
2146
2147 idx++;
2148 } while (idx < 1000);
2149
2150 on_success:
2151 if (idx < 1000)
2152 fret = 0;
2153
2154 on_error:
2155 free(base_path);
2156 free(container_cgroup);
2157 free(full_path);
2158
2159 return fret;
2160 }
2161
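/* Attach @pid to the cgroup of the running container @name in every
 * hierarchy. Legacy hierarchies are written to directly; the unified
 * hierarchy goes through __cg_unified_attach(), which may need to create a
 * child cgroup first.
 */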
2162 __cgfsng_ops static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
2163 const char *lxcpath, pid_t pid)
2164 {
2165 int i, len, ret;
2166 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
2167
2168 if (!ops->hierarchies)
2169 return true;
2170
2171 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
2172 if (len < 0 || (size_t)len >= sizeof(pidstr))
2173 return false;
2174
2175 for (i = 0; ops->hierarchies[i]; i++) {
2176 char *path;
2177 char *fullpath = NULL;
2178 struct hierarchy *h = ops->hierarchies[i];
2179
2180 if (h->version == CGROUP2_SUPER_MAGIC) {
2181 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
2182 h->controllers[0]);
2183 if (ret < 0)
2184 return false;
2185
2186 continue;
2187 }
2188
2189 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
2190 /* not running */
2191 if (!path)
2192 continue;
2193
2194 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2195 free(path);
2196 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2197 if (ret < 0) {
2198 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
2199 free(fullpath);
2200 return false;
2201 }
2202 free(fullpath);
2203 }
2204
2205 return true;
2206 }
2207
2208 /* Called externally (i.e. from 'lxc-cgroup') to query cgroup limits. Here we
2209 * don't have a cgroup_data set up, so we ask the running container through the
2210 * commands API for the cgroup path.
2211 */
2212 __cgfsng_ops static int cgfsng_get(struct cgroup_ops *ops, const char *filename,
2213 char *value, size_t len, const char *name,
2214 const char *lxcpath)
2215 {
2216 __do_free char *controller;
2217 char *p, *path;
2218 struct hierarchy *h;
2219 int ret = -1;
2220
2221 controller = must_copy_string(filename);
2222 p = strchr(controller, '.');
2223 if (p)
2224 *p = '\0';
2225
2226 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2227 /* not running */
2228 if (!path)
2229 return -1;
2230
2231 h = get_hierarchy(ops, controller);
2232 if (h) {
2233 char *fullpath;
2234
2235 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2236 ret = lxc_read_from_file(fullpath, value, len);
2237 free(fullpath);
2238 }
2239 free(path);
2240
2241 return ret;
2242 }
2243
2244 /* Called externally (i.e. from 'lxc-cgroup') to set new cgroup limits. Here we
2245 * don't have a cgroup_data set up, so we ask the running container through the
2246 * commands API for the cgroup path.
2247 */
2248 __cgfsng_ops static int cgfsng_set(struct cgroup_ops *ops,
2249 const char *filename, const char *value,
2250 const char *name, const char *lxcpath)
2251 {
2252 __do_free char *controller;
2253 char *p, *path;
2254 struct hierarchy *h;
2255 int ret = -1;
2256
2257 controller = must_copy_string(filename);
2258 p = strchr(controller, '.');
2259 if (p)
2260 *p = '\0';
2261
2262 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2263 /* not running */
2264 if (!path)
2265 return -1;
2266
2267 h = get_hierarchy(ops, controller);
2268 if (h) {
2269 char *fullpath;
2270
2271 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2272 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2273 free(fullpath);
2274 }
2275 free(path);
2276
2277 return ret;
2278 }
2279
2280 /* Take a devices cgroup line of the form
2281 * /dev/foo rwx
2282 * and convert it to a valid
2283 * type major:minor mode
2284 * line. Return <0 on error. Dest is a preallocated buffer long enough to hold
2285 * the output.
2286 */
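/* Illustrative example (the major:minor numbers depend on the system):
 *   "/dev/null rwm"  ->  "c 1:3 rwm"
 */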
2287 static int convert_devpath(const char *invalue, char *dest)
2288 {
2289 int n_parts;
2290 char *p, *path, type;
2291 unsigned long minor, major;
2292 struct stat sb;
2293 int ret = -EINVAL;
2294 char *mode = NULL;
2295
2296 path = must_copy_string(invalue);
2297
2298 /* Read path followed by mode. Ignore any trailing text.
2299 * A ' # comment' would be legal. Technically other text is not
2300 * legal; we could check for that if we cared to.
2301 */
2302 for (n_parts = 1, p = path; *p && n_parts < 3; p++) {
2303 if (*p != ' ')
2304 continue;
2305 *p = '\0';
2306
2307 if (n_parts != 1)
2308 break;
2309 p++;
2310 n_parts++;
2311
2312 while (*p == ' ')
2313 p++;
2314
2315 mode = p;
2316
2317 if (*p == '\0')
2318 goto out;
2319 }
2320
2321 if (n_parts == 1)
2322 goto out;
2323
2324 ret = stat(path, &sb);
2325 if (ret < 0)
2326 goto out;
2327
2328 mode_t m = sb.st_mode & S_IFMT;
2329 switch (m) {
2330 case S_IFBLK:
2331 type = 'b';
2332 break;
2333 case S_IFCHR:
2334 type = 'c';
2335 break;
2336 default:
2337 ERROR("Unsupported device type %i for \"%s\"", m, path);
2338 ret = -EINVAL;
2339 goto out;
2340 }
2341
2342 major = MAJOR(sb.st_rdev);
2343 minor = MINOR(sb.st_rdev);
2344 ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
2345 if (ret < 0 || ret >= 50) {
2346 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2347 "chars)", type, major, minor, mode);
2348 ret = -ENAMETOOLONG;
2349 goto out;
2350 }
2351 ret = 0;
2352
2353 out:
2354 free(path);
2355 return ret;
2356 }
2357
2358 /* Called from setup_limits - here we have the container's cgroup_data because
2359 * we created the cgroups.
2360 */
2361 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2362 const char *value)
2363 {
2364 __do_free char *controller;
2365 char *fullpath, *p;
2366 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2367 char converted_value[50];
2368 struct hierarchy *h;
2369 int ret = 0;
2370
2371 controller = must_copy_string(filename);
2372 p = strchr(controller, '.');
2373 if (p)
2374 *p = '\0';
2375
2376 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2377 ret = convert_devpath(value, converted_value);
2378 if (ret < 0)
2379 return ret;
2380 value = converted_value;
2381 }
2382
2383 h = get_hierarchy(ops, controller);
2384 if (!h) {
2385 ERROR("Failed to setup limits for the \"%s\" controller. "
2386 "The controller seems to be unused by \"cgfsng\" cgroup "
2387 "driver or not enabled on the cgroup hierarchy",
2388 controller);
2389 errno = ENOENT;
2390 return -ENOENT;
2391 }
2392
2393 fullpath = must_make_path(h->container_full_path, filename, NULL);
2394 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2395 free(fullpath);
2396 return ret;
2397 }
2398
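/* Apply the given legacy cgroup settings. Entries whose controller is
 * "devices" are only applied when @do_devices is true and all other entries
 * only when it is false, so the caller can split the work into two passes.
 * EACCES/EPERM on device entries is downgraded to a warning.
 */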
2399 static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
2400 struct lxc_list *cgroup_settings,
2401 bool do_devices)
2402 {
2403 struct lxc_list *iterator, *next, *sorted_cgroup_settings;
2404 struct lxc_cgroup *cg;
2405 bool ret = false;
2406
2407 if (lxc_list_empty(cgroup_settings))
2408 return true;
2409
2410 if (!ops->hierarchies)
2411 return false;
2412
2413 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2414 if (!sorted_cgroup_settings)
2415 return false;
2416
2417 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2418 cg = iterator->elem;
2419
2420 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2421 if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
2422 if (do_devices && (errno == EACCES || errno == EPERM)) {
2423 WARN("Failed to set \"%s\" to \"%s\"",
2424 cg->subsystem, cg->value);
2425 continue;
2426 }
2427 WARN("Failed to set \"%s\" to \"%s\"",
2428 cg->subsystem, cg->value);
2429 goto out;
2430 }
2431 DEBUG("Set controller \"%s\" to \"%s\"",
2432 cg->subsystem, cg->value);
2433 }
2434 }
2435
2436 ret = true;
2437 INFO("Limits for the legacy cgroup hierarchies have been set up");
2438 out:
2439 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2440 lxc_list_del(iterator);
2441 free(iterator);
2442 }
2443 free(sorted_cgroup_settings);
2444 return ret;
2445 }
2446
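/* Apply the given cgroup2 settings by writing each value into the
 * corresponding file below the container's path on the unified hierarchy.
 * Returns false if no unified hierarchy is mounted or a write fails.
 */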
2447 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2448 struct lxc_list *cgroup_settings)
2449 {
2450 struct lxc_list *iterator;
2451 struct hierarchy *h = ops->unified;
2452
2453 if (lxc_list_empty(cgroup_settings))
2454 return true;
2455
2456 if (!h)
2457 return false;
2458
2459 lxc_list_for_each(iterator, cgroup_settings) {
2460 int ret;
2461 char *fullpath;
2462 struct lxc_cgroup *cg = iterator->elem;
2463
2464 fullpath = must_make_path(h->container_full_path, cg->subsystem, NULL);
2465 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2466 free(fullpath);
2467 if (ret < 0) {
2468 SYSERROR("Failed to set \"%s\" to \"%s\"",
2469 cg->subsystem, cg->value);
2470 return false;
2471 }
2472 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2473 }
2474
2475 INFO("Limits for the unified cgroup hierarchy have been set up");
2476 return true;
2477 }
2478
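/* Entry point for applying cgroup limits: first the legacy hierarchies
 * (lxc.cgroup.*), then the unified hierarchy (lxc.cgroup2.*).
 */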
2479 __cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
2480 struct lxc_conf *conf,
2481 bool do_devices)
2482 {
2483 bool bret;
2484
2485 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2486 if (!bret)
2487 return false;
2488
2489 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2490 }
2491
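/* Return true if every controller in @controllers is listed in
 * lxc.cgroup.use (ops->cgroup_use). When no lxc.cgroup.use is configured,
 * all controllers are accepted.
 */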
2492 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
2493 char **controllers)
2494 {
2495 char **cur_ctrl, **cur_use;
2496
2497 if (!ops->cgroup_use)
2498 return true;
2499
2500 for (cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
2501 bool found = false;
2502
2503 for (cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
2504 if (strcmp(*cur_use, *cur_ctrl) != 0)
2505 continue;
2506
2507 found = true;
2508 break;
2509 }
2510
2511 if (found)
2512 continue;
2513
2514 return false;
2515 }
2516
2517 return true;
2518 }
2519
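/* Build the list of cgroup2 files that must be chowned for delegation.
 * Prefer the kernel-provided list in /sys/kernel/cgroup/delegate and fall
 * back to a built-in default if that file cannot be read. "cgroup.procs"
 * is skipped here since it is always chowned anyway.
 */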
2520 static void cg_unified_delegate(char ***delegate)
2521 {
2522 char *tmp;
2523 int idx;
2524 char *standard[] = {"cgroup.subtree_control", "cgroup.threads", NULL};
2525
2526 tmp = read_file("/sys/kernel/cgroup/delegate");
2527 if (!tmp) {
2528 for (char **p = standard; p && *p; p++) {
2529 idx = append_null_to_list((void ***)delegate);
2530 (*delegate)[idx] = must_copy_string(*p);
2531 }
2532 } else {
2533 char *token;
2534 lxc_iterate_parts (token, tmp, " \t\n") {
2535 /*
2536 * We always need to chown this for both cgroup and
2537 * cgroup2.
2538 */
2539 if (strcmp(token, "cgroup.procs") == 0)
2540 continue;
2541
2542 idx = append_null_to_list((void ***)delegate);
2543 (*delegate)[idx] = must_copy_string(token);
2544 }
2545 free(tmp);
2546 }
2547 }
2548
2549 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
2550 * mountpoints and current cgroups, and stores it in @ops.
2551 */
2552 static bool cg_hybrid_init(struct cgroup_ops *ops, bool relative,
2553 bool unprivileged)
2554 {
2555 int ret;
2556 char *basecginfo;
2557 FILE *f;
2558 size_t len = 0;
2559 char *line = NULL;
2560 char **klist = NULL, **nlist = NULL;
2561
2562 /* Root spawned containers escape the current cgroup, so use init's
2563 * cgroups as our base in that case.
2564 */
2565 if (!relative && (geteuid() == 0))
2566 basecginfo = read_file("/proc/1/cgroup");
2567 else
2568 basecginfo = read_file("/proc/self/cgroup");
2569 if (!basecginfo)
2570 return false;
2571
2572 ret = get_existing_subsystems(&klist, &nlist);
2573 if (ret < 0) {
2574 ERROR("Failed to retrieve available legacy cgroup controllers");
2575 free(basecginfo);
2576 return false;
2577 }
2578
2579 f = fopen("/proc/self/mountinfo", "r");
2580 if (!f) {
2581 ERROR("Failed to open \"/proc/self/mountinfo\"");
2582 free(basecginfo);
2583 return false;
2584 }
2585
2586 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
2587
2588 while (getline(&line, &len, f) != -1) {
2589 int type;
2590 bool writeable;
2591 struct hierarchy *new;
2592 char *base_cgroup = NULL, *mountpoint = NULL;
2593 char **controller_list = NULL;
2594
2595 type = get_cgroup_version(line);
2596 if (type == 0)
2597 continue;
2598
2599 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
2600 continue;
2601
2602 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
2603 if (type == CGROUP2_SUPER_MAGIC)
2604 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2605 else if (type == CGROUP_SUPER_MAGIC)
2606 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
2607 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
2608 if (type == CGROUP_SUPER_MAGIC)
2609 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2610 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
2611 if (type == CGROUP2_SUPER_MAGIC)
2612 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2613 }
2614
2615 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
2616 if (!controller_list && type == CGROUP_SUPER_MAGIC)
2617 continue;
2618
2619 if (type == CGROUP_SUPER_MAGIC)
2620 if (controller_list_is_dup(ops->hierarchies, controller_list))
2621 goto next;
2622
2623 mountpoint = cg_hybrid_get_mountpoint(line);
2624 if (!mountpoint) {
2625 ERROR("Failed parsing mountpoint from \"%s\"", line);
2626 goto next;
2627 }
2628
2629 if (type == CGROUP_SUPER_MAGIC)
2630 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
2631 else
2632 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
2633 if (!base_cgroup) {
2634 ERROR("Failed to find current cgroup");
2635 goto next;
2636 }
2637
2638 trim(base_cgroup);
2639 prune_init_scope(base_cgroup);
2640 if (type == CGROUP2_SUPER_MAGIC)
2641 writeable = test_writeable_v2(mountpoint, base_cgroup);
2642 else
2643 writeable = test_writeable_v1(mountpoint, base_cgroup);
2644 if (!writeable)
2645 goto next;
2646
2647 if (type == CGROUP2_SUPER_MAGIC) {
2648 char *cgv2_ctrl_path;
2649
2650 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
2651 "cgroup.controllers",
2652 NULL);
2653
2654 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
2655 free(cgv2_ctrl_path);
2656 if (!controller_list) {
2657 controller_list = cg_unified_make_empty_controller();
2658 TRACE("No controllers are enabled for "
2659 "delegation in the unified hierarchy");
2660 }
2661 }
2662
2663 /* Exclude all controllers that cgroup use does not want. */
2664 if (!cgroup_use_wants_controllers(ops, controller_list))
2665 goto next;
2666
2667 new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
2668 if (type == CGROUP2_SUPER_MAGIC && !ops->unified) {
2669 if (unprivileged)
2670 cg_unified_delegate(&new->cgroup2_chown);
2671 ops->unified = new;
2672 }
2673
2674 continue;
2675
2676 next:
2677 free_string_list(controller_list);
2678 free(mountpoint);
2679 free(base_cgroup);
2680 }
2681
2682 free_string_list(klist);
2683 free_string_list(nlist);
2684
2685 free(basecginfo);
2686
2687 fclose(f);
2688 free(line);
2689
2690 TRACE("Writable cgroup hierarchies:");
2691 lxc_cgfsng_print_hierarchies(ops);
2692
2693 /* Verify that all controllers in lxc.cgroup.use and all crucial
2694 * controllers are accounted for.
2695 */
2696 if (!all_controllers_found(ops))
2697 return false;
2698
2699 return true;
2700 }
2701
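/* Return CGROUP2_SUPER_MAGIC if /sys/fs/cgroup is a cgroup2 mount (pure
 * unified layout), 0 otherwise, and -ENOMEDIUM if statfs() fails.
 */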
2702 static int cg_is_pure_unified(void)
2703 {
2704
2705 int ret;
2706 struct statfs fs;
2707
2708 ret = statfs("/sys/fs/cgroup", &fs);
2709 if (ret < 0)
2710 return -ENOMEDIUM;
2711
2712 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2713 return CGROUP2_SUPER_MAGIC;
2714
2715 return 0;
2716 }
2717
2718 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
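/* The cgroup2 entry has the form (illustrative path):
 *   0::/user.slice/user-1000.slice/session-1.scope
 * and everything after "0::" is returned.
 */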
2719 static char *cg_unified_get_current_cgroup(bool relative)
2720 {
2721 char *basecginfo, *base_cgroup;
2722 char *copy = NULL;
2723
2724 if (!relative && (geteuid() == 0))
2725 basecginfo = read_file("/proc/1/cgroup");
2726 else
2727 basecginfo = read_file("/proc/self/cgroup");
2728 if (!basecginfo)
2729 return NULL;
2730
2731 base_cgroup = strstr(basecginfo, "0::/");
2732 if (!base_cgroup)
2733 goto cleanup_on_err;
2734
2735 base_cgroup = base_cgroup + 3;
2736 copy = copy_to_eol(base_cgroup);
2737 if (!copy)
2738 goto cleanup_on_err;
2739
2740 cleanup_on_err:
2741 free(basecginfo);
2742 if (copy)
2743 trim(copy);
2744
2745 return copy;
2746 }
2747
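/* Set up the single hierarchy used on a pure unified (cgroup2-only) system.
 * Returns CGROUP2_SUPER_MAGIC on success, 0 if the layout is not purely
 * unified, and a negative errno on failure.
 */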
2748 static int cg_unified_init(struct cgroup_ops *ops, bool relative,
2749 bool unprivileged)
2750 {
2751 int ret;
2752 char *mountpoint, *subtree_path;
2753 char **delegatable;
2754 struct hierarchy *new;
2755 char *base_cgroup = NULL;
2756
2757 ret = cg_is_pure_unified();
2758 if (ret == -ENOMEDIUM)
2759 return -ENOMEDIUM;
2760
2761 if (ret != CGROUP2_SUPER_MAGIC)
2762 return 0;
2763
2764 base_cgroup = cg_unified_get_current_cgroup(relative);
2765 if (!base_cgroup)
2766 return -EINVAL;
2767 prune_init_scope(base_cgroup);
2768
2769 /* We assume that we have already been given controllers to delegate
2770 * further down the hierarchy. If not, it is up to the user to delegate
2771 * them to us.
2772 */
2773 mountpoint = must_copy_string("/sys/fs/cgroup");
2774 subtree_path = must_make_path(mountpoint, base_cgroup,
2775 "cgroup.subtree_control", NULL);
2776 delegatable = cg_unified_get_controllers(subtree_path);
2777 free(subtree_path);
2778 if (!delegatable)
2779 delegatable = cg_unified_make_empty_controller();
2780 if (!delegatable[0])
2781 TRACE("No controllers are enabled for delegation");
2782
2783 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2784 * we should verify that here. The reason I'm not doing it right now is
2785 * that I'm not convinced that lxc.cgroup.use will be the future since it
2786 * is a global property. I'd much rather have an option that lets you
2787 * request controllers per container.
2788 */
2789
2790 new = add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2791 if (unprivileged)
2792 cg_unified_delegate(&new->cgroup2_chown);
2793
2794 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2795 return CGROUP2_SUPER_MAGIC;
2796 }
2797
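/* Parse lxc.cgroup.use and initialize the cgroup layout: try the pure
 * unified layout first and fall back to the hybrid/legacy setup.
 */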
2798 static bool cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
2799 {
2800 int ret;
2801 const char *tmp;
2802 bool relative = conf->cgroup_meta.relative;
2803
2804 tmp = lxc_global_config_value("lxc.cgroup.use");
2805 if (tmp) {
2806 char *chop, *cur, *pin;
2807
2808 pin = must_copy_string(tmp);
2809 chop = pin;
2810
2811 lxc_iterate_parts(cur, chop, ",") {
2812 must_append_string(&ops->cgroup_use, cur);
2813 }
2814
2815 free(pin);
2816 }
2817
2818 ret = cg_unified_init(ops, relative, !lxc_list_empty(&conf->id_map));
2819 if (ret < 0)
2820 return false;
2821
2822 if (ret == CGROUP2_SUPER_MAGIC)
2823 return true;
2824
2825 return cg_hybrid_init(ops, relative, !lxc_list_empty(&conf->id_map));
2826 }
2827
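/* Copy the system-wide cgroup pattern (lxc.cgroup.pattern) into @ops and
 * record the monitor cgroup name pattern.
 */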
2828 __cgfsng_ops static bool cgfsng_data_init(struct cgroup_ops *ops)
2829 {
2830 const char *cgroup_pattern;
2831
2832 /* copy system-wide cgroup information */
2833 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2834 if (!cgroup_pattern) {
2835 /* lxc.cgroup.pattern is only NULL on error. */
2836 ERROR("Failed to retrieve cgroup pattern");
2837 return false;
2838 }
2839 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2840 ops->monitor_pattern = MONITOR_CGROUP;
2841
2842 return true;
2843 }
2844
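/* Allocate and initialize the "cgfsng" cgroup driver operations. Returns
 * NULL if allocation or cgroup layout detection fails.
 */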
2845 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
2846 {
2847 struct cgroup_ops *cgfsng_ops;
2848
2849 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2850 if (!cgfsng_ops)
2851 return NULL;
2852
2853 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2854 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2855
2856 if (!cg_init(cgfsng_ops, conf)) {
2857 free(cgfsng_ops);
2858 return NULL;
2859 }
2860
2861 cgfsng_ops->data_init = cgfsng_data_init;
2862 cgfsng_ops->payload_destroy = cgfsng_payload_destroy;
2863 cgfsng_ops->monitor_destroy = cgfsng_monitor_destroy;
2864 cgfsng_ops->monitor_create = cgfsng_monitor_create;
2865 cgfsng_ops->monitor_enter = cgfsng_monitor_enter;
2866 cgfsng_ops->payload_create = cgfsng_payload_create;
2867 cgfsng_ops->payload_enter = cgfsng_payload_enter;
2868 cgfsng_ops->escape = cgfsng_escape;
2869 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2870 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2871 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2872 cgfsng_ops->get = cgfsng_get;
2873 cgfsng_ops->set = cgfsng_set;
2874 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2875 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2876 cgfsng_ops->driver = "cgfsng";
2877 cgfsng_ops->version = "1.0.0";
2878 cgfsng_ops->attach = cgfsng_attach;
2879 cgfsng_ops->chown = cgfsng_chown;
2880 cgfsng_ops->mount = cgfsng_mount;
2881 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2882
2883 return cgfsng_ops;
2884 }